git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenlib')
 source/blender/blenlib/BLI_any.hh | 55
 source/blender/blenlib/BLI_array_store.h | 1
 source/blender/blenlib/BLI_bit_vector.hh | 529
 source/blender/blenlib/BLI_bitmap.h | 13
 source/blender/blenlib/BLI_color.hh | 10
 source/blender/blenlib/BLI_cpp_type.hh | 7
 source/blender/blenlib/BLI_float3x3.hh | 7
 source/blender/blenlib/BLI_float4x4.hh | 2
 source/blender/blenlib/BLI_function_ref.hh | 1
 source/blender/blenlib/BLI_generic_span.hh | 66
 source/blender/blenlib/BLI_generic_virtual_array.hh | 240
 source/blender/blenlib/BLI_index_mask_ops.hh | 14
 source/blender/blenlib/BLI_index_range.hh | 48
 source/blender/blenlib/BLI_kdopbvh.h | 23
 source/blender/blenlib/BLI_kdtree.h | 8
 source/blender/blenlib/BLI_kdtree_impl.h | 27
 source/blender/blenlib/BLI_length_parameterize.hh | 152
 source/blender/blenlib/BLI_listbase.h | 8
 source/blender/blenlib/BLI_listbase_wrapper.hh | 2
 source/blender/blenlib/BLI_map.hh | 4
 source/blender/blenlib/BLI_math_base.h | 13
 source/blender/blenlib/BLI_math_base.hh | 10
 source/blender/blenlib/BLI_math_color.h | 4
 source/blender/blenlib/BLI_math_geom.h | 4
 source/blender/blenlib/BLI_math_matrix.h | 270
 source/blender/blenlib/BLI_math_rotation.h | 101
 source/blender/blenlib/BLI_math_rotation.hh | 9
 source/blender/blenlib/BLI_math_vector.h | 92
 source/blender/blenlib/BLI_memory_utils.hh | 17
 source/blender/blenlib/BLI_multi_value_map.hh | 5
 source/blender/blenlib/BLI_pool.hh | 84
 source/blender/blenlib/BLI_scanfill.h | 2
 source/blender/blenlib/BLI_serialize.hh | 2
 source/blender/blenlib/BLI_set.hh | 4
 source/blender/blenlib/BLI_string_utf8.h | 16
 source/blender/blenlib/BLI_task.hh | 16
 source/blender/blenlib/BLI_vector.hh | 10
 source/blender/blenlib/BLI_vector_set.hh | 2
 source/blender/blenlib/BLI_virtual_array.hh | 325
 source/blender/blenlib/CMakeLists.txt | 10
 source/blender/blenlib/intern/BLI_index_range.cc | 30
 source/blender/blenlib/intern/BLI_kdopbvh.c | 2
 source/blender/blenlib/intern/BLI_memarena.c | 1
 source/blender/blenlib/intern/BLI_memblock.c | 1
 source/blender/blenlib/intern/bitmap.c | 20
 source/blender/blenlib/intern/boxpack_2d.c | 1
 source/blender/blenlib/intern/filereader_zstd.c | 5
 source/blender/blenlib/intern/generic_virtual_array.cc | 264
 source/blender/blenlib/intern/index_mask.cc | 52
 source/blender/blenlib/intern/kdtree_impl.h | 27
 source/blender/blenlib/intern/length_parameterize.cc | 150
 source/blender/blenlib/intern/math_base_inline.c | 18
 source/blender/blenlib/intern/math_geom.c | 6
 source/blender/blenlib/intern/math_matrix.c | 168
 source/blender/blenlib/intern/math_rotation.c | 150
 source/blender/blenlib/intern/math_rotation.cc | 13
 source/blender/blenlib/intern/math_vector_inline.c | 29
 source/blender/blenlib/intern/mesh_boolean.cc | 34
 source/blender/blenlib/intern/mesh_intersect.cc | 2
 source/blender/blenlib/intern/noise.c | 8
 source/blender/blenlib/intern/noise.cc | 69
 source/blender/blenlib/intern/path_util.c | 16
 source/blender/blenlib/intern/rct.c | 4
 source/blender/blenlib/intern/smallhash.c | 3
 source/blender/blenlib/intern/string_search.cc | 4
 source/blender/blenlib/intern/string_utf8.c | 60
 source/blender/blenlib/intern/system.c | 10
 source/blender/blenlib/intern/task_pool.cc | 8
 source/blender/blenlib/intern/timeit.cc | 18
 source/blender/blenlib/intern/winstuff.c | 33
 source/blender/blenlib/tests/BLI_array_store_test.cc | 10
 source/blender/blenlib/tests/BLI_bit_vector_test.cc | 186
 source/blender/blenlib/tests/BLI_bitmap_test.cc | 46
 source/blender/blenlib/tests/BLI_index_range_test.cc | 52
 source/blender/blenlib/tests/BLI_kdtree_test.cc | 63
 source/blender/blenlib/tests/BLI_length_parameterize_test.cc | 40
 source/blender/blenlib/tests/BLI_math_rotation_test.cc | 103
 source/blender/blenlib/tests/BLI_pool_test.cc | 51
 source/blender/blenlib/tests/BLI_set_test.cc | 10
 source/blender/blenlib/tests/BLI_string_search_test.cc | 2
 source/blender/blenlib/tests/BLI_vector_test.cc | 4
 source/blender/blenlib/tests/BLI_virtual_array_test.cc | 35
82 files changed, 2902 insertions, 1119 deletions
diff --git a/source/blender/blenlib/BLI_any.hh b/source/blender/blenlib/BLI_any.hh
index e80dad82d01..f9b53436763 100644
--- a/source/blender/blenlib/BLI_any.hh
+++ b/source/blender/blenlib/BLI_any.hh
@@ -13,6 +13,7 @@
*/
#include <algorithm>
+#include <cstring>
#include <utility>
#include "BLI_memory_utils.hh"
@@ -26,6 +27,7 @@ namespace detail {
* Additional type specific #ExtraInfo can be embedded here as well.
*/
template<typename ExtraInfo> struct AnyTypeInfo {
+ /* The pointers are allowed to be null, which means that the implementation is trivial. */
void (*copy_construct)(void *dst, const void *src);
void (*move_construct)(void *dst, void *src);
void (*destruct)(void *src);
@@ -37,11 +39,16 @@ template<typename ExtraInfo> struct AnyTypeInfo {
* Used when #T is stored directly in the inline buffer of the #Any.
*/
template<typename ExtraInfo, typename T>
-static constexpr AnyTypeInfo<ExtraInfo> info_for_inline = {
- [](void *dst, const void *src) { new (dst) T(*(const T *)src); },
- [](void *dst, void *src) { new (dst) T(std::move(*(T *)src)); },
- [](void *src) { std::destroy_at(((T *)src)); },
- [](const void *src) { return src; },
+inline constexpr AnyTypeInfo<ExtraInfo> info_for_inline = {
+ is_trivially_copy_constructible_extended_v<T> ?
+ nullptr :
+ +[](void *dst, const void *src) { new (dst) T(*(const T *)src); },
+ is_trivially_move_constructible_extended_v<T> ?
+ nullptr :
+ +[](void *dst, void *src) { new (dst) T(std::move(*(T *)src)); },
+ is_trivially_destructible_extended_v<T> ? nullptr :
+ +[](void *src) { std::destroy_at(((T *)src)); },
+ nullptr,
ExtraInfo::template get<T>()};
/**
@@ -50,7 +57,7 @@ static constexpr AnyTypeInfo<ExtraInfo> info_for_inline = {
*/
template<typename T> using Ptr = std::unique_ptr<T>;
template<typename ExtraInfo, typename T>
-static constexpr AnyTypeInfo<ExtraInfo> info_for_unique_ptr = {
+inline constexpr AnyTypeInfo<ExtraInfo> info_for_unique_ptr = {
[](void *dst, const void *src) { new (dst) Ptr<T>(new T(**(const Ptr<T> *)src)); },
[](void *dst, void *src) { new (dst) Ptr<T>(new T(std::move(**(Ptr<T> *)src))); },
[](void *src) { std::destroy_at((Ptr<T> *)src); },
@@ -92,12 +99,14 @@ class Any {
using RealExtraInfo =
std::conditional_t<std::is_void_v<ExtraInfo>, detail::NoExtraInfo, ExtraInfo>;
using Info = detail::AnyTypeInfo<RealExtraInfo>;
+ static constexpr size_t RealInlineBufferCapacity = std::max(InlineBufferCapacity,
+ sizeof(std::unique_ptr<int>));
/**
* Inline buffer that either contains nothing, the stored value directly, or a #std::unique_ptr
* to the value.
*/
- AlignedBuffer<std::max(InlineBufferCapacity, sizeof(std::unique_ptr<int>)), Alignment> buffer_{};
+ AlignedBuffer<RealInlineBufferCapacity, Alignment> buffer_{};
/**
* Information about the type that is currently stored.
@@ -144,7 +153,12 @@ class Any {
Any(const Any &other) : info_(other.info_)
{
if (info_ != nullptr) {
- info_->copy_construct(&buffer_, &other.buffer_);
+ if (info_->copy_construct != nullptr) {
+ info_->copy_construct(&buffer_, &other.buffer_);
+ }
+ else {
+ memcpy(&buffer_, &other.buffer_, RealInlineBufferCapacity);
+ }
}
}
@@ -155,7 +169,12 @@ class Any {
Any(Any &&other) noexcept : info_(other.info_)
{
if (info_ != nullptr) {
- info_->move_construct(&buffer_, &other.buffer_);
+ if (info_->move_construct != nullptr) {
+ info_->move_construct(&buffer_, &other.buffer_);
+ }
+ else {
+ memcpy(&buffer_, &other.buffer_, RealInlineBufferCapacity);
+ }
}
}
@@ -179,7 +198,9 @@ class Any {
~Any()
{
if (info_ != nullptr) {
- info_->destruct(&buffer_);
+ if (info_->destruct != nullptr) {
+ info_->destruct(&buffer_);
+ }
}
}
@@ -213,7 +234,9 @@ class Any {
void reset()
{
if (info_ != nullptr) {
- info_->destruct(&buffer_);
+ if (info_->destruct != nullptr) {
+ info_->destruct(&buffer_);
+ }
}
info_ = nullptr;
}
@@ -265,14 +288,20 @@ class Any {
void *get()
{
BLI_assert(info_ != nullptr);
- return const_cast<void *>(info_->get(&buffer_));
+ if (info_->get != nullptr) {
+ return const_cast<void *>(info_->get(&buffer_));
+ }
+ return &buffer_;
}
/** Get a pointer to the stored value. */
const void *get() const
{
BLI_assert(info_ != nullptr);
- return info_->get(&buffer_);
+ if (info_->get != nullptr) {
+ return info_->get(&buffer_);
+ }
+ return &buffer_;
}
/**
diff --git a/source/blender/blenlib/BLI_array_store.h b/source/blender/blenlib/BLI_array_store.h
index 8a91825da6f..c04c392627d 100644
--- a/source/blender/blenlib/BLI_array_store.h
+++ b/source/blender/blenlib/BLI_array_store.h
@@ -57,7 +57,6 @@ size_t BLI_array_store_calc_size_expanded_get(const BArrayStore *bs);
size_t BLI_array_store_calc_size_compacted_get(const BArrayStore *bs);
/**
- *
* \param data: Data used to create
* \param state_reference: The state to use as a reference when adding the new state,
* typically this is the previous state,
diff --git a/source/blender/blenlib/BLI_bit_vector.hh b/source/blender/blenlib/BLI_bit_vector.hh
new file mode 100644
index 00000000000..3cbd2483a31
--- /dev/null
+++ b/source/blender/blenlib/BLI_bit_vector.hh
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#pragma once
+
+/** \file
+ * \ingroup bli
+ *
+ * A `blender::BitVector` is a dynamically growing contiguous array of bits. Its main purpose is
+ * to provide a compact way to map indices to bools. It requires 8 times less memory compared to a
+ * `blender::Vector<bool>`.
+ *
+ * Advantages of using a bit- instead of byte-vector are:
+ * - Uses less memory.
+ * - Allows checking the state of many elements at the same time (8 times more bits than bytes fit
+ * into a CPU register). This can improve performance.
+ *
+ * The compact nature of storing bools in individual bits has some downsides that have to be kept
+ * in mind:
+ * - Writing to separate bits in the same byte is not thread-safe. Therefore, an existing vector of
+ * bool can't easily be replaced with a bit vector, if it is written to from multiple threads.
+ * Read-only access from multiple threads is fine though.
+ * - Writing individual elements is more expensive when the array is in cache already. That is
+ * because changing a bit is always a read-modify-write operation on the byte the bit resides in.
+ * - Reading individual elements is more expensive when the array is in cache already. That is
+ * because additional bit-wise operations have to be applied after the corresponding byte is
+ * read.
+ *
+ * Comparison to `std::vector<bool>`:
+ * - `blender::BitVector` has an interface that is more optimized for dealing with bits.
+ * - `blender::BitVector` has an inline buffer that is used to avoid allocations when the vector is
+ * small.
+ *
+ * Comparison to `BLI_bitmap`:
+ * - `blender::BitVector` offers a more C++ friendly interface.
+ * - `BLI_bitmap` should only be used in C code that can not use `blender::BitVector`.
+ */
+
+#include <cstring>
+
+#include "BLI_allocator.hh"
+#include "BLI_index_range.hh"
+#include "BLI_memory_utils.hh"
+#include "BLI_span.hh"
+
+namespace blender {
+
+/**
+ * This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but not
+ * changed.
+ */
+class BitRef {
+ private:
+ /** Points to the exact byte that the bit is in. */
+ const uint8_t *byte_ptr_;
+ /** All zeros except for a single one at the bit that is referenced. */
+ uint8_t mask_;
+
+ friend class MutableBitRef;
+
+ public:
+ BitRef() = default;
+
+ /**
+ * Reference a specific bit in a byte array. Note that #byte_ptr does *not* have to point to the
+ * exact byte the bit is in.
+ */
+ BitRef(const uint8_t *byte_ptr, const int64_t bit_index)
+ {
+ byte_ptr_ = byte_ptr + (bit_index >> 3);
+ mask_ = 1 << (bit_index & 7);
+ }
+
+ /**
+ * Return true when the bit is currently 1 and false otherwise.
+ */
+ bool test() const
+ {
+ const uint8_t byte = *byte_ptr_;
+ const uint8_t masked_byte = byte & mask_;
+ return masked_byte != 0;
+ }
+
+ operator bool() const
+ {
+ return this->test();
+ }
+};
+
+/**
+ * Similar to #BitRef, but also allows changing the referenced bit.
+ */
+class MutableBitRef {
+ private:
+ /** Points to the exact byte that the bit is in. */
+ uint8_t *byte_ptr_;
+ /** All zeros except for a single one at the bit that is referenced. */
+ uint8_t mask_;
+
+ public:
+ MutableBitRef() = default;
+
+ /**
+ * Reference a specific bit in a byte array. Note that #byte_ptr does *not* have to point to the
+ * exact byte the bit is in.
+ */
+ MutableBitRef(uint8_t *byte_ptr, const int64_t bit_index)
+ {
+ byte_ptr_ = byte_ptr + (bit_index >> 3);
+ mask_ = 1 << static_cast<uint8_t>(bit_index & 7);
+ }
+
+ /**
+ * Support implicitly casting to a read-only #BitRef.
+ */
+ operator BitRef() const
+ {
+ BitRef bit_ref;
+ bit_ref.byte_ptr_ = byte_ptr_;
+ bit_ref.mask_ = mask_;
+ return bit_ref;
+ }
+
+ /**
+ * Return true when the bit is currently 1 and false otherwise.
+ */
+ bool test() const
+ {
+ const uint8_t byte = *byte_ptr_;
+ const uint8_t masked_byte = byte & mask_;
+ return masked_byte != 0;
+ }
+
+ operator bool() const
+ {
+ return this->test();
+ }
+
+ /**
+ * Change the bit to a 1.
+ */
+ void set()
+ {
+ *byte_ptr_ |= mask_;
+ }
+
+ /**
+ * Change the bit to a 0.
+ */
+ void reset()
+ {
+ *byte_ptr_ &= ~mask_;
+ }
+
+ /**
+ * Change the bit to a 1 if #value is true and 0 otherwise.
+ */
+ void set(const bool value)
+ {
+ if (value) {
+ this->set();
+ }
+ else {
+ this->reset();
+ }
+ }
+};
+
+template<
+ /**
+ * Number of bits that can be stored in the vector without doing an allocation.
+ */
+ int64_t InlineBufferCapacity = 32,
+ /**
+ * The allocator used by this vector. Should rarely be changed, except when you don't want that
+ * MEM_* is used internally.
+ */
+ typename Allocator = GuardedAllocator>
+class BitVector {
+ private:
+ static constexpr int64_t required_bytes_for_bits(const int64_t number_of_bits)
+ {
+ return (number_of_bits + BitsPerByte - 1) / BitsPerByte;
+ }
+
+ static constexpr int64_t BitsPerByte = 8;
+ static constexpr int64_t BytesInInlineBuffer = required_bytes_for_bits(InlineBufferCapacity);
+ static constexpr int64_t BitsInInlineBuffer = BytesInInlineBuffer * BitsPerByte;
+ static constexpr int64_t AllocationAlignment = 8;
+
+ /**
+ * Points to the first byte used by the vector. It might point to the memory in the inline
+ * buffer.
+ */
+ uint8_t *data_;
+
+ /** Current size of the vector in bits. */
+ int64_t size_in_bits_;
+
+ /** Number of bits that fit into the vector until a reallocation has to occur. */
+ int64_t capacity_in_bits_;
+
+ /** Used for allocations when the inline buffer is too small. */
+ Allocator allocator_;
+
+ /** Contains the bits as long as the vector is small enough. */
+ TypedBuffer<uint8_t, BytesInInlineBuffer> inline_buffer_;
+
+ public:
+ BitVector(Allocator allocator = {}) noexcept : allocator_(allocator)
+ {
+ data_ = inline_buffer_;
+ size_in_bits_ = 0;
+ capacity_in_bits_ = BitsInInlineBuffer;
+ uninitialized_fill_n(data_, BytesInInlineBuffer, static_cast<uint8_t>(0));
+ }
+
+ BitVector(NoExceptConstructor, Allocator allocator = {}) noexcept : BitVector(allocator)
+ {
+ }
+
+ BitVector(const BitVector &other) : BitVector(NoExceptConstructor(), other.allocator_)
+ {
+ const int64_t bytes_to_copy = other.used_bytes_amount();
+ if (other.size_in_bits_ <= BitsInInlineBuffer) {
+ /* The data is copied into the owned inline buffer. */
+ data_ = inline_buffer_;
+ capacity_in_bits_ = BitsInInlineBuffer;
+ }
+ else {
+ /* Allocate a new byte array because the inline buffer is too small. */
+ data_ = static_cast<uint8_t *>(
+ allocator_.allocate(bytes_to_copy, AllocationAlignment, __func__));
+ capacity_in_bits_ = bytes_to_copy * BitsPerByte;
+ }
+ size_in_bits_ = other.size_in_bits_;
+ uninitialized_copy_n(other.data_, bytes_to_copy, data_);
+ }
+
+ BitVector(BitVector &&other) noexcept : BitVector(NoExceptConstructor(), other.allocator_)
+ {
+ if (other.is_inline()) {
+ /* Copy the data into the inline buffer. */
+ const int64_t bytes_to_copy = other.used_bytes_amount();
+ data_ = inline_buffer_;
+ uninitialized_copy_n(other.data_, bytes_to_copy, data_);
+ }
+ else {
+ /* Steal the pointer. */
+ data_ = other.data_;
+ }
+ size_in_bits_ = other.size_in_bits_;
+ capacity_in_bits_ = other.capacity_in_bits_;
+
+ /* Clear the other vector because it has been moved from. */
+ other.data_ = other.inline_buffer_;
+ other.size_in_bits_ = 0;
+ other.capacity_in_bits_ = BitsInInlineBuffer;
+ }
+
+ /**
+ * Create a new vector with the given size and fill it with #value.
+ */
+ BitVector(const int64_t size_in_bits, const bool value = false, Allocator allocator = {})
+ : BitVector(NoExceptConstructor(), allocator)
+ {
+ this->resize(size_in_bits, value);
+ }
+
+ /**
+ * Create a bit vector based on an array of bools. Each byte of the input array maps to one bit.
+ */
+ explicit BitVector(const Span<bool> values, Allocator allocator = {})
+ : BitVector(NoExceptConstructor(), allocator)
+ {
+ this->resize(values.size());
+ for (const int64_t i : this->index_range()) {
+ (*this)[i].set(values[i]);
+ }
+ }
+
+ ~BitVector()
+ {
+ if (!this->is_inline()) {
+ allocator_.deallocate(data_);
+ }
+ }
+
+ BitVector &operator=(const BitVector &other)
+ {
+ return copy_assign_container(*this, other);
+ }
+
+ BitVector &operator=(BitVector &&other)
+ {
+ return move_assign_container(*this, std::move(other));
+ }
+
+ /**
+ * Number of bits in the bit vector.
+ */
+ int64_t size() const
+ {
+ return size_in_bits_;
+ }
+
+ /**
+ * Get a read-only reference to a specific bit.
+ */
+ BitRef operator[](const int64_t index) const
+ {
+ BLI_assert(index >= 0);
+ BLI_assert(index < size_in_bits_);
+ return {data_, index};
+ }
+
+ /**
+ * Get a mutable reference to a specific bit.
+ */
+ MutableBitRef operator[](const int64_t index)
+ {
+ BLI_assert(index >= 0);
+ BLI_assert(index < size_in_bits_);
+ return {data_, index};
+ }
+
+ IndexRange index_range() const
+ {
+ return {0, size_in_bits_};
+ }
+
+ /**
+ * Add a new bit to the end of the vector.
+ */
+ void append(const bool value)
+ {
+ this->ensure_space_for_one();
+ MutableBitRef bit{data_, size_in_bits_};
+ bit.set(value);
+ size_in_bits_++;
+ }
+
+ class Iterator {
+ private:
+ const BitVector *vector_;
+ int64_t index_;
+
+ public:
+ Iterator(const BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
+ {
+ }
+
+ Iterator &operator++()
+ {
+ index_++;
+ return *this;
+ }
+
+ friend bool operator!=(const Iterator &a, const Iterator &b)
+ {
+ BLI_assert(a.vector_ == b.vector_);
+ return a.index_ != b.index_;
+ }
+
+ BitRef operator*() const
+ {
+ return (*vector_)[index_];
+ }
+ };
+
+ class MutableIterator {
+ private:
+ BitVector *vector_;
+ int64_t index_;
+
+ public:
+ MutableIterator(BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
+ {
+ }
+
+ MutableIterator &operator++()
+ {
+ index_++;
+ return *this;
+ }
+
+ friend bool operator!=(const MutableIterator &a, const MutableIterator &b)
+ {
+ BLI_assert(a.vector_ == b.vector_);
+ return a.index_ != b.index_;
+ }
+
+ MutableBitRef operator*() const
+ {
+ return (*vector_)[index_];
+ }
+ };
+
+ Iterator begin() const
+ {
+ return {*this, 0};
+ }
+
+ Iterator end() const
+ {
+ return {*this, size_in_bits_};
+ }
+
+ MutableIterator begin()
+ {
+ return {*this, 0};
+ }
+
+ MutableIterator end()
+ {
+ return {*this, size_in_bits_};
+ }
+
+ /**
+ * Change the size of the vector. If the new vector is larger than the old one, the new elements
+ * are filled with #value.
+ */
+ void resize(const int64_t new_size_in_bits, const bool value = false)
+ {
+ BLI_assert(new_size_in_bits >= 0);
+ const int64_t old_size_in_bits = size_in_bits_;
+ if (new_size_in_bits > old_size_in_bits) {
+ this->reserve(new_size_in_bits);
+ }
+ size_in_bits_ = new_size_in_bits;
+ if (old_size_in_bits < new_size_in_bits) {
+ this->fill_range(IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits), value);
+ }
+ }
+
+ /**
+ * Set #value for every element in #range.
+ */
+ void fill_range(const IndexRange range, const bool value)
+ {
+ const AlignedIndexRanges aligned_ranges = split_index_range_by_alignment(range, BitsPerByte);
+
+ /* Fill first few bits. */
+ for (const int64_t i : aligned_ranges.prefix) {
+ (*this)[i].set(value);
+ }
+
+ /* Fill entire bytes at once. */
+ const int64_t start_fill_byte_index = aligned_ranges.aligned.start() / BitsPerByte;
+ const int64_t bytes_to_fill = aligned_ranges.aligned.size() / BitsPerByte;
+ const uint8_t fill_value = value ? (uint8_t)0xff : (uint8_t)0x00;
+ initialized_fill_n(data_ + start_fill_byte_index, bytes_to_fill, fill_value);
+
+ /* Fill bits in the end that don't cover a full byte. */
+ for (const int64_t i : aligned_ranges.suffix) {
+ (*this)[i].set(value);
+ }
+ }
+
+ /**
+ * Set #value on every element.
+ */
+ void fill(const bool value)
+ {
+ this->fill_range(IndexRange(0, size_in_bits_), value);
+ }
+
+ /**
+ * Make sure that the capacity of the vector is large enough to hold the given amount of bits.
+ * The actual size is not changed.
+ */
+ void reserve(const int new_capacity_in_bits)
+ {
+ this->realloc_to_at_least(new_capacity_in_bits);
+ }
+
+ private:
+ void ensure_space_for_one()
+ {
+ if (UNLIKELY(size_in_bits_ >= capacity_in_bits_)) {
+ this->realloc_to_at_least(size_in_bits_ + 1);
+ }
+ }
+
+ BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity_in_bits,
+ const uint8_t initial_value_for_new_bytes = 0x00)
+ {
+ if (capacity_in_bits_ >= min_capacity_in_bits) {
+ return;
+ }
+
+ const int64_t min_capacity_in_bytes = this->required_bytes_for_bits(min_capacity_in_bits);
+
+ /* At least double the size of the previous allocation. */
+ const int64_t min_new_capacity_in_bytes = capacity_in_bits_ * 2;
+
+ const int64_t new_capacity_in_bytes = std::max(min_capacity_in_bytes,
+ min_new_capacity_in_bytes);
+ const int64_t bytes_to_copy = this->used_bytes_amount();
+
+ uint8_t *new_data = static_cast<uint8_t *>(
+ allocator_.allocate(new_capacity_in_bytes, AllocationAlignment, __func__));
+ uninitialized_copy_n(data_, bytes_to_copy, new_data);
+ /* Always initialize new capacity even if it isn't used yet. That's necessary to avoid warnings
+ * caused by using uninitialized memory. This happens when e.g. setting or clearing a bit in an
+ * uninitialized byte. */
+ uninitialized_fill_n(new_data + bytes_to_copy,
+ new_capacity_in_bytes - bytes_to_copy,
+ (uint8_t)initial_value_for_new_bytes);
+
+ if (!this->is_inline()) {
+ allocator_.deallocate(data_);
+ }
+
+ data_ = new_data;
+ capacity_in_bits_ = new_capacity_in_bytes * BitsPerByte;
+ }
+
+ bool is_inline() const
+ {
+ return data_ == inline_buffer_;
+ }
+
+ int64_t used_bytes_amount() const
+ {
+ return this->required_bytes_for_bits(size_in_bits_);
+ }
+};
+
+} // namespace blender
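
A minimal usage sketch of the new BitVector, using only the interface introduced above (append, resize, operator[], index_range); the wrapper function itself is hypothetical:

  #include "BLI_bit_vector.hh"

  static int64_t count_set_bits_example()
  {
    using namespace blender;

    BitVector<> bits;       /* Starts in the inline buffer, no allocation yet. */
    bits.append(true);
    bits.append(false);
    bits.resize(64, true);  /* Grow and fill the new bits with 1. */
    bits[1].set();          /* MutableBitRef flips a single bit in place. */

    int64_t count = 0;
    for (const int64_t i : bits.index_range()) {
      count += bits[i] ? 1 : 0; /* BitRef converts to bool when read. */
    }
    return count;
  }
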
diff --git a/source/blender/blenlib/BLI_bitmap.h b/source/blender/blenlib/BLI_bitmap.h
index 19d8525311c..973cc5c3d1e 100644
--- a/source/blender/blenlib/BLI_bitmap.h
+++ b/source/blender/blenlib/BLI_bitmap.h
@@ -27,7 +27,7 @@ typedef unsigned int BLI_bitmap;
/**
* Number of blocks needed to hold '_num' bits.
*/
-#define _BITMAP_NUM_BLOCKS(_num) (((_num) >> _BITMAP_POWER) + 1)
+#define _BITMAP_NUM_BLOCKS(_num) (((_num) + _BITMAP_MASK) >> _BITMAP_POWER)
/**
* Size (in bytes) used to hold '_num' bits.
@@ -54,6 +54,11 @@ typedef unsigned int BLI_bitmap;
((BLI_bitmap *)BLI_memarena_calloc(_mem, BLI_BITMAP_SIZE(_num))))
/**
+ * Declares a bitmap as a variable.
+ */
+#define BLI_BITMAP_DECLARE(_name, _num) BLI_bitmap _name[_BITMAP_NUM_BLOCKS(_num)] = {}
+
+/**
* Get the value of a single bit at '_index'.
*/
#define BLI_BITMAP_TEST(_bitmap, _index) \
@@ -137,6 +142,12 @@ void BLI_bitmap_and_all(BLI_bitmap *dst, const BLI_bitmap *src, size_t bits);
*/
void BLI_bitmap_or_all(BLI_bitmap *dst, const BLI_bitmap *src, size_t bits);
+/**
+ * Find index of the lowest unset bit.
+ * Returns -1 if all the bits are set.
+ */
+int BLI_bitmap_find_first_unset(const BLI_bitmap *bitmap, size_t bits);
+
#ifdef __cplusplus
}
#endif
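
A small sketch of the bitmap additions above; BLI_BITMAP_ENABLE is the pre-existing setter macro from this header (assumed here), and the slot counts are arbitrary:

  #include "BLI_bitmap.h"

  static int first_free_slot_example(void)
  {
    /* A stack-allocated, zero-initialized bitmap for 100 slots. */
    BLI_BITMAP_DECLARE(used_slots, 100);

    BLI_BITMAP_ENABLE(used_slots, 0);
    BLI_BITMAP_ENABLE(used_slots, 1);
    BLI_BITMAP_ENABLE(used_slots, 2);

    /* Returns 3 here; -1 would mean every tracked bit is already set. */
    return BLI_bitmap_find_first_unset(used_slots, 100);
  }
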
diff --git a/source/blender/blenlib/BLI_color.hh b/source/blender/blenlib/BLI_color.hh
index 98fd7d0f15d..b1b9aeb17f1 100644
--- a/source/blender/blenlib/BLI_color.hh
+++ b/source/blender/blenlib/BLI_color.hh
@@ -79,7 +79,7 @@ enum class eSpace {
};
std::ostream &operator<<(std::ostream &stream, const eSpace &space);
-/** Template class to store RGBA values with different precision, space and alpha association. */
+/** Template class to store RGBA values with different precision, space, and alpha association. */
template<typename ChannelStorageType, eSpace Space, eAlpha Alpha> class ColorRGBA {
public:
ChannelStorageType r, g, b, a;
@@ -167,7 +167,7 @@ class ColorSceneLinear4f final : public ColorRGBA<float, eSpace::SceneLinear, Al
}
/**
- * Convert to its byte encoded counter space.
+ * Convert to its byte encoded counterpart.
*/
ColorSceneLinearByteEncoded4b<Alpha> encode() const
{
@@ -179,7 +179,7 @@ class ColorSceneLinear4f final : public ColorRGBA<float, eSpace::SceneLinear, Al
/**
* Convert color and alpha association to premultiplied alpha.
*
- * Does nothing when color has already a premultiplied alpha.
+ * Does nothing when color already has a premultiplied alpha.
*/
ColorSceneLinear4f<eAlpha::Premultiplied> premultiply_alpha() const
{
@@ -196,7 +196,7 @@ class ColorSceneLinear4f final : public ColorRGBA<float, eSpace::SceneLinear, Al
/**
* Convert color and alpha association to straight alpha.
*
- * Does nothing when color has straighten alpha.
+ * Does nothing when color has straight alpha.
*/
ColorSceneLinear4f<eAlpha::Straight> unpremultiply_alpha() const
{
@@ -228,7 +228,7 @@ class ColorSceneLinearByteEncoded4b final
}
/**
- * Convert to back to float color.
+ * Convert to a float color.
*/
ColorSceneLinear4f<Alpha> decode() const
{
diff --git a/source/blender/blenlib/BLI_cpp_type.hh b/source/blender/blenlib/BLI_cpp_type.hh
index ed680214602..8cf5ead1c7b 100644
--- a/source/blender/blenlib/BLI_cpp_type.hh
+++ b/source/blender/blenlib/BLI_cpp_type.hh
@@ -20,7 +20,7 @@
* cost of longer compile time, a larger binary and the complexity that comes from using
* templates).
* - If the code is not performance sensitive, it usually makes sense to use #CPPType instead.
- * - Sometimes a combination can make sense. Optimized code can be be generated at compile-time for
+ * - Sometimes a combination can make sense. Optimized code can be generated at compile-time for
* some types, while there is a fallback code path using #CPPType for all other types.
* #CPPType::to_static_type allows dispatching between both versions based on the type.
*
@@ -626,6 +626,11 @@ class CPPType : NonCopyable, NonMovable {
fill_construct_indices_(value, dst, mask);
}
+ bool can_exist_in_buffer(const int64_t buffer_size, const int64_t buffer_alignment) const
+ {
+ return size_ <= buffer_size && alignment_ <= buffer_alignment;
+ }
+
void print(const void *value, std::stringstream &ss) const
{
BLI_assert(this->pointer_can_point_to_instance(value));
diff --git a/source/blender/blenlib/BLI_float3x3.hh b/source/blender/blenlib/BLI_float3x3.hh
index 62478556d9b..6a9e7dd04f0 100644
--- a/source/blender/blenlib/BLI_float3x3.hh
+++ b/source/blender/blenlib/BLI_float3x3.hh
@@ -152,6 +152,13 @@ struct float3x3 {
return result;
}
+ friend float3 operator*(const float3x3 &a, const float3 &b)
+ {
+ float3 result;
+ mul_v3_m3v3(result, a.values, b);
+ return result;
+ }
+
void operator*=(const float3x3 &other)
{
mul_m3_m3_post(values, other.values);
diff --git a/source/blender/blenlib/BLI_float4x4.hh b/source/blender/blenlib/BLI_float4x4.hh
index 64e6e68432f..ca0d9ea0028 100644
--- a/source/blender/blenlib/BLI_float4x4.hh
+++ b/source/blender/blenlib/BLI_float4x4.hh
@@ -266,7 +266,7 @@ struct float4x4 {
for (int j = 0; j < 4; j++) {
snprintf(fchar, sizeof(fchar), "%11.6f", mat[j][i]);
stream << fchar;
- if (i != 3) {
+ if (j != 3) {
stream << ", ";
}
}
diff --git a/source/blender/blenlib/BLI_function_ref.hh b/source/blender/blenlib/BLI_function_ref.hh
index 5f18e994991..9a38176c988 100644
--- a/source/blender/blenlib/BLI_function_ref.hh
+++ b/source/blender/blenlib/BLI_function_ref.hh
@@ -63,7 +63,6 @@
*
* void some_function(FunctionRef<int()> f);
* some_function([]() { return 0; });
- *
*/
#include "BLI_memory_utils.hh"
diff --git a/source/blender/blenlib/BLI_generic_span.hh b/source/blender/blenlib/BLI_generic_span.hh
index 4c0bfc83ba8..143ab235d2e 100644
--- a/source/blender/blenlib/BLI_generic_span.hh
+++ b/source/blender/blenlib/BLI_generic_span.hh
@@ -16,20 +16,31 @@ namespace blender {
*/
class GSpan {
protected:
- const CPPType *type_;
- const void *data_;
- int64_t size_;
+ const CPPType *type_ = nullptr;
+ const void *data_ = nullptr;
+ int64_t size_ = 0;
public:
- GSpan(const CPPType &type, const void *buffer, int64_t size)
- : type_(&type), data_(buffer), size_(size)
+ GSpan() = default;
+
+ GSpan(const CPPType *type, const void *buffer, int64_t size)
+ : type_(type), data_(buffer), size_(size)
{
BLI_assert(size >= 0);
BLI_assert(buffer != nullptr || size == 0);
- BLI_assert(type.pointer_has_valid_alignment(buffer));
+ BLI_assert(size == 0 || type != nullptr);
+ BLI_assert(type == nullptr || type->pointer_has_valid_alignment(buffer));
+ }
+
+ GSpan(const CPPType &type, const void *buffer, int64_t size) : GSpan(&type, buffer, size)
+ {
+ }
+
+ GSpan(const CPPType &type) : type_(&type)
+ {
}
- GSpan(const CPPType &type) : GSpan(type, nullptr, 0)
+ GSpan(const CPPType *type) : type_(type)
{
}
@@ -41,9 +52,15 @@ class GSpan {
const CPPType &type() const
{
+ BLI_assert(type_ != nullptr);
return *type_;
}
+ const CPPType *type_ptr() const
+ {
+ return type_;
+ }
+
bool is_empty() const
{
return size_ == 0;
@@ -76,7 +93,7 @@ class GSpan {
BLI_assert(start >= 0);
BLI_assert(size >= 0);
const int64_t new_size = std::max<int64_t>(0, std::min(size, size_ - start));
- return GSpan(*type_, POINTER_OFFSET(data_, type_->size() * start), new_size);
+ return GSpan(type_, POINTER_OFFSET(data_, type_->size() * start), new_size);
}
GSpan slice(const IndexRange range) const
@@ -91,20 +108,31 @@ class GSpan {
*/
class GMutableSpan {
protected:
- const CPPType *type_;
- void *data_;
- int64_t size_;
+ const CPPType *type_ = nullptr;
+ void *data_ = nullptr;
+ int64_t size_ = 0;
public:
- GMutableSpan(const CPPType &type, void *buffer, int64_t size)
- : type_(&type), data_(buffer), size_(size)
+ GMutableSpan() = default;
+
+ GMutableSpan(const CPPType *type, void *buffer, int64_t size)
+ : type_(type), data_(buffer), size_(size)
{
BLI_assert(size >= 0);
BLI_assert(buffer != nullptr || size == 0);
- BLI_assert(type.pointer_has_valid_alignment(buffer));
+ BLI_assert(size == 0 || type != nullptr);
+ BLI_assert(type == nullptr || type->pointer_has_valid_alignment(buffer));
+ }
+
+ GMutableSpan(const CPPType &type, void *buffer, int64_t size) : GMutableSpan(&type, buffer, size)
+ {
+ }
+
+ GMutableSpan(const CPPType &type) : type_(&type)
+ {
}
- GMutableSpan(const CPPType &type) : GMutableSpan(type, nullptr, 0)
+ GMutableSpan(const CPPType *type) : type_(type)
{
}
@@ -116,14 +144,20 @@ class GMutableSpan {
operator GSpan() const
{
- return GSpan(*type_, data_, size_);
+ return GSpan(type_, data_, size_);
}
const CPPType &type() const
{
+ BLI_assert(type_ != nullptr);
return *type_;
}
+ const CPPType *type_ptr() const
+ {
+ return type_;
+ }
+
bool is_empty() const
{
return size_ == 0;
diff --git a/source/blender/blenlib/BLI_generic_virtual_array.hh b/source/blender/blenlib/BLI_generic_virtual_array.hh
index 3fce2947d0d..21549896f45 100644
--- a/source/blender/blenlib/BLI_generic_virtual_array.hh
+++ b/source/blender/blenlib/BLI_generic_virtual_array.hh
@@ -42,11 +42,7 @@ class GVArrayImpl {
virtual void get(int64_t index, void *r_value) const;
virtual void get_to_uninitialized(int64_t index, void *r_value) const = 0;
- virtual bool is_span() const;
- virtual GSpan get_internal_span() const;
-
- virtual bool is_single() const;
- virtual void get_internal_single(void *UNUSED(r_value)) const;
+ virtual CommonVArrayInfo common_info() const;
virtual void materialize(const IndexMask mask, void *dst) const;
virtual void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
@@ -55,7 +51,6 @@ class GVArrayImpl {
virtual void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
virtual bool try_assign_VArray(void *varray) const;
- virtual bool may_have_ownership() const;
};
/* A generic version of #VMutableArrayImpl. */
@@ -141,6 +136,8 @@ class GVArrayCommon {
void materialize_compressed(IndexMask mask, void *dst) const;
void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
+ CommonVArrayInfo common_info() const;
+
/**
* Returns true when the virtual array is stored as a span internally.
*/
@@ -260,22 +257,25 @@ class GVMutableArray : public GVArrayCommon {
/** \} */
/* -------------------------------------------------------------------- */
-/** \name #GVArray_GSpan and #GVMutableArray_GSpan.
+/** \name #GVArraySpan and #GMutableVArraySpan.
* \{ */
-/* A generic version of VArray_Span. */
-class GVArray_GSpan : public GSpan {
+/* A generic version of VArraySpan. */
+class GVArraySpan : public GSpan {
private:
GVArray varray_;
void *owned_data_ = nullptr;
public:
- GVArray_GSpan(GVArray varray);
- ~GVArray_GSpan();
+ GVArraySpan();
+ GVArraySpan(GVArray varray);
+ GVArraySpan(GVArraySpan &&other);
+ ~GVArraySpan();
+ GVArraySpan &operator=(GVArraySpan &&other);
};
-/* A generic version of VMutableArray_Span. */
-class GVMutableArray_GSpan : public GMutableSpan {
+/* A generic version of MutableVArraySpan. */
+class GMutableVArraySpan : public GMutableSpan, NonCopyable, NonMovable {
private:
GVMutableArray varray_;
void *owned_data_ = nullptr;
@@ -283,8 +283,13 @@ class GVMutableArray_GSpan : public GMutableSpan {
bool show_not_saved_warning_ = true;
public:
- GVMutableArray_GSpan(GVMutableArray varray, bool copy_values_to_span = true);
- ~GVMutableArray_GSpan();
+ GMutableVArraySpan();
+ GMutableVArraySpan(GVMutableArray varray, bool copy_values_to_span = true);
+ GMutableVArraySpan(GMutableVArraySpan &&other);
+ ~GMutableVArraySpan();
+ GMutableVArraySpan &operator=(GMutableVArraySpan &&other);
+
+ const GVMutableArray &varray() const;
void save();
void disable_not_applied_warning();
@@ -318,26 +323,6 @@ template<typename T> class GVArrayImpl_For_VArray : public GVArrayImpl {
new (r_value) T(varray_[index]);
}
- bool is_span() const override
- {
- return varray_.is_span();
- }
-
- GSpan get_internal_span() const override
- {
- return GSpan(varray_.get_internal_span());
- }
-
- bool is_single() const override
- {
- return varray_.is_single();
- }
-
- void get_internal_single(void *r_value) const override
- {
- *(T *)r_value = varray_.get_internal_single();
- }
-
void materialize(const IndexMask mask, void *dst) const override
{
varray_.materialize(mask, MutableSpan((T *)dst, mask.min_array_size()));
@@ -364,9 +349,9 @@ template<typename T> class GVArrayImpl_For_VArray : public GVArrayImpl {
return true;
}
- bool may_have_ownership() const override
+ CommonVArrayInfo common_info() const override
{
- return varray_.may_have_ownership();
+ return varray_.common_info();
}
};
@@ -390,26 +375,9 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
return value;
}
- bool is_span() const override
+ CommonVArrayInfo common_info() const override
{
- return varray_.is_span();
- }
-
- Span<T> get_internal_span() const override
- {
- return varray_.get_internal_span().template typed<T>();
- }
-
- bool is_single() const override
- {
- return varray_.is_single();
- }
-
- T get_internal_single() const override
- {
- T value;
- varray_.get_internal_single(&value);
- return value;
+ return varray_.common_info();
}
bool try_assign_GVArray(GVArray &varray) const override
@@ -418,11 +386,6 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
return true;
}
- bool may_have_ownership() const override
- {
- return varray_.may_have_ownership();
- }
-
void materialize(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize(mask, r_span.data());
@@ -467,25 +430,9 @@ template<typename T> class GVMutableArrayImpl_For_VMutableArray : public GVMutab
new (r_value) T(varray_[index]);
}
- bool is_span() const override
+ CommonVArrayInfo common_info() const override
{
- return varray_.is_span();
- }
-
- GSpan get_internal_span() const override
- {
- Span<T> span = varray_.get_internal_span();
- return span;
- }
-
- bool is_single() const override
- {
- return varray_.is_single();
- }
-
- void get_internal_single(void *r_value) const override
- {
- *(T *)r_value = varray_.get_internal_single();
+ return varray_.common_info();
}
void set_by_copy(const int64_t index, const void *value) override
@@ -543,11 +490,6 @@ template<typename T> class GVMutableArrayImpl_For_VMutableArray : public GVMutab
*(VMutableArray<T> *)varray = varray_;
return true;
}
-
- bool may_have_ownership() const override
- {
- return varray_.may_have_ownership();
- }
};
/* Used to convert a generic mutable virtual array into a typed one. */
@@ -576,26 +518,9 @@ template<typename T> class VMutableArrayImpl_For_GVMutableArray : public VMutabl
varray_.set_by_relocate(index, &value);
}
- bool is_span() const override
- {
- return varray_.is_span();
- }
-
- Span<T> get_internal_span() const override
- {
- return varray_.get_internal_span().template typed<T>();
- }
-
- bool is_single() const override
+ CommonVArrayInfo common_info() const override
{
- return varray_.is_single();
- }
-
- T get_internal_single() const override
- {
- T value;
- varray_.get_internal_single(&value);
- return value;
+ return varray_.common_info();
}
bool try_assign_GVArray(GVArray &varray) const override
@@ -610,11 +535,6 @@ template<typename T> class VMutableArrayImpl_For_GVMutableArray : public VMutabl
return true;
}
- bool may_have_ownership() const override
- {
- return varray_.may_have_ownership();
- }
-
void materialize(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize(mask, r_span.data());
@@ -670,8 +590,7 @@ class GVArrayImpl_For_GSpan : public GVMutableArrayImpl {
void set_by_move(int64_t index, void *value) override;
void set_by_relocate(int64_t index, void *value) override;
- bool is_span() const override;
- GSpan get_internal_span() const override;
+ CommonVArrayInfo common_info() const override;
virtual void materialize(const IndexMask mask, void *dst) const override;
virtual void materialize_to_uninitialized(const IndexMask mask, void *dst) const override;
@@ -686,12 +605,11 @@ class GVArrayImpl_For_GSpan_final final : public GVArrayImpl_For_GSpan {
using GVArrayImpl_For_GSpan::GVArrayImpl_For_GSpan;
private:
- bool may_have_ownership() const override
- {
- return false;
- }
+ CommonVArrayInfo common_info() const override;
};
+template<> inline constexpr bool is_trivial_extended_v<GVArrayImpl_For_GSpan_final> = true;
+
/** \} */
/* -------------------------------------------------------------------- */
@@ -715,10 +633,7 @@ class GVArrayImpl_For_SingleValueRef : public GVArrayImpl {
void get(const int64_t index, void *r_value) const override;
void get_to_uninitialized(const int64_t index, void *r_value) const override;
- bool is_span() const override;
- GSpan get_internal_span() const override;
- bool is_single() const override;
- void get_internal_single(void *r_value) const override;
+ CommonVArrayInfo common_info() const override;
void materialize(const IndexMask mask, void *dst) const override;
void materialize_to_uninitialized(const IndexMask mask, void *dst) const override;
void materialize_compressed(const IndexMask mask, void *dst) const override;
@@ -730,12 +645,12 @@ class GVArrayImpl_For_SingleValueRef_final final : public GVArrayImpl_For_Single
using GVArrayImpl_For_SingleValueRef::GVArrayImpl_For_SingleValueRef;
private:
- bool may_have_ownership() const override
- {
- return false;
- }
+ CommonVArrayInfo common_info() const override;
};
+template<>
+inline constexpr bool is_trivial_extended_v<GVArrayImpl_For_SingleValueRef_final> = true;
+
/** \} */
/* -------------------------------------------------------------------- */
@@ -859,6 +774,11 @@ inline GVArrayCommon::operator bool() const
return impl_ != nullptr;
}
+inline CommonVArrayInfo GVArrayCommon::common_info() const
+{
+ return impl_->common_info();
+}
+
inline int64_t GVArrayCommon::size() const
{
if (impl_ == nullptr) {
@@ -931,25 +851,21 @@ template<typename T> inline GVArray::GVArray(const VArray<T> &varray)
if (!varray) {
return;
}
- if (varray.try_assign_GVArray(*this)) {
+ const CommonVArrayInfo info = varray.common_info();
+ if (info.type == CommonVArrayInfo::Type::Single) {
+ *this = GVArray::ForSingle(CPPType::get<T>(), varray.size(), info.data);
return;
}
- /* Need to check this before the span/single special cases, because otherwise we might loose
- * ownership to the referenced data when #varray goes out of scope. */
- if (varray.may_have_ownership()) {
- *this = GVArray::For<GVArrayImpl_For_VArray<T>>(varray);
- }
- else if (varray.is_single()) {
- T value = varray.get_internal_single();
- *this = GVArray::ForSingle(CPPType::get<T>(), varray.size(), &value);
- }
- else if (varray.is_span()) {
- Span<T> data = varray.get_internal_span();
- *this = GVArray::ForSpan(data);
+ /* Need to check for ownership, because otherwise the referenced data can be destructed when
+ * #this is destructed. */
+ if (info.type == CommonVArrayInfo::Type::Span && !info.may_have_ownership) {
+ *this = GVArray::ForSpan(GSpan(CPPType::get<T>(), info.data, varray.size()));
+ return;
}
- else {
- *this = GVArray::For<GVArrayImpl_For_VArray<T>>(varray);
+ if (varray.try_assign_GVArray(*this)) {
+ return;
}
+ *this = GVArray::For<GVArrayImpl_For_VArray<T>>(varray);
}
template<typename T> inline VArray<T> GVArray::typed() const
@@ -958,22 +874,19 @@ template<typename T> inline VArray<T> GVArray::typed() const
return {};
}
BLI_assert(impl_->type().is<T>());
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Single) {
+ return VArray<T>::ForSingle(*static_cast<const T *>(info.data), this->size());
+ }
+ /* Need to check for ownership, because otherwise the referenced data can be destructed when
+ * #this is destructed. */
+ if (info.type == CommonVArrayInfo::Type::Span && !info.may_have_ownership) {
+ return VArray<T>::ForSpan(Span<T>(static_cast<const T *>(info.data), this->size()));
+ }
VArray<T> varray;
if (this->try_assign_VArray(varray)) {
return varray;
}
- if (this->may_have_ownership()) {
- return VArray<T>::template For<VArrayImpl_For_GVArray<T>>(*this);
- }
- if (this->is_single()) {
- T value;
- this->get_internal_single(&value);
- return VArray<T>::ForSingle(value, this->size());
- }
- if (this->is_span()) {
- const Span<T> span = this->get_internal_span().typed<T>();
- return VArray<T>::ForSpan(span);
- }
return VArray<T>::template For<VArrayImpl_For_GVArray<T>>(*this);
}
@@ -997,19 +910,16 @@ template<typename T> inline GVMutableArray::GVMutableArray(const VMutableArray<T
if (!varray) {
return;
}
- if (varray.try_assign_GVMutableArray(*this)) {
+ const CommonVArrayInfo info = varray.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span && !info.may_have_ownership) {
+ *this = GVMutableArray::ForSpan(
+ GMutableSpan(CPPType::get<T>(), const_cast<void *>(info.data), varray.size()));
return;
}
- if (varray.may_have_ownership()) {
- *this = GVMutableArray::For<GVMutableArrayImpl_For_VMutableArray<T>>(varray);
- }
- else if (varray.is_span()) {
- MutableSpan<T> data = varray.get_internal_span();
- *this = GVMutableArray::ForSpan(data);
- }
- else {
- *this = GVMutableArray::For<GVMutableArrayImpl_For_VMutableArray<T>>(varray);
+ if (varray.try_assign_GVMutableArray(*this)) {
+ return;
}
+ *this = GVMutableArray::For<GVMutableArrayImpl_For_VMutableArray<T>>(varray);
}
template<typename T> inline VMutableArray<T> GVMutableArray::typed() const
@@ -1018,17 +928,15 @@ template<typename T> inline VMutableArray<T> GVMutableArray::typed() const
return {};
}
BLI_assert(this->type().is<T>());
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Span && !info.may_have_ownership) {
+ return VMutableArray<T>::ForSpan(
+ MutableSpan<T>(const_cast<T *>(static_cast<const T *>(info.data)), this->size()));
+ }
VMutableArray<T> varray;
if (this->try_assign_VMutableArray(varray)) {
return varray;
}
- if (this->may_have_ownership()) {
- return VMutableArray<T>::template For<VMutableArrayImpl_For_GVMutableArray<T>>(*this);
- }
- if (this->is_span()) {
- const MutableSpan<T> span = this->get_internal_span().typed<T>();
- return VMutableArray<T>::ForSpan(span);
- }
return VMutableArray<T>::template For<VMutableArrayImpl_For_GVMutableArray<T>>(*this);
}
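
A sketch of how a caller can branch on the new common_info() query instead of the removed is_span()/is_single() virtuals; the wrapper function and its policy are hypothetical:

  #include "BLI_generic_virtual_array.hh"

  /* Returns true when the array's storage allows direct span access without a copy. */
  bool has_direct_span_access(const blender::GVArray &varray)
  {
    using namespace blender;
    const CommonVArrayInfo info = varray.common_info();
    if (info.type == CommonVArrayInfo::Type::Single) {
      /* Every element has the same value, referenced by info.data. */
      return false;
    }
    if (info.type == CommonVArrayInfo::Type::Span) {
      /* The data is contiguous; info.data points at the first element and
       * info.may_have_ownership tells whether the GVArray might own it. */
      return true;
    }
    /* Anything else requires element-wise access through the virtual interface. */
    return false;
  }
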
diff --git a/source/blender/blenlib/BLI_index_mask_ops.hh b/source/blender/blenlib/BLI_index_mask_ops.hh
index 48a1f27a2fa..e4eece11e83 100644
--- a/source/blender/blenlib/BLI_index_mask_ops.hh
+++ b/source/blender/blenlib/BLI_index_mask_ops.hh
@@ -13,6 +13,7 @@
#include "BLI_index_mask.hh"
#include "BLI_task.hh"
#include "BLI_vector.hh"
+#include "BLI_virtual_array.hh"
namespace blender::index_mask_ops {
@@ -57,4 +58,17 @@ inline IndexMask find_indices_based_on_predicate(const IndexMask indices_to_chec
return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices);
}
+/**
+ * Find the true indices in a virtual array. This is a version of
+ * #find_indices_based_on_predicate optimized for a virtual array input.
+ *
+ * \param parallel_grain_size: The grain size for when the virtual array isn't a span or a single
+ * value internally. This should be adjusted based on the expected cost of evaluating the virtual
+ * array; more expensive virtual arrays should have smaller grain sizes.
+ */
+IndexMask find_indices_from_virtual_array(IndexMask indices_to_check,
+ const VArray<bool> &virtual_array,
+ int64_t parallel_grain_size,
+ Vector<int64_t> &r_indices);
+
} // namespace blender::index_mask_ops
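
A sketch of calling the new helper declared above; the grain size of 2048 is an arbitrary choice:

  #include "BLI_index_mask_ops.hh"

  blender::IndexMask selected_indices_example(const blender::VArray<bool> &selection,
                                              blender::Vector<int64_t> &r_indices)
  {
    /* Collect the indices at which the boolean virtual array is true. */
    return blender::index_mask_ops::find_indices_from_virtual_array(
        blender::IndexMask(selection.size()), selection, 2048, r_indices);
  }
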
diff --git a/source/blender/blenlib/BLI_index_range.hh b/source/blender/blenlib/BLI_index_range.hh
index 7d5c2400bba..a0f129c7324 100644
--- a/source/blender/blenlib/BLI_index_range.hh
+++ b/source/blender/blenlib/BLI_index_range.hh
@@ -90,10 +90,10 @@ class IndexRange {
return *this;
}
- constexpr Iterator operator++(int) const
+ constexpr Iterator operator++(int)
{
Iterator copied_iterator = *this;
- ++copied_iterator;
+ ++(*this);
return copied_iterator;
}
@@ -186,13 +186,25 @@ class IndexRange {
}
/**
- * Get the last element in the range.
- * Asserts when the range is empty.
+ * Get the nth last element in the range.
+ * Asserts when the range is empty or when n is negative.
*/
- constexpr int64_t last() const
+ constexpr int64_t last(const int64_t n = 0) const
{
+ BLI_assert(n >= 0);
+ BLI_assert(n < size_);
BLI_assert(this->size() > 0);
- return start_ + size_ - 1;
+ return start_ + size_ - 1 - n;
+ }
+
+ /**
+ * Get the element one before the beginning. The returned value is undefined when the range is
+ * empty, and the range must start after zero already.
+ */
+ constexpr int64_t one_before_start() const
+ {
+ BLI_assert(start_ > 0);
+ return start_ - 1;
}
/**
@@ -280,6 +292,15 @@ class IndexRange {
}
/**
+ * Move the range forward or backward within the larger array. The amount may be negative,
+ * but its absolute value cannot be greater than the existing start of the range.
+ */
+ constexpr IndexRange shift(int64_t n) const
+ {
+ return IndexRange(start_ + n, size_);
+ }
+
+ /**
* Get read-only access to a memory buffer that contains the range as actual numbers.
*/
Span<int64_t> as_span() const;
@@ -297,4 +318,19 @@ class IndexRange {
Span<int64_t> as_span_internal() const;
};
+struct AlignedIndexRanges {
+ IndexRange prefix;
+ IndexRange aligned;
+ IndexRange suffix;
+};
+
+/**
+ * Split a range into three parts so that the boundaries of the middle part are aligned to some
+ * power of two.
+ *
+ * This can be used when an algorithm can be optimized on aligned indices/memory. The algorithm
+ * then needs a slow path for the beginning and end, and a fast path for the aligned elements.
+ */
+AlignedIndexRanges split_index_range_by_alignment(const IndexRange range, const int64_t alignment);
+
} // namespace blender
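
A few illustrative calls for the IndexRange additions above; the concrete numbers are arbitrary:

  #include "BLI_index_range.hh"

  void index_range_additions_example()
  {
    const blender::IndexRange range(4, 10);          /* Indices 4..13. */

    const int64_t last = range.last();               /* 13 */
    const int64_t second_last = range.last(1);       /* 12 */
    const int64_t before = range.one_before_start(); /* 3 */

    /* Move the whole window two elements forward within the larger array: 6..15. */
    const blender::IndexRange shifted = range.shift(2);

    (void)last;
    (void)second_last;
    (void)before;
    (void)shifted;
  }
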
diff --git a/source/blender/blenlib/BLI_kdopbvh.h b/source/blender/blenlib/BLI_kdopbvh.h
index 8642d728909..17759b6a8ac 100644
--- a/source/blender/blenlib/BLI_kdopbvh.h
+++ b/source/blender/blenlib/BLI_kdopbvh.h
@@ -332,6 +332,29 @@ extern const float bvhtree_kdop_axes[13][3];
namespace blender {
+using BVHTree_RayCastCallback_CPP =
+ FunctionRef<void(int index, const BVHTreeRay &ray, BVHTreeRayHit &hit)>;
+
+inline void BLI_bvhtree_ray_cast_all_cpp(BVHTree &tree,
+ const float3 co,
+ const float3 dir,
+ float radius,
+ float hit_dist,
+ BVHTree_RayCastCallback_CPP fn)
+{
+ BLI_bvhtree_ray_cast_all(
+ &tree,
+ co,
+ dir,
+ radius,
+ hit_dist,
+ [](void *userdata, int index, const BVHTreeRay *ray, BVHTreeRayHit *hit) {
+ BVHTree_RayCastCallback_CPP fn = *static_cast<BVHTree_RayCastCallback_CPP *>(userdata);
+ fn(index, *ray, *hit);
+ },
+ &fn);
+}
+
using BVHTree_RangeQuery_CPP = FunctionRef<void(int index, const float3 &co, float dist_sq)>;
inline void BLI_bvhtree_range_query_cpp(BVHTree &tree,
diff --git a/source/blender/blenlib/BLI_kdtree.h b/source/blender/blenlib/BLI_kdtree.h
index 696b2206b5f..954282b099a 100644
--- a/source/blender/blenlib/BLI_kdtree.h
+++ b/source/blender/blenlib/BLI_kdtree.h
@@ -2,10 +2,6 @@
#pragma once
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/** \file
* \ingroup bli
* \brief A KD-tree for nearest neighbor search.
@@ -54,7 +50,3 @@ extern "C" {
#undef KDTree
#undef KDTreeNearest
#undef KDTREE_PREFIX_ID
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/source/blender/blenlib/BLI_kdtree_impl.h b/source/blender/blenlib/BLI_kdtree_impl.h
index 08fa40fd972..4187724fbda 100644
--- a/source/blender/blenlib/BLI_kdtree_impl.h
+++ b/source/blender/blenlib/BLI_kdtree_impl.h
@@ -12,6 +12,10 @@
#define _BLI_CONCAT(MACRO_ARG1, MACRO_ARG2) _BLI_CONCAT_AUX(MACRO_ARG1, MACRO_ARG2)
#define BLI_kdtree_nd_(id) _BLI_CONCAT(KDTREE_PREFIX_ID, _##id)
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct KDTree;
typedef struct KDTree KDTree;
@@ -80,6 +84,29 @@ int BLI_kdtree_nd_(range_search_with_len_squared_cb)(
const void *user_data),
const void *user_data) ATTR_NONNULL(1, 2) ATTR_WARN_UNUSED_RESULT;
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+template<typename Fn>
+inline void BLI_kdtree_nd_(range_search_cb_cpp)(const KDTree *tree,
+ const float co[KD_DIMS],
+ float distance,
+ const Fn &fn)
+{
+ BLI_kdtree_nd_(range_search_cb)(
+ tree,
+ co,
+ distance,
+ [](void *user_data, const int index, const float *co, const float dist_sq) {
+ const Fn &fn = *static_cast<const Fn *>(user_data);
+ return fn(index, co, dist_sq);
+ },
+ const_cast<Fn *>(&fn));
+}
+#endif
+
#undef _BLI_CONCAT_AUX
#undef _BLI_CONCAT
#undef BLI_kdtree_nd_
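
A sketch of the new C++ callback wrapper; it assumes the 3-D instantiation provided by BLI_kdtree.h (the KDTree_3d type and BLI_kdtree_3d_* names) and a hypothetical counting use case:

  #include "BLI_kdtree.h"

  /* Count the points within `radius` of `co`. */
  int count_neighbors_example(const KDTree_3d *tree, const float co[3], const float radius)
  {
    int count = 0;
    BLI_kdtree_3d_range_search_cb_cpp(
        tree, co, radius, [&](int /*index*/, const float * /*co*/, float /*dist_sq*/) {
          count++;
          return true; /* Keep searching. */
        });
    return count;
  }
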
diff --git a/source/blender/blenlib/BLI_length_parameterize.hh b/source/blender/blenlib/BLI_length_parameterize.hh
index ec0fabb75b4..d81bcbe1e7a 100644
--- a/source/blender/blenlib/BLI_length_parameterize.hh
+++ b/source/blender/blenlib/BLI_length_parameterize.hh
@@ -6,10 +6,10 @@
* \ingroup bli
*/
+#include "BLI_index_mask.hh"
#include "BLI_math_base.hh"
#include "BLI_math_color.hh"
#include "BLI_math_vector.hh"
-#include "BLI_vector.hh"
namespace blender::length_parameterize {
@@ -17,9 +17,9 @@ namespace blender::length_parameterize {
* Return the size of the necessary lengths array for a group of points, taking into account the
* possible last cyclic segment.
*
- * \note This is the same as #bke::curves::curve_segment_num.
+ * \note This is the same as #bke::curves::segments_num.
*/
-inline int lengths_num(const int points_num, const bool cyclic)
+inline int segments_num(const int points_num, const bool cyclic)
{
return cyclic ? points_num : points_num - 1;
}
@@ -30,7 +30,7 @@ inline int lengths_num(const int points_num, const bool cyclic)
template<typename T>
void accumulate_lengths(const Span<T> values, const bool cyclic, MutableSpan<float> lengths)
{
- BLI_assert(lengths.size() == lengths_num(values.size(), cyclic));
+ BLI_assert(lengths.size() == segments_num(values.size(), cyclic));
float length = 0.0f;
for (const int i : IndexRange(values.size() - 1)) {
length += math::distance(values[i], values[i + 1]);
@@ -42,57 +42,135 @@ void accumulate_lengths(const Span<T> values, const bool cyclic, MutableSpan<flo
}
template<typename T>
-void linear_interpolation(const Span<T> src,
- const Span<int> indices,
- const Span<float> factors,
- MutableSpan<T> dst)
+inline void interpolate_to_masked(const Span<T> src,
+ const Span<int> indices,
+ const Span<float> factors,
+ const IndexMask dst_mask,
+ MutableSpan<T> dst)
{
BLI_assert(indices.size() == factors.size());
- BLI_assert(indices.size() == dst.size());
- const int last_src_index = src.index_range().last();
+ BLI_assert(indices.size() == dst_mask.size());
+ const int last_src_index = src.size() - 1;
- int cyclic_sample_count = 0;
- for (int i = indices.index_range().last(); i > 0; i--) {
- if (indices[i] != last_src_index) {
- break;
+ dst_mask.to_best_mask_type([&](auto dst_mask) {
+ for (const int i : IndexRange(dst_mask.size())) {
+ const int prev_index = indices[i];
+ const float factor = factors[i];
+ const bool is_cyclic_case = prev_index == last_src_index;
+ if (is_cyclic_case) {
+ dst[dst_mask[i]] = math::interpolate(src.last(), src.first(), factor);
+ }
+ else {
+ dst[dst_mask[i]] = math::interpolate(src[prev_index], src[prev_index + 1], factor);
+ }
}
- dst[i] = math::interpolate(src.last(), src.first(), factors[i]);
- cyclic_sample_count++;
+ });
+}
+
+template<typename T>
+inline void interpolate(const Span<T> src,
+ const Span<int> indices,
+ const Span<float> factors,
+ MutableSpan<T> dst)
+{
+ interpolate_to_masked(src, indices, factors, dst.index_range(), dst);
+}
+
+/**
+ * Passing this to consecutive calls of #sample_at_length can increase performance.
+ */
+struct SampleSegmentHint {
+ int segment_index = -1;
+ float segment_start;
+ float segment_length_inv;
+};
+
+/**
+ * \param accumulated_segment_lengths: Lengths of individual segments added up.
+ * Each value describes the total length at the end of the segment following a point.
+ * \param sample_length: The position to sample at.
+ * \param r_segment_index: Returns the index of the segment that #sample_length is in.
+ * \param r_factor: Returns the position within the segment.
+ *
+ * \note #sample_length must not be outside of any segment.
+ */
+inline void sample_at_length(const Span<float> accumulated_segment_lengths,
+ const float sample_length,
+ int &r_segment_index,
+ float &r_factor,
+ SampleSegmentHint *hint = nullptr)
+{
+ /* Use a shorter variable name. */
+ const Span<float> lengths = accumulated_segment_lengths;
+
+ BLI_assert(lengths.size() > 0);
+ BLI_assert(sample_length >= 0.0f);
+ BLI_assert(sample_length <= lengths.last());
+
+ if (hint != nullptr && hint->segment_index >= 0) {
+ const float length_in_segment = sample_length - hint->segment_start;
+ const float factor = length_in_segment * hint->segment_length_inv;
+ if (factor >= 0.0f && factor < 1.0f) {
+ r_segment_index = hint->segment_index;
+ r_factor = factor;
+ return;
+ }
+ }
+
+ const float total_length = lengths.last();
+ if (sample_length >= total_length) {
+ /* Return the last position on the last segment. */
+ r_segment_index = lengths.size() - 1;
+ r_factor = 1.0f;
+ return;
}
- for (const int i : dst.index_range().drop_back(cyclic_sample_count)) {
- dst[i] = math::interpolate(src[indices[i]], src[indices[i] + 1], factors[i]);
+ const int prev_point_index = std::upper_bound(lengths.begin(), lengths.end(), sample_length) -
+ lengths.begin();
+ const float segment_start = prev_point_index == 0 ? 0.0f : lengths[prev_point_index - 1];
+ const float segment_end = lengths[prev_point_index];
+ const float segment_length = segment_end - segment_start;
+ const float segment_length_inv = math::safe_divide(1.0f, segment_length);
+ const float length_in_segment = sample_length - segment_start;
+ const float factor = length_in_segment * segment_length_inv;
+
+ r_segment_index = prev_point_index;
+ r_factor = factor;
+
+ if (hint != nullptr) {
+ hint->segment_index = r_segment_index;
+ hint->segment_start = segment_start;
+ hint->segment_length_inv = segment_length_inv;
}
}
/**
- * Find the given number of points, evenly spaced along the provided length. For non-cyclic
- * sequences, the first point will always be included, and last point will always be included if
- * the #count is greater than zero. For cyclic sequences, the first point will always be included.
+ * Find evenly spaced samples along the lengths.
*
- * \warning The #count argument must be greater than zero.
+ * \param accumulated_segment_lengths: The accumulated lengths of the original elements being
+ * sampled. Could be calculated by #accumulate_lengths.
+ * \param include_last_point: Generally false for cyclic sequences and true otherwise.
+ * \param r_segment_indices: The index of the previous point at each sample.
+ * \param r_factors: The portion of the length in each segment at each sample.
*/
-void create_uniform_samples(Span<float> lengths,
- bool cyclic,
- MutableSpan<int> indices,
- MutableSpan<float> factors);
+void sample_uniform(Span<float> accumulated_segment_lengths,
+ bool include_last_point,
+ MutableSpan<int> r_segment_indices,
+ MutableSpan<float> r_factors);
/**
* For each provided sample length, find the segment index and interpolation factor.
*
- * \param lengths: The accumulated lengths of the original elements being sampled.
- * Could be calculated by #accumulate_lengths.
+ * \param accumulated_segment_lengths: The accumulated lengths of the original elements being
+ * sampled. Could be calculated by #accumulate_lengths.
* \param sample_lengths: Sampled locations in the #lengths array. Must be sorted and is expected
* to be within the range of the #lengths values.
- * \param cyclic: Whether the points described by the #lengths input is cyclic. This is likely
- * redundant information theoretically.
- * \param indices: The index of the previous point at each sample.
- * \param factors: The portion of the length in each segment at each sample.
+ * \param r_segment_indices: The index of the previous point at each sample.
+ * \param r_factors: The portion of the length in each segment at each sample.
*/
-void create_samples_from_sorted_lengths(Span<float> lengths,
- Span<float> sample_lengths,
- bool cyclic,
- MutableSpan<int> indices,
- MutableSpan<float> factors);
+void sample_at_lengths(Span<float> accumulated_segment_lengths,
+ Span<float> sample_lengths,
+ MutableSpan<int> r_segment_indices,
+ MutableSpan<float> r_factors);
} // namespace blender::length_parameterize
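
For reference, a minimal usage sketch of the renamed sampling API above (values are hypothetical; the accumulated lengths could come from #accumulate_lengths):

  /* Accumulated lengths for 4 points forming segments of length 1, 2 and 3. */
  const blender::Array<float> lengths = {1.0f, 3.0f, 6.0f};

  /* Sample one position along the total length, reusing a hint across calls. */
  blender::length_parameterize::SampleSegmentHint hint;
  int segment_index;
  float factor;
  blender::length_parameterize::sample_at_length(lengths, 4.5f, segment_index, factor, &hint);
  /* segment_index == 2, factor == 0.5f: halfway through the last segment. */

  /* Generate 8 evenly spaced samples that #interpolate can then consume. */
  blender::Array<int> indices(8);
  blender::Array<float> factors(8);
  blender::length_parameterize::sample_uniform(lengths, true, indices, factors);
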
diff --git a/source/blender/blenlib/BLI_listbase.h b/source/blender/blenlib/BLI_listbase.h
index 237fcdae8b9..9322fa4c85b 100644
--- a/source/blender/blenlib/BLI_listbase.h
+++ b/source/blender/blenlib/BLI_listbase.h
@@ -320,13 +320,13 @@ struct LinkData *BLI_genericNodeN(void *data);
} \
((void)0)
-#define LISTBASE_CIRCULAR_BACKWARD_BEGIN(lb, lb_iter, lb_init) \
- if ((lb)->last && (lb_init || (lb_init = (lb)->last))) { \
+#define LISTBASE_CIRCULAR_BACKWARD_BEGIN(type, lb, lb_iter, lb_init) \
+ if ((lb)->last && (lb_init || (lb_init = (type)(lb)->last))) { \
lb_iter = lb_init; \
do {
-#define LISTBASE_CIRCULAR_BACKWARD_END(lb, lb_iter, lb_init) \
+#define LISTBASE_CIRCULAR_BACKWARD_END(type, lb, lb_iter, lb_init) \
} \
- while ((lb_iter = (lb_iter)->prev ? (lb_iter)->prev : (lb)->last), (lb_iter != lb_init)) \
+ while ((lb_iter = (lb_iter)->prev ? (lb_iter)->prev : (type)(lb)->last), (lb_iter != lb_init)) \
; \
} \
((void)0)
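
For reference, a sketch of a caller updated for the new explicit `type` argument (the helper and its arguments below are hypothetical):

  /* Walk a list of LinkData links backwards, starting at `start`, until `data` is found. */
  static LinkData *find_link_backward_circular(ListBase *lb, LinkData *start, const void *data)
  {
    LinkData *iter, *init = start;
    LinkData *found = NULL;
    LISTBASE_CIRCULAR_BACKWARD_BEGIN (LinkData *, lb, iter, init) {
      if (iter->data == data) {
        found = iter;
        break;
      }
    }
    LISTBASE_CIRCULAR_BACKWARD_END (LinkData *, lb, iter, init);
    return found;
  }
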
diff --git a/source/blender/blenlib/BLI_listbase_wrapper.hh b/source/blender/blenlib/BLI_listbase_wrapper.hh
index 25e029a5616..2d631cb2441 100644
--- a/source/blender/blenlib/BLI_listbase_wrapper.hh
+++ b/source/blender/blenlib/BLI_listbase_wrapper.hh
@@ -50,7 +50,7 @@ template<typename T> class ListBaseWrapper {
Iterator operator++(int)
{
Iterator iterator = *this;
- ++*this;
+ ++(*this);
return iterator;
}
diff --git a/source/blender/blenlib/BLI_map.hh b/source/blender/blenlib/BLI_map.hh
index 55233676ed8..95d1e344894 100644
--- a/source/blender/blenlib/BLI_map.hh
+++ b/source/blender/blenlib/BLI_map.hh
@@ -669,10 +669,10 @@ class Map {
return *this;
}
- BaseIterator operator++(int) const
+ BaseIterator operator++(int)
{
BaseIterator copied_iterator = *this;
- ++copied_iterator;
+ ++(*this);
return copied_iterator;
}
diff --git a/source/blender/blenlib/BLI_math_base.h b/source/blender/blenlib/BLI_math_base.h
index f072a17f384..c0c4594ddc0 100644
--- a/source/blender/blenlib/BLI_math_base.h
+++ b/source/blender/blenlib/BLI_math_base.h
@@ -221,6 +221,19 @@ MINLINE unsigned int power_of_2_min_u(unsigned int x);
* with integers, to avoid gradual darkening when rounding down.
*/
MINLINE int divide_round_i(int a, int b);
+
+/**
+ * Integer division that returns the ceiling, instead of flooring like normal C division.
+ */
+MINLINE uint divide_ceil_u(uint a, uint b);
+MINLINE uint64_t divide_ceil_ul(uint64_t a, uint64_t b);
+
+/**
+ * Returns \a a if it is a multiple of \a b or the next multiple of \a b after \a a.
+ */
+MINLINE uint ceil_to_multiple_u(uint a, uint b);
+MINLINE uint64_t ceil_to_multiple_ul(uint64_t a, uint64_t b);
+
/**
* modulo that handles negative numbers, works the same as Python's.
*/
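
A short behavior sketch for the new ceiling helpers (assuming b > 0), following the doc-comments above:

  BLI_assert(divide_ceil_u(10, 4) == 3);        /* 10 / 4 rounded up instead of down. */
  BLI_assert(divide_ceil_u(8, 4) == 2);         /* Exact division is unchanged. */
  BLI_assert(ceil_to_multiple_u(10, 4) == 12);  /* Next multiple of 4 at or after 10. */
  BLI_assert(ceil_to_multiple_u(8, 4) == 8);    /* Already a multiple of 4. */
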
diff --git a/source/blender/blenlib/BLI_math_base.hh b/source/blender/blenlib/BLI_math_base.hh
index 3057e30dc03..b15179f75b6 100644
--- a/source/blender/blenlib/BLI_math_base.hh
+++ b/source/blender/blenlib/BLI_math_base.hh
@@ -44,6 +44,16 @@ template<typename T> inline T max(const T &a, const T &b)
return std::max(a, b);
}
+template<typename T> inline void max_inplace(T &a, const T &b)
+{
+ a = math::max(a, b);
+}
+
+template<typename T> inline void min_inplace(T &a, const T &b)
+{
+ a = math::min(a, b);
+}
+
template<typename T> inline T clamp(const T &a, const T &min, const T &max)
{
return std::clamp(a, min, max);
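
A small illustrative sketch of the added in-place helpers, e.g. when accumulating a value range (requires <limits>):

  float min_value = std::numeric_limits<float>::max();
  float max_value = std::numeric_limits<float>::lowest();
  for (const float value : {0.5f, -1.0f, 2.0f}) {
    blender::math::min_inplace(min_value, value);
    blender::math::max_inplace(max_value, value);
  }
  /* min_value == -1.0f, max_value == 2.0f. */
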
diff --git a/source/blender/blenlib/BLI_math_color.h b/source/blender/blenlib/BLI_math_color.h
index 6386a7f76f8..3aa2e35476d 100644
--- a/source/blender/blenlib/BLI_math_color.h
+++ b/source/blender/blenlib/BLI_math_color.h
@@ -164,7 +164,9 @@ void rgba_float_to_uchar(unsigned char r_col[4], const float col_f[4]);
MINLINE float rgb_to_grayscale(const float rgb[3]);
MINLINE unsigned char rgb_to_grayscale_byte(const unsigned char rgb[3]);
-MINLINE int compare_rgb_uchar(const unsigned char a[3], const unsigned char b[3], int limit);
+MINLINE int compare_rgb_uchar(const unsigned char col_a[3],
+ const unsigned char col_b[3],
+ int limit);
/**
* Return triangle noise in [-0.5..1.5] range.
diff --git a/source/blender/blenlib/BLI_math_geom.h b/source/blender/blenlib/BLI_math_geom.h
index 93b413ab755..d056c42e019 100644
--- a/source/blender/blenlib/BLI_math_geom.h
+++ b/source/blender/blenlib/BLI_math_geom.h
@@ -1270,8 +1270,8 @@ MINLINE void mul_sh_fl(float r[9], float f);
MINLINE void add_sh_shsh(float r[9], const float a[9], const float b[9]);
MINLINE float dot_shsh(const float a[9], const float b[9]);
-MINLINE float eval_shv3(float r[9], const float v[3]);
-MINLINE float diffuse_shv3(const float r[9], const float v[3]);
+MINLINE float eval_shv3(float sh[9], const float v[3]);
+MINLINE float diffuse_shv3(const float sh[9], const float v[3]);
MINLINE void vec_fac_to_sh(float r[9], const float v[3], float f);
MINLINE void madd_sh_shfl(float r[9], const float sh[9], float f);
diff --git a/source/blender/blenlib/BLI_math_matrix.h b/source/blender/blenlib/BLI_math_matrix.h
index 2cd2a299d53..467e6db4805 100644
--- a/source/blender/blenlib/BLI_math_matrix.h
+++ b/source/blender/blenlib/BLI_math_matrix.h
@@ -98,110 +98,110 @@ void mul_m4_m4_post(float R[4][4], const float B[4][4]);
/* Implement #mul_m3_series macro. */
-void _va_mul_m3_series_3(float R[3][3], const float M1[3][3], const float M2[3][3]) ATTR_NONNULL();
-void _va_mul_m3_series_4(float R[3][3],
- const float M1[3][3],
- const float M2[3][3],
- const float M3[3][3]) ATTR_NONNULL();
-void _va_mul_m3_series_5(float R[3][3],
- const float M1[3][3],
- const float M2[3][3],
- const float M3[3][3],
- const float M4[3][3]) ATTR_NONNULL();
-void _va_mul_m3_series_6(float R[3][3],
- const float M1[3][3],
- const float M2[3][3],
- const float M3[3][3],
- const float M4[3][3],
- const float M5[3][3]) ATTR_NONNULL();
-void _va_mul_m3_series_7(float R[3][3],
- const float M1[3][3],
- const float M2[3][3],
- const float M3[3][3],
- const float M4[3][3],
- const float M5[3][3],
- const float M6[3][3]) ATTR_NONNULL();
-void _va_mul_m3_series_8(float R[3][3],
- const float M1[3][3],
- const float M2[3][3],
- const float M3[3][3],
- const float M4[3][3],
- const float M5[3][3],
- const float M6[3][3],
- const float M7[3][3]) ATTR_NONNULL();
-void _va_mul_m3_series_9(float R[3][3],
- const float M1[3][3],
- const float M2[3][3],
- const float M3[3][3],
- const float M4[3][3],
- const float M5[3][3],
- const float M6[3][3],
- const float M7[3][3],
- const float M8[3][3]) ATTR_NONNULL();
+void _va_mul_m3_series_3(float r[3][3], const float m1[3][3], const float m2[3][3]) ATTR_NONNULL();
+void _va_mul_m3_series_4(float r[3][3],
+ const float m1[3][3],
+ const float m2[3][3],
+ const float m3[3][3]) ATTR_NONNULL();
+void _va_mul_m3_series_5(float r[3][3],
+ const float m1[3][3],
+ const float m2[3][3],
+ const float m3[3][3],
+ const float m4[3][3]) ATTR_NONNULL();
+void _va_mul_m3_series_6(float r[3][3],
+ const float m1[3][3],
+ const float m2[3][3],
+ const float m3[3][3],
+ const float m4[3][3],
+ const float m5[3][3]) ATTR_NONNULL();
+void _va_mul_m3_series_7(float r[3][3],
+ const float m1[3][3],
+ const float m2[3][3],
+ const float m3[3][3],
+ const float m4[3][3],
+ const float m5[3][3],
+ const float m6[3][3]) ATTR_NONNULL();
+void _va_mul_m3_series_8(float r[3][3],
+ const float m1[3][3],
+ const float m2[3][3],
+ const float m3[3][3],
+ const float m4[3][3],
+ const float m5[3][3],
+ const float m6[3][3],
+ const float m7[3][3]) ATTR_NONNULL();
+void _va_mul_m3_series_9(float r[3][3],
+ const float m1[3][3],
+ const float m2[3][3],
+ const float m3[3][3],
+ const float m4[3][3],
+ const float m5[3][3],
+ const float m6[3][3],
+ const float m7[3][3],
+ const float m8[3][3]) ATTR_NONNULL();
/* Implement #mul_m4_series macro. */
-void _va_mul_m4_series_3(float R[4][4], const float M1[4][4], const float M2[4][4]) ATTR_NONNULL();
-void _va_mul_m4_series_4(float R[4][4],
- const float M1[4][4],
- const float M2[4][4],
- const float M3[4][4]) ATTR_NONNULL();
-void _va_mul_m4_series_5(float R[4][4],
- const float M1[4][4],
- const float M2[4][4],
- const float M3[4][4],
- const float M4[4][4]) ATTR_NONNULL();
-void _va_mul_m4_series_6(float R[4][4],
- const float M1[4][4],
- const float M2[4][4],
- const float M3[4][4],
- const float M4[4][4],
- const float M5[4][4]) ATTR_NONNULL();
-void _va_mul_m4_series_7(float R[4][4],
- const float M1[4][4],
- const float M2[4][4],
- const float M3[4][4],
- const float M4[4][4],
- const float M5[4][4],
- const float M6[4][4]) ATTR_NONNULL();
-void _va_mul_m4_series_8(float R[4][4],
- const float M1[4][4],
- const float M2[4][4],
- const float M3[4][4],
- const float M4[4][4],
- const float M5[4][4],
- const float M6[4][4],
- const float M7[4][4]) ATTR_NONNULL();
-void _va_mul_m4_series_9(float R[4][4],
- const float M1[4][4],
- const float M2[4][4],
- const float M3[4][4],
- const float M4[4][4],
- const float M5[4][4],
- const float M6[4][4],
- const float M7[4][4],
- const float M8[4][4]) ATTR_NONNULL();
+void _va_mul_m4_series_3(float r[4][4], const float m1[4][4], const float m2[4][4]) ATTR_NONNULL();
+void _va_mul_m4_series_4(float r[4][4],
+ const float m1[4][4],
+ const float m2[4][4],
+ const float m3[4][4]) ATTR_NONNULL();
+void _va_mul_m4_series_5(float r[4][4],
+ const float m1[4][4],
+ const float m2[4][4],
+ const float m3[4][4],
+ const float m4[4][4]) ATTR_NONNULL();
+void _va_mul_m4_series_6(float r[4][4],
+ const float m1[4][4],
+ const float m2[4][4],
+ const float m3[4][4],
+ const float m4[4][4],
+ const float m5[4][4]) ATTR_NONNULL();
+void _va_mul_m4_series_7(float r[4][4],
+ const float m1[4][4],
+ const float m2[4][4],
+ const float m3[4][4],
+ const float m4[4][4],
+ const float m5[4][4],
+ const float m6[4][4]) ATTR_NONNULL();
+void _va_mul_m4_series_8(float r[4][4],
+ const float m1[4][4],
+ const float m2[4][4],
+ const float m3[4][4],
+ const float m4[4][4],
+ const float m5[4][4],
+ const float m6[4][4],
+ const float m7[4][4]) ATTR_NONNULL();
+void _va_mul_m4_series_9(float r[4][4],
+ const float m1[4][4],
+ const float m2[4][4],
+ const float m3[4][4],
+ const float m4[4][4],
+ const float m5[4][4],
+ const float m6[4][4],
+ const float m7[4][4],
+ const float m8[4][4]) ATTR_NONNULL();
#define mul_m3_series(...) VA_NARGS_CALL_OVERLOAD(_va_mul_m3_series_, __VA_ARGS__)
#define mul_m4_series(...) VA_NARGS_CALL_OVERLOAD(_va_mul_m4_series_, __VA_ARGS__)
void mul_m4_v3(const float M[4][4], float r[3]);
-void mul_v3_m4v3(float r[3], const float M[4][4], const float v[3]);
+void mul_v3_m4v3(float r[3], const float mat[4][4], const float vec[3]);
void mul_v3_m4v3_db(double r[3], const double mat[4][4], const double vec[3]);
void mul_v4_m4v3_db(double r[4], const double mat[4][4], const double vec[3]);
-void mul_v2_m4v3(float r[2], const float M[4][4], const float v[3]);
-void mul_v2_m2v2(float r[2], const float M[2][2], const float v[2]);
-void mul_m2_v2(const float M[2][2], float v[2]);
+void mul_v2_m4v3(float r[2], const float mat[4][4], const float vec[3]);
+void mul_v2_m2v2(float r[2], const float mat[2][2], const float vec[2]);
+void mul_m2_v2(const float mat[2][2], float vec[2]);
/** Same as #mul_m4_v3() but doesn't apply translation component. */
-void mul_mat3_m4_v3(const float M[4][4], float r[3]);
-void mul_v3_mat3_m4v3(float r[3], const float M[4][4], const float v[3]);
-void mul_v3_mat3_m4v3_db(double r[3], const double M[4][4], const double v[3]);
-void mul_m4_v4(const float M[4][4], float r[4]);
-void mul_v4_m4v4(float r[4], const float M[4][4], const float v[4]);
+void mul_mat3_m4_v3(const float mat[4][4], float r[3]);
+void mul_v3_mat3_m4v3(float r[3], const float mat[4][4], const float vec[3]);
+void mul_v3_mat3_m4v3_db(double r[3], const double mat[4][4], const double vec[3]);
+void mul_m4_v4(const float mat[4][4], float r[4]);
+void mul_v4_m4v4(float r[4], const float mat[4][4], const float v[4]);
void mul_v4_m4v3(float r[4], const float M[4][4], const float v[3]); /* v has implicit w = 1.0f */
-void mul_project_m4_v3(const float M[4][4], float vec[3]);
+void mul_project_m4_v3(const float mat[4][4], float vec[3]);
void mul_v3_project_m4_v3(float r[3], const float mat[4][4], const float vec[3]);
-void mul_v2_project_m4_v3(float r[2], const float M[4][4], const float vec[3]);
+void mul_v2_project_m4_v3(float r[2], const float mat[4][4], const float vec[3]);
void mul_m3_v2(const float m[3][3], float r[2]);
void mul_v2_m3v2(float r[2], const float m[3][3], const float v[2]);
@@ -234,13 +234,14 @@ void negate_m3(float R[3][3]);
void negate_mat3_m4(float R[4][4]);
void negate_m4(float R[4][4]);
-bool invert_m3_ex(float m[3][3], float epsilon);
-bool invert_m3_m3_ex(float m1[3][3], const float m2[3][3], float epsilon);
+bool invert_m3_ex(float mat[3][3], float epsilon);
+bool invert_m3_m3_ex(float inverse[3][3], const float mat[3][3], float epsilon);
-bool invert_m3(float R[3][3]);
-bool invert_m3_m3(float R[3][3], const float A[3][3]);
-bool invert_m4(float R[4][4]);
-bool invert_m4_m4(float R[4][4], const float A[4][4]);
+bool invert_m3(float mat[3][3]);
+bool invert_m2_m2(float inverse[2][2], const float mat[2][2]);
+bool invert_m3_m3(float inverse[3][3], const float mat[3][3]);
+bool invert_m4(float mat[4][4]);
+bool invert_m4_m4(float inverse[4][4], const float mat[4][4]);
/**
* Computes the inverse of mat and puts it in inverse.
* Uses Gaussian Elimination with partial (maximal column) pivoting.
@@ -251,12 +252,12 @@ bool invert_m4_m4(float R[4][4], const float A[4][4]);
* for non-invertible scale matrices, finding a partial solution can
* be useful to have a valid local transform center, see T57767.
*/
-bool invert_m4_m4_fallback(float R[4][4], const float A[4][4]);
+bool invert_m4_m4_fallback(float inverse[4][4], const float mat[4][4]);
/* Double arithmetic (mixed float/double). */
-void mul_m4_v4d(const float M[4][4], double r[4]);
-void mul_v4d_m4v4d(double r[4], const float M[4][4], const double v[4]);
+void mul_m4_v4d(const float mat[4][4], double r[4]);
+void mul_v4d_m4v4d(double r[4], const float mat[4][4], const double v[4]);
/* Double matrix functions (no mixing types). */
@@ -290,8 +291,8 @@ void normalize_m3_m3_ex(float R[3][3], const float M[3][3], float r_scale[3]) AT
void normalize_m3_m3(float R[3][3], const float M[3][3]) ATTR_NONNULL();
void normalize_m4_ex(float R[4][4], float r_scale[3]) ATTR_NONNULL();
void normalize_m4(float R[4][4]) ATTR_NONNULL();
-void normalize_m4_m4_ex(float R[4][4], const float M[4][4], float r_scale[3]) ATTR_NONNULL();
-void normalize_m4_m4(float R[4][4], const float M[4][4]) ATTR_NONNULL();
+void normalize_m4_m4_ex(float rmat[4][4], const float mat[4][4], float r_scale[3]) ATTR_NONNULL();
+void normalize_m4_m4(float rmat[4][4], const float mat[4][4]) ATTR_NONNULL();
/**
* Make an orthonormal matrix around the selected axis of the given matrix.
@@ -325,15 +326,15 @@ void orthogonalize_m3_stable(float R[3][3], int axis, bool normalize);
*/
void orthogonalize_m4_stable(float R[4][4], int axis, bool normalize);
-bool orthogonalize_m3_zero_axes(float R[3][3], float unit_length);
-bool orthogonalize_m4_zero_axes(float R[4][4], float unit_length);
+bool orthogonalize_m3_zero_axes(float m[3][3], float unit_length);
+bool orthogonalize_m4_zero_axes(float m[4][4], float unit_length);
-bool is_orthogonal_m3(const float mat[3][3]);
-bool is_orthogonal_m4(const float mat[4][4]);
-bool is_orthonormal_m3(const float mat[3][3]);
-bool is_orthonormal_m4(const float mat[4][4]);
+bool is_orthogonal_m3(const float m[3][3]);
+bool is_orthogonal_m4(const float m[4][4]);
+bool is_orthonormal_m3(const float m[3][3]);
+bool is_orthonormal_m4(const float m[4][4]);
-bool is_uniform_scaled_m3(const float mat[3][3]);
+bool is_uniform_scaled_m3(const float m[3][3]);
bool is_uniform_scaled_m4(const float m[4][4]);
/* NOTE: 'adjoint' here means the adjugate (adjunct, "classical adjoint") matrix!
@@ -361,22 +362,22 @@ float determinant_m4(const float m[4][4]);
* From this decomposition it is trivial to compute the (pseudo-inverse)
* of `A` as `Ainv = V.Winv.transpose(U)`.
*/
-void svd_m4(float U[4][4], float s[4], float V[4][4], float A[4][4]);
-void pseudoinverse_m4_m4(float Ainv[4][4], const float A[4][4], float epsilon);
-void pseudoinverse_m3_m3(float Ainv[3][3], const float A[3][3], float epsilon);
+void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4]);
+void pseudoinverse_m4_m4(float inverse[4][4], const float mat[4][4], float epsilon);
+void pseudoinverse_m3_m3(float inverse[3][3], const float mat[3][3], float epsilon);
bool has_zero_axis_m4(const float matrix[4][4]);
-void invert_m4_m4_safe(float Ainv[4][4], const float A[4][4]);
+void invert_m4_m4_safe(float inverse[4][4], const float mat[4][4]);
-void invert_m3_m3_safe_ortho(float Ainv[3][3], const float A[3][3]);
+void invert_m3_m3_safe_ortho(float inverse[3][3], const float mat[3][3]);
/**
* A safe version of invert that uses valid axes, calculating the zero'd axis
* based on the non-zero ones.
*
* This works well for transformation matrices, when a single axis is zeroed.
*/
-void invert_m4_m4_safe_ortho(float Ainv[4][4], const float A[4][4]);
+void invert_m4_m4_safe_ortho(float inverse[4][4], const float mat[4][4]);
/** \} */
@@ -393,18 +394,18 @@ void scale_m4_v2(float R[4][4], const float scale[2]);
* For an orthogonal matrix, it is the product of all three scale values.
* Returns a negative value if the transform is flipped by negative scale.
*/
-float mat3_to_volume_scale(const float M[3][3]);
-float mat4_to_volume_scale(const float M[4][4]);
+float mat3_to_volume_scale(const float mat[3][3]);
+float mat4_to_volume_scale(const float mat[4][4]);
/**
* This gets the average scale of a matrix, only use when your scaling
* data that has no idea of scale axis, examples are bone-envelope-radius
* and curve radius.
*/
-float mat3_to_scale(const float M[3][3]);
-float mat4_to_scale(const float M[4][4]);
+float mat3_to_scale(const float mat[3][3]);
+float mat4_to_scale(const float mat[4][4]);
/** Return 2D scale (in XY plane) of given mat4. */
-float mat4_to_xy_scale(const float M[4][4]);
+float mat4_to_xy_scale(const float mat[4][4]);
void size_to_mat3(float R[3][3], const float size[3]);
void size_to_mat4(float R[4][4], const float size[3]);
@@ -432,7 +433,7 @@ float mat4_to_size_max_axis(const float M[4][4]);
*/
void mat4_to_size_fix_shear(float size[3], const float M[4][4]);
-void translate_m4(float mat[4][4], float tx, float ty, float tz);
+void translate_m4(float mat[4][4], float Tx, float Ty, float Tz);
/**
* Rotate a matrix in-place.
*
@@ -453,8 +454,20 @@ void rescale_m4(float mat[4][4], const float scale[3]);
*/
void transform_pivot_set_m4(float mat[4][4], const float pivot[3]);
+/**
+ * \param rot: A 3x3 rotation matrix, normalized and never negative.
+ */
void mat4_to_rot(float rot[3][3], const float wmat[4][4]);
+
+/**
+ * \param rot: A 3x3 rotation matrix, normalized and never negative.
+ * \param size: The scale, negative if `mat3` is negative.
+ */
void mat3_to_rot_size(float rot[3][3], float size[3], const float mat3[3][3]);
+/**
+ * \param rot: A 3x3 rotation matrix, normalized and never negative.
+ * \param size: The scale, negative if `mat3` is negative.
+ */
void mat4_to_loc_rot_size(float loc[3], float rot[3][3], float size[3], const float wmat[4][4]);
void mat4_to_loc_quat(float loc[3], float quat[4], const float wmat[4][4]);
void mat4_decompose(float loc[3], float quat[4], float size[3], const float wmat[4][4]);
@@ -527,7 +540,18 @@ void interp_m3_m3m3(float R[3][3], const float A[3][3], const float B[3][3], flo
*/
void interp_m4_m4m4(float R[4][4], const float A[4][4], const float B[4][4], float t);
+/**
+ * Return true when the matrix's determinant is less than zero.
+ *
+ * \note This is often used to check if a matrix flips content in 3D space,
+ * where transforming geometry (for example) would flip the direction of polygon normals
+ * from pointing outside a closed volume, to pointing inside (or the reverse).
+ *
+ * When the matrix is constructed from location, rotation & scale,
+ * it will be negative when it has an odd number of negative scales.
+ */
bool is_negative_m3(const float mat[3][3]);
+/** A version of #is_negative_m3 that takes a 4x4 matrix. */
bool is_negative_m4(const float mat[4][4]);
bool is_zero_m3(const float mat[3][3]);
@@ -604,8 +628,8 @@ void BLI_space_transform_invert_normal(const struct SpaceTransform *data, float
/** \name Other
* \{ */
-void print_m3(const char *str, const float M[3][3]);
-void print_m4(const char *str, const float M[4][4]);
+void print_m3(const char *str, const float m[3][3]);
+void print_m4(const char *str, const float m[4][4]);
#define print_m3_id(M) print_m3(STRINGIFY(M), M)
#define print_m4_id(M) print_m4(STRINGIFY(M), M)
diff --git a/source/blender/blenlib/BLI_math_rotation.h b/source/blender/blenlib/BLI_math_rotation.h
index 192ad482a69..7fb7085360b 100644
--- a/source/blender/blenlib/BLI_math_rotation.h
+++ b/source/blender/blenlib/BLI_math_rotation.h
@@ -71,7 +71,7 @@ void mul_qt_fl(float q[4], float f);
/**
* Raise a unit quaternion to the specified power.
*/
-void pow_qt_fl_normalized(float q[4], float f);
+void pow_qt_fl_normalized(float q[4], float fac);
void sub_qt_qtqt(float q[4], const float a[4], const float b[4]);
@@ -109,8 +109,8 @@ void add_qt_qtqt(float q[4], const float a[4], const float b[4], float t);
/* Conversion. */
-void quat_to_mat3(float mat[3][3], const float q[4]);
-void quat_to_mat4(float mat[4][4], const float q[4]);
+void quat_to_mat3(float m[3][3], const float q[4]);
+void quat_to_mat4(float m[4][4], const float q[4]);
/**
* Apply the rotation of \a a to \a q keeping the values compatible with \a old.
@@ -118,6 +118,11 @@ void quat_to_mat4(float mat[4][4], const float q[4]);
*/
void quat_to_compatible_quat(float q[4], const float a[4], const float old[4]);
+/**
+ * A version of #mat3_normalized_to_quat that skips error checking.
+ */
+void mat3_normalized_to_quat_fast(float q[4], const float mat[3][3]);
+
void mat3_normalized_to_quat(float q[4], const float mat[3][3]);
void mat4_normalized_to_quat(float q[4], const float mat[4][4]);
void mat3_to_quat(float q[4], const float mat[3][3]);
@@ -157,7 +162,10 @@ void rotation_between_quats_to_quat(float q[4], const float q1[4], const float q
* \param r_twist: if not NULL, receives the twist quaternion.
* \returns twist angle.
*/
-float quat_split_swing_and_twist(const float q[4], int axis, float r_swing[4], float r_twist[4]);
+float quat_split_swing_and_twist(const float q_in[4],
+ int axis,
+ float r_swing[4],
+ float r_twist[4]);
float angle_normalized_qt(const float q[4]);
float angle_normalized_qtqt(const float q1[4], const float q2[4]);
@@ -170,12 +178,32 @@ float angle_signed_qt(const float q[4]);
float angle_signed_qtqt(const float q1[4], const float q2[4]);
/**
- * TODO: don't what this is, but it's not the same as #mat3_to_quat.
+ * Legacy matrix to quaternion conversion, kept to prevent changes to existing
+ * boids & particle-system behavior. Use #mat3_to_quat for new code.
*/
-void mat3_to_quat_is_ok(float q[4], const float mat[3][3]);
+void mat3_to_quat_legacy(float q[4], const float wmat[3][3]);
/* Other. */
+/**
+ * Utility that performs `sinf` & `cosf` intended for plotting a 2D circle,
+ * where the resulting coordinate values are exactly symmetrical, although this
+ * favors even numbers, as odd numbers can only be symmetrical on a single axis.
+ *
+ * Besides adjustments to precision, this function is the equivalent of:
+ * \code {.c}
+ * float phi = (2 * M_PI) * (float)i / (float)denominator;
+ * *r_sin = sinf(phi);
+ * *r_cos = cosf(phi);
+ * \endcode
+ *
+ * \param numerator: An integer factor in [0..denominator] (inclusive).
+ * \param denominator: The fraction denominator (typically the number of segments of the circle).
+ * \param r_sin: The resulting sine.
+ * \param r_cos: The resulting cosine.
+ */
+void sin_cos_from_fraction(int numerator, int denominator, float *r_sin, float *r_cos);
+
void print_qt(const char *str, const float q[4]);
#define print_qt_id(q) print_qt(STRINGIFY(q), q)
@@ -216,16 +244,16 @@ void axis_angle_to_mat4(float R[4][4], const float axis[3], float angle);
/**
* 3x3 matrix to axis angle.
*/
-void mat3_normalized_to_axis_angle(float axis[3], float *angle, const float M[3][3]);
+void mat3_normalized_to_axis_angle(float axis[3], float *angle, const float mat[3][3]);
/**
* 4x4 matrix to axis angle.
*/
-void mat4_normalized_to_axis_angle(float axis[3], float *angle, const float M[4][4]);
-void mat3_to_axis_angle(float axis[3], float *angle, const float M[3][3]);
+void mat4_normalized_to_axis_angle(float axis[3], float *angle, const float mat[4][4]);
+void mat3_to_axis_angle(float axis[3], float *angle, const float mat[3][3]);
/**
* 4x4 matrix to axis angle.
*/
-void mat4_to_axis_angle(float axis[3], float *angle, const float M[4][4]);
+void mat4_to_axis_angle(float axis[3], float *angle, const float mat[4][4]);
/**
* Quaternions to Axis Angle.
*/
@@ -264,19 +292,19 @@ void eul_to_mat3(float mat[3][3], const float eul[3]);
void eul_to_mat4(float mat[4][4], const float eul[3]);
void mat3_normalized_to_eul(float eul[3], const float mat[3][3]);
-void mat4_normalized_to_eul(float eul[3], const float mat[4][4]);
+void mat4_normalized_to_eul(float eul[3], const float m[4][4]);
void mat3_to_eul(float eul[3], const float mat[3][3]);
void mat4_to_eul(float eul[3], const float mat[4][4]);
void quat_to_eul(float eul[3], const float quat[4]);
-void mat3_normalized_to_compatible_eul(float eul[3], const float old[3], float mat[3][3]);
-void mat3_to_compatible_eul(float eul[3], const float old[3], float mat[3][3]);
+void mat3_normalized_to_compatible_eul(float eul[3], const float oldrot[3], float mat[3][3]);
+void mat3_to_compatible_eul(float eul[3], const float oldrot[3], float mat[3][3]);
void quat_to_compatible_eul(float eul[3], const float oldrot[3], const float quat[4]);
-void rotate_eul(float eul[3], char axis, float angle);
+void rotate_eul(float beul[3], char axis, float angle);
/* Order independent. */
-void compatible_eul(float eul[3], const float old[3]);
+void compatible_eul(float eul[3], const float oldrot[3]);
void add_eul_euleul(float r_eul[3], float a[3], float b[3], short order);
void sub_eul_euleul(float r_eul[3], float a[3], float b[3], short order);
@@ -304,15 +332,15 @@ typedef enum eEulerRotationOrders {
/**
* Construct quaternion from Euler angles (in radians).
*/
-void eulO_to_quat(float quat[4], const float eul[3], short order);
+void eulO_to_quat(float q[4], const float e[3], short order);
/**
* Construct 3x3 matrix from Euler angles (in radians).
*/
-void eulO_to_mat3(float mat[3][3], const float eul[3], short order);
+void eulO_to_mat3(float M[3][3], const float e[3], short order);
/**
* Construct 4x4 matrix from Euler angles (in radians).
*/
-void eulO_to_mat4(float mat[4][4], const float eul[3], short order);
+void eulO_to_mat4(float mat[4][4], const float e[3], short order);
/**
* Euler Rotation to Axis Angle.
*/
@@ -325,17 +353,17 @@ void eulO_to_gimbal_axis(float gmat[3][3], const float eul[3], short order);
/**
* Convert 3x3 matrix to Euler angles (in radians).
*/
-void mat3_normalized_to_eulO(float eul[3], short order, const float mat[3][3]);
+void mat3_normalized_to_eulO(float eul[3], short order, const float m[3][3]);
/**
* Convert 4x4 matrix to Euler angles (in radians).
*/
-void mat4_normalized_to_eulO(float eul[3], short order, const float mat[4][4]);
-void mat3_to_eulO(float eul[3], short order, const float mat[3][3]);
-void mat4_to_eulO(float eul[3], short order, const float mat[4][4]);
+void mat4_normalized_to_eulO(float eul[3], short order, const float m[4][4]);
+void mat3_to_eulO(float eul[3], short order, const float m[3][3]);
+void mat4_to_eulO(float eul[3], short order, const float m[4][4]);
/**
* Convert quaternion to Euler angles (in radians).
*/
-void quat_to_eulO(float eul[3], short order, const float quat[4]);
+void quat_to_eulO(float e[3], short order, const float q[4]);
/**
* Axis Angle to Euler Rotation.
*/
@@ -344,18 +372,27 @@ void axis_angle_to_eulO(float eul[3], short order, const float axis[3], float an
/* Uses 2 methods to retrieve eulers, and picks the closest. */
void mat3_normalized_to_compatible_eulO(float eul[3],
- const float old[3],
+ const float oldrot[3],
short order,
const float mat[3][3]);
void mat4_normalized_to_compatible_eulO(float eul[3],
- const float old[3],
+ const float oldrot[3],
short order,
const float mat[4][4]);
-void mat3_to_compatible_eulO(float eul[3], const float old[3], short order, const float mat[3][3]);
-void mat4_to_compatible_eulO(float eul[3], const float old[3], short order, const float mat[4][4]);
-void quat_to_compatible_eulO(float eul[3], const float old[3], short order, const float quat[4]);
-
-void rotate_eulO(float eul[3], short order, char axis, float angle);
+void mat3_to_compatible_eulO(float eul[3],
+ const float oldrot[3],
+ short order,
+ const float mat[3][3]);
+void mat4_to_compatible_eulO(float eul[3],
+ const float oldrot[3],
+ short order,
+ const float mat[4][4]);
+void quat_to_compatible_eulO(float eul[3],
+ const float oldrot[3],
+ short order,
+ const float quat[4]);
+
+void rotate_eulO(float beul[3], short order, char axis, float angle);
/** \} */
@@ -364,7 +401,7 @@ void rotate_eulO(float eul[3], short order, char axis, float angle);
* \{ */
void copy_dq_dq(DualQuat *r, const DualQuat *dq);
-void normalize_dq(DualQuat *dq, float totw);
+void normalize_dq(DualQuat *dq, float totweight);
void add_weighted_dq_dq(DualQuat *dq_sum, const DualQuat *dq, float weight);
void mul_v3m3_dq(float r[3], float R[3][3], DualQuat *dq);
@@ -381,7 +418,7 @@ void vec_apply_track(float vec[3], short axis);
* Lens/angle conversion (radians).
*/
float focallength_to_fov(float focal_length, float sensor);
-float fov_to_focallength(float fov, float sensor);
+float fov_to_focallength(float hfov, float sensor);
float angle_wrap_rad(float angle);
float angle_wrap_deg(float angle);
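
An illustrative caller for the new #sin_cos_from_fraction, plotting points on a unit circle (`add_circle_vertex` is a hypothetical consumer of the coordinates):

  const int segments = 16;
  for (int i = 0; i < segments; i++) {
    float s, c;
    sin_cos_from_fraction(i, segments, &s, &c);
    add_circle_vertex(c, s);
  }
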
diff --git a/source/blender/blenlib/BLI_math_rotation.hh b/source/blender/blenlib/BLI_math_rotation.hh
index e8b746b34df..50a062162ad 100644
--- a/source/blender/blenlib/BLI_math_rotation.hh
+++ b/source/blender/blenlib/BLI_math_rotation.hh
@@ -15,4 +15,13 @@ namespace blender::math {
*/
float3 rotate_direction_around_axis(const float3 &direction, const float3 &axis, float angle);
+/**
+ * Rotate any arbitrary \a vector around the \a center position, with a unit-length \a axis
+ * and the specified \a angle.
+ */
+float3 rotate_around_axis(const float3 &vector,
+ const float3 &center,
+ const float3 &axis,
+ float angle);
+
} // namespace blender::math
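
A minimal sketch of the added #rotate_around_axis overload, assuming a unit-length axis as documented:

  using namespace blender;
  /* Rotate a point a quarter turn around the Z axis passing through (1, 1, 0). */
  const float3 rotated = math::rotate_around_axis(
      float3(2.0f, 1.0f, 0.0f), float3(1.0f, 1.0f, 0.0f), float3(0.0f, 0.0f, 1.0f), M_PI_2);
  /* rotated is approximately (1, 2, 0). */
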
diff --git a/source/blender/blenlib/BLI_math_vector.h b/source/blender/blenlib/BLI_math_vector.h
index f3283371a3c..17fe25ec67b 100644
--- a/source/blender/blenlib/BLI_math_vector.h
+++ b/source/blender/blenlib/BLI_math_vector.h
@@ -40,6 +40,10 @@ MINLINE void swap_v2_v2(float a[2], float b[2]);
MINLINE void swap_v3_v3(float a[3], float b[3]);
MINLINE void swap_v4_v4(float a[4], float b[4]);
+MINLINE void swap_v2_v2_db(double a[2], double b[2]);
+MINLINE void swap_v3_v3_db(double a[3], double b[3]);
+MINLINE void swap_v4_v4_db(double a[4], double b[4]);
+
/* unsigned char */
MINLINE void copy_v2_v2_uchar(unsigned char r[2], const unsigned char a[2]);
@@ -151,7 +155,7 @@ MINLINE void mul_v3_v3db_db(double r[3], const double a[3], double f);
MINLINE void mul_v2_v2(float r[2], const float a[2]);
MINLINE void mul_v2_v2v2(float r[2], const float a[2], const float b[2]);
MINLINE void mul_v3_v3(float r[3], const float a[3]);
-MINLINE void mul_v3_v3v3(float r[3], const float a[3], const float b[3]);
+MINLINE void mul_v3_v3v3(float r[3], const float v1[3], const float v2[3]);
MINLINE void mul_v4_fl(float r[4], float f);
MINLINE void mul_v4_v4(float r[4], const float a[4]);
MINLINE void mul_v4_v4fl(float r[4], const float a[4], float f);
@@ -267,10 +271,10 @@ MINLINE float len_squared_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT;
MINLINE float len_manhattan_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT;
MINLINE int len_manhattan_v2_int(const int v[2]) ATTR_WARN_UNUSED_RESULT;
MINLINE float len_manhattan_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT;
-MINLINE float len_v2(const float a[2]) ATTR_WARN_UNUSED_RESULT;
+MINLINE float len_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT;
MINLINE double len_v2_db(const double v[2]) ATTR_WARN_UNUSED_RESULT;
-MINLINE float len_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT;
-MINLINE double len_v2v2_db(const double a[2], const double b[2]) ATTR_WARN_UNUSED_RESULT;
+MINLINE float len_v2v2(const float v1[2], const float v2[2]) ATTR_WARN_UNUSED_RESULT;
+MINLINE double len_v2v2_db(const double v1[2], const double v2[2]) ATTR_WARN_UNUSED_RESULT;
MINLINE float len_v2v2_int(const int v1[2], const int v2[2]);
MINLINE float len_squared_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT;
MINLINE double len_squared_v2v2_db(const double a[2], const double b[2]) ATTR_WARN_UNUSED_RESULT;
@@ -284,22 +288,22 @@ MINLINE float len_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESU
MINLINE double len_v3_db(const double a[3]) ATTR_WARN_UNUSED_RESULT;
MINLINE double len_squared_v3_db(const double v[3]) ATTR_WARN_UNUSED_RESULT;
-MINLINE float normalize_v2_length(float r[2], float unit_scale);
+MINLINE float normalize_v2_length(float n[2], float unit_length);
/**
* \note any vectors containing `nan` will be zeroed out.
*/
-MINLINE float normalize_v2_v2_length(float r[2], const float a[2], float unit_scale);
-MINLINE float normalize_v3_length(float r[3], float unit_scale);
+MINLINE float normalize_v2_v2_length(float r[2], const float a[2], float unit_length);
+MINLINE float normalize_v3_length(float n[3], float unit_length);
/**
* \note any vectors containing `nan` will be zeroed out.
*/
-MINLINE float normalize_v3_v3_length(float r[3], const float a[3], float unit_scale);
-MINLINE double normalize_v3_length_db(double n[3], double unit_scale);
-MINLINE double normalize_v3_v3_length_db(double r[3], const double a[3], double unit_scale);
+MINLINE float normalize_v3_v3_length(float r[3], const float a[3], float unit_length);
+MINLINE double normalize_v3_length_db(double n[3], double unit_length);
+MINLINE double normalize_v3_v3_length_db(double r[3], const double a[3], double unit_length);
-MINLINE float normalize_v2(float r[2]);
+MINLINE float normalize_v2(float n[2]);
MINLINE float normalize_v2_v2(float r[2], const float a[2]);
-MINLINE float normalize_v3(float r[3]);
+MINLINE float normalize_v3(float n[3]);
MINLINE float normalize_v3_v3(float r[3], const float a[3]);
MINLINE double normalize_v3_v3_db(double r[3], const double a[3]);
MINLINE double normalize_v3_db(double n[3]);
@@ -420,45 +424,51 @@ void flip_v2_v2v2(float v[2], const float v1[2], const float v2[2]);
/** \name Comparison
* \{ */
-MINLINE bool is_zero_v2(const float a[2]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool is_zero_v3(const float a[3]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool is_zero_v4(const float a[4]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool is_zero_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool is_zero_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool is_zero_v4(const float v[4]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool is_zero_v2_db(const double a[2]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool is_zero_v3_db(const double a[3]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool is_zero_v4_db(const double a[4]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool is_zero_v2_db(const double v[2]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool is_zero_v3_db(const double v[3]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool is_zero_v4_db(const double v[4]) ATTR_WARN_UNUSED_RESULT;
-bool is_finite_v2(const float a[2]) ATTR_WARN_UNUSED_RESULT;
-bool is_finite_v3(const float a[3]) ATTR_WARN_UNUSED_RESULT;
-bool is_finite_v4(const float a[4]) ATTR_WARN_UNUSED_RESULT;
+bool is_finite_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT;
+bool is_finite_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT;
+bool is_finite_v4(const float v[4]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool is_one_v3(const float a[3]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool is_one_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool equals_v2v2(const float v1[2], const float v2[2]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool equals_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool equals_v4v4(const float a[4], const float b[4]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool equals_v3v3(const float v1[3], const float v2[3]) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool equals_v4v4(const float v1[4], const float v2[4]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool equals_v2v2_int(const int v1[2], const int v2[2]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool equals_v3v3_int(const int v1[3], const int v2[3]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool equals_v4v4_int(const int v1[4], const int v2[4]) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool compare_v2v2(const float a[2], const float b[2], float limit) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool compare_v3v3(const float a[3], const float b[3], float limit) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool compare_v4v4(const float a[4], const float b[4], float limit) ATTR_WARN_UNUSED_RESULT;
-
-MINLINE bool compare_v2v2_relative(const float a[2], const float b[2], float limit, int max_ulps)
+MINLINE bool compare_v2v2(const float v1[2],
+ const float v2[2],
+ float limit) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool compare_v3v3(const float v1[3],
+ const float v2[3],
+ float limit) ATTR_WARN_UNUSED_RESULT;
+MINLINE bool compare_v4v4(const float v1[4],
+ const float v2[4],
+ float limit) ATTR_WARN_UNUSED_RESULT;
+
+MINLINE bool compare_v2v2_relative(const float v1[2], const float v2[2], float limit, int max_ulps)
ATTR_WARN_UNUSED_RESULT;
-MINLINE bool compare_v3v3_relative(const float a[3], const float b[3], float limit, int max_ulps)
+MINLINE bool compare_v3v3_relative(const float v1[3], const float v2[3], float limit, int max_ulps)
ATTR_WARN_UNUSED_RESULT;
-MINLINE bool compare_v4v4_relative(const float a[4], const float b[4], float limit, int max_ulps)
+MINLINE bool compare_v4v4_relative(const float v1[4], const float v2[4], float limit, int max_ulps)
ATTR_WARN_UNUSED_RESULT;
-MINLINE bool compare_len_v3v3(const float a[3],
- const float b[3],
+MINLINE bool compare_len_v3v3(const float v1[3],
+ const float v2[3],
float limit) ATTR_WARN_UNUSED_RESULT;
-MINLINE bool compare_size_v3v3(const float a[3],
- const float b[3],
+MINLINE bool compare_size_v3v3(const float v1[3],
+ const float v2[3],
float limit) ATTR_WARN_UNUSED_RESULT;
/**
@@ -602,8 +612,8 @@ void project_v3_plane(float out[3], const float plane_no[3], const float plane_c
* out: result (negate for a 'bounce').
* </pre>
*/
-void reflect_v3_v3v3(float out[3], const float vec[3], const float normal[3]);
-void reflect_v3_v3v3_db(double out[3], const double vec[3], const double normal[3]);
+void reflect_v3_v3v3(float out[3], const float v[3], const float normal[3]);
+void reflect_v3_v3v3_db(double out[3], const double v[3], const double normal[3]);
/**
* Takes a vector and computes 2 orthogonal directions.
*
@@ -651,10 +661,10 @@ void print_vn(const char *str, const float v[], int n);
#define print_v4_id(v) print_v4(STRINGIFY(v), v)
#define print_vn_id(v, n) print_vn(STRINGIFY(v), v, n)
-MINLINE void normal_float_to_short_v2(short r[2], const float n[2]);
-MINLINE void normal_short_to_float_v3(float r[3], const short n[3]);
-MINLINE void normal_float_to_short_v3(short r[3], const float n[3]);
-MINLINE void normal_float_to_short_v4(short r[4], const float n[4]);
+MINLINE void normal_float_to_short_v2(short out[2], const float in[2]);
+MINLINE void normal_short_to_float_v3(float out[3], const short in[3]);
+MINLINE void normal_float_to_short_v3(short out[3], const float in[3]);
+MINLINE void normal_float_to_short_v4(short out[4], const float in[4]);
void minmax_v4v4_v4(float min[4], float max[4], const float vec[4]);
void minmax_v3v3_v3(float min[3], float max[3], const float vec[3]);
diff --git a/source/blender/blenlib/BLI_memory_utils.hh b/source/blender/blenlib/BLI_memory_utils.hh
index 940542c9f1d..c2ad3ea761a 100644
--- a/source/blender/blenlib/BLI_memory_utils.hh
+++ b/source/blender/blenlib/BLI_memory_utils.hh
@@ -19,6 +19,21 @@
namespace blender {
/**
+ * Under some circumstances #std::is_trivial_v<T> is false even though we know that the type is
+ * actually trivial. Using that extra knowledge allows for some optimizations.
+ */
+template<typename T> inline constexpr bool is_trivial_extended_v = std::is_trivial_v<T>;
+template<typename T>
+inline constexpr bool is_trivially_destructible_extended_v = is_trivial_extended_v<T> ||
+ std::is_trivially_destructible_v<T>;
+template<typename T>
+inline constexpr bool is_trivially_copy_constructible_extended_v =
+ is_trivial_extended_v<T> || std::is_trivially_copy_constructible_v<T>;
+template<typename T>
+inline constexpr bool is_trivially_move_constructible_extended_v =
+ is_trivial_extended_v<T> || std::is_trivially_move_constructible_v<T>;
+
+/**
* Call the destructor on n consecutive values. For trivially destructible types, this does
* nothing.
*
@@ -38,7 +53,7 @@ template<typename T> void destruct_n(T *ptr, int64_t n)
/* This is not strictly necessary, because the loop below will be optimized away anyway. It is
 * nice to make this behavior explicit, though. */
- if (std::is_trivially_destructible_v<T>) {
+ if (is_trivially_destructible_extended_v<T>) {
return;
}
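
A sketch of how a type can opt in to the extended trait (mirroring the specialization this diff adds for VArrayImpl_For_Span_final; `MyValue` is hypothetical):

  /* The member initializer makes std::is_trivial_v<MyValue> false, even though the type
   * is trivially copyable and destructible; opting in lets containers treat it as trivial. */
  struct MyValue {
    int a = 0;
  };

  namespace blender {
  template<> inline constexpr bool is_trivial_extended_v<MyValue> = true;
  }  // namespace blender
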
diff --git a/source/blender/blenlib/BLI_multi_value_map.hh b/source/blender/blenlib/BLI_multi_value_map.hh
index 4b6300c09aa..1fc5a797574 100644
--- a/source/blender/blenlib/BLI_multi_value_map.hh
+++ b/source/blender/blenlib/BLI_multi_value_map.hh
@@ -137,6 +137,11 @@ template<typename Key, typename Value> class MultiValueMap {
{
return map_.values();
}
+
+ void clear()
+ {
+ map_.clear();
+ }
};
} // namespace blender
diff --git a/source/blender/blenlib/BLI_pool.hh b/source/blender/blenlib/BLI_pool.hh
new file mode 100644
index 00000000000..8745c019db5
--- /dev/null
+++ b/source/blender/blenlib/BLI_pool.hh
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/** \file
+ * \ingroup bli
+ *
+ * A `blender::Pool` allows fast allocation and deallocation of many elements of the same type.
+ *
+ * It is compatible with types that are not movable.
+ *
+ * Freed elements' memory will be reused by subsequent allocations.
+ * Elements are allocated in chunks to reduce memory fragmentation and avoid reallocation.
+ */
+
+#pragma once
+
+#include "BLI_stack.hh"
+#include "BLI_utility_mixins.hh"
+#include "BLI_vector.hh"
+
+namespace blender {
+
+template<typename T, int64_t ChunkLen = 64> class Pool : NonCopyable {
+ private:
+ using Chunk = TypedBuffer<T, ChunkLen>;
+
+ /** Allocated item buffer. */
+ Vector<std::unique_ptr<Chunk>> values_;
+ /** List of freed elements to be reused for the next allocations. A Stack is best here to avoid
+ * overhead when growing the free list. It also offers better cache performance than a queue
+ * since last added entries will be reused first. */
+ Stack<T *, 0> free_list_;
+
+ public:
+ ~Pool()
+ {
+ /* All elements need to be freed before freeing the pool. */
+ BLI_assert(this->size() == 0);
+ }
+
+ /**
+ * Construct an object inside this pool's memory.
+ */
+ template<typename... ForwardT> T &construct(ForwardT &&...value)
+ {
+ if (free_list_.is_empty()) {
+ values_.append(std::make_unique<Chunk>());
+ T *chunk_start = values_.last()->ptr();
+ for (auto i : IndexRange(ChunkLen)) {
+ free_list_.push(chunk_start + i);
+ }
+ }
+ T *ptr = free_list_.pop();
+ new (ptr) T(std::forward<ForwardT>(value)...);
+ return *ptr;
+ }
+
+ /**
+ * Destroy the given element inside this memory pool. Memory will be reused by the next
+ * element construction. This invokes undefined behavior if the item is not from this pool.
+ */
+ void destruct(T &value)
+ {
+ value.~T();
+ free_list_.push(&value);
+ }
+
+ /**
+ * Return the number of constructed elements in this pool.
+ */
+ int64_t size() const
+ {
+ return values_.size() * ChunkLen - free_list_.size();
+ }
+
+ /**
+ * Returns true when the pool contains no elements, otherwise false.
+ */
+ bool is_empty() const
+ {
+ return this->size() == 0;
+ }
+};
+
+} // namespace blender
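
A usage sketch for the new Pool (the `Node` type is hypothetical and only illustrates support for non-movable elements; requires <mutex>):

  using namespace blender;

  struct Node {
    std::mutex mutex; /* Not movable; Pool does not require movability. */
    int value;
    Node(int v) : value(v) {}
  };

  Pool<Node> pool;
  Node &a = pool.construct(1);
  Node &b = pool.construct(2);
  pool.destruct(a); /* This slot is reused by the next construct(). */
  Node &c = pool.construct(3);
  pool.destruct(b);
  pool.destruct(c); /* All elements must be destructed before the pool itself. */
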
diff --git a/source/blender/blenlib/BLI_scanfill.h b/source/blender/blenlib/BLI_scanfill.h
index 04ac7cb05e4..b5b100ac27d 100644
--- a/source/blender/blenlib/BLI_scanfill.h
+++ b/source/blender/blenlib/BLI_scanfill.h
@@ -82,7 +82,7 @@ struct ScanFillEdge *BLI_scanfill_edge_add(ScanFillContext *sf_ctx,
struct ScanFillVert *v2);
enum {
- /* NOTE(campbell): using BLI_SCANFILL_CALC_REMOVE_DOUBLES
+ /* NOTE(@campbellbarton): using #BLI_SCANFILL_CALC_REMOVE_DOUBLES
* Assumes ordered edges, otherwise we risk an eternal loop
* removing double verts. */
BLI_SCANFILL_CALC_REMOVE_DOUBLES = (1 << 1),
diff --git a/source/blender/blenlib/BLI_serialize.hh b/source/blender/blenlib/BLI_serialize.hh
index bd91c522d06..e23d7d20d0b 100644
--- a/source/blender/blenlib/BLI_serialize.hh
+++ b/source/blender/blenlib/BLI_serialize.hh
@@ -55,7 +55,6 @@
*
* To add a new formatter a new sub-class of `Formatter` must be created and the
* `serialize`/`deserialize` methods should be implemented.
- *
*/
#include <ostream>
@@ -110,7 +109,6 @@ using ArrayValue = ContainerValue<Vector<std::shared_ptr<Value>>, eValueType::Ar
* - `DoubleValue`: contains a double precision floating point number.
* - `DictionaryValue`: represents an object (key value pairs where keys are strings and values can
 *   be of different types).
- *
*/
class Value {
private:
diff --git a/source/blender/blenlib/BLI_set.hh b/source/blender/blenlib/BLI_set.hh
index 62de4b79e41..a1b6ad9754e 100644
--- a/source/blender/blenlib/BLI_set.hh
+++ b/source/blender/blenlib/BLI_set.hh
@@ -427,10 +427,10 @@ class Set {
return *this;
}
- Iterator operator++(int) const
+ Iterator operator++(int)
{
Iterator copied_iterator = *this;
- ++copied_iterator;
+ ++(*this);
return copied_iterator;
}
diff --git a/source/blender/blenlib/BLI_string_utf8.h b/source/blender/blenlib/BLI_string_utf8.h
index 4c5cc3fd9c5..61a21fd8bbf 100644
--- a/source/blender/blenlib/BLI_string_utf8.h
+++ b/source/blender/blenlib/BLI_string_utf8.h
@@ -157,8 +157,8 @@ size_t BLI_strnlen_utf8(const char *strc, size_t maxlen) ATTR_NONNULL(1) ATTR_WA
size_t BLI_strncpy_wchar_as_utf8(char *__restrict dst,
const wchar_t *__restrict src,
size_t maxncpy) ATTR_NONNULL(1, 2);
-size_t BLI_strncpy_wchar_from_utf8(wchar_t *__restrict dst,
- const char *__restrict src,
+size_t BLI_strncpy_wchar_from_utf8(wchar_t *__restrict dst_w,
+ const char *__restrict src_c,
size_t maxncpy) ATTR_NONNULL(1, 2);
/**
@@ -174,17 +174,17 @@ int BLI_str_utf8_char_width_safe(const char *p) ATTR_WARN_UNUSED_RESULT ATTR_NON
size_t BLI_str_partition_utf8(const char *str,
const unsigned int delim[],
- const char **sep,
- const char **suf) ATTR_NONNULL(1, 2, 3, 4);
+ const char **r_sep,
+ const char **r_suf) ATTR_NONNULL(1, 2, 3, 4);
size_t BLI_str_rpartition_utf8(const char *str,
const unsigned int delim[],
- const char **sep,
- const char **suf) ATTR_NONNULL(1, 2, 3, 4);
+ const char **r_sep,
+ const char **r_suf) ATTR_NONNULL(1, 2, 3, 4);
size_t BLI_str_partition_ex_utf8(const char *str,
const char *end,
const unsigned int delim[],
- const char **sep,
- const char **suf,
+ const char **r_sep,
+ const char **r_suf,
bool from_right) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1, 3, 4, 5);
int BLI_str_utf8_offset_to_index(const char *str, int offset) ATTR_WARN_UNUSED_RESULT
diff --git a/source/blender/blenlib/BLI_task.hh b/source/blender/blenlib/BLI_task.hh
index 904dea66f7a..33a781d3749 100644
--- a/source/blender/blenlib/BLI_task.hh
+++ b/source/blender/blenlib/BLI_task.hh
@@ -105,6 +105,22 @@ template<typename... Functions> void parallel_invoke(Functions &&...functions)
#endif
}
+/**
+ * Same as #parallel_invoke, but allows disabling threading dynamically. This is useful because when
+ * the individual functions do very little work, there is a lot of overhead from starting parallel
+ * tasks.
+ */
+template<typename... Functions>
+void parallel_invoke(const bool use_threading, Functions &&...functions)
+{
+ if (use_threading) {
+ parallel_invoke(std::forward<Functions>(functions)...);
+ }
+ else {
+ (functions(), ...);
+ }
+}
+
/** See #BLI_task_isolate for a description of what isolating a task means. */
template<typename Function> void isolate_task(const Function &function)
{
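
A hedged sketch of the new threading toggle (the data and the two compute functions are hypothetical):

  const bool use_threading = positions.size() > 1024;
  blender::threading::parallel_invoke(
      use_threading,
      [&]() { compute_normals(positions, normals); },
      [&]() { compute_lengths(positions, lengths); });
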
diff --git a/source/blender/blenlib/BLI_vector.hh b/source/blender/blenlib/BLI_vector.hh
index c23d846d277..1f5f97d754d 100644
--- a/source/blender/blenlib/BLI_vector.hh
+++ b/source/blender/blenlib/BLI_vector.hh
@@ -451,8 +451,16 @@ class Vector {
*/
int64_t append_and_get_index(const T &value)
{
+ return this->append_and_get_index_as(value);
+ }
+ int64_t append_and_get_index(T &&value)
+ {
+ return this->append_and_get_index_as(std::move(value));
+ }
+ template<typename... ForwardValue> int64_t append_and_get_index_as(ForwardValue &&...value)
+ {
const int64_t index = this->size();
- this->append(value);
+ this->append_as(std::forward<ForwardValue>(value)...);
return index;
}
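
A small sketch of the copy path and the new move overload (illustrative only):

  blender::Vector<std::string> names;
  const std::string name = "first";
  const int64_t i0 = names.append_and_get_index(name);             /* Copies. */
  std::string owned = "second";
  const int64_t i1 = names.append_and_get_index(std::move(owned)); /* Moves via the new overload. */
  /* i0 == 0, i1 == 1. */
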
diff --git a/source/blender/blenlib/BLI_vector_set.hh b/source/blender/blenlib/BLI_vector_set.hh
index b0a3696f245..1a42e776d3d 100644
--- a/source/blender/blenlib/BLI_vector_set.hh
+++ b/source/blender/blenlib/BLI_vector_set.hh
@@ -358,7 +358,7 @@ class VectorSet {
}
/**
- * Return the location of the key in the vector. It is assumed, that the key is in the vector
+ * Return the location of the key in the vector. It is assumed that the key is in the vector
* set. If this is not necessarily the case, use `index_of_try`.
*/
int64_t index_of(const Key &key) const
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh
index 8f228ea188e..19ee2334bd9 100644
--- a/source/blender/blenlib/BLI_virtual_array.hh
+++ b/source/blender/blenlib/BLI_virtual_array.hh
@@ -23,6 +23,8 @@
 * see if the increased compile time and binary size are worth it.
*/
+#include <optional>
+
#include "BLI_any.hh"
#include "BLI_array.hh"
#include "BLI_index_mask.hh"
@@ -35,6 +37,36 @@ class GVArray;
class GVMutableArray;
/**
+ * Is used to quickly check if a varray is a span or single value. This struct also allows
+ * retrieving multiple pieces of data with a single virtual method call.
+ */
+struct CommonVArrayInfo {
+ enum class Type : uint8_t {
+ /* Is not one of the common special types below. */
+ Any,
+ Span,
+ Single,
+ };
+
+ Type type = Type::Any;
+
+ /** True when the #data becomes a dangling pointer when the virtual array is destructed. */
+ bool may_have_ownership = true;
+
+ /**
+ * Points either to nothing, a single value or array of values, depending on #type.
+ * If this is a span of a mutable virtual array, it is safe to cast away const.
+ */
+ const void *data;
+
+ CommonVArrayInfo() = default;
+ CommonVArrayInfo(const Type _type, const bool _may_have_ownership, const void *_data)
+ : type(_type), may_have_ownership(_may_have_ownership), data(_data)
+ {
+ }
+};
+
+/**
* Implements the specifics of how the elements of a virtual array are accessed. It contains a
* bunch of virtual methods that are wrapped by #VArray.
*/
@@ -65,65 +97,18 @@ template<typename T> class VArrayImpl {
*/
virtual T get(int64_t index) const = 0;
- /**
- * Return true when the virtual array is a plain array internally.
- */
- virtual bool is_span() const
- {
- return false;
- }
-
- /**
- * Return the span of the virtual array.
- * This invokes undefined behavior when #is_span returned false.
- */
- virtual Span<T> get_internal_span() const
+ virtual CommonVArrayInfo common_info() const
{
- /* Provide a default implementation, so that subclasses don't have to provide it. This method
- * should never be called because #is_span returns false by default. */
- BLI_assert_unreachable();
return {};
}
/**
- * Return true when the virtual array has the same value at every index.
- */
- virtual bool is_single() const
- {
- return false;
- }
-
- /**
- * Return the value that is used at every index.
- * This invokes undefined behavior when #is_single returned false.
- */
- virtual T get_internal_single() const
- {
- /* Provide a default implementation, so that subclasses don't have to provide it. This method
- * should never be called because #is_single returns false by default. */
- BLI_assert_unreachable();
- return T();
- }
-
- /**
* Copy values from the virtual array into the provided span. The index of the value in the
* virtual array is the same as the index in the span.
*/
virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
{
- T *dst = r_span.data();
- /* Optimize for a few different common cases. */
- if (this->is_span()) {
- const T *src = this->get_internal_span().data();
- mask.foreach_index([&](const int64_t i) { dst[i] = src[i]; });
- }
- else if (this->is_single()) {
- const T single = this->get_internal_single();
- mask.foreach_index([&](const int64_t i) { dst[i] = single; });
- }
- else {
- mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
- }
+ mask.foreach_index([&](const int64_t i) { r_span[i] = this->get(i); });
}
/**
@@ -132,18 +117,7 @@ template<typename T> class VArrayImpl {
virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
{
T *dst = r_span.data();
- /* Optimize for a few different common cases. */
- if (this->is_span()) {
- const T *src = this->get_internal_span().data();
- mask.foreach_index([&](const int64_t i) { new (dst + i) T(src[i]); });
- }
- else if (this->is_single()) {
- const T single = this->get_internal_single();
- mask.foreach_index([&](const int64_t i) { new (dst + i) T(single); });
- }
- else {
- mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
- }
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
/**
@@ -187,17 +161,6 @@ template<typename T> class VArrayImpl {
}
/**
- * Return true when this virtual array may own any of the memory it references. This can be used
- * for optimization purposes when converting or copying the virtual array.
- */
- virtual bool may_have_ownership() const
- {
- /* Use true by default to be on the safe side. Subclasses that know for sure that they don't
- * own anything can overwrite this with false. */
- return true;
- }
-
- /**
* Return true when the other virtual array should be considered to be the same, e.g. because it
* shares the same underlying memory.
*/
@@ -222,10 +185,10 @@ template<typename T> class VMutableArrayImpl : public VArrayImpl<T> {
*/
virtual void set_all(Span<T> src)
{
- if (this->is_span()) {
- const Span<T> const_span = this->get_internal_span();
- const MutableSpan<T> span{(T *)const_span.data(), const_span.size()};
- initialized_copy_n(src.data(), this->size_, span.data());
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ initialized_copy_n(
+ src.data(), this->size_, const_cast<T *>(static_cast<const T *>(info.data)));
}
else {
const int64_t size = this->size_;
@@ -273,14 +236,9 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
data_[index] = value;
}
- bool is_span() const override
+ CommonVArrayInfo common_info() const override
{
- return true;
- }
-
- Span<T> get_internal_span() const override
- {
- return Span<T>(data_, this->size_);
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Span, true, data_);
}
bool is_same(const VArrayImpl<T> &other) const final
@@ -288,15 +246,27 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
if (other.size() != this->size_) {
return false;
}
- if (!other.is_span()) {
+ const CommonVArrayInfo other_info = other.common_info();
+ if (other_info.type != CommonVArrayInfo::Type::Span) {
return false;
}
- const Span<T> other_span = other.get_internal_span();
- return data_ == other_span.data();
+ return data_ == static_cast<const T *>(other_info.data);
+ }
+
+ void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ mask.foreach_index([&](const int64_t i) { r_span[i] = data_[i]; });
+ }
+
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ T *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(data_[i]); });
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
+ BLI_assert(mask.size() == r_span.size());
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
r_span[i] = data_[best_mask[i]];
@@ -307,6 +277,7 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
+ BLI_assert(mask.size() == r_span.size());
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
@@ -324,13 +295,22 @@ template<typename T> class VArrayImpl_For_Span_final final : public VArrayImpl_F
public:
using VArrayImpl_For_Span<T>::VArrayImpl_For_Span;
+ VArrayImpl_For_Span_final(const Span<T> data)
+ /* Cast const away, because the implementation for const and non-const spans is shared. */
+ : VArrayImpl_For_Span<T>({const_cast<T *>(data.data()), data.size()})
+ {
+ }
+
private:
- bool may_have_ownership() const override
+ CommonVArrayInfo common_info() const final
{
- return false;
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Span, false, this->data_);
}
};
+template<typename T>
+inline constexpr bool is_trivial_extended_v<VArrayImpl_For_Span_final<T>> = true;
+
/**
* A variant of `VArrayImpl_For_Span` that owns the underlying data.
* The `Container` type has to implement a `size()` and `data()` method.
@@ -371,24 +351,20 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
return value_;
}
- bool is_span() const override
- {
- return this->size_ == 1;
- }
-
- Span<T> get_internal_span() const override
+ CommonVArrayInfo common_info() const override
{
- return Span<T>(&value_, 1);
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_);
}
- bool is_single() const override
+ void materialize(IndexMask mask, MutableSpan<T> r_span) const override
{
- return true;
+ r_span.fill_indices(mask, value_);
}
- T get_internal_single() const override
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
{
- return value_;
+ T *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); });
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
@@ -406,6 +382,9 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
}
};
+template<typename T>
+inline constexpr bool is_trivial_extended_v<VArrayImpl_For_Single<T>> = is_trivial_extended_v<T>;
+
/**
* This class makes it easy to create a virtual array for an existing function or lambda. The
* `GetFunc` should take a single `index` argument and return the value at that index.
@@ -531,11 +510,6 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
});
}
- bool may_have_ownership() const override
- {
- return false;
- }
-
bool is_same(const VArrayImpl<ElemT> &other) const override
{
if (other.size() != this->size_) {
@@ -554,6 +528,13 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
}
};
+template<typename StructT,
+ typename ElemT,
+ ElemT (*GetFunc)(const StructT &),
+ void (*SetFunc)(StructT &, ElemT)>
+inline constexpr bool
+ is_trivial_extended_v<VArrayImpl_For_DerivedSpan<StructT, ElemT, GetFunc, SetFunc>> = true;
+
namespace detail {
/**
@@ -768,11 +749,18 @@ template<typename T> class VArrayCommon {
return IndexRange(this->size());
}
+ CommonVArrayInfo common_info() const
+ {
+ BLI_assert(*this);
+ return impl_->common_info();
+ }
+
/** Return true when the virtual array is stored as a span internally. */
bool is_span() const
{
BLI_assert(*this);
- return impl_->is_span();
+ const CommonVArrayInfo info = impl_->common_info();
+ return info.type == CommonVArrayInfo::Type::Span;
}
/**
@@ -782,14 +770,16 @@ template<typename T> class VArrayCommon {
Span<T> get_internal_span() const
{
BLI_assert(this->is_span());
- return impl_->get_internal_span();
+ const CommonVArrayInfo info = impl_->common_info();
+ return Span<T>(static_cast<const T *>(info.data), this->size());
}
/** Return true when the virtual array returns the same value for every index. */
bool is_single() const
{
BLI_assert(*this);
- return impl_->is_single();
+ const CommonVArrayInfo info = impl_->common_info();
+ return info.type == CommonVArrayInfo::Type::Single;
}
/**
@@ -799,7 +789,20 @@ template<typename T> class VArrayCommon {
T get_internal_single() const
{
BLI_assert(this->is_single());
- return impl_->get_internal_single();
+ const CommonVArrayInfo info = impl_->common_info();
+ return *static_cast<const T *>(info.data);
+ }
+
+ /**
+ * Return the value that is returned for every index, if the array is stored as a single value.
+ */
+ std::optional<T> get_if_single() const
+ {
+ const CommonVArrayInfo info = impl_->common_info();
+ if (info.type != CommonVArrayInfo::Type::Single) {
+ return std::nullopt;
+ }
+ return *static_cast<const T *>(info.data);
}
/**
@@ -861,12 +864,6 @@ template<typename T> class VArrayCommon {
{
return impl_->try_assign_GVArray(varray);
}
-
- /** See #GVArrayImpl::may_have_ownership. */
- bool may_have_ownership() const
- {
- return impl_->may_have_ownership();
- }
};
template<typename T> class VMutableArray;
@@ -910,10 +907,7 @@ template<typename T> class VArray : public VArrayCommon<T> {
VArray(varray_tag::span /* tag */, Span<T> span)
{
- /* Cast const away, because the virtual array implementation for const and non const spans is
- * shared. */
- MutableSpan<T> mutable_span{const_cast<T *>(span.data()), span.size()};
- this->template emplace<VArrayImpl_For_Span_final<T>>(mutable_span);
+ this->template emplace<VArrayImpl_For_Span_final<T>>(span);
}
VArray(varray_tag::single /* tag */, T value, const int64_t size)
@@ -1076,8 +1070,8 @@ template<typename T> class VMutableArray : public VArrayCommon<T> {
MutableSpan<T> get_internal_span() const
{
BLI_assert(this->is_span());
- const Span<T> span = this->impl_->get_internal_span();
- return MutableSpan<T>(const_cast<T *>(span.data()), span.size());
+ const CommonVArrayInfo info = this->get_impl()->common_info();
+ return MutableSpan<T>(const_cast<T *>(static_cast<const T *>(info.data)), this->size());
}
/**
@@ -1132,19 +1126,23 @@ template<typename T> static constexpr bool is_VMutableArray_v<VMutableArray<T>>
* from faster access.
* - An API is called, that does not accept virtual arrays, but only spans.
*/
-template<typename T> class VArray_Span final : public Span<T> {
+template<typename T> class VArraySpan final : public Span<T> {
private:
VArray<T> varray_;
Array<T> owned_data_;
public:
- VArray_Span() = default;
+ VArraySpan() = default;
- VArray_Span(VArray<T> varray) : Span<T>(), varray_(std::move(varray))
+ VArraySpan(VArray<T> varray) : Span<T>(), varray_(std::move(varray))
{
+ if (!varray_) {
+ return;
+ }
this->size_ = varray_.size();
- if (varray_.is_span()) {
- this->data_ = varray_.get_internal_span().data();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ this->data_ = static_cast<const T *>(info.data);
}
else {
owned_data_.~Array();
@@ -1154,12 +1152,16 @@ template<typename T> class VArray_Span final : public Span<T> {
}
}
- VArray_Span(VArray_Span &&other)
+ VArraySpan(VArraySpan &&other)
: varray_(std::move(other.varray_)), owned_data_(std::move(other.owned_data_))
{
+ if (!varray_) {
+ return;
+ }
this->size_ = varray_.size();
- if (varray_.is_span()) {
- this->data_ = varray_.get_internal_span().data();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ this->data_ = static_cast<const T *>(info.data);
}
else {
this->data_ = owned_data_.data();
@@ -1168,25 +1170,25 @@ template<typename T> class VArray_Span final : public Span<T> {
other.size_ = 0;
}
- VArray_Span &operator=(VArray_Span &&other)
+ VArraySpan &operator=(VArraySpan &&other)
{
if (this == &other) {
return *this;
}
std::destroy_at(this);
- new (this) VArray_Span(std::move(other));
+ new (this) VArraySpan(std::move(other));
return *this;
}
};
/**
- * Same as #VArray_Span, but for a mutable span.
+ * Same as #VArraySpan, but for a mutable span.
* The important thing to note is that when changing this span, the results might not be
* immediately reflected in the underlying virtual array (only when the virtual array is a span
* internally). The #save method can be used to write all changes to the underlying virtual array,
* if necessary.
*/
-template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
+template<typename T> class MutableVArraySpan final : public MutableSpan<T> {
private:
VMutableArray<T> varray_;
Array<T> owned_data_;
@@ -1194,14 +1196,21 @@ template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
bool show_not_saved_warning_ = true;
public:
+ MutableVArraySpan() = default;
+
/* Create a span for any virtual array. This is cheap when the virtual array is a span itself. If
* not, a new array has to be allocated as a wrapper for the underlying virtual array. */
- VMutableArray_Span(VMutableArray<T> varray, const bool copy_values_to_span = true)
+ MutableVArraySpan(VMutableArray<T> varray, const bool copy_values_to_span = true)
: MutableSpan<T>(), varray_(std::move(varray))
{
+ if (!varray_) {
+ return;
+ }
+
this->size_ = varray_.size();
- if (varray_.is_span()) {
- this->data_ = varray_.get_internal_span().data();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ this->data_ = const_cast<T *>(static_cast<const T *>(info.data));
}
else {
if (copy_values_to_span) {
@@ -1216,15 +1225,53 @@ template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
}
}
- ~VMutableArray_Span()
+ MutableVArraySpan(MutableVArraySpan &&other)
+ : varray_(std::move(other.varray_)),
+ owned_data_(std::move(other.owned_data_)),
+ show_not_saved_warning_(other.show_not_saved_warning_)
{
- if (show_not_saved_warning_) {
- if (!save_has_been_called_) {
- std::cout << "Warning: Call `save()` to make sure that changes persist in all cases.\n";
+ if (!varray_) {
+ return;
+ }
+
+ this->size_ = varray_.size();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ this->data_ = static_cast<T *>(const_cast<void *>(info.data));
+ }
+ else {
+ this->data_ = owned_data_.data();
+ }
+ other.data_ = nullptr;
+ other.size_ = 0;
+ }
+
+ ~MutableVArraySpan()
+ {
+ if (varray_) {
+ if (show_not_saved_warning_) {
+ if (!save_has_been_called_) {
+ std::cout << "Warning: Call `save()` to make sure that changes persist in all cases.\n";
+ }
}
}
}
+ MutableVArraySpan &operator=(MutableVArraySpan &&other)
+ {
+ if (this == &other) {
+ return *this;
+ }
+ std::destroy_at(this);
+ new (this) MutableVArraySpan(std::move(other));
+ return *this;
+ }
+
+ const VMutableArray<T> &varray() const
+ {
+ return varray_;
+ }
+
  /* Write back all values from a temporarily allocated array to the underlying virtual array. */
void save()
{
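
The new `common_info()` entry point replaces the separate `is_span()` / `is_single()` / `may_have_ownership()` virtuals with a single query. Below is a minimal sketch (not part of the patch) of how a caller can branch on the returned info; the function name and the summing logic are illustrative only:

#include "BLI_virtual_array.hh"

using namespace blender;

static float sum_values(const VArray<float> &values)
{
  const CommonVArrayInfo info = values.common_info();
  if (info.type == CommonVArrayInfo::Type::Single) {
    /* Every index returns the same value, so no iteration is needed. */
    return *static_cast<const float *>(info.data) * float(values.size());
  }
  if (info.type == CommonVArrayInfo::Type::Span) {
    /* Contiguous storage: iterate over the raw array directly. */
    const Span<float> span(static_cast<const float *>(info.data), values.size());
    float sum = 0.0f;
    for (const float value : span) {
      sum += value;
    }
    return sum;
  }
  /* Fall back to virtual element access. */
  float sum = 0.0f;
  for (const int64_t i : values.index_range()) {
    sum += values[i];
  }
  return sum;
}
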
diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt
index 109230ebfa7..d87c60e6099 100644
--- a/source/blender/blenlib/CMakeLists.txt
+++ b/source/blender/blenlib/CMakeLists.txt
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright 2006 Blender Foundation. All rights reserved.
+if(HAVE_EXECINFO_H)
+ add_definitions(-DHAVE_EXECINFO_H)
+endif()
+
set(INC
.
# ../blenkernel # don't add this back!
@@ -166,6 +170,7 @@ set(SRC
BLI_astar.h
BLI_bitmap.h
BLI_bitmap_draw_2d.h
+ BLI_bit_vector.hh
BLI_blenlib.h
BLI_bounds.hh
BLI_boxpack_2d.h
@@ -279,6 +284,7 @@ set(SRC
BLI_path_util.h
BLI_polyfill_2d.h
BLI_polyfill_2d_beautify.h
+ BLI_pool.hh
BLI_probing_strategies.hh
BLI_quadric.h
BLI_rand.h
@@ -424,6 +430,8 @@ if(WITH_GTESTS)
tests/BLI_array_store_test.cc
tests/BLI_array_test.cc
tests/BLI_array_utils_test.cc
+ tests/BLI_bit_vector_test.cc
+ tests/BLI_bitmap_test.cc
tests/BLI_bounds_test.cc
tests/BLI_color_test.cc
tests/BLI_cpp_type_test.cc
@@ -445,6 +453,7 @@ if(WITH_GTESTS)
tests/BLI_index_range_test.cc
tests/BLI_inplace_priority_queue_test.cc
tests/BLI_kdopbvh_test.cc
+ tests/BLI_kdtree_test.cc
tests/BLI_length_parameterize_test.cc
tests/BLI_linear_allocator_test.cc
tests/BLI_linklist_lockfree_test.cc
@@ -468,6 +477,7 @@ if(WITH_GTESTS)
tests/BLI_multi_value_map_test.cc
tests/BLI_path_util_test.cc
tests/BLI_polyfill_2d_test.cc
+ tests/BLI_pool_test.cc
tests/BLI_ressource_strings.h
tests/BLI_serialize_test.cc
tests/BLI_session_uuid_test.cc
diff --git a/source/blender/blenlib/intern/BLI_index_range.cc b/source/blender/blenlib/intern/BLI_index_range.cc
index 398228ab461..624dcc39fc5 100644
--- a/source/blender/blenlib/intern/BLI_index_range.cc
+++ b/source/blender/blenlib/intern/BLI_index_range.cc
@@ -44,4 +44,34 @@ Span<int64_t> IndexRange::as_span_internal() const
return Span<int64_t>(s_current_array + start_, size_);
}
+AlignedIndexRanges split_index_range_by_alignment(const IndexRange range, const int64_t alignment)
+{
+ BLI_assert(is_power_of_2_i(alignment));
+ const int64_t mask = alignment - 1;
+
+ AlignedIndexRanges aligned_ranges;
+
+ const int64_t start_chunk = range.start() & ~mask;
+ const int64_t end_chunk = range.one_after_last() & ~mask;
+ if (start_chunk == end_chunk) {
+ aligned_ranges.prefix = range;
+ }
+ else {
+ int64_t prefix_size = 0;
+ int64_t suffix_size = 0;
+ if (range.start() != start_chunk) {
+ prefix_size = alignment - (range.start() & mask);
+ }
+ if (range.one_after_last() != end_chunk) {
+ suffix_size = range.one_after_last() - end_chunk;
+ }
+ aligned_ranges.prefix = IndexRange(range.start(), prefix_size);
+ aligned_ranges.suffix = IndexRange(end_chunk, suffix_size);
+ aligned_ranges.aligned = IndexRange(aligned_ranges.prefix.one_after_last(),
+ range.size() - prefix_size - suffix_size);
+ }
+
+ return aligned_ranges;
+}
+
} // namespace blender
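
A quick worked example (not part of the patch) of `split_index_range_by_alignment`, assuming the `AlignedIndexRanges` declaration lives in BLI_index_range.hh as the implementation above suggests. It lets a caller peel off unaligned head/tail elements and handle the aligned middle block separately:

/* Splitting [6, 21) with alignment 8:
 *   prefix  = [6, 8)   -> 2 unaligned leading elements
 *   aligned = [8, 16)  -> one whole 8-element block
 *   suffix  = [16, 21) -> 5 unaligned trailing elements */
const blender::AlignedIndexRanges ranges = blender::split_index_range_by_alignment(
    blender::IndexRange(6, 15), 8);
BLI_assert(ranges.prefix.size() == 2);
BLI_assert(ranges.aligned.size() == 8);
BLI_assert(ranges.suffix.size() == 5);
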
diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c
index 62bf17bd415..a43b725b6e3 100644
--- a/source/blender/blenlib/intern/BLI_kdopbvh.c
+++ b/source/blender/blenlib/intern/BLI_kdopbvh.c
@@ -1385,7 +1385,7 @@ BVHTreeOverlap *BLI_bvhtree_overlap(
static bool tree_intersect_plane_test(const float *bv, const float plane[4])
{
- /* TODO(germano): Support other KDOP geometries. */
+ /* TODO(@germano): Support other KDOP geometries. */
const float bb_min[3] = {bv[0], bv[2], bv[4]};
const float bb_max[3] = {bv[1], bv[3], bv[5]};
float bb_near[3], bb_far[3];
diff --git a/source/blender/blenlib/intern/BLI_memarena.c b/source/blender/blenlib/intern/BLI_memarena.c
index 3b73a81012d..ada2d27f9b2 100644
--- a/source/blender/blenlib/intern/BLI_memarena.c
+++ b/source/blender/blenlib/intern/BLI_memarena.c
@@ -158,6 +158,7 @@ void *BLI_memarena_calloc(MemArena *ma, size_t size)
BLI_assert(ma->use_calloc == false);
ptr = BLI_memarena_alloc(ma, size);
+ BLI_assert(ptr != NULL);
memset(ptr, 0, size);
return ptr;
diff --git a/source/blender/blenlib/intern/BLI_memblock.c b/source/blender/blenlib/intern/BLI_memblock.c
index f780d520301..b03efd2b8a2 100644
--- a/source/blender/blenlib/intern/BLI_memblock.c
+++ b/source/blender/blenlib/intern/BLI_memblock.c
@@ -5,7 +5,6 @@
* \ingroup bli
*
* Dead simple, fast memory allocator for allocating many elements of the same size.
- *
*/
#include <stdlib.h>
diff --git a/source/blender/blenlib/intern/bitmap.c b/source/blender/blenlib/intern/bitmap.c
index 7fcbc31c066..2cc2fbc3e2f 100644
--- a/source/blender/blenlib/intern/bitmap.c
+++ b/source/blender/blenlib/intern/bitmap.c
@@ -11,6 +11,7 @@
#include <string.h>
#include "BLI_bitmap.h"
+#include "BLI_math_bits.h"
#include "BLI_utildefines.h"
void BLI_bitmap_set_all(BLI_bitmap *bitmap, bool set, size_t bits)
@@ -46,3 +47,22 @@ void BLI_bitmap_or_all(BLI_bitmap *dst, const BLI_bitmap *src, size_t bits)
dst[i] |= src[i];
}
}
+
+int BLI_bitmap_find_first_unset(const BLI_bitmap *bitmap, const size_t bits)
+{
+ const size_t blocks_num = _BITMAP_NUM_BLOCKS(bits);
+ int result = -1;
+ /* Skip over completely set blocks. */
+ int index = 0;
+ while (index < blocks_num && bitmap[index] == ~0u) {
+ index++;
+ }
+ if (index < blocks_num) {
+ /* Found a partially used block: find the lowest unused bit. */
+ const uint m = ~bitmap[index];
+ BLI_assert(m != 0);
+ const uint bit_index = bitscan_forward_uint(m);
+ result = bit_index + (index << _BITMAP_POWER);
+ }
+ return result;
+}
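
A small usage sketch (not part of the patch) for the new `BLI_bitmap_find_first_unset`; the free-slot scenario is hypothetical:

BLI_bitmap *used = BLI_BITMAP_NEW(64, __func__);
BLI_BITMAP_ENABLE(used, 0);
BLI_BITMAP_ENABLE(used, 1);
/* Bits 0 and 1 are set, so the first unset bit is 2; -1 would mean the bitmap is full. */
const int free_slot = BLI_bitmap_find_first_unset(used, 64);
BLI_assert(free_slot == 2);
MEM_freeN(used);
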
diff --git a/source/blender/blenlib/intern/boxpack_2d.c b/source/blender/blenlib/intern/boxpack_2d.c
index 78f5088e8b1..d55a4a8c9ff 100644
--- a/source/blender/blenlib/intern/boxpack_2d.c
+++ b/source/blender/blenlib/intern/boxpack_2d.c
@@ -712,7 +712,6 @@ void BLI_box_pack_2d_fixedarea(ListBase *boxes, int width, int height, ListBase
* # Box * Small # # Box * #
* # * # # * #
* ################### ###################
- *
*/
int area_hsplit_large = space->w * (space->h - box->h);
int area_vsplit_large = (space->w - box->w) * space->h;
diff --git a/source/blender/blenlib/intern/filereader_zstd.c b/source/blender/blenlib/intern/filereader_zstd.c
index 5f114f24fb0..aeb000e9754 100644
--- a/source/blender/blenlib/intern/filereader_zstd.c
+++ b/source/blender/blenlib/intern/filereader_zstd.c
@@ -281,7 +281,10 @@ static void zstd_close(FileReader *reader)
if (zstd->reader.seek) {
MEM_freeN(zstd->seek.uncompressed_ofs);
MEM_freeN(zstd->seek.compressed_ofs);
- MEM_freeN(zstd->seek.cached_content);
+ /* When an error has occurred this may be NULL, see: T99744. */
+ if (zstd->seek.cached_content) {
+ MEM_freeN(zstd->seek.cached_content);
+ }
}
else {
MEM_freeN((void *)zstd->in_buf.src);
diff --git a/source/blender/blenlib/intern/generic_virtual_array.cc b/source/blender/blenlib/intern/generic_virtual_array.cc
index a6fbf4bff5b..f66b1e14fc6 100644
--- a/source/blender/blenlib/intern/generic_virtual_array.cc
+++ b/source/blender/blenlib/intern/generic_virtual_array.cc
@@ -46,25 +46,9 @@ void GVArrayImpl::get(const int64_t index, void *r_value) const
this->get_to_uninitialized(index, r_value);
}
-bool GVArrayImpl::is_span() const
+CommonVArrayInfo GVArrayImpl::common_info() const
{
- return false;
-}
-
-GSpan GVArrayImpl::get_internal_span() const
-{
- BLI_assert(false);
- return GSpan(*type_);
-}
-
-bool GVArrayImpl::is_single() const
-{
- return false;
-}
-
-void GVArrayImpl::get_internal_single(void *UNUSED(r_value)) const
-{
- BLI_assert(false);
+ return {};
}
bool GVArrayImpl::try_assign_VArray(void *UNUSED(varray)) const
@@ -72,13 +56,6 @@ bool GVArrayImpl::try_assign_VArray(void *UNUSED(varray)) const
return false;
}
-bool GVArrayImpl::may_have_ownership() const
-{
- /* Use true as default to avoid accidentally creating subclasses that have this set to false but
- * actually own data. Subclasses should set the to false instead. */
- return true;
-}
-
/** \} */
/* -------------------------------------------------------------------- */
@@ -101,9 +78,9 @@ void GVMutableArrayImpl::set_by_relocate(const int64_t index, void *value)
void GVMutableArrayImpl::set_all(const void *src)
{
- if (this->is_span()) {
- const GSpan span = this->get_internal_span();
- type_->copy_assign_n(src, const_cast<void *>(span.data()), size_);
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ type_->copy_assign_n(src, const_cast<void *>(info.data), size_);
}
else {
for (int64_t i : IndexRange(size_)) {
@@ -114,9 +91,9 @@ void GVMutableArrayImpl::set_all(const void *src)
void GVMutableArray::fill(const void *value)
{
- if (this->is_span()) {
- const GSpan span = this->get_internal_span();
- this->type().fill_assign_n(value, const_cast<void *>(span.data()), this->size());
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ this->type().fill_assign_n(value, const_cast<void *>(info.data), this->size());
}
else {
for (int64_t i : IndexRange(this->size())) {
@@ -161,14 +138,9 @@ void GVArrayImpl_For_GSpan::set_by_relocate(const int64_t index, void *value)
type_->relocate_assign(value, POINTER_OFFSET(data_, element_size_ * index));
}
-bool GVArrayImpl_For_GSpan::is_span() const
-{
- return true;
-}
-
-GSpan GVArrayImpl_For_GSpan::get_internal_span() const
+CommonVArrayInfo GVArrayImpl_For_GSpan::common_info() const
{
- return GSpan(*type_, data_, size_);
+ return CommonVArrayInfo{CommonVArrayInfo::Type::Span, true, data_};
}
void GVArrayImpl_For_GSpan::materialize(const IndexMask mask, void *dst) const
@@ -210,22 +182,9 @@ void GVArrayImpl_For_SingleValueRef::get_to_uninitialized(const int64_t UNUSED(i
type_->copy_construct(value_, r_value);
}
-bool GVArrayImpl_For_SingleValueRef::is_span() const
-{
- return size_ == 1;
-}
-GSpan GVArrayImpl_For_SingleValueRef::get_internal_span() const
-{
- return GSpan{*type_, value_, 1};
-}
-
-bool GVArrayImpl_For_SingleValueRef::is_single() const
+CommonVArrayInfo GVArrayImpl_For_SingleValueRef::common_info() const
{
- return true;
-}
-void GVArrayImpl_For_SingleValueRef::get_internal_single(void *r_value) const
-{
- type_->copy_assign(value_, r_value);
+ return CommonVArrayInfo{CommonVArrayInfo::Type::Single, true, value_};
}
void GVArrayImpl_For_SingleValueRef::materialize(const IndexMask mask, void *dst) const
@@ -311,32 +270,36 @@ template<int BufferSize> class GVArrayImpl_For_SmallTrivialSingleValue : public
this->copy_value_to(r_value);
}
- bool is_single() const override
- {
- return true;
- }
- void get_internal_single(void *r_value) const override
+ void copy_value_to(void *dst) const
{
- this->copy_value_to(r_value);
+ memcpy(dst, &buffer_, type_->size());
}
- void copy_value_to(void *dst) const
+ CommonVArrayInfo common_info() const override
{
- memcpy(dst, &buffer_, type_->size());
+ return CommonVArrayInfo{CommonVArrayInfo::Type::Single, true, &buffer_};
}
};
/** \} */
/* -------------------------------------------------------------------- */
-/** \name #GVArray_GSpan
+/** \name #GVArraySpan
* \{ */
-GVArray_GSpan::GVArray_GSpan(GVArray varray) : GSpan(varray.type()), varray_(std::move(varray))
+GVArraySpan::GVArraySpan() = default;
+
+GVArraySpan::GVArraySpan(GVArray varray)
+ : GSpan(varray ? &varray.type() : nullptr), varray_(std::move(varray))
{
+ if (!varray_) {
+ return;
+ }
+
size_ = varray_.size();
- if (varray_.is_span()) {
- data_ = varray_.get_internal_span().data();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = info.data;
}
else {
owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
@@ -345,7 +308,27 @@ GVArray_GSpan::GVArray_GSpan(GVArray varray) : GSpan(varray.type()), varray_(std
}
}
-GVArray_GSpan::~GVArray_GSpan()
+GVArraySpan::GVArraySpan(GVArraySpan &&other)
+ : GSpan(other.type_ptr()), varray_(std::move(other.varray_)), owned_data_(other.owned_data_)
+{
+ if (!varray_) {
+ return;
+ }
+
+ size_ = varray_.size();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = info.data;
+ }
+ else {
+ data_ = owned_data_;
+ }
+ other.owned_data_ = nullptr;
+ other.data_ = nullptr;
+ other.size_ = 0;
+}
+
+GVArraySpan::~GVArraySpan()
{
if (owned_data_ != nullptr) {
type_->destruct_n(owned_data_, size_);
@@ -353,18 +336,34 @@ GVArray_GSpan::~GVArray_GSpan()
}
}
+GVArraySpan &GVArraySpan::operator=(GVArraySpan &&other)
+{
+ if (this == &other) {
+ return *this;
+ }
+ std::destroy_at(this);
+ new (this) GVArraySpan(std::move(other));
+ return *this;
+}
+
/** \} */
/* -------------------------------------------------------------------- */
-/** \name #GVMutableArray_GSpan
+/** \name #GMutableVArraySpan
* \{ */
-GVMutableArray_GSpan::GVMutableArray_GSpan(GVMutableArray varray, const bool copy_values_to_span)
- : GMutableSpan(varray.type()), varray_(std::move(varray))
+GMutableVArraySpan::GMutableVArraySpan() = default;
+
+GMutableVArraySpan::GMutableVArraySpan(GVMutableArray varray, const bool copy_values_to_span)
+ : GMutableSpan(varray ? &varray.type() : nullptr), varray_(std::move(varray))
{
+ if (!varray_) {
+ return;
+ }
size_ = varray_.size();
- if (varray_.is_span()) {
- data_ = varray_.get_internal_span().data();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = const_cast<void *>(info.data);
}
else {
owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
@@ -378,11 +377,35 @@ GVMutableArray_GSpan::GVMutableArray_GSpan(GVMutableArray varray, const bool cop
}
}
-GVMutableArray_GSpan::~GVMutableArray_GSpan()
+GMutableVArraySpan::GMutableVArraySpan(GMutableVArraySpan &&other)
+ : GMutableSpan(other.type_ptr()),
+ varray_(std::move(other.varray_)),
+ owned_data_(other.owned_data_),
+ show_not_saved_warning_(other.show_not_saved_warning_)
{
- if (show_not_saved_warning_) {
- if (!save_has_been_called_) {
- std::cout << "Warning: Call `apply()` to make sure that changes persist in all cases.\n";
+ if (!varray_) {
+ return;
+ }
+ size_ = varray_.size();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = const_cast<void *>(info.data);
+ }
+ else {
+ data_ = owned_data_;
+ }
+ other.owned_data_ = nullptr;
+ other.data_ = nullptr;
+ other.size_ = 0;
+}
+
+GMutableVArraySpan::~GMutableVArraySpan()
+{
+ if (varray_) {
+ if (show_not_saved_warning_) {
+ if (!save_has_been_called_) {
+ std::cout << "Warning: Call `save()` to make sure that changes persist in all cases.\n";
+ }
}
}
if (owned_data_ != nullptr) {
@@ -391,7 +414,17 @@ GVMutableArray_GSpan::~GVMutableArray_GSpan()
}
}
-void GVMutableArray_GSpan::save()
+GMutableVArraySpan &GMutableVArraySpan::operator=(GMutableVArraySpan &&other)
+{
+ if (this == &other) {
+ return *this;
+ }
+ std::destroy_at(this);
+ new (this) GMutableVArraySpan(std::move(other));
+ return *this;
+}
+
+void GMutableVArraySpan::save()
{
save_has_been_called_ = true;
if (data_ != owned_data_) {
@@ -400,11 +433,16 @@ void GVMutableArray_GSpan::save()
varray_.set_all(owned_data_);
}
-void GVMutableArray_GSpan::disable_not_applied_warning()
+void GMutableVArraySpan::disable_not_applied_warning()
{
show_not_saved_warning_ = false;
}
+const GVMutableArray &GMutableVArraySpan::varray() const
+{
+ return varray_;
+}
+
/** \} */
/* -------------------------------------------------------------------- */
@@ -437,22 +475,24 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
varray_.get_to_uninitialized(index + offset_, r_value);
}
- bool is_span() const override
- {
- return varray_.is_span();
- }
- GSpan get_internal_span() const override
- {
- return varray_.get_internal_span().slice(slice_);
- }
-
- bool is_single() const override
- {
- return varray_.is_single();
- }
- void get_internal_single(void *r_value) const override
+ CommonVArrayInfo common_info() const override
{
- varray_.get_internal_single(r_value);
+ const CommonVArrayInfo internal_info = varray_.common_info();
+ switch (internal_info.type) {
+ case CommonVArrayInfo::Type::Any: {
+ return {};
+ }
+ case CommonVArrayInfo::Type::Span: {
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Span,
+ internal_info.may_have_ownership,
+ POINTER_OFFSET(internal_info.data, type_->size() * offset_));
+ }
+ case CommonVArrayInfo::Type::Single: {
+ return internal_info;
+ }
+ }
+ BLI_assert_unreachable();
+ return {};
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
@@ -535,11 +575,6 @@ void GVArrayCommon::materialize_compressed_to_uninitialized(IndexMask mask, void
impl_->materialize_compressed_to_uninitialized(mask, dst);
}
-bool GVArrayCommon::may_have_ownership() const
-{
- return impl_->may_have_ownership();
-}
-
void GVArrayCommon::copy_from(const GVArrayCommon &other)
{
if (this == &other) {
@@ -562,24 +597,28 @@ void GVArrayCommon::move_from(GVArrayCommon &&other) noexcept
bool GVArrayCommon::is_span() const
{
- return impl_->is_span();
+ const CommonVArrayInfo info = impl_->common_info();
+ return info.type == CommonVArrayInfo::Type::Span;
}
GSpan GVArrayCommon::get_internal_span() const
{
BLI_assert(this->is_span());
- return impl_->get_internal_span();
+ const CommonVArrayInfo info = impl_->common_info();
+ return GSpan(this->type(), info.data, this->size());
}
bool GVArrayCommon::is_single() const
{
- return impl_->is_single();
+ const CommonVArrayInfo info = impl_->common_info();
+ return info.type == CommonVArrayInfo::Type::Single;
}
void GVArrayCommon::get_internal_single(void *r_value) const
{
BLI_assert(this->is_single());
- impl_->get_internal_single(r_value);
+ const CommonVArrayInfo info = impl_->common_info();
+ this->type().copy_assign(info.data, r_value);
}
void GVArrayCommon::get_internal_single_to_uninitialized(void *r_value) const
@@ -675,6 +714,15 @@ GVArray GVArray::ForEmpty(const CPPType &type)
GVArray GVArray::slice(IndexRange slice) const
{
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Single) {
+ return GVArray::ForSingle(this->type(), slice.size(), info.data);
+ }
+ /* Need to check for ownership, because otherwise the referenced data can be destructed when
+ * #this is destructed. */
+ if (info.type == CommonVArrayInfo::Type::Span && !info.may_have_ownership) {
+ return GVArray::ForSpan(GSpan(this->type(), info.data, this->size()).slice(slice));
+ }
return GVArray::For<GVArrayImpl_For_SlicedGVArray>(*this, slice);
}
@@ -752,10 +800,20 @@ void GVMutableArray::set_all(const void *src)
GMutableSpan GVMutableArray::get_internal_span() const
{
BLI_assert(this->is_span());
- const GSpan span = impl_->get_internal_span();
- return GMutableSpan(span.type(), const_cast<void *>(span.data()), span.size());
+ const CommonVArrayInfo info = impl_->common_info();
+ return GMutableSpan(this->type(), const_cast<void *>(info.data), this->size());
}
/** \} */
+CommonVArrayInfo GVArrayImpl_For_GSpan_final::common_info() const
+{
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Span, false, data_);
+}
+
+CommonVArrayInfo GVArrayImpl_For_SingleValueRef_final::common_info() const
+{
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Single, false, value_);
+}
+
} // namespace blender
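
A minimal sketch (not part of the patch) of the intended write-back pattern for the renamed `GMutableVArraySpan`; the function and its caller are hypothetical:

#include <utility>

#include "BLI_generic_virtual_array.hh"

static void edit_values(blender::GVMutableArray varray)
{
  /* Copies into a temporary buffer unless the virtual array is already a span internally. */
  blender::GMutableVArraySpan span(std::move(varray));

  /* ... mutate the elements through `span` here ... */

  /* Write the buffer back to the virtual array; this early-exits when `span` already
   * aliased the virtual array's own storage. Skipping it triggers the "Call `save()`"
   * warning in the destructor above. */
  span.save();
}
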
diff --git a/source/blender/blenlib/intern/index_mask.cc b/source/blender/blenlib/intern/index_mask.cc
index 1e301bc5fb9..e9af183d60d 100644
--- a/source/blender/blenlib/intern/index_mask.cc
+++ b/source/blender/blenlib/intern/index_mask.cc
@@ -128,7 +128,9 @@ Vector<IndexRange> IndexMask::extract_ranges_invert(const IndexRange full_range,
} // namespace blender
-namespace blender::index_mask_ops::detail {
+namespace blender::index_mask_ops {
+
+namespace detail {
IndexMask find_indices_based_on_predicate__merge(
IndexMask indices_to_check,
@@ -140,6 +142,7 @@ IndexMask find_indices_based_on_predicate__merge(
int64_t result_mask_size = 0;
for (Vector<Vector<int64_t>> &local_sub_masks : sub_masks) {
for (Vector<int64_t> &sub_mask : local_sub_masks) {
+ BLI_assert(!sub_mask.is_empty());
all_vectors.append(&sub_mask);
result_mask_size += sub_mask.size();
}
@@ -193,4 +196,49 @@ IndexMask find_indices_based_on_predicate__merge(
return r_indices.as_span();
}
-} // namespace blender::index_mask_ops::detail
+} // namespace detail
+
+IndexMask find_indices_from_virtual_array(const IndexMask indices_to_check,
+ const VArray<bool> &virtual_array,
+ const int64_t parallel_grain_size,
+ Vector<int64_t> &r_indices)
+{
+ if (virtual_array.is_single()) {
+ return virtual_array.get_internal_single() ? indices_to_check : IndexMask(0);
+ }
+ if (virtual_array.is_span()) {
+ const Span<bool> span = virtual_array.get_internal_span();
+ return find_indices_based_on_predicate(
+ indices_to_check, 4096, r_indices, [&](const int64_t i) { return span[i]; });
+ }
+
+ threading::EnumerableThreadSpecific<Vector<bool>> materialize_buffers;
+ threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> sub_masks;
+
+ threading::parallel_for(
+ indices_to_check.index_range(), parallel_grain_size, [&](const IndexRange range) {
+ const IndexMask sliced_mask = indices_to_check.slice(range);
+
+ /* To avoid virtual function call overhead from accessing the virtual array,
+ * materialize the necessary indices for this chunk into a reused buffer. */
+ Vector<bool> &buffer = materialize_buffers.local();
+ buffer.reinitialize(sliced_mask.size());
+ virtual_array.materialize_compressed(sliced_mask, buffer);
+
+ Vector<int64_t> masked_indices;
+ sliced_mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ if (buffer[i]) {
+ masked_indices.append(best_mask[i]);
+ }
+ }
+ });
+ if (!masked_indices.is_empty()) {
+ sub_masks.local().append(std::move(masked_indices));
+ }
+ });
+
+ return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices);
+}
+
+} // namespace blender::index_mask_ops
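
A usage sketch (not part of the patch) for the new `find_indices_from_virtual_array`; the selection attribute and point count are hypothetical:

#include "BLI_index_mask_ops.hh"
#include "BLI_virtual_array.hh"

static blender::IndexMask selected_points(const blender::VArray<bool> &selection,
                                          const int64_t points_num,
                                          blender::Vector<int64_t> &r_indices)
{
  /* The grain size 4096 matches the span fast path above. The returned mask may
   * reference `r_indices`, so the vector must outlive the mask. */
  return blender::index_mask_ops::find_indices_from_virtual_array(
      blender::IndexRange(points_num), selection, 4096, r_indices);
}
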
diff --git a/source/blender/blenlib/intern/kdtree_impl.h b/source/blender/blenlib/intern/kdtree_impl.h
index d9ae826093c..6614f1bf964 100644
--- a/source/blender/blenlib/intern/kdtree_impl.h
+++ b/source/blender/blenlib/intern/kdtree_impl.h
@@ -927,6 +927,14 @@ int BLI_kdtree_nd_(calc_duplicates_fast)(const KDTree *tree,
/** \name BLI_kdtree_3d_deduplicate
* \{ */
+static int kdtree_cmp_bool(const bool a, const bool b)
+{
+ if (a == b) {
+ return 0;
+ }
+ return b ? -1 : 1;
+}
+
static int kdtree_node_cmp_deduplicate(const void *n0_p, const void *n1_p)
{
const KDTreeNode *n0 = n0_p;
@@ -939,17 +947,16 @@ static int kdtree_node_cmp_deduplicate(const void *n0_p, const void *n1_p)
return 1;
}
}
- /* Sort by pointer so the first added will be used.
- * assignment below ignores const correctness,
- * however the values aren't used for sorting and are to be discarded. */
- if (n0 < n1) {
- ((KDTreeNode *)n1)->d = KD_DIMS; /* tag invalid */
- return -1;
- }
- else {
- ((KDTreeNode *)n0)->d = KD_DIMS; /* tag invalid */
- return 1;
+
+ if (n0->d != KD_DIMS && n1->d != KD_DIMS) {
+ /* Two nodes share identical `co`
+ * and both are still valid.
+ * Cast away `const` and tag one of them as invalid. */
+ ((KDTreeNode *)n1)->d = KD_DIMS;
}
+
+ /* Keep sorting until each unique value has one and only one valid node. */
+ return kdtree_cmp_bool(n0->d == KD_DIMS, n1->d == KD_DIMS);
}
/**
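
For illustration only (not part of the patch): the new helper orders still-valid nodes before invalidated ones, which helps ensure that after `qsort` each run of duplicates keeps exactly one valid node:

BLI_assert(kdtree_cmp_bool(false, true) == -1); /* A valid node sorts before an invalidated one. */
BLI_assert(kdtree_cmp_bool(true, false) == 1);  /* An invalidated node sorts after a valid one. */
BLI_assert(kdtree_cmp_bool(false, false) == 0); /* Equal validity: order decided by the other keys. */
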
diff --git a/source/blender/blenlib/intern/length_parameterize.cc b/source/blender/blenlib/intern/length_parameterize.cc
index 7c0fc860b53..06cca281510 100644
--- a/source/blender/blenlib/intern/length_parameterize.cc
+++ b/source/blender/blenlib/intern/length_parameterize.cc
@@ -1,144 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_length_parameterize.hh"
+#include "BLI_task.hh"
namespace blender::length_parameterize {
-void create_uniform_samples(const Span<float> lengths,
- const bool cyclic,
- MutableSpan<int> indices,
- MutableSpan<float> factors)
+void sample_uniform(const Span<float> lengths,
+ const bool include_last_point,
+ MutableSpan<int> r_segment_indices,
+ MutableSpan<float> r_factors)
{
- const int count = indices.size();
+ const int count = r_segment_indices.size();
BLI_assert(count > 0);
BLI_assert(lengths.size() >= 1);
BLI_assert(std::is_sorted(lengths.begin(), lengths.end()));
- const int segments_num = lengths.size();
- const int points_num = cyclic ? segments_num : segments_num + 1;
- indices.first() = 0;
- factors.first() = 0.0f;
if (count == 1) {
+ r_segment_indices[0] = 0;
+ r_factors[0] = 0.0f;
return;
}
-
const float total_length = lengths.last();
- if (total_length == 0.0f) {
- indices.fill(0);
- factors.fill(0.0f);
- return;
- }
-
- const float step_length = total_length / (count - (cyclic ? 0 : 1));
- const float step_length_inv = 1.0f / step_length;
-
- int i_dst = 1;
- /* Store the length at the previous point in a variable so it can start out at zero
- * (the lengths array doesn't contain 0 for the first point). */
- float prev_length = 0.0f;
- for (const int i_src : IndexRange(points_num - 1)) {
- const float next_length = lengths[i_src];
- const float segment_length = next_length - prev_length;
- if (segment_length == 0.0f) {
- continue;
- }
- /* Add every sample that fits in this segment. */
- const float segment_length_inv = 1.0f / segment_length;
- const int segment_samples_num = std::ceil(next_length * step_length_inv - i_dst);
- indices.slice(i_dst, segment_samples_num).fill(i_src);
-
- for (const int i : factors.index_range().slice(i_dst, segment_samples_num)) {
- const float length_in_segment = step_length * i - prev_length;
- factors[i] = length_in_segment * segment_length_inv;
- }
-
- i_dst += segment_samples_num;
-
- prev_length = next_length;
- }
-
- /* Add the samples on the last cyclic segment if necessary, and also the samples
- * that weren't created in the previous loop due to floating point inaccuracy. */
- if (cyclic && lengths.size() > 1) {
- indices.drop_front(i_dst).fill(points_num - 1);
- const float segment_length = lengths.last() - lengths.last(1);
- if (segment_length == 0.0f) {
- return;
- }
- const float segment_length_inv = 1.0f / segment_length;
- for (const int i : indices.index_range().drop_front(i_dst)) {
- const float length_in_segment = step_length * i - prev_length;
- factors[i] = length_in_segment * segment_length_inv;
+ const float step_length = total_length / (count - include_last_point);
+ threading::parallel_for(IndexRange(count), 512, [&](const IndexRange range) {
+ SampleSegmentHint hint;
+ for (const int i : range) {
+ /* Use minimum to avoid issues with floating point accuracy. */
+ const float sample_length = std::min(total_length, i * step_length);
+ sample_at_length(lengths, sample_length, r_segment_indices[i], r_factors[i], &hint);
}
- }
- else {
- indices.drop_front(i_dst).fill(points_num - 2);
- factors.drop_front(i_dst).fill(1.0f);
- }
+ });
}
-void create_samples_from_sorted_lengths(const Span<float> lengths,
- const Span<float> sample_lengths,
- const bool cyclic,
- MutableSpan<int> indices,
- MutableSpan<float> factors)
+void sample_at_lengths(const Span<float> accumulated_segment_lengths,
+ const Span<float> sample_lengths,
+ MutableSpan<int> r_segment_indices,
+ MutableSpan<float> r_factors)
{
- BLI_assert(std::is_sorted(lengths.begin(), lengths.end()));
+ BLI_assert(
+ std::is_sorted(accumulated_segment_lengths.begin(), accumulated_segment_lengths.end()));
BLI_assert(std::is_sorted(sample_lengths.begin(), sample_lengths.end()));
- BLI_assert(indices.size() == sample_lengths.size());
- BLI_assert(indices.size() == factors.size());
- const int segments_num = lengths.size();
- const int points_num = cyclic ? segments_num : segments_num + 1;
- const float total_length = lengths.last();
- if (total_length == 0.0f) {
- indices.fill(0);
- factors.fill(0.0f);
- return;
- }
+ const int count = sample_lengths.size();
+ BLI_assert(count == r_segment_indices.size());
+ BLI_assert(count == r_factors.size());
- int i_dst = 0;
- /* Store the length at the previous point in a variable so it can start out at zero
- * (the lengths array doesn't contain 0 for the first point). */
- float prev_length = 0.0f;
- for (const int i_src : IndexRange(points_num - 1)) {
- const float next_length = lengths[i_src];
- const float segment_length = next_length - prev_length;
- if (segment_length == 0.0f) {
- continue;
- }
- /* Add every sample that fits in this segment. It's also necessary to check if the last sample
- * has been reached, since there is no upper bound on the number of samples in each segment. */
- const float segment_length_inv = 1.0f / segment_length;
- while (i_dst < sample_lengths.size() && sample_lengths[i_dst] < next_length) {
- const float length_in_segment = sample_lengths[i_dst] - prev_length;
- const float factor = length_in_segment * segment_length_inv;
- indices[i_dst] = i_src;
- factors[i_dst] = factor;
- i_dst++;
+ threading::parallel_for(IndexRange(count), 512, [&](const IndexRange range) {
+ SampleSegmentHint hint;
+ for (const int i : range) {
+ const float sample_length = sample_lengths[i];
+ sample_at_length(
+ accumulated_segment_lengths, sample_length, r_segment_indices[i], r_factors[i], &hint);
}
-
- prev_length = next_length;
- }
-
- /* Add the samples on the last cyclic segment if necessary, and also the samples
- * that weren't created in the previous loop due to floating point inaccuracy. */
- if (cyclic && lengths.size() > 1) {
- const float segment_length = lengths.last() - lengths.last(1);
- while (sample_lengths[i_dst] < total_length) {
- const float length_in_segment = sample_lengths[i_dst] - prev_length;
- const float factor = length_in_segment / segment_length;
- indices[i_dst] = points_num - 1;
- factors[i_dst] = factor;
- i_dst++;
- }
- indices.drop_front(i_dst).fill(points_num - 1);
- factors.drop_front(i_dst).fill(1.0f);
- }
- else {
- indices.drop_front(i_dst).fill(points_num - 2);
- factors.drop_front(i_dst).fill(1.0f);
- }
+ });
}
} // namespace blender::length_parameterize
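
A small usage sketch (not part of the patch) of the renamed `sample_uniform`, assuming accumulated segment lengths have already been computed for a three-segment poly-line:

#include "BLI_length_parameterize.hh"

/* Segments end at accumulated lengths 1, 3 and 6, so the total length is 6.
 * Four samples including the last point sit at lengths 0, 2, 4 and 6. */
const float lengths[3] = {1.0f, 3.0f, 6.0f};
int segment_indices[4];
float factors[4];
blender::length_parameterize::sample_uniform(blender::Span<float>(lengths, 3),
                                             true,
                                             blender::MutableSpan<int>(segment_indices, 4),
                                             blender::MutableSpan<float>(factors, 4));
/* Each factor is the 0..1 position within segment_indices[i]; e.g. the sample at
 * length 2 lies halfway through the second segment. */
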
diff --git a/source/blender/blenlib/intern/math_base_inline.c b/source/blender/blenlib/intern/math_base_inline.c
index 4a213f5fe74..fb71e84c23e 100644
--- a/source/blender/blenlib/intern/math_base_inline.c
+++ b/source/blender/blenlib/intern/math_base_inline.c
@@ -370,6 +370,24 @@ MINLINE uint divide_ceil_u(uint a, uint b)
return (a + b - 1) / b;
}
+MINLINE uint64_t divide_ceil_ul(uint64_t a, uint64_t b)
+{
+ return (a + b - 1) / b;
+}
+
+/**
+ * Returns \a a if it is a multiple of \a b, or the next multiple of \a b after \a a.
+ */
+MINLINE uint ceil_to_multiple_u(uint a, uint b)
+{
+ return divide_ceil_u(a, b) * b;
+}
+
+MINLINE uint64_t ceil_to_multiple_ul(uint64_t a, uint64_t b)
+{
+ return divide_ceil_ul(a, b) * b;
+}
+
MINLINE int mod_i(int i, int n)
{
return (i % n + n) % n;
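
A quick numeric check (not part of the patch) of the new rounding helpers:

BLI_assert(divide_ceil_u(10, 4) == 3);        /* 10 / 4 rounded up. */
BLI_assert(ceil_to_multiple_u(10, 4) == 12);  /* Next multiple of 4 at or above 10. */
BLI_assert(ceil_to_multiple_ul(12, 4) == 12); /* Already a multiple: returned unchanged. */
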
diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c
index e7ccdeab80a..773aac95193 100644
--- a/source/blender/blenlib/intern/math_geom.c
+++ b/source/blender/blenlib/intern/math_geom.c
@@ -1667,8 +1667,8 @@ bool isect_ray_tri_v3(const float ray_origin[3],
float *r_lambda,
float r_uv[2])
{
- /* NOTE(campbell): these values were 0.000001 in 2.4x but for projection snapping on
- * a human head (1BU == 1m), subsurf level 2, this gave many errors. */
+ /* NOTE(@campbellbarton): these values were 0.000001 in 2.4x but for projection snapping on
+ * a human head `(1BU == 1m)`, subdivision-surface level 2, this gave many errors. */
const float epsilon = 0.00000001f;
float p[3], s[3], e1[3], e2[3], q[3];
float a, f, u, v;
@@ -3773,7 +3773,7 @@ void barycentric_weights_v2_quad(const float v1[2],
const float co[2],
float w[4])
{
- /* NOTE(campbell): fabsf() here is not needed for convex quads
+ /* NOTE(@campbellbarton): fabsf() here is not needed for convex quads
* (and not used in #interp_weights_poly_v2).
* But in the case of concave/bow-tie quads for the mask rasterizer it
* gives unreliable results without adding `absf()`. If this becomes an issue for more general
diff --git a/source/blender/blenlib/intern/math_matrix.c b/source/blender/blenlib/intern/math_matrix.c
index ce9abc36cad..e96b12033a9 100644
--- a/source/blender/blenlib/intern/math_matrix.c
+++ b/source/blender/blenlib/intern/math_matrix.c
@@ -113,7 +113,6 @@ void copy_m4_m3(float m1[4][4], const float m2[3][3]) /* no clear */
m1[2][1] = m2[2][1];
m1[2][2] = m2[2][2];
- /* Reevan's Bugfix */
m1[0][3] = 0.0f;
m1[1][3] = 0.0f;
m1[2][3] = 0.0f;
@@ -787,14 +786,14 @@ void mul_m2_v2(const float mat[2][2], float vec[2])
mul_v2_m2v2(vec, mat, vec);
}
-void mul_mat3_m4_v3(const float M[4][4], float r[3])
+void mul_mat3_m4_v3(const float mat[4][4], float r[3])
{
const float x = r[0];
const float y = r[1];
- r[0] = x * M[0][0] + y * M[1][0] + M[2][0] * r[2];
- r[1] = x * M[0][1] + y * M[1][1] + M[2][1] * r[2];
- r[2] = x * M[0][2] + y * M[1][2] + M[2][2] * r[2];
+ r[0] = x * mat[0][0] + y * mat[1][0] + mat[2][0] * r[2];
+ r[1] = x * mat[0][1] + y * mat[1][1] + mat[2][1] * r[2];
+ r[2] = x * mat[0][2] + y * mat[1][2] + mat[2][2] * r[2];
}
void mul_v3_mat3_m4v3(float r[3], const float mat[4][4], const float vec[3])
@@ -1116,16 +1115,32 @@ double determinant_m3_array_db(const double m[3][3])
m[2][0] * (m[0][1] * m[1][2] - m[0][2] * m[1][1]));
}
-bool invert_m3_ex(float m[3][3], const float epsilon)
+bool invert_m2_m2(float inverse[2][2], const float mat[2][2])
{
- float tmp[3][3];
- const bool success = invert_m3_m3_ex(tmp, m, epsilon);
+ adjoint_m2_m2(inverse, mat);
+ float det = determinant_m2(mat[0][0], mat[1][0], mat[0][1], mat[1][1]);
+
+ bool success = (det != 0.0f);
+ if (success) {
+ inverse[0][0] /= det;
+ inverse[1][0] /= det;
+ inverse[0][1] /= det;
+ inverse[1][1] /= det;
+ }
- copy_m3_m3(m, tmp);
return success;
}
-bool invert_m3_m3_ex(float m1[3][3], const float m2[3][3], const float epsilon)
+bool invert_m3_ex(float mat[3][3], const float epsilon)
+{
+ float mat_tmp[3][3];
+ const bool success = invert_m3_m3_ex(mat_tmp, mat, epsilon);
+
+ copy_m3_m3(mat, mat_tmp);
+ return success;
+}
+
+bool invert_m3_m3_ex(float inverse[3][3], const float mat[3][3], const float epsilon)
{
float det;
int a, b;
@@ -1134,10 +1149,10 @@ bool invert_m3_m3_ex(float m1[3][3], const float m2[3][3], const float epsilon)
BLI_assert(epsilon >= 0.0f);
/* calc adjoint */
- adjoint_m3_m3(m1, m2);
+ adjoint_m3_m3(inverse, mat);
/* then determinant old matrix! */
- det = determinant_m3_array(m2);
+ det = determinant_m3_array(mat);
success = (fabsf(det) > epsilon);
@@ -1145,33 +1160,33 @@ bool invert_m3_m3_ex(float m1[3][3], const float m2[3][3], const float epsilon)
det = 1.0f / det;
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
- m1[a][b] *= det;
+ inverse[a][b] *= det;
}
}
}
return success;
}
-bool invert_m3(float m[3][3])
+bool invert_m3(float mat[3][3])
{
- float tmp[3][3];
- const bool success = invert_m3_m3(tmp, m);
+ float mat_tmp[3][3];
+ const bool success = invert_m3_m3(mat_tmp, mat);
- copy_m3_m3(m, tmp);
+ copy_m3_m3(mat, mat_tmp);
return success;
}
-bool invert_m3_m3(float m1[3][3], const float m2[3][3])
+bool invert_m3_m3(float inverse[3][3], const float mat[3][3])
{
float det;
int a, b;
bool success;
/* calc adjoint */
- adjoint_m3_m3(m1, m2);
+ adjoint_m3_m3(inverse, mat);
/* then determinant old matrix! */
- det = determinant_m3_array(m2);
+ det = determinant_m3_array(mat);
success = (det != 0.0f);
@@ -1179,7 +1194,7 @@ bool invert_m3_m3(float m1[3][3], const float m2[3][3])
det = 1.0f / det;
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
- m1[a][b] *= det;
+ inverse[a][b] *= det;
}
}
}
@@ -1187,12 +1202,12 @@ bool invert_m3_m3(float m1[3][3], const float m2[3][3])
return success;
}
-bool invert_m4(float m[4][4])
+bool invert_m4(float mat[4][4])
{
- float tmp[4][4];
- const bool success = invert_m4_m4(tmp, m);
+ float mat_tmp[4][4];
+ const bool success = invert_m4_m4(mat_tmp, mat);
- copy_m4_m4(m, tmp);
+ copy_m4_m4(mat, mat_tmp);
return success;
}
@@ -2175,11 +2190,11 @@ float mat4_to_scale(const float mat[4][4])
return len_v3(unit_vec);
}
-float mat4_to_xy_scale(const float M[4][4])
+float mat4_to_xy_scale(const float mat[4][4])
{
/* unit length vector in xy plane */
float unit_vec[3] = {(float)M_SQRT1_2, (float)M_SQRT1_2, 0.0f};
- mul_mat3_m4_v3(M, unit_vec);
+ mul_mat3_m4_v3(mat, unit_vec);
return len_v3(unit_vec);
}
@@ -2224,12 +2239,6 @@ void mat4_to_loc_quat(float loc[3], float quat[4], const float wmat[4][4])
copy_m3_m4(mat3, wmat);
normalize_m3_m3(mat3_n, mat3);
- /* So scale doesn't interfere with rotation T24291. */
- /* FIXME: this is a workaround for negative matrix not working for rotation conversion. */
- if (is_negative_m3(mat3)) {
- negate_m3(mat3_n);
- }
-
mat3_normalized_to_quat(quat, mat3_n);
copy_v3_v3(loc, wmat[3]);
}
@@ -2238,7 +2247,7 @@ void mat4_decompose(float loc[3], float quat[4], float size[3], const float wmat
{
float rot[3][3];
mat4_to_loc_rot_size(loc, rot, size, wmat);
- mat3_normalized_to_quat(quat, rot);
+ mat3_normalized_to_quat_fast(quat, rot);
}
/**
@@ -2377,8 +2386,8 @@ void blend_m3_m3m3(float out[3][3],
mat3_to_rot_size(drot, dscale, dst);
mat3_to_rot_size(srot, sscale, src);
- mat3_normalized_to_quat(dquat, drot);
- mat3_normalized_to_quat(squat, srot);
+ mat3_normalized_to_quat_fast(dquat, drot);
+ mat3_normalized_to_quat_fast(squat, srot);
/* do blending */
interp_qt_qtqt(fquat, dquat, squat, srcweight);
@@ -2403,8 +2412,8 @@ void blend_m4_m4m4(float out[4][4],
mat4_to_loc_rot_size(dloc, drot, dscale, dst);
mat4_to_loc_rot_size(sloc, srot, sscale, src);
- mat3_normalized_to_quat(dquat, drot);
- mat3_normalized_to_quat(squat, srot);
+ mat3_normalized_to_quat_fast(dquat, drot);
+ mat3_normalized_to_quat_fast(squat, srot);
/* do blending */
interp_v3_v3v3(floc, dloc, sloc, srcweight);
@@ -2440,11 +2449,11 @@ void interp_m3_m3m3(float R[3][3], const float A[3][3], const float B[3][3], con
* Note that a flip of two axes is just a rotation of 180 degrees around the third axis, and
* three flipped axes are just an 180 degree rotation + a single axis flip. It is thus sufficient
* to solve this problem for single axis flips. */
- if (determinant_m3_array(U_A) < 0) {
+ if (is_negative_m3(U_A)) {
mul_m3_fl(U_A, -1.0f);
mul_m3_fl(P_A, -1.0f);
}
- if (determinant_m3_array(U_B) < 0) {
+ if (is_negative_m3(U_B)) {
mul_m3_fl(U_B, -1.0f);
mul_m3_fl(P_B, -1.0f);
}
@@ -2485,16 +2494,14 @@ void interp_m4_m4m4(float R[4][4], const float A[4][4], const float B[4][4], con
bool is_negative_m3(const float mat[3][3])
{
- float vec[3];
- cross_v3_v3v3(vec, mat[0], mat[1]);
- return (dot_v3v3(vec, mat[2]) < 0.0f);
+ return determinant_m3_array(mat) < 0.0f;
}
bool is_negative_m4(const float mat[4][4])
{
- float vec[3];
- cross_v3_v3v3(vec, mat[0], mat[1]);
- return (dot_v3v3(vec, mat[2]) < 0.0f);
+ /* Don't use #determinant_m4 as only the 3x3 components are needed
+ * when the matrix is used as a transformation to represent location/scale/rotation. */
+ return determinant_m4_mat3_array(mat) < 0.0f;
}
bool is_zero_m3(const float mat[3][3])
@@ -2552,11 +2559,8 @@ void loc_eul_size_to_mat4(float R[4][4],
R[3][2] = loc[2];
}
-void loc_eulO_size_to_mat4(float R[4][4],
- const float loc[3],
- const float eul[3],
- const float size[3],
- const short rotOrder)
+void loc_eulO_size_to_mat4(
+ float R[4][4], const float loc[3], const float eul[3], const float size[3], const short order)
{
float rmat[3][3], smat[3][3], tmat[3][3];
@@ -2564,7 +2568,7 @@ void loc_eulO_size_to_mat4(float R[4][4],
unit_m4(R);
/* Make rotation + scaling part. */
- eulO_to_mat3(rmat, eul, rotOrder);
+ eulO_to_mat3(rmat, eul, order);
size_to_mat3(smat, size);
mul_m3_m3m3(tmat, rmat, smat);
@@ -3066,14 +3070,14 @@ void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4])
}
}
-void pseudoinverse_m4_m4(float Ainv[4][4], const float A_[4][4], float epsilon)
+void pseudoinverse_m4_m4(float inverse[4][4], const float mat[4][4], float epsilon)
{
/* compute Moore-Penrose pseudo inverse of matrix, singular values
* below epsilon are ignored for stability (truncated SVD) */
float A[4][4], V[4][4], W[4], Wm[4][4], U[4][4];
int i;
- transpose_m4_m4(A, A_);
+ transpose_m4_m4(A, mat);
svd_m4(V, W, U, A);
transpose_m4(U);
transpose_m4(V);
@@ -3085,18 +3089,18 @@ void pseudoinverse_m4_m4(float Ainv[4][4], const float A_[4][4], float epsilon)
transpose_m4(V);
- mul_m4_series(Ainv, U, Wm, V);
+ mul_m4_series(inverse, U, Wm, V);
}
-void pseudoinverse_m3_m3(float Ainv[3][3], const float A[3][3], float epsilon)
+void pseudoinverse_m3_m3(float inverse[3][3], const float mat[3][3], float epsilon)
{
/* try regular inverse when possible, otherwise fall back to slow svd */
- if (!invert_m3_m3(Ainv, A)) {
- float tmp[4][4], tmpinv[4][4];
+ if (!invert_m3_m3(inverse, mat)) {
+ float mat_tmp[4][4], tmpinv[4][4];
- copy_m4_m3(tmp, A);
- pseudoinverse_m4_m4(tmpinv, tmp, epsilon);
- copy_m3_m4(Ainv, tmpinv);
+ copy_m4_m3(mat_tmp, mat);
+ pseudoinverse_m4_m4(tmpinv, mat_tmp, epsilon);
+ copy_m3_m4(inverse, tmpinv);
}
}
@@ -3106,22 +3110,22 @@ bool has_zero_axis_m4(const float matrix[4][4])
len_squared_v3(matrix[2]) < FLT_EPSILON;
}
-void invert_m4_m4_safe(float Ainv[4][4], const float A[4][4])
+void invert_m4_m4_safe(float inverse[4][4], const float mat[4][4])
{
- if (!invert_m4_m4(Ainv, A)) {
- float Atemp[4][4];
+ if (!invert_m4_m4(inverse, mat)) {
+ float mat_tmp[4][4];
- copy_m4_m4(Atemp, A);
+ copy_m4_m4(mat_tmp, mat);
/* Matrix is degenerate (e.g. 0 scale on some axis), ideally we should
* never be in this situation, but try to invert it anyway with tweak.
*/
- Atemp[0][0] += 1e-8f;
- Atemp[1][1] += 1e-8f;
- Atemp[2][2] += 1e-8f;
+ mat_tmp[0][0] += 1e-8f;
+ mat_tmp[1][1] += 1e-8f;
+ mat_tmp[2][2] += 1e-8f;
- if (!invert_m4_m4(Ainv, Atemp)) {
- unit_m4(Ainv);
+ if (!invert_m4_m4(inverse, mat_tmp)) {
+ unit_m4(inverse);
}
}
}
@@ -3141,24 +3145,24 @@ void invert_m4_m4_safe(float Ainv[4][4], const float A[4][4])
* where we want to specify the length of the degenerate axes.
* \{ */
-void invert_m4_m4_safe_ortho(float Ainv[4][4], const float A[4][4])
+void invert_m4_m4_safe_ortho(float inverse[4][4], const float mat[4][4])
{
- if (UNLIKELY(!invert_m4_m4(Ainv, A))) {
- float Atemp[4][4];
- copy_m4_m4(Atemp, A);
- if (UNLIKELY(!(orthogonalize_m4_zero_axes(Atemp, 1.0f) && invert_m4_m4(Ainv, Atemp)))) {
- unit_m4(Ainv);
+ if (UNLIKELY(!invert_m4_m4(inverse, mat))) {
+ float mat_tmp[4][4];
+ copy_m4_m4(mat_tmp, mat);
+ if (UNLIKELY(!(orthogonalize_m4_zero_axes(mat_tmp, 1.0f) && invert_m4_m4(inverse, mat_tmp)))) {
+ unit_m4(inverse);
}
}
}
-void invert_m3_m3_safe_ortho(float Ainv[3][3], const float A[3][3])
+void invert_m3_m3_safe_ortho(float inverse[3][3], const float mat[3][3])
{
- if (UNLIKELY(!invert_m3_m3(Ainv, A))) {
- float Atemp[3][3];
- copy_m3_m3(Atemp, A);
- if (UNLIKELY(!(orthogonalize_m3_zero_axes(Atemp, 1.0f) && invert_m3_m3(Ainv, Atemp)))) {
- unit_m3(Ainv);
+ if (UNLIKELY(!invert_m3_m3(inverse, mat))) {
+ float mat_tmp[3][3];
+ copy_m3_m3(mat_tmp, mat);
+ if (UNLIKELY(!(orthogonalize_m3_zero_axes(mat_tmp, 1.0f) && invert_m3_m3(inverse, mat_tmp)))) {
+ unit_m3(inverse);
}
}
}
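
A short sketch (not part of the patch) of why the `_safe` inversion variants matter: a matrix with a zero-scaled axis has no regular inverse, so `invert_m4_m4` reports failure while `invert_m4_m4_safe` still leaves the output in a usable state (a nudged inverse, or the unit matrix in the worst case):

float mat[4][4], inverse[4][4];
unit_m4(mat);
mat[1][1] = 0.0f; /* Zero scale on one axis makes the determinant zero. */

const bool ok = invert_m4_m4(inverse, mat);
BLI_assert(!ok); /* Regular inversion fails on the degenerate matrix. */
UNUSED_VARS_NDEBUG(ok);

invert_m4_m4_safe(inverse, mat); /* Falls back to a nudged copy and always produces a result. */
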
diff --git a/source/blender/blenlib/intern/math_rotation.c b/source/blender/blenlib/intern/math_rotation.c
index 92223bdf1d5..ae068e3fb19 100644
--- a/source/blender/blenlib/intern/math_rotation.c
+++ b/source/blender/blenlib/intern/math_rotation.c
@@ -176,7 +176,7 @@ void quat_to_compatible_quat(float q[4], const float a[4], const float old[4])
}
}
-/* skip error check, currently only needed by mat3_to_quat_is_ok */
+/* Skip error check, currently only needed by #mat3_to_quat_legacy. */
static void quat_to_mat3_no_error(float m[3][3], const float q[4])
{
double q0, q1, q2, q3, qda, qdb, qdc, qaa, qab, qac, qbb, qbc, qcc;
@@ -269,9 +269,11 @@ void quat_to_mat4(float m[4][4], const float q[4])
m[3][3] = 1.0f;
}
-void mat3_normalized_to_quat(float q[4], const float mat[3][3])
+void mat3_normalized_to_quat_fast(float q[4], const float mat[3][3])
{
BLI_ASSERT_UNIT_M3(mat);
+ /* Caller must ensure matrices aren't negative for valid results, see: T24291, T94231. */
+ BLI_assert(!is_negative_m3(mat));
/* Check the trace of the matrix - bad precision if close to -1. */
const float trace = mat[0][0] + mat[1][1] + mat[2][2];
@@ -332,34 +334,54 @@ void mat3_normalized_to_quat(float q[4], const float mat[3][3])
normalize_qt(q);
}
-void mat3_to_quat(float q[4], const float m[3][3])
-{
- float unit_mat[3][3];
- /* work on a copy */
- /* this is needed AND a 'normalize_qt' in the end */
- normalize_m3_m3(unit_mat, m);
- mat3_normalized_to_quat(q, unit_mat);
+static void mat3_normalized_to_quat_with_checks(float q[4], float mat[3][3])
+{
+ const float det = determinant_m3_array(mat);
+ if (UNLIKELY(!isfinite(det))) {
+ unit_m3(mat);
+ }
+ else if (UNLIKELY(det < 0.0f)) {
+ negate_m3(mat);
+ }
+ mat3_normalized_to_quat_fast(q, mat);
}
-void mat4_normalized_to_quat(float q[4], const float m[4][4])
+void mat3_normalized_to_quat(float q[4], const float mat[3][3])
{
- float mat3[3][3];
+ float unit_mat_abs[3][3];
+ copy_m3_m3(unit_mat_abs, mat);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
+}
- copy_m3_m4(mat3, m);
- mat3_normalized_to_quat(q, mat3);
+void mat3_to_quat(float q[4], const float mat[3][3])
+{
+ float unit_mat_abs[3][3];
+ normalize_m3_m3(unit_mat_abs, mat);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
}
-void mat4_to_quat(float q[4], const float m[4][4])
+void mat4_normalized_to_quat(float q[4], const float mat[4][4])
{
- float mat3[3][3];
+ float unit_mat_abs[3][3];
+ copy_m3_m4(unit_mat_abs, mat);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
+}
- copy_m3_m4(mat3, m);
- mat3_to_quat(q, mat3);
+void mat4_to_quat(float q[4], const float mat[4][4])
+{
+ float unit_mat_abs[3][3];
+ copy_m3_m4(unit_mat_abs, mat);
+ normalize_m3(unit_mat_abs);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
}
-void mat3_to_quat_is_ok(float q[4], const float wmat[3][3])
+void mat3_to_quat_legacy(float q[4], const float wmat[3][3])
{
+ /* Legacy version of #mat3_to_quat which has slightly different behavior.
+ * Keep for particle-system & boids since replacing this will make subtle changes
+ * that impact hair in existing files. See: D15772. */
+
float mat[3][3], matr[3][3], matn[3][3], q1[4], q2[4], angle, si, co, nor[3];
/* work on a copy */
@@ -498,7 +520,10 @@ void rotation_between_quats_to_quat(float q[4], const float q1[4], const float q
mul_qt_qtqt(q, tquat, q2);
}
-float quat_split_swing_and_twist(const float q_in[4], int axis, float r_swing[4], float r_twist[4])
+float quat_split_swing_and_twist(const float q_in[4],
+ const int axis,
+ float r_swing[4],
+ float r_twist[4])
{
BLI_assert(axis >= 0 && axis <= 2);
@@ -915,6 +940,65 @@ float tri_to_quat(float q[4], const float a[3], const float b[3], const float c[
return len;
}
+void sin_cos_from_fraction(int numerator, int denominator, float *r_sin, float *r_cos)
+{
+  /* When creating a circle from an integer fraction, calling #sinf & #cosf on the fraction
+   * doesn't create symmetrical values (because floats can't represent Pi exactly).
+ * Resolve this when the rotation is calculated from a fraction by mapping the `numerator`
+ * to lower values so X/Y values for points around a circle are exactly symmetrical, see T87779.
+ *
+ * Multiply both the `numerator` and `denominator` by eight to ensure we can divide the circle
+ * into 8 octants. For each octant, we then use symmetry and negation to bring the `numerator`
+ * closer to the origin where precision is highest.
+ *
+   * Cases 2, 4, 5 and 7 use the trigonometric identity sin(-x) == -sin(x).
+   * Cases 1, 2, 5 and 6 swap the pointers `r_sin` and `r_cos`.
+ */
+ BLI_assert(0 <= numerator);
+ BLI_assert(numerator <= denominator);
+ BLI_assert(denominator > 0);
+
+  numerator *= 8; /* Multiply the numerator by the same factor as the denominator. */
+ const int octant = numerator / denominator; /* Determine the octant. */
+ denominator *= 8; /* Ensure denominator is a multiple of eight. */
+ float cos_sign = 1.0f; /* Either 1.0f or -1.0f. */
+
+ switch (octant) {
+ case 0:
+ /* Primary octant, nothing to do. */
+ break;
+ case 1:
+ case 2:
+ numerator = (denominator / 4) - numerator;
+ SWAP(float *, r_sin, r_cos);
+ break;
+ case 3:
+ case 4:
+ numerator = (denominator / 2) - numerator;
+ cos_sign = -1.0f;
+ break;
+ case 5:
+ case 6:
+ numerator = numerator - (denominator * 3 / 4);
+ SWAP(float *, r_sin, r_cos);
+ cos_sign = -1.0f;
+ break;
+ case 7:
+ numerator = numerator - denominator;
+ break;
+ default:
+ BLI_assert_unreachable();
+ }
+
+ BLI_assert(-denominator / 4 <= numerator); /* Numerator may be negative. */
+ BLI_assert(numerator <= denominator / 4);
+ BLI_assert(cos_sign == -1.0f || cos_sign == 1.0f);
+
+ const float angle = (float)(2.0 * M_PI) * ((float)numerator / (float)denominator);
+ *r_sin = sinf(angle);
+ *r_cos = cosf(angle) * cos_sign;
+}
+
void print_qt(const char *str, const float q[4])
{
printf("%s: %.3f %.3f %.3f %.3f\n", str, q[0], q[1], q[2], q[3]);
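The `sin_cos_from_fraction` addition above trades a single sinf/cosf call for octant folding so that points generated from a fraction of a full turn come out exactly symmetrical. A minimal usage sketch, assuming the blenlib math headers; the `circle_points` helper is hypothetical and only for illustration:

/* Fill `r_points` with `count` evenly spaced points on the unit circle.
 * Points mirrored across the X or Y axis get bit-identical coordinates,
 * which calling sinf/cosf on (2 * M_PI * i / count) does not guarantee. */
static void circle_points(float (*r_points)[2], const int count)
{
  for (int i = 0; i < count; i++) {
    /* Arguments: numerator, denominator, r_sin, r_cos. */
    sin_cos_from_fraction(i, count, &r_points[i][1], &r_points[i][0]);
  }
}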
@@ -1322,10 +1406,10 @@ void mat4_normalized_to_eul(float eul[3], const float m[4][4])
copy_m3_m4(mat3, m);
mat3_normalized_to_eul(eul, mat3);
}
-void mat4_to_eul(float eul[3], const float m[4][4])
+void mat4_to_eul(float eul[3], const float mat[4][4])
{
float mat3[3][3];
- copy_m3_m4(mat3, m);
+ copy_m3_m4(mat3, mat);
mat3_to_eul(eul, mat3);
}
@@ -1360,7 +1444,7 @@ void eul_to_quat(float quat[4], const float eul[3])
quat[3] = cj * cs - sj * sc;
}
-void rotate_eul(float beul[3], const char axis, const float ang)
+void rotate_eul(float beul[3], const char axis, const float angle)
{
float eul[3], mat1[3][3], mat2[3][3], totmat[3][3];
@@ -1368,13 +1452,13 @@ void rotate_eul(float beul[3], const char axis, const float ang)
eul[0] = eul[1] = eul[2] = 0.0f;
if (axis == 'X') {
- eul[0] = ang;
+ eul[0] = angle;
}
else if (axis == 'Y') {
- eul[1] = ang;
+ eul[1] = angle;
}
else {
- eul[2] = ang;
+ eul[2] = angle;
}
eul_to_mat3(mat1, eul);
@@ -1730,23 +1814,23 @@ void mat3_to_compatible_eulO(float eul[3],
void mat4_normalized_to_compatible_eulO(float eul[3],
const float oldrot[3],
const short order,
- const float m[4][4])
+ const float mat[4][4])
{
float mat3[3][3];
/* for now, we'll just do this the slow way (i.e. copying matrices) */
- copy_m3_m4(mat3, m);
+ copy_m3_m4(mat3, mat);
mat3_normalized_to_compatible_eulO(eul, oldrot, order, mat3);
}
void mat4_to_compatible_eulO(float eul[3],
const float oldrot[3],
const short order,
- const float m[4][4])
+ const float mat[4][4])
{
float mat3[3][3];
/* for now, we'll just do this the slow way (i.e. copying matrices) */
- copy_m3_m4(mat3, m);
+ copy_m3_m4(mat3, mat);
normalize_m3(mat3);
mat3_normalized_to_compatible_eulO(eul, oldrot, order, mat3);
}
@@ -1765,7 +1849,7 @@ void quat_to_compatible_eulO(float eul[3],
/* rotate the given euler by the given angle on the specified axis */
/* NOTE: is this safe to do with different axis orders? */
-void rotate_eulO(float beul[3], const short order, char axis, float ang)
+void rotate_eulO(float beul[3], const short order, const char axis, const float angle)
{
float eul[3], mat1[3][3], mat2[3][3], totmat[3][3];
@@ -1774,13 +1858,13 @@ void rotate_eulO(float beul[3], const short order, char axis, float ang)
zero_v3(eul);
if (axis == 'X') {
- eul[0] = ang;
+ eul[0] = angle;
}
else if (axis == 'Y') {
- eul[1] = ang;
+ eul[1] = angle;
}
else {
- eul[2] = ang;
+ eul[2] = angle;
}
eulO_to_mat3(mat1, eul, order);
diff --git a/source/blender/blenlib/intern/math_rotation.cc b/source/blender/blenlib/intern/math_rotation.cc
index 74300d55954..091e8af85d9 100644
--- a/source/blender/blenlib/intern/math_rotation.cc
+++ b/source/blender/blenlib/intern/math_rotation.cc
@@ -23,4 +23,17 @@ float3 rotate_direction_around_axis(const float3 &direction, const float3 &axis,
return axis_scaled + diff * std::cos(angle) + cross * std::sin(angle);
}
+float3 rotate_around_axis(const float3 &vector,
+ const float3 &center,
+ const float3 &axis,
+ const float angle)
+
+{
+ float3 result = vector - center;
+ float mat[3][3];
+ axis_angle_normalized_to_mat3(mat, axis, angle);
+ mul_m3_v3(mat, result);
+ return result + center;
+}
+
} // namespace blender::math
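A short usage sketch for the new `blender::math::rotate_around_axis` (assuming the blenlib C++ headers; the values are illustrative only):

/* Rotate a point a quarter turn around the Z axis passing through (1, 0, 0). */
const blender::float3 point{2.0f, 0.0f, 0.0f};
const blender::float3 center{1.0f, 0.0f, 0.0f};
const blender::float3 axis{0.0f, 0.0f, 1.0f}; /* Must be normalized. */
const blender::float3 rotated = blender::math::rotate_around_axis(
    point, center, axis, float(M_PI_2));
/* `rotated` is approximately (1, 1, 0). */

Unlike `rotate_direction_around_axis`, this variant handles rotation about an axis that does not pass through the origin, by translating to `center` and back.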
diff --git a/source/blender/blenlib/intern/math_vector_inline.c b/source/blender/blenlib/intern/math_vector_inline.c
index 339bfb8f95e..27c17a90f5f 100644
--- a/source/blender/blenlib/intern/math_vector_inline.c
+++ b/source/blender/blenlib/intern/math_vector_inline.c
@@ -316,6 +316,27 @@ MINLINE void swap_v4_v4(float a[4], float b[4])
SWAP(float, a[3], b[3]);
}
+MINLINE void swap_v2_v2_db(double a[2], double b[2])
+{
+ SWAP(double, a[0], b[0]);
+ SWAP(double, a[1], b[1]);
+}
+
+MINLINE void swap_v3_v3_db(double a[3], double b[3])
+{
+ SWAP(double, a[0], b[0]);
+ SWAP(double, a[1], b[1]);
+ SWAP(double, a[2], b[2]);
+}
+
+MINLINE void swap_v4_v4_db(double a[4], double b[4])
+{
+ SWAP(double, a[0], b[0]);
+ SWAP(double, a[1], b[1]);
+ SWAP(double, a[2], b[2]);
+ SWAP(double, a[3], b[3]);
+}
+
/* float args -> vec */
MINLINE void copy_v2_fl2(float v[2], float x, float y)
@@ -613,10 +634,10 @@ MINLINE void mul_v2_v2_cw(float r[2], const float mat[2], const float vec[2])
MINLINE void mul_v2_v2_ccw(float r[2], const float mat[2], const float vec[2])
{
- BLI_assert(r != vec);
-
- r[0] = mat[0] * vec[0] + (-mat[1]) * vec[1];
- r[1] = mat[1] * vec[0] + (+mat[0]) * vec[1];
+ float r0 = mat[0] * vec[0] + (-mat[1]) * vec[1];
+ float r1 = mat[1] * vec[0] + (+mat[0]) * vec[1];
+ r[0] = r0;
+ r[1] = r1;
}
MINLINE float mul_project_m4_v3_zfac(const float mat[4][4], const float co[3])
diff --git a/source/blender/blenlib/intern/mesh_boolean.cc b/source/blender/blenlib/intern/mesh_boolean.cc
index 700c126ca4c..0d8ad1da582 100644
--- a/source/blender/blenlib/intern/mesh_boolean.cc
+++ b/source/blender/blenlib/intern/mesh_boolean.cc
@@ -1675,7 +1675,7 @@ static Edge find_good_sorting_edge(const Vert *testp,
* The algorithm is similar to the one for find_ambient_cell, except that
* instead of an arbitrary point known to be outside the whole mesh, we
* have a particular point (v) and we just want to determine the patches
- * that that point is between in sorting-around-an-edge order.
+ * that point is between in sorting-around-an-edge order.
*/
static int find_containing_cell(const Vert *v,
int t,
@@ -2966,6 +2966,11 @@ static std::ostream &operator<<(std::ostream &os, const FaceMergeState &fms)
* \a tris all have the same original face.
* Find the 2d edge/triangle topology for these triangles, but only the ones facing in the
* norm direction, and whether each edge is dissolvable or not.
+ * If we did the initial triangulation properly, and any Delaunay triangulations of intersections
+ * properly, then each triangle edge should have at most one neighbor.
+ * However, there can be anomalies. For example, if an input face is self-intersecting, we fall
+ * back on the floating-point poly-fill triangulation, after which all bets are off.
+ * Hence, try to be tolerant of such unexpected topology.
*/
static void init_face_merge_state(FaceMergeState *fms,
const Vector<int> &tris,
@@ -3053,16 +3058,35 @@ static void init_face_merge_state(FaceMergeState *fms,
std::cout << "me.v1 == mf.vert[i] so set edge[" << me_index << "].left_face = " << f
<< "\n";
}
- BLI_assert(me.left_face == -1);
- fms->edge[me_index].left_face = f;
+ if (me.left_face != -1) {
+ /* Unexpected in the normal case: this means more than one triangle shares this
+ * edge in the same orientation. But be tolerant of this case. By making this
+ * edge not dissolvable, we'll avoid future problems due to this non-manifold topology.
+ */
+ if (dbg_level > 1) {
+ std::cout << "me.left_face was already occupied, so triangulation wasn't good\n";
+ }
+ me.dissolvable = false;
+ }
+ else {
+ fms->edge[me_index].left_face = f;
+ }
}
else {
if (dbg_level > 1) {
std::cout << "me.v1 != mf.vert[i] so set edge[" << me_index << "].right_face = " << f
<< "\n";
}
- BLI_assert(me.right_face == -1);
- fms->edge[me_index].right_face = f;
+ if (me.right_face != -1) {
+ /* Unexpected, analogous to the me.left_face != -1 case above. */
+ if (dbg_level > 1) {
+ std::cout << "me.right_face was already occupied, so triangulation wasn't good\n";
+ }
+ me.dissolvable = false;
+ }
+ else {
+ fms->edge[me_index].right_face = f;
+ }
}
fms->face[f].edge.append(me_index);
}
diff --git a/source/blender/blenlib/intern/mesh_intersect.cc b/source/blender/blenlib/intern/mesh_intersect.cc
index d5585f953ec..e8aa359fbe4 100644
--- a/source/blender/blenlib/intern/mesh_intersect.cc
+++ b/source/blender/blenlib/intern/mesh_intersect.cc
@@ -710,7 +710,7 @@ bool IMesh::erase_face_positions(int f_index, Span<bool> face_pos_erase, IMeshAr
* mark with null pointer and caller should call remove_null_faces().
* the loop is done.
*/
- this->face_[f_index] = NULL;
+ this->face_[f_index] = nullptr;
return true;
}
Array<const Vert *> new_vert(new_len);
diff --git a/source/blender/blenlib/intern/noise.c b/source/blender/blenlib/intern/noise.c
index c39a2b5a27e..3ec7c3f9804 100644
--- a/source/blender/blenlib/intern/noise.c
+++ b/source/blender/blenlib/intern/noise.c
@@ -1125,7 +1125,7 @@ float BLI_noise_cell(float x, float y, float z)
return (2.0f * BLI_cellNoiseU(x, y, z) - 1.0f);
}
-void BLI_noise_cell_v3(float x, float y, float z, float ca[3])
+void BLI_noise_cell_v3(float x, float y, float z, float r_ca[3])
{
/* avoid precision issues on unit coordinates */
x = (x + 0.000001f) * 1.00001f;
@@ -1136,9 +1136,9 @@ void BLI_noise_cell_v3(float x, float y, float z, float ca[3])
int yi = (int)(floor(y));
int zi = (int)(floor(z));
const float *p = HASHPNT(xi, yi, zi);
- ca[0] = p[0];
- ca[1] = p[1];
- ca[2] = p[2];
+ r_ca[0] = p[0];
+ r_ca[1] = p[1];
+ r_ca[2] = p[2];
}
/** \} */
diff --git a/source/blender/blenlib/intern/noise.cc b/source/blender/blenlib/intern/noise.cc
index a514c9e5183..8a073239b31 100644
--- a/source/blender/blenlib/intern/noise.cc
+++ b/source/blender/blenlib/intern/noise.cc
@@ -263,7 +263,6 @@ BLI_INLINE float mix(float v0, float v1, float x)
* + + |
* @ + + + + @ @------> x
* v0 v1
- *
*/
BLI_INLINE float mix(float v0, float v1, float v2, float v3, float x, float y)
{
@@ -809,15 +808,14 @@ float musgrave_hybrid_multi_fractal(const float co,
{
float p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -830,8 +828,12 @@ float musgrave_hybrid_multi_fractal(const float co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
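The hybrid multifractal fix is the same in all four overloads: the first octave is no longer pre-accumulated outside the loop, so the loop starts at `i = 0` with `value = 0`, `weight = 1` and `pwr = 1`, and the fractional remainder octave is only added while the weight is still significant, clamped the same way as inside the loop. A standalone sketch of the corrected accumulation pattern, using a hypothetical stand-in noise stub (the real `perlin_signed` lives in this file); the unchanged loop body shown here follows the usual hybrid multifractal formulation and is not copied from the patch:

#include <cmath>
#include <algorithm>

static float fake_noise(float p) /* Hypothetical stand-in for perlin_signed(). */
{
  return std::sin(p * 12.9898f) * 0.5f;
}

static float hybrid_multi_fractal_sketch(
    float p, float H, float lacunarity, float octaves, float offset, float gain)
{
  const float pwHL = std::pow(lacunarity, -H);
  float pwr = 1.0f;   /* First octave uses full amplitude. */
  float value = 0.0f; /* Accumulate from zero instead of pre-seeding octave 0. */
  float weight = 1.0f;

  for (int i = 0; (weight > 0.001f) && (i < int(octaves)); i++) {
    weight = std::min(weight, 1.0f);
    const float signal = (fake_noise(p) + offset) * pwr;
    value += weight * signal;
    weight *= gain * signal;
    pwr *= pwHL;
    p *= lacunarity;
  }
  /* The remainder octave now also respects the clamped weight. */
  const float rmd = octaves - std::floor(octaves);
  if ((rmd != 0.0f) && (weight > 0.001f)) {
    weight = std::min(weight, 1.0f);
    value += rmd * weight * (fake_noise(p) + offset) * pwr;
  }
  return value;
}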
@@ -961,15 +963,14 @@ float musgrave_hybrid_multi_fractal(const float2 co,
{
float2 p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -982,8 +983,12 @@ float musgrave_hybrid_multi_fractal(const float2 co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
@@ -1115,15 +1120,14 @@ float musgrave_hybrid_multi_fractal(const float3 co,
{
float3 p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -1136,8 +1140,12 @@ float musgrave_hybrid_multi_fractal(const float3 co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
@@ -1269,15 +1277,14 @@ float musgrave_hybrid_multi_fractal(const float4 co,
{
float4 p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -1290,8 +1297,12 @@ float musgrave_hybrid_multi_fractal(const float4 co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
diff --git a/source/blender/blenlib/intern/path_util.c b/source/blender/blenlib/intern/path_util.c
index 5a96221c8d1..623dd572b11 100644
--- a/source/blender/blenlib/intern/path_util.c
+++ b/source/blender/blenlib/intern/path_util.c
@@ -1105,29 +1105,29 @@ bool BLI_path_program_search(char *fullname, const size_t maxlen, const char *na
path = BLI_getenv("PATH");
if (path) {
- char filename[FILE_MAX];
+ char filepath_test[FILE_MAX];
const char *temp;
do {
temp = strchr(path, separator);
if (temp) {
- memcpy(filename, path, temp - path);
- filename[temp - path] = 0;
+ memcpy(filepath_test, path, temp - path);
+ filepath_test[temp - path] = 0;
path = temp + 1;
}
else {
- BLI_strncpy(filename, path, sizeof(filename));
+ BLI_strncpy(filepath_test, path, sizeof(filepath_test));
}
- BLI_path_append(filename, maxlen, name);
+ BLI_path_append(filepath_test, maxlen, name);
if (
#ifdef _WIN32
- BLI_path_program_extensions_add_win32(filename, maxlen)
+ BLI_path_program_extensions_add_win32(filepath_test, maxlen)
#else
- BLI_exists(filename)
+ BLI_exists(filepath_test)
#endif
) {
- BLI_strncpy(fullname, filename, maxlen);
+ BLI_strncpy(fullname, filepath_test, maxlen);
retval = true;
break;
}
diff --git a/source/blender/blenlib/intern/rct.c b/source/blender/blenlib/intern/rct.c
index 0bb606c288e..7248db5b718 100644
--- a/source/blender/blenlib/intern/rct.c
+++ b/source/blender/blenlib/intern/rct.c
@@ -265,7 +265,7 @@ bool BLI_rcti_isect_segment(const rcti *rect, const int s1[2], const int s2[2])
/* diagonal: [/] */
tvec1[0] = rect->xmin;
tvec1[1] = rect->ymin;
- tvec2[0] = rect->xmin;
+ tvec2[0] = rect->xmax;
tvec2[1] = rect->ymax;
if (isect_segments_i(s1, s2, tvec1, tvec2)) {
return true;
@@ -311,7 +311,7 @@ bool BLI_rctf_isect_segment(const rctf *rect, const float s1[2], const float s2[
/* diagonal: [/] */
tvec1[0] = rect->xmin;
tvec1[1] = rect->ymin;
- tvec2[0] = rect->xmin;
+ tvec2[0] = rect->xmax;
tvec2[1] = rect->ymax;
if (isect_segments_fl(s1, s2, tvec1, tvec2)) {
return true;
diff --git a/source/blender/blenlib/intern/smallhash.c b/source/blender/blenlib/intern/smallhash.c
index 2d76f662611..8263f8ff34e 100644
--- a/source/blender/blenlib/intern/smallhash.c
+++ b/source/blender/blenlib/intern/smallhash.c
@@ -329,8 +329,7 @@ void **BLI_smallhash_iternew_p(const SmallHash *sh, SmallHashIter *iter, uintptr
/** \name Debugging & Introspection
* \{ */
-/* NOTE(campbell): this was called _print_smhash in knifetool.c
- * it may not be intended for general use. */
+/* NOTE(@campbellbarton): useful for debugging but may not be intended for general use. */
#if 0
void BLI_smallhash_print(SmallHash *sh)
{
diff --git a/source/blender/blenlib/intern/string_search.cc b/source/blender/blenlib/intern/string_search.cc
index 14d85b99739..31ea24eb494 100644
--- a/source/blender/blenlib/intern/string_search.cc
+++ b/source/blender/blenlib/intern/string_search.cc
@@ -11,8 +11,8 @@
#include "BLI_timeit.hh"
/* Right arrow, keep in sync with #UI_MENU_ARROW_SEP in `UI_interface.h`. */
-#define UI_MENU_ARROW_SEP "\xe2\x96\xb6"
-#define UI_MENU_ARROW_SEP_UNICODE 0x25b6
+#define UI_MENU_ARROW_SEP "\xe2\x96\xb8"
+#define UI_MENU_ARROW_SEP_UNICODE 0x25b8
namespace blender::string_search {
diff --git a/source/blender/blenlib/intern/string_utf8.c b/source/blender/blenlib/intern/string_utf8.c
index 94efb0dd9e7..17fb451e422 100644
--- a/source/blender/blenlib/intern/string_utf8.c
+++ b/source/blender/blenlib/intern/string_utf8.c
@@ -363,6 +363,10 @@ size_t BLI_strncpy_wchar_from_utf8(wchar_t *__restrict dst_w,
int BLI_wcwidth(char32_t ucs)
{
+ /* Treat private use areas (icon fonts), symbols, and emoticons as double-width. */
+ if (ucs >= 0xf0000 || (ucs >= 0xe000 && ucs < 0xf8ff) || (ucs >= 0x1f300 && ucs < 0x1fbff)) {
+ return 2;
+ }
return mk_wcwidth(ucs);
}
@@ -399,7 +403,7 @@ int BLI_str_utf8_char_width_safe(const char *p)
/* copied from glib's gutf8.c, added 'Err' arg */
-/* NOTE(campbell): glib uses uint for unicode, best we do the same,
+/* NOTE(@campbellbarton): glib uses uint for unicode, best we do the same,
* though we don't typedef it. */
#define UTF8_COMPUTE(Char, Mask, Len, Err) \
@@ -688,25 +692,25 @@ const char *BLI_str_find_next_char_utf8(const char *p, const char *str_end)
size_t BLI_str_partition_utf8(const char *str,
const uint delim[],
- const char **sep,
- const char **suf)
+ const char **r_sep,
+ const char **r_suf)
{
- return BLI_str_partition_ex_utf8(str, NULL, delim, sep, suf, false);
+ return BLI_str_partition_ex_utf8(str, NULL, delim, r_sep, r_suf, false);
}
size_t BLI_str_rpartition_utf8(const char *str,
const uint delim[],
- const char **sep,
- const char **suf)
+ const char **r_sep,
+ const char **r_suf)
{
- return BLI_str_partition_ex_utf8(str, NULL, delim, sep, suf, true);
+ return BLI_str_partition_ex_utf8(str, NULL, delim, r_sep, r_suf, true);
}
size_t BLI_str_partition_ex_utf8(const char *str,
const char *end,
const uint delim[],
- const char **sep,
- const char **suf,
+ const char **r_sep,
+ const char **r_suf,
const bool from_right)
{
const size_t str_len = end ? (size_t)(end - str) : strlen(str);
@@ -717,36 +721,32 @@ size_t BLI_str_partition_ex_utf8(const char *str,
/* Note that here, we assume end points to a valid utf8 char! */
BLI_assert((end >= str) && (BLI_str_utf8_as_unicode(end) != BLI_UTF8_ERR));
- *suf = (char *)(str + str_len);
-
- size_t index;
- for (*sep = (char *)(from_right ? BLI_str_find_prev_char_utf8(end, str) : str), index = 0;
- from_right ? (*sep > str) : ((*sep < end) && (**sep != '\0'));
- *sep = (char *)(from_right ? (str != *sep ? BLI_str_find_prev_char_utf8(*sep, str) : NULL) :
- str + index)) {
+ char *suf = (char *)(str + str_len);
+ size_t index = 0;
+ for (char *sep = (char *)(from_right ? BLI_str_find_prev_char_utf8(end, str) : str);
+ from_right ? (sep > str) : ((sep < end) && (*sep != '\0'));
+ sep = (char *)(from_right ? (str != sep ? BLI_str_find_prev_char_utf8(sep, str) : NULL) :
+ str + index)) {
size_t index_ofs = 0;
- const uint c = BLI_str_utf8_as_unicode_step_or_error(*sep, (size_t)(end - *sep), &index_ofs);
- index += index_ofs;
-
- if (c == BLI_UTF8_ERR) {
- *suf = *sep = NULL;
+ const uint c = BLI_str_utf8_as_unicode_step_or_error(sep, (size_t)(end - sep), &index_ofs);
+ if (UNLIKELY(c == BLI_UTF8_ERR)) {
break;
}
+ index += index_ofs;
for (const uint *d = delim; *d != '\0'; d++) {
if (*d == c) {
- /* *suf is already correct in case from_right is true. */
- if (!from_right) {
- *suf = (char *)(str + index);
- }
- return (size_t)(*sep - str);
+ /* `suf` is already correct in case from_right is true. */
+ *r_sep = sep;
+ *r_suf = from_right ? suf : (char *)(str + index);
+ return (size_t)(sep - str);
}
}
- *suf = *sep; /* Useful in 'from_right' case! */
+ suf = sep; /* Useful in 'from_right' case! */
}
- *suf = *sep = NULL;
+ *r_suf = *r_sep = NULL;
return str_len;
}
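With the rework above, `*r_sep` and `*r_suf` are written exactly once, either to the found delimiter and suffix or to NULL. A small usage sketch, assuming `BLI_string_utf8.h`; the string and delimiter are illustrative:

/* Partition "a.b.c" at the first '.'; delimiters are a 0-terminated array of unicode values. */
const char *str = "a.b.c";
const unsigned int delim[] = {'.', 0};
const char *sep, *suf;
const size_t pre_len = BLI_str_partition_utf8(str, delim, &sep, &suf);
/* pre_len == 1, `sep` points at ".b.c", `suf` points at "b.c".
 * When no delimiter is found, both pointers are NULL and the string length is returned. */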
@@ -786,9 +786,9 @@ int BLI_str_utf8_offset_to_column(const char *str, int offset)
int BLI_str_utf8_offset_from_column(const char *str, int column)
{
- int offset = 0, pos = 0, col;
+ int offset = 0, pos = 0;
while (*(str + offset) && pos < column) {
- col = BLI_str_utf8_char_width_safe(str + offset);
+ const int col = BLI_str_utf8_char_width_safe(str + offset);
if (pos + col > column) {
break;
}
diff --git a/source/blender/blenlib/intern/system.c b/source/blender/blenlib/intern/system.c
index 35e26e0cb33..781b38f713a 100644
--- a/source/blender/blenlib/intern/system.c
+++ b/source/blender/blenlib/intern/system.c
@@ -21,7 +21,9 @@
# include "BLI_winstuff.h"
#else
-# include <execinfo.h>
+# if defined(HAVE_EXECINFO_H)
+# include <execinfo.h>
+# endif
# include <unistd.h>
#endif
@@ -61,9 +63,9 @@ int BLI_cpu_support_sse2(void)
#if !defined(_MSC_VER)
void BLI_system_backtrace(FILE *fp)
{
- /* ------------- */
- /* Linux / Apple */
-# if defined(__linux__) || defined(__APPLE__)
+  /* ----------------------------- */
+  /* If the system has execinfo.h. */
+# if defined(HAVE_EXECINFO_H)
# define SIZE 100
void *buffer[SIZE];
diff --git a/source/blender/blenlib/intern/task_pool.cc b/source/blender/blenlib/intern/task_pool.cc
index a29dbe95ba9..c335d04413c 100644
--- a/source/blender/blenlib/intern/task_pool.cc
+++ b/source/blender/blenlib/intern/task_pool.cc
@@ -84,11 +84,11 @@ class Task {
free_taskdata(other.free_taskdata),
freedata(other.freedata)
{
- ((Task &)other).pool = NULL;
- ((Task &)other).run = NULL;
- ((Task &)other).taskdata = NULL;
+ ((Task &)other).pool = nullptr;
+ ((Task &)other).run = nullptr;
+ ((Task &)other).taskdata = nullptr;
((Task &)other).free_taskdata = false;
- ((Task &)other).freedata = NULL;
+ ((Task &)other).freedata = nullptr;
}
#else
Task(const Task &other) = delete;
diff --git a/source/blender/blenlib/intern/timeit.cc b/source/blender/blenlib/intern/timeit.cc
index f11f9c4ad94..7a8cf8da038 100644
--- a/source/blender/blenlib/intern/timeit.cc
+++ b/source/blender/blenlib/intern/timeit.cc
@@ -3,19 +3,29 @@
#include "BLI_timeit.hh"
#include <algorithm>
+#include <iomanip>
namespace blender::timeit {
void print_duration(Nanoseconds duration)
{
- if (duration < std::chrono::microseconds(100)) {
+ using namespace std::chrono;
+ if (duration < microseconds(100)) {
std::cout << duration.count() << " ns";
}
- else if (duration < std::chrono::seconds(5)) {
- std::cout << duration.count() / 1.0e6 << " ms";
+ else if (duration < seconds(5)) {
+ std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e6 << " ms";
+ }
+ else if (duration > seconds(90)) {
+ /* Long durations: print seconds, and also H:m:s */
+ const auto dur_hours = duration_cast<hours>(duration);
+ const auto dur_mins = duration_cast<minutes>(duration - dur_hours);
+ const auto dur_sec = duration_cast<seconds>(duration - dur_hours - dur_mins);
+ std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e9 << " s ("
+ << dur_hours.count() << "H:" << dur_mins.count() << "m:" << dur_sec.count() << "s)";
}
else {
- std::cout << duration.count() / 1.0e9 << " s";
+ std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e9 << " s";
}
}
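A small sketch of what the reworked formatting would print (assuming `BLI_timeit.hh`, where `Nanoseconds` is `std::chrono::nanoseconds`; each call writes to `std::cout` without a trailing newline):

using namespace blender::timeit;
print_duration(Nanoseconds(50));              /* "50 ns" */
print_duration(Nanoseconds(2'500'000));       /* "2.5 ms" (now fixed, one decimal) */
print_duration(Nanoseconds(42'000'000'000));  /* "42.0 s" */
print_duration(Nanoseconds(125'000'000'000)); /* "125.0 s (0H:2m:5s)" */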
diff --git a/source/blender/blenlib/intern/winstuff.c b/source/blender/blenlib/intern/winstuff.c
index e90a0ee02db..7e2c5e8f1dd 100644
--- a/source/blender/blenlib/intern/winstuff.c
+++ b/source/blender/blenlib/intern/winstuff.c
@@ -63,23 +63,17 @@ bool BLI_windows_register_blend_extension(const bool background)
char buffer[256];
char BlPath[MAX_PATH];
- char InstallDir[FILE_MAXDIR];
- char SysDir[FILE_MAXDIR];
- const char *ThumbHandlerDLL;
- char RegCmd[MAX_PATH * 2];
char MBox[256];
- char *blender_app;
-# ifndef _WIN64
- BOOL IsWOW64;
-# endif
printf("Registering file extension...");
GetModuleFileName(0, BlPath, MAX_PATH);
/* Replace the actual app name with the wrapper. */
- blender_app = strstr(BlPath, "blender.exe");
- if (blender_app != NULL) {
- strcpy(blender_app, "blender-launcher.exe");
+ {
+ char *blender_app = strstr(BlPath, "blender.exe");
+ if (blender_app != NULL) {
+ strcpy(blender_app, "blender-launcher.exe");
+ }
}
/* root is HKLM by default */
@@ -157,12 +151,17 @@ bool BLI_windows_register_blend_extension(const bool background)
}
# ifdef WITH_BLENDER_THUMBNAILER
- BLI_windows_get_executable_dir(InstallDir);
- GetSystemDirectory(SysDir, FILE_MAXDIR);
- ThumbHandlerDLL = "BlendThumb.dll";
- snprintf(
- RegCmd, MAX_PATH * 2, "%s\\regsvr32 /s \"%s\\%s\"", SysDir, InstallDir, ThumbHandlerDLL);
- system(RegCmd);
+ {
+ char RegCmd[MAX_PATH * 2];
+ char InstallDir[FILE_MAXDIR];
+ char SysDir[FILE_MAXDIR];
+ BLI_windows_get_executable_dir(InstallDir);
+ GetSystemDirectory(SysDir, FILE_MAXDIR);
+ const char *ThumbHandlerDLL = "BlendThumb.dll";
+ snprintf(
+ RegCmd, MAX_PATH * 2, "%s\\regsvr32 /s \"%s\\%s\"", SysDir, InstallDir, ThumbHandlerDLL);
+ system(RegCmd);
+ }
# endif
RegCloseKey(root);
diff --git a/source/blender/blenlib/tests/BLI_array_store_test.cc b/source/blender/blenlib/tests/BLI_array_store_test.cc
index 20e2a4d88f8..5d05e3be1f3 100644
--- a/source/blender/blenlib/tests/BLI_array_store_test.cc
+++ b/source/blender/blenlib/tests/BLI_array_store_test.cc
@@ -181,7 +181,7 @@ static void testbuffer_list_state_from_data__stride_expand(ListBase *lb,
#define TESTBUFFER_STRINGS_CREATE(lb, ...) \
{ \
BLI_listbase_clear(lb); \
- const char *data_array[] = {__VA_ARGS__ NULL}; \
+ const char *data_array[] = {__VA_ARGS__ nullptr}; \
testbuffer_list_state_from_string_array((lb), data_array); \
} \
((void)0)
@@ -795,10 +795,10 @@ TEST(array_store, TestChunk_Rand31_Stride11_Chunk21)
/* Test From Files (disabled, keep for local tests.) */
-void *file_read_binary_as_mem(const char *filepath, size_t pad_bytes, size_t *r_size)
+static void *file_read_binary_as_mem(const char *filepath, size_t pad_bytes, size_t *r_size)
{
FILE *fp = fopen(filepath, "rb");
- void *mem = NULL;
+ void *mem = nullptr;
if (fp) {
long int filelen_read;
@@ -810,14 +810,14 @@ void *file_read_binary_as_mem(const char *filepath, size_t pad_bytes, size_t *r_
fseek(fp, 0L, SEEK_SET);
mem = MEM_mallocN(filelen + pad_bytes, __func__);
- if (mem == NULL) {
+ if (mem == nullptr) {
goto finally;
}
filelen_read = fread(mem, 1, filelen, fp);
if ((filelen_read != filelen) || ferror(fp)) {
MEM_freeN(mem);
- mem = NULL;
+ mem = nullptr;
goto finally;
}
diff --git a/source/blender/blenlib/tests/BLI_bit_vector_test.cc b/source/blender/blenlib/tests/BLI_bit_vector_test.cc
new file mode 100644
index 00000000000..c477b464f0c
--- /dev/null
+++ b/source/blender/blenlib/tests/BLI_bit_vector_test.cc
@@ -0,0 +1,186 @@
+/* Apache License, Version 2.0 */
+
+#include "BLI_bit_vector.hh"
+#include "BLI_exception_safety_test_utils.hh"
+#include "BLI_strict_flags.h"
+
+#include "testing/testing.h"
+
+namespace blender::tests {
+
+TEST(bit_vector, DefaultConstructor)
+{
+ BitVector vec;
+ EXPECT_EQ(vec.size(), 0);
+}
+
+TEST(bit_vector, CopyConstructorInline)
+{
+ BitVector<> vec({false, false, true, true, false});
+ BitVector<> vec2 = vec;
+
+ EXPECT_EQ(vec.size(), 5);
+ EXPECT_EQ(vec2.size(), 5);
+
+ vec2[1].set();
+ EXPECT_FALSE(vec[1]);
+
+ EXPECT_FALSE(vec2[0]);
+ EXPECT_TRUE(vec2[1]);
+ EXPECT_TRUE(vec2[2]);
+ EXPECT_TRUE(vec2[3]);
+ EXPECT_FALSE(vec2[4]);
+}
+
+TEST(bit_vector, CopyConstructorLarge)
+{
+ BitVector<> vec(500, false);
+ vec[1].set();
+
+ BitVector<> vec2 = vec;
+
+ EXPECT_EQ(vec.size(), 500);
+ EXPECT_EQ(vec2.size(), 500);
+
+ vec2[2].set();
+ EXPECT_FALSE(vec[2]);
+
+ EXPECT_FALSE(vec2[0]);
+ EXPECT_TRUE(vec2[1]);
+ EXPECT_TRUE(vec2[2]);
+}
+
+TEST(bit_vector, MoveConstructorInline)
+{
+ BitVector<> vec({false, false, true, true, false});
+ BitVector<> vec2 = std::move(vec);
+
+ EXPECT_EQ(vec.size(), 0);
+ EXPECT_EQ(vec2.size(), 5);
+
+ EXPECT_FALSE(vec2[0]);
+ EXPECT_FALSE(vec2[1]);
+ EXPECT_TRUE(vec2[2]);
+ EXPECT_TRUE(vec2[3]);
+ EXPECT_FALSE(vec2[4]);
+}
+
+TEST(bit_vector, MoveConstructorLarge)
+{
+ BitVector<> vec(500, false);
+ vec[3].set();
+
+ BitVector<> vec2 = std::move(vec);
+
+ EXPECT_EQ(vec.size(), 0);
+ EXPECT_EQ(vec2.size(), 500);
+
+ EXPECT_FALSE(vec2[0]);
+ EXPECT_FALSE(vec2[1]);
+ EXPECT_FALSE(vec2[2]);
+ EXPECT_TRUE(vec2[3]);
+ EXPECT_FALSE(vec2[4]);
+}
+
+TEST(bit_vector, SizeConstructor)
+{
+ {
+ BitVector<> vec(0);
+ EXPECT_EQ(vec.size(), 0);
+ }
+ {
+ BitVector<> vec(5);
+ EXPECT_EQ(vec.size(), 5);
+ for (BitRef bit : vec) {
+ EXPECT_FALSE(bit);
+ }
+ }
+ {
+ BitVector<> vec(123);
+ EXPECT_EQ(vec.size(), 123);
+ for (BitRef bit : vec) {
+ EXPECT_FALSE(bit);
+ }
+ }
+}
+
+TEST(bit_vector, SizeFillConstructor)
+{
+ {
+ BitVector<> vec(5, false);
+ for (const int64_t i : IndexRange(5)) {
+ EXPECT_FALSE(vec[i]);
+ }
+ }
+ {
+ BitVector<> vec(123, true);
+ for (const int64_t i : IndexRange(123)) {
+ EXPECT_TRUE(vec[i]);
+ }
+ }
+}
+
+TEST(bit_vector, IndexAccess)
+{
+ BitVector<> vec(100, false);
+ vec[55].set();
+ EXPECT_FALSE(vec[50]);
+ EXPECT_FALSE(vec[51]);
+ EXPECT_FALSE(vec[52]);
+ EXPECT_FALSE(vec[53]);
+ EXPECT_FALSE(vec[54]);
+ EXPECT_TRUE(vec[55]);
+ EXPECT_FALSE(vec[56]);
+ EXPECT_FALSE(vec[57]);
+ EXPECT_FALSE(vec[58]);
+}
+
+TEST(bit_vector, Iterator)
+{
+ BitVector<> vec(100, false);
+ {
+ int64_t index = 0;
+ for (MutableBitRef bit : vec) {
+ bit.set(ELEM(index, 0, 4, 7, 10, 11));
+ index++;
+ }
+ }
+ {
+ int64_t index = 0;
+ for (BitRef bit : const_cast<const BitVector<> &>(vec)) {
+ EXPECT_EQ(bit, ELEM(index, 0, 4, 7, 10, 11));
+ index++;
+ }
+ }
+}
+
+TEST(bit_vector, Append)
+{
+ BitVector<> vec;
+ vec.append(false);
+ vec.append(true);
+ vec.append(true);
+ vec.append(false);
+
+ EXPECT_EQ(vec.size(), 4);
+ EXPECT_FALSE(vec[0]);
+ EXPECT_TRUE(vec[1]);
+ EXPECT_TRUE(vec[2]);
+ EXPECT_FALSE(vec[3]);
+}
+
+TEST(bit_vector, AppendMany)
+{
+ BitVector<> vec;
+ for (const int64_t i : IndexRange(1000)) {
+ vec.append(i % 2);
+ }
+ EXPECT_FALSE(vec[0]);
+ EXPECT_TRUE(vec[1]);
+ EXPECT_FALSE(vec[2]);
+ EXPECT_TRUE(vec[3]);
+ EXPECT_FALSE(vec[4]);
+ EXPECT_TRUE(vec[5]);
+}
+
+} // namespace blender::tests
diff --git a/source/blender/blenlib/tests/BLI_bitmap_test.cc b/source/blender/blenlib/tests/BLI_bitmap_test.cc
new file mode 100644
index 00000000000..fb9e03e3136
--- /dev/null
+++ b/source/blender/blenlib/tests/BLI_bitmap_test.cc
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+
+#include "BLI_bitmap.h"
+#include "testing/testing.h"
+
+namespace blender::tests {
+
+TEST(bitmap, empty_is_all_unset)
+{
+ BLI_BITMAP_DECLARE(bitmap, 10);
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_FALSE(BLI_BITMAP_TEST_BOOL(bitmap, i));
+ }
+}
+
+TEST(bitmap, find_first_unset_empty)
+{
+ BLI_BITMAP_DECLARE(bitmap, 10);
+ EXPECT_EQ(0, BLI_bitmap_find_first_unset(bitmap, 10));
+}
+
+TEST(bitmap, find_first_unset_full)
+{
+ BLI_BITMAP_DECLARE(bitmap, 10);
+ BLI_bitmap_flip_all(bitmap, 10);
+ EXPECT_EQ(-1, BLI_bitmap_find_first_unset(bitmap, 10));
+}
+
+TEST(bitmap, find_first_unset_middle)
+{
+ BLI_BITMAP_DECLARE(bitmap, 100);
+ BLI_bitmap_flip_all(bitmap, 100);
+ /* Turn some bits off */
+ BLI_BITMAP_DISABLE(bitmap, 53);
+ BLI_BITMAP_DISABLE(bitmap, 81);
+ BLI_BITMAP_DISABLE(bitmap, 85);
+ BLI_BITMAP_DISABLE(bitmap, 86);
+
+ /* Find lowest unset bit, and set it. */
+ EXPECT_EQ(53, BLI_bitmap_find_first_unset(bitmap, 100));
+ BLI_BITMAP_ENABLE(bitmap, 53);
+ /* Now should find the next lowest bit. */
+ EXPECT_EQ(81, BLI_bitmap_find_first_unset(bitmap, 100));
+}
+
+} // namespace blender::tests
diff --git a/source/blender/blenlib/tests/BLI_index_range_test.cc b/source/blender/blenlib/tests/BLI_index_range_test.cc
index 10f6784cd44..955691b3430 100644
--- a/source/blender/blenlib/tests/BLI_index_range_test.cc
+++ b/source/blender/blenlib/tests/BLI_index_range_test.cc
@@ -105,6 +105,12 @@ TEST(index_range, OneAfterEnd)
EXPECT_EQ(range.one_after_last(), 8);
}
+TEST(index_range, OneBeforeStart)
+{
+ IndexRange range = IndexRange(5, 3);
+ EXPECT_EQ(range.one_before_start(), 4);
+}
+
TEST(index_range, Start)
{
IndexRange range = IndexRange(6, 2);
@@ -229,4 +235,50 @@ TEST(index_range, GenericAlgorithms)
EXPECT_EQ(std::count_if(range.begin(), range.end(), [](int v) { return v < 7; }), 3);
}
+TEST(index_range, SplitByAlignment)
+{
+ {
+ AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(0, 0), 16);
+ EXPECT_EQ(ranges.prefix, IndexRange());
+ EXPECT_EQ(ranges.aligned, IndexRange());
+ EXPECT_EQ(ranges.suffix, IndexRange());
+ }
+ {
+ AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(0, 24), 8);
+ EXPECT_EQ(ranges.prefix, IndexRange());
+ EXPECT_EQ(ranges.aligned, IndexRange(0, 24));
+ EXPECT_EQ(ranges.suffix, IndexRange());
+ }
+ {
+ AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(1, 2), 4);
+ EXPECT_EQ(ranges.prefix, IndexRange(1, 2));
+ EXPECT_EQ(ranges.aligned, IndexRange());
+ EXPECT_EQ(ranges.suffix, IndexRange());
+ }
+ {
+ AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(3, 50), 8);
+ EXPECT_EQ(ranges.prefix, IndexRange(3, 5));
+ EXPECT_EQ(ranges.aligned, IndexRange(8, 40));
+ EXPECT_EQ(ranges.suffix, IndexRange(48, 5));
+ }
+ {
+ AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(3, 50), 1);
+ EXPECT_EQ(ranges.prefix, IndexRange());
+ EXPECT_EQ(ranges.aligned, IndexRange(3, 50));
+ EXPECT_EQ(ranges.suffix, IndexRange());
+ }
+ {
+ AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64, 16), 16);
+ EXPECT_EQ(ranges.prefix, IndexRange());
+ EXPECT_EQ(ranges.aligned, IndexRange(64, 16));
+ EXPECT_EQ(ranges.suffix, IndexRange());
+ }
+ {
+ AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(3, 5), 8);
+ EXPECT_EQ(ranges.prefix, IndexRange(3, 5));
+ EXPECT_EQ(ranges.aligned, IndexRange());
+ EXPECT_EQ(ranges.suffix, IndexRange());
+ }
+}
+
} // namespace blender::tests
diff --git a/source/blender/blenlib/tests/BLI_kdtree_test.cc b/source/blender/blenlib/tests/BLI_kdtree_test.cc
new file mode 100644
index 00000000000..d040ea15172
--- /dev/null
+++ b/source/blender/blenlib/tests/BLI_kdtree_test.cc
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+
+#include "testing/testing.h"
+
+#include "BLI_kdtree.h"
+
+#include <cmath>
+
+/* -------------------------------------------------------------------- */
+/* Tests */
+
+static void standard_test()
+{
+ for (int tree_size = 30; tree_size < 500; tree_size++) {
+ int tree_index = 0;
+ KDTree_1d *tree = BLI_kdtree_1d_new(tree_size);
+ int mask = tree_size & 31;
+ bool occupied[32] = {false};
+
+ for (int i = 0; i < tree_size; i++) {
+ int index = i & mask;
+ occupied[index] = true;
+ float value = fmodf(index * 7.121f, 0.6037f); /* Co-prime. */
+ float key[1] = {value};
+ BLI_kdtree_1d_insert(tree, tree_index++, key);
+ }
+ int expected = 0;
+ for (int j = 0; j < 32; j++) {
+ if (occupied[j]) {
+ expected++;
+ }
+ }
+
+ int dedup_count = BLI_kdtree_1d_deduplicate(tree);
+ EXPECT_EQ(dedup_count, expected);
+ BLI_kdtree_1d_free(tree);
+ }
+}
+
+static void deduplicate_test()
+{
+ for (int tree_size = 1; tree_size < 40; tree_size++) {
+ int tree_index = 0;
+ KDTree_1d *tree = BLI_kdtree_1d_new(tree_size);
+ for (int i = 0; i < tree_size; i++) {
+ float key[1] = {1.0f};
+ BLI_kdtree_1d_insert(tree, tree_index++, key);
+ }
+ int dedup_count = BLI_kdtree_1d_deduplicate(tree);
+ EXPECT_EQ(dedup_count, 1);
+ BLI_kdtree_1d_free(tree);
+ }
+}
+
+TEST(kdtree, Standard)
+{
+ standard_test();
+}
+
+TEST(kdtree, Deduplicate)
+{
+ deduplicate_test();
+}
diff --git a/source/blender/blenlib/tests/BLI_length_parameterize_test.cc b/source/blender/blenlib/tests/BLI_length_parameterize_test.cc
index b63e3a0ec86..3b41a7aed0c 100644
--- a/source/blender/blenlib/tests/BLI_length_parameterize_test.cc
+++ b/source/blender/blenlib/tests/BLI_length_parameterize_test.cc
@@ -10,7 +10,7 @@ namespace blender::length_parameterize::tests {
template<typename T> Array<float> calculate_lengths(const Span<T> values, const bool cyclic)
{
- Array<float> lengths(lengths_num(values.size(), cyclic));
+ Array<float> lengths(segments_num(values.size(), cyclic));
accumulate_lengths<T>(values, cyclic, lengths);
return lengths;
}
@@ -30,9 +30,9 @@ TEST(length_parameterize, FloatSimple)
Array<int> indices(4);
Array<float> factors(4);
- create_uniform_samples(lengths, false, indices, factors);
+ sample_uniform(lengths, true, indices, factors);
Array<float> results(4);
- linear_interpolation<float>(values, indices, factors, results);
+ interpolate<float>(values, indices, factors, results);
Array<float> expected({
0.0f,
1.33333f,
@@ -52,9 +52,9 @@ TEST(length_parameterize, Float)
Array<int> indices(20);
Array<float> factors(20);
- create_uniform_samples(lengths, false, indices, factors);
+ sample_uniform(lengths, true, indices, factors);
Array<float> results(20);
- linear_interpolation<float>(values, indices, factors, results);
+ interpolate<float>(values, indices, factors, results);
Array<float> expected({
1.0f, 1.47368f, 1.94737f, 2.42105f, 2.89474f, 3.36842f, 3.84211f,
4.31579f, 4.78947f, 5.26316f, 5.73684f, 6.21053f, 6.68421f, 7.1579f,
@@ -73,9 +73,9 @@ TEST(length_parameterize, Float2)
Array<int> indices(12);
Array<float> factors(12);
- create_uniform_samples(lengths, false, indices, factors);
+ sample_uniform(lengths, true, indices, factors);
Array<float2> results(12);
- linear_interpolation<float2>(values, indices, factors, results);
+ interpolate<float2>(values, indices, factors, results);
Array<float2> expected({
{0.0f, 0.0f},
{0.272727f, 0.0f},
@@ -103,9 +103,9 @@ TEST(length_parameterize, Float2Cyclic)
Array<int> indices(12);
Array<float> factors(12);
- create_uniform_samples(lengths, true, indices, factors);
+ sample_uniform(lengths, false, indices, factors);
Array<float2> results(12);
- linear_interpolation<float2>(values, indices, factors, results);
+ interpolate<float2>(values, indices, factors, results);
Array<float2> expected({
{0.0f, 0.0f},
{0.333333f, 0.0f},
@@ -133,9 +133,9 @@ TEST(length_parameterize, LineMany)
Array<int> indices(5007);
Array<float> factors(5007);
- create_uniform_samples(lengths, false, indices, factors);
+ sample_uniform(lengths, true, indices, factors);
Array<float> results(5007);
- linear_interpolation<float>(values, indices, factors, results);
+ interpolate<float>(values, indices, factors, results);
Array<float> expected({
1.9962f, 1.9964f, 1.9966f, 1.9968f, 1.997f, 1.9972f, 1.9974f, 1.9976f, 1.9978f, 1.998f,
1.9982f, 1.9984f, 1.9986f, 1.9988f, 1.999f, 1.9992f, 1.9994f, 1.9996f, 1.9998f, 2.0f,
@@ -152,9 +152,9 @@ TEST(length_parameterize, CyclicMany)
Array<int> indices(5007);
Array<float> factors(5007);
- create_uniform_samples(lengths, true, indices, factors);
+ sample_uniform(lengths, false, indices, factors);
Array<float2> results(5007);
- linear_interpolation<float2>(values, indices, factors, results);
+ interpolate<float2>(values, indices, factors, results);
Array<float2> expected({
{0, 0.0159776}, {0, 0.0151787}, {0, 0.0143797}, {0, 0.013581}, {0, 0.0127821},
{0, 0.0119832}, {0, 0.0111842}, {0, 0.0103855}, {0, 0.00958657}, {0, 0.00878763},
@@ -176,9 +176,9 @@ TEST(length_parameterize, InterpolateColor)
Array<int> indices(10);
Array<float> factors(10);
- create_uniform_samples(lengths, true, indices, factors);
+ sample_uniform(lengths, false, indices, factors);
Array<ColorGeometry4f> results(10);
- linear_interpolation<ColorGeometry4f>(colors, indices, factors, results);
+ interpolate<ColorGeometry4f>(colors, indices, factors, results);
Array<ColorGeometry4f> expected({
{0, 0, 0, 1},
{0.4, 0, 0, 1},
@@ -207,10 +207,9 @@ TEST(length_parameterize, ArbitraryFloatSimple)
Array<float> sample_lengths{{0.5f, 1.5f, 2.0f, 4.0f}};
Array<int> indices(4);
Array<float> factors(4);
- create_samples_from_sorted_lengths(lengths, sample_lengths, false, indices, factors);
+ sample_at_lengths(lengths, sample_lengths, indices, factors);
Array<float> results(4);
- linear_interpolation<float>(values, indices, factors, results);
- results.as_span().print_as_lines("results");
+ interpolate<float>(values, indices, factors, results);
Array<float> expected({
0.5f,
1.5f,
@@ -231,10 +230,9 @@ TEST(length_parameterize, ArbitraryFloat2)
{0.5f, 1.5f, 2.0f, 2.0f, 2.1f, 2.5f, 3.5f, 3.6f, 3.8f, 3.85f, 3.90f, 4.0f}};
Array<int> indices(12);
Array<float> factors(12);
- create_samples_from_sorted_lengths(lengths, sample_lengths, true, indices, factors);
+ sample_at_lengths(lengths, sample_lengths, indices, factors);
Array<float2> results(12);
- linear_interpolation<float2>(values, indices, factors, results);
- results.as_span().print_as_lines("results");
+ interpolate<float2>(values, indices, factors, results);
Array<float2> expected({
{0.5f, 0.0f},
{1.0f, 0.5f},
diff --git a/source/blender/blenlib/tests/BLI_math_rotation_test.cc b/source/blender/blenlib/tests/BLI_math_rotation_test.cc
index a283118bea2..460cfd2d36c 100644
--- a/source/blender/blenlib/tests/BLI_math_rotation_test.cc
+++ b/source/blender/blenlib/tests/BLI_math_rotation_test.cc
@@ -7,6 +7,8 @@
#include "BLI_math_rotation.hh"
#include "BLI_math_vector.hh"
+#include "BLI_vector.hh"
+
#include <cmath>
/* Test that quaternion converts to itself via matrix. */
@@ -150,6 +152,107 @@ TEST(math_rotation, quat_split_swing_and_twist_negative)
EXPECT_V4_NEAR(twist, expected_twist, FLT_EPSILON);
}
+/* -------------------------------------------------------------------- */
+/** \name Test `sin_cos_from_fraction` Accuracy & Exact Symmetry
+ * \{ */
+
+static void test_sin_cos_from_fraction_accuracy(const int range, const float expected_eps)
+{
+ for (int i = 0; i < range; i++) {
+ float sin_cos_fl[2];
+ sin_cos_from_fraction(i, range, &sin_cos_fl[0], &sin_cos_fl[1]);
+ const float phi = (float)(2.0 * M_PI) * ((float)i / (float)range);
+ const float sin_cos_test_fl[2] = {sinf(phi), cosf(phi)};
+ EXPECT_V2_NEAR(sin_cos_fl, sin_cos_test_fl, expected_eps);
+ }
+}
+
+/** Ensure the result of #sin_cos_from_fraction match #sinf & #cosf. */
+TEST(math_rotation, sin_cos_from_fraction_accuracy)
+{
+ for (int range = 1; range <= 64; range++) {
+ test_sin_cos_from_fraction_accuracy(range, 1e-6f);
+ }
+}
+
+/** Ensure values are exactly symmetrical where possible. */
+static void test_sin_cos_from_fraction_symmetry(const int range)
+{
+ /* The expected number of unique numbers depends on the range being a multiple of 4/2/1. */
+ const enum {
+ MULTIPLE_OF_1 = 1,
+ MULTIPLE_OF_2 = 2,
+ MULTIPLE_OF_4 = 3,
+ } multiple_of = (range & 1) ? MULTIPLE_OF_1 : ((range & 3) ? MULTIPLE_OF_2 : MULTIPLE_OF_4);
+
+ blender::Vector<blender::float2> coords;
+ coords.reserve(range);
+ for (int i = 0; i < range; i++) {
+ float sin_cos_fl[2];
+ sin_cos_from_fraction(i, range, &sin_cos_fl[0], &sin_cos_fl[1]);
+ switch (multiple_of) {
+ case MULTIPLE_OF_1: {
+ sin_cos_fl[0] = fabsf(sin_cos_fl[0]);
+ break;
+ }
+ case MULTIPLE_OF_2: {
+ sin_cos_fl[0] = fabsf(sin_cos_fl[0]);
+ sin_cos_fl[1] = fabsf(sin_cos_fl[1]);
+ break;
+ }
+ case MULTIPLE_OF_4: {
+ sin_cos_fl[0] = fabsf(sin_cos_fl[0]);
+ sin_cos_fl[1] = fabsf(sin_cos_fl[1]);
+ if (sin_cos_fl[0] > sin_cos_fl[1]) {
+ SWAP(float, sin_cos_fl[0], sin_cos_fl[1]);
+ }
+ break;
+ }
+ }
+ coords.append_unchecked(sin_cos_fl);
+ }
+ /* Sort, then count unique items. */
+ std::sort(coords.begin(), coords.end(), [](const blender::float2 &a, const blender::float2 &b) {
+ float delta = b[0] - a[0];
+ if (delta == 0.0f) {
+ delta = b[1] - a[1];
+ }
+ return delta > 0.0f;
+ });
+ int unique_coords_count = 1;
+ if (range > 1) {
+ int i_prev = 0;
+ for (int i = 1; i < range; i_prev = i++) {
+ if (coords[i_prev] != coords[i]) {
+ unique_coords_count += 1;
+ }
+ }
+ }
+ switch (multiple_of) {
+ case MULTIPLE_OF_1: {
+ EXPECT_EQ(unique_coords_count, (range / 2) + 1);
+ break;
+ }
+ case MULTIPLE_OF_2: {
+ EXPECT_EQ(unique_coords_count, (range / 4) + 1);
+ break;
+ }
+ case MULTIPLE_OF_4: {
+ EXPECT_EQ(unique_coords_count, (range / 8) + 1);
+ break;
+ }
+ }
+}
+
+TEST(math_rotation, sin_cos_from_fraction_symmetry)
+{
+ for (int range = 1; range <= 64; range++) {
+ test_sin_cos_from_fraction_symmetry(range);
+ }
+}
+
+/** \} */
+
namespace blender::math::tests {
TEST(math_rotation, RotateDirectionAroundAxis)
diff --git a/source/blender/blenlib/tests/BLI_pool_test.cc b/source/blender/blenlib/tests/BLI_pool_test.cc
new file mode 100644
index 00000000000..31cb255d997
--- /dev/null
+++ b/source/blender/blenlib/tests/BLI_pool_test.cc
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+
+#include "BLI_exception_safety_test_utils.hh"
+#include "BLI_pool.hh"
+#include "BLI_strict_flags.h"
+#include "testing/testing.h"
+
+namespace blender::tests {
+
+TEST(pool, DefaultConstructor)
+{
+ Pool<int> pool;
+ EXPECT_EQ(pool.size(), 0);
+}
+
+TEST(pool, Allocation)
+{
+ Vector<int *> ptrs;
+ Pool<int> pool;
+ for (int i = 0; i < 100; i++) {
+ ptrs.append(&pool.construct(i));
+ }
+ EXPECT_EQ(pool.size(), 100);
+
+ for (int *ptr : ptrs) {
+ pool.destruct(*ptr);
+ }
+ EXPECT_EQ(pool.size(), 0);
+}
+
+TEST(pool, Reuse)
+{
+ Vector<int *> ptrs;
+ Pool<int> pool;
+ for (int i = 0; i < 32; i++) {
+ ptrs.append(&pool.construct(i));
+ }
+
+ int *freed_ptr = ptrs[6];
+ pool.destruct(*freed_ptr);
+
+ ptrs[6] = &pool.construct(0);
+
+ EXPECT_EQ(ptrs[6], freed_ptr);
+
+ for (int *ptr : ptrs) {
+ pool.destruct(*ptr);
+ }
+}
+
+} // namespace blender::tests
diff --git a/source/blender/blenlib/tests/BLI_set_test.cc b/source/blender/blenlib/tests/BLI_set_test.cc
index 5a97b2c7999..9dfa48b5822 100644
--- a/source/blender/blenlib/tests/BLI_set_test.cc
+++ b/source/blender/blenlib/tests/BLI_set_test.cc
@@ -532,8 +532,14 @@ TEST(set, ForwardIterator)
Set<int>::iterator iter1 = set.begin();
int value1 = *iter1;
Set<int>::iterator iter2 = iter1++;
- EXPECT_EQ(*iter1, value1);
- EXPECT_EQ(*iter2, *(++iter1));
+ EXPECT_EQ(*iter2, value1);
+ EXPECT_EQ(*(++iter2), *iter1);
+ /* Interesting find: On GCC & MSVC this will succeed, as the 2nd argument is evaluated before the
+ * 1st. On Apple Clang it's the other way around, and the test fails. */
+ // EXPECT_EQ(*iter1, *(++iter1));
+ Set<int>::iterator iter3 = ++iter1;
+ /* Check that #iter1 itself changed. */
+ EXPECT_EQ(*iter3, *iter1);
}
TEST(set, GenericAlgorithms)
diff --git a/source/blender/blenlib/tests/BLI_string_search_test.cc b/source/blender/blenlib/tests/BLI_string_search_test.cc
index aa6adae8d76..ab1d073ed33 100644
--- a/source/blender/blenlib/tests/BLI_string_search_test.cc
+++ b/source/blender/blenlib/tests/BLI_string_search_test.cc
@@ -9,7 +9,7 @@
namespace blender::string_search::tests {
/* Right arrow, keep in sync with #UI_MENU_ARROW_SEP in `UI_interface.h`. */
-#define UI_MENU_ARROW_SEP "\xe2\x96\xb6"
+#define UI_MENU_ARROW_SEP "\xe2\x96\xb8"
TEST(string_search, damerau_levenshtein_distance)
{
diff --git a/source/blender/blenlib/tests/BLI_vector_test.cc b/source/blender/blenlib/tests/BLI_vector_test.cc
index 29b6d2b41fe..3a12fe14de3 100644
--- a/source/blender/blenlib/tests/BLI_vector_test.cc
+++ b/source/blender/blenlib/tests/BLI_vector_test.cc
@@ -264,7 +264,8 @@ TEST(vector, AppendAndGetIndex)
EXPECT_EQ(vec.append_and_get_index(10), 1);
EXPECT_EQ(vec.append_and_get_index(10), 2);
vec.append(10);
- EXPECT_EQ(vec.append_and_get_index(10), 4);
+ int value = 10;
+ EXPECT_EQ(vec.append_and_get_index(value), 4);
}
TEST(vector, AppendNonDuplicates)
@@ -479,6 +480,7 @@ TEST(vector, UniquePtrValue)
vec.remove_and_reorder(0);
vec.remove(0);
EXPECT_EQ(vec.size(), 1);
+ EXPECT_EQ(vec.append_and_get_index(std::make_unique<int>(4)), 1);
UNUSED_VARS(a, b);
}
diff --git a/source/blender/blenlib/tests/BLI_virtual_array_test.cc b/source/blender/blenlib/tests/BLI_virtual_array_test.cc
index 90c7f1078a5..14f5480f751 100644
--- a/source/blender/blenlib/tests/BLI_virtual_array_test.cc
+++ b/source/blender/blenlib/tests/BLI_virtual_array_test.cc
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "BLI_array.hh"
+#include "BLI_generic_virtual_array.hh"
#include "BLI_strict_flags.h"
#include "BLI_vector.hh"
#include "BLI_vector_set.hh"
@@ -109,7 +110,7 @@ TEST(virtual_array, AsSpan)
{
auto func = [](int64_t index) { return (int)(10 * index); };
VArray<int> func_varray = VArray<int>::ForFunc(10, func);
- VArray_Span span_varray{func_varray};
+ VArraySpan span_varray{func_varray};
EXPECT_EQ(span_varray.size(), 10);
Span<int> span = span_varray;
EXPECT_EQ(span.size(), 10);
@@ -222,4 +223,36 @@ TEST(virtual_array, MaterializeCompressed)
}
}
+TEST(virtual_array, EmptySpanWrapper)
+{
+ {
+ VArray<int> varray;
+ VArraySpan<int> span1 = varray;
+ EXPECT_TRUE(span1.is_empty());
+ VArraySpan<int> span2 = std::move(span1);
+ EXPECT_TRUE(span2.is_empty());
+ }
+ {
+ VMutableArray<int> varray;
+ MutableVArraySpan<int> span1 = varray;
+ EXPECT_TRUE(span1.is_empty());
+ MutableVArraySpan<int> span2 = std::move(span1);
+ EXPECT_TRUE(span2.is_empty());
+ }
+ {
+ GVArray varray;
+ GVArraySpan span1 = varray;
+ EXPECT_TRUE(span1.is_empty());
+ GVArraySpan span2 = std::move(span1);
+ EXPECT_TRUE(span2.is_empty());
+ }
+ {
+ GVMutableArray varray;
+ GMutableVArraySpan span1 = varray;
+ EXPECT_TRUE(span1.is_empty());
+ GMutableVArraySpan span2 = std::move(span1);
+ EXPECT_TRUE(span2.is_empty());
+ }
+}
+
} // namespace blender::tests