
git.blender.org/blender.git
author    Jacques Lucke <jacques@blender.org>  2020-07-20 13:16:20 +0300
committer Jacques Lucke <jacques@blender.org>  2020-07-20 13:16:20 +0300
commit    8cbbdedaf4dfec9e320e7e2be58b75d256950df1 (patch)
tree      496b9620e11ac44e515b0bb4ca52c05834d557f9 /source/blender/blenlib
parent    686ab4c9401a90b22fb17e46c992eb513fe4f693 (diff)
Refactor: Update integer type usage
This updates the usage of integer types in code I wrote according to our new style guides. Major changes:

* Use signed instead of unsigned integers in many places.
* C++ containers in blenlib use `int64_t` for sizes and indices now (instead of `uint`).
* Hash values for C++ containers are 64 bit wide now (instead of 32 bit).

I do hope that I broke no builds, but it is quite likely that some compilers report slightly different errors. Please let me know when there are any errors. If the fix is small, feel free to commit it yourself. I compiled successfully on Linux with gcc and on Windows.
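
(Illustrative only, not part of the commit: a minimal sketch of the new conventions. The type `MyKey` and the vector are hypothetical.)

  /* Custom types provide a `hash()` method that returns uint64_t now. */
  struct MyKey {
    int a, b;

    uint64_t hash() const
    {
      return ((uint64_t)a * 435109) ^ ((uint64_t)b * 380867);
    }

    friend bool operator==(const MyKey &x, const MyKey &y)
    {
      return x.a == y.a && x.b == y.b;
    }
  };

  /* Sizes and indices are signed 64-bit integers now (instead of `uint`). */
  blender::Vector<MyKey> keys;
  for (int64_t i = 0; i < keys.size(); i++) {
    /* ... */
  }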
Diffstat (limited to 'source/blender/blenlib')
-rw-r--r-- source/blender/blenlib/BLI_allocator.hh | 2
-rw-r--r-- source/blender/blenlib/BLI_array.hh | 45
-rw-r--r-- source/blender/blenlib/BLI_color.hh | 16
-rw-r--r-- source/blender/blenlib/BLI_disjoint_set.hh | 27
-rw-r--r-- source/blender/blenlib/BLI_dot_export.hh | 4
-rw-r--r-- source/blender/blenlib/BLI_float3.hh | 8
-rw-r--r-- source/blender/blenlib/BLI_float4x4.hh | 6
-rw-r--r-- source/blender/blenlib/BLI_hash.hh | 62
-rw-r--r-- source/blender/blenlib/BLI_hash_tables.hh | 101
-rw-r--r-- source/blender/blenlib/BLI_index_mask.hh | 30
-rw-r--r-- source/blender/blenlib/BLI_index_range.hh | 64
-rw-r--r-- source/blender/blenlib/BLI_linear_allocator.hh | 29
-rw-r--r-- source/blender/blenlib/BLI_listbase_wrapper.hh | 6
-rw-r--r-- source/blender/blenlib/BLI_map.hh | 95
-rw-r--r-- source/blender/blenlib/BLI_map_slots.hh | 20
-rw-r--r-- source/blender/blenlib/BLI_memory_utils.hh | 67
-rw-r--r-- source/blender/blenlib/BLI_probing_strategies.hh | 58
-rw-r--r-- source/blender/blenlib/BLI_rand.hh | 2
-rw-r--r-- source/blender/blenlib/BLI_resource_collector.hh | 2
-rw-r--r-- source/blender/blenlib/BLI_set.hh | 67
-rw-r--r-- source/blender/blenlib/BLI_set_slots.hh | 26
-rw-r--r-- source/blender/blenlib/BLI_span.hh | 89
-rw-r--r-- source/blender/blenlib/BLI_stack.hh | 16
-rw-r--r-- source/blender/blenlib/BLI_string_ref.hh | 53
-rw-r--r-- source/blender/blenlib/BLI_vector.hh | 118
-rw-r--r-- source/blender/blenlib/BLI_vector_set.hh | 90
-rw-r--r-- source/blender/blenlib/BLI_vector_set_slots.hh | 24
-rw-r--r-- source/blender/blenlib/intern/BLI_index_range.cc | 22
-rw-r--r-- source/blender/blenlib/intern/dot_export.cc | 4
-rw-r--r-- source/blender/blenlib/intern/rand.cc | 19
30 files changed, 597 insertions, 575 deletions
diff --git a/source/blender/blenlib/BLI_allocator.hh b/source/blender/blenlib/BLI_allocator.hh
index d57703f71bc..ec82e5ab71c 100644
--- a/source/blender/blenlib/BLI_allocator.hh
+++ b/source/blender/blenlib/BLI_allocator.hh
@@ -84,7 +84,7 @@ class RawAllocator {
void *ptr = malloc(size + alignment + sizeof(MemHead));
void *used_ptr = (void *)((uintptr_t)POINTER_OFFSET(ptr, alignment + sizeof(MemHead)) &
~((uintptr_t)alignment - 1));
- uint offset = (uint)((uintptr_t)used_ptr - (uintptr_t)ptr);
+ int offset = (int)((uintptr_t)used_ptr - (uintptr_t)ptr);
BLI_assert(offset >= sizeof(MemHead));
((MemHead *)used_ptr - 1)->offset = (int)offset;
return used_ptr;
diff --git a/source/blender/blenlib/BLI_array.hh b/source/blender/blenlib/BLI_array.hh
index c411fc50f15..c30893f1337 100644
--- a/source/blender/blenlib/BLI_array.hh
+++ b/source/blender/blenlib/BLI_array.hh
@@ -13,6 +13,7 @@
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+
#ifndef __BLI_ARRAY_HH__
#define __BLI_ARRAY_HH__
@@ -56,7 +57,7 @@ template<
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this array. Should rarely be changed, except when you don't want that
* MEM_* functions are used internally.
@@ -68,7 +69,7 @@ class Array {
T *data_;
/** Number of elements in the array. */
- uint size_;
+ int64_t size_;
/** Used for allocations when the inline buffer is too small. */
Allocator allocator_;
@@ -117,7 +118,7 @@ class Array {
* even for non-trivial types. This should not be the default though, because one can easily mess
* up when dealing with uninitialized memory.
*/
- explicit Array(uint size)
+ explicit Array(int64_t size)
{
size_ = size;
data_ = this->get_buffer_for_size(size);
@@ -128,8 +129,9 @@ class Array {
* Create a new array with the given size. All values will be initialized by copying the given
* default.
*/
- Array(uint size, const T &value)
+ Array(int64_t size, const T &value)
{
+ BLI_assert(size >= 0);
size_ = size;
data_ = this->get_buffer_for_size(size);
uninitialized_fill_n(data_, size_, value);
@@ -147,8 +149,9 @@ class Array {
* Usage:
* Array<std::string> my_strings(10, NoInitialization());
*/
- Array(uint size, NoInitialization)
+ Array(int64_t size, NoInitialization)
{
+ BLI_assert(size >= 0);
size_ = size;
data_ = this->get_buffer_for_size(size);
}
@@ -203,14 +206,16 @@ class Array {
return *this;
}
- T &operator[](uint index)
+ T &operator[](int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return data_[index];
}
- const T &operator[](uint index) const
+ const T &operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return data_[index];
}
@@ -250,7 +255,7 @@ class Array {
/**
* Returns the number of elements in the array.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -264,22 +269,6 @@ class Array {
}
/**
- * Copies the value to all indices in the array.
- */
- void fill(const T &value)
- {
- initialized_fill_n(data_, size_, value);
- }
-
- /**
- * Copies the value to the given indices in the array.
- */
- void fill_indices(Span<uint> indices, const T &value)
- {
- MutableSpan<T>(*this).fill_indices(indices, value);
- }
-
- /**
* Get a pointer to the beginning of the array.
*/
const T *data() const
@@ -340,13 +329,13 @@ class Array {
* Get the value of the InlineBufferCapacity template argument. This is the number of elements
* that can be stored without doing an allocation.
*/
- static uint inline_buffer_capacity()
+ static int64_t inline_buffer_capacity()
{
return InlineBufferCapacity;
}
private:
- T *get_buffer_for_size(uint size)
+ T *get_buffer_for_size(int64_t size)
{
if (size <= InlineBufferCapacity) {
return inline_buffer_;
@@ -356,9 +345,9 @@ class Array {
}
}
- T *allocate(uint size)
+ T *allocate(int64_t size)
{
- return (T *)allocator_.allocate(size * sizeof(T), alignof(T), AT);
+ return (T *)allocator_.allocate((size_t)size * sizeof(T), alignof(T), AT);
}
bool uses_inline_buffer() const
diff --git a/source/blender/blenlib/BLI_color.hh b/source/blender/blenlib/BLI_color.hh
index 265013c0013..72caa5b1118 100644
--- a/source/blender/blenlib/BLI_color.hh
+++ b/source/blender/blenlib/BLI_color.hh
@@ -62,12 +62,12 @@ struct Color4f {
return !(a == b);
}
- uint32_t hash() const
+ uint64_t hash() const
{
- uint32_t x1 = *(uint32_t *)&r;
- uint32_t x2 = *(uint32_t *)&g;
- uint32_t x3 = *(uint32_t *)&b;
- uint32_t x4 = *(uint32_t *)&a;
+ uint64_t x1 = *(uint32_t *)&r;
+ uint64_t x2 = *(uint32_t *)&g;
+ uint64_t x3 = *(uint32_t *)&b;
+ uint64_t x4 = *(uint32_t *)&a;
return (x1 * 1283591) ^ (x2 * 850177) ^ (x3 * 735391) ^ (x4 * 442319);
}
};
@@ -119,10 +119,10 @@ struct Color4b {
return !(a == b);
}
- uint32_t hash() const
+ uint64_t hash() const
{
- return ((uint32_t)r * 1283591) ^ ((uint32_t)g * 850177) ^ ((uint32_t)b * 735391) ^
- ((uint32_t)a * 442319);
+ return ((uint64_t)r * 1283591) ^ ((uint64_t)g * 850177) ^ ((uint64_t)b * 735391) ^
+ ((uint64_t)a * 442319);
}
};
diff --git a/source/blender/blenlib/BLI_disjoint_set.hh b/source/blender/blenlib/BLI_disjoint_set.hh
index 3b8453669aa..e0580709a44 100644
--- a/source/blender/blenlib/BLI_disjoint_set.hh
+++ b/source/blender/blenlib/BLI_disjoint_set.hh
@@ -29,16 +29,17 @@ namespace blender {
class DisjointSet {
private:
- Array<uint> parents_;
- Array<uint> ranks_;
+ Array<int64_t> parents_;
+ Array<int64_t> ranks_;
public:
/**
* Create a new disjoint set with the given size. Initially, every element is in a separate set.
*/
- DisjointSet(uint size) : parents_(size), ranks_(size, 0)
+ DisjointSet(int64_t size) : parents_(size), ranks_(size, 0)
{
- for (uint i = 0; i < size; i++) {
+ BLI_assert(size >= 0);
+ for (int64_t i = 0; i < size; i++) {
parents_[i] = i;
}
}
@@ -47,10 +48,10 @@ class DisjointSet {
* Join the sets containing elements x and y. Nothing happens when they have been in the same set
* before.
*/
- void join(uint x, uint y)
+ void join(int64_t x, int64_t y)
{
- uint root1 = this->find_root(x);
- uint root2 = this->find_root(y);
+ int64_t root1 = this->find_root(x);
+ int64_t root2 = this->find_root(y);
/* x and y are in the same set already. */
if (root1 == root2) {
@@ -71,27 +72,27 @@ class DisjointSet {
/**
* Return true when x and y are in the same set.
*/
- bool in_same_set(uint x, uint y)
+ bool in_same_set(int64_t x, int64_t y)
{
- uint root1 = this->find_root(x);
- uint root2 = this->find_root(y);
+ int64_t root1 = this->find_root(x);
+ int64_t root2 = this->find_root(y);
return root1 == root2;
}
/**
* Find the element that represents the set containing x currently.
*/
- uint find_root(uint x)
+ int64_t find_root(int64_t x)
{
/* Find root by following parents. */
- uint root = x;
+ int64_t root = x;
while (parents_[root] != root) {
root = parents_[root];
}
/* Compress path. */
while (parents_[x] != root) {
- uint parent = parents_[x];
+ int64_t parent = parents_[x];
parents_[x] = root;
x = parent;
}
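
(Sketch, not part of the commit: typical use of the DisjointSet API above, with the now-signed element indices. Values are arbitrary.)

  blender::DisjointSet disjoint_set(5); /* Five singleton sets, elements 0..4. */
  disjoint_set.join(1, 2);
  disjoint_set.join(3, 4);
  BLI_assert(disjoint_set.in_same_set(1, 2));
  BLI_assert(!disjoint_set.in_same_set(0, 1));
  const int64_t root = disjoint_set.find_root(4); /* Representative of {3, 4}. */
  (void)root;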
diff --git a/source/blender/blenlib/BLI_dot_export.hh b/source/blender/blenlib/BLI_dot_export.hh
index a7c5f1436d1..0870d8c4c30 100644
--- a/source/blender/blenlib/BLI_dot_export.hh
+++ b/source/blender/blenlib/BLI_dot_export.hh
@@ -274,13 +274,13 @@ class NodeWithSocketsRef {
return *node_;
}
- NodePort input(uint index) const
+ NodePort input(int index) const
{
std::string port = "\"in" + std::to_string(index) + "\"";
return NodePort(*node_, port);
}
- NodePort output(uint index) const
+ NodePort output(int index) const
{
std::string port = "\"out" + std::to_string(index) + "\"";
return NodePort(*node_, port);
diff --git a/source/blender/blenlib/BLI_float3.hh b/source/blender/blenlib/BLI_float3.hh
index a36cedad41d..b2633985ac7 100644
--- a/source/blender/blenlib/BLI_float3.hh
+++ b/source/blender/blenlib/BLI_float3.hh
@@ -188,11 +188,11 @@ struct float3 {
z = -z;
}
- uint32_t hash() const
+ uint64_t hash() const
{
- uint32_t x1 = *(uint32_t *)&x;
- uint32_t x2 = *(uint32_t *)&y;
- uint32_t x3 = *(uint32_t *)&z;
+ uint64_t x1 = *(uint32_t *)&x;
+ uint64_t x2 = *(uint32_t *)&y;
+ uint64_t x3 = *(uint32_t *)&z;
return (x1 * 435109) ^ (x2 * 380867) ^ (x3 * 1059217);
}
diff --git a/source/blender/blenlib/BLI_float4x4.hh b/source/blender/blenlib/BLI_float4x4.hh
index ef83f9ffc19..185cffd13ac 100644
--- a/source/blender/blenlib/BLI_float4x4.hh
+++ b/source/blender/blenlib/BLI_float4x4.hh
@@ -109,10 +109,10 @@ struct float4x4 {
return result;
}
- uint32_t hash() const
+ uint64_t hash() const
{
- uint32_t h = 435109;
- for (uint i = 0; i < 16; i++) {
+ uint64_t h = 435109;
+ for (int i = 0; i < 16; i++) {
float value = ((const float *)this)[i];
h = h * 33 + (*(uint32_t *)&value);
}
diff --git a/source/blender/blenlib/BLI_hash.hh b/source/blender/blenlib/BLI_hash.hh
index 5cd4ce3c1a9..b14a4ca933c 100644
--- a/source/blender/blenlib/BLI_hash.hh
+++ b/source/blender/blenlib/BLI_hash.hh
@@ -38,7 +38,7 @@
* multiple `operator()` in a specialization of #DefaultHash. All those methods have to compute the
* same hash for values that compare equal.
*
- * The computed hash is an unsigned 32 bit integer. Ideally, the hash function would generate
+ * The computed hash is an unsigned 64 bit integer. Ideally, the hash function would generate
* uniformly random hash values for a set of keys. However, in many cases trivial hash functions
* are faster and produce a good enough distribution. In general it is better when more information
* is in the lower bits of the hash. By choosing a good probing strategy, the effects of a bad hash
@@ -49,7 +49,7 @@
* There are three main ways to provide a hash table implementation with a custom hash function.
*
* - When you want to provide a default hash function for your own custom type: Add a `hash`
- * member function to it. The function should return `uint32_t` and take no arguments. This
+ * member function to it. The function should return `uint64_t` and take no arguments. This
* method will be called by the default implementation of #DefaultHash. It will automatically be
* used by hash table implementations.
*
@@ -58,7 +58,7 @@
* either global or BLI namespace.
*
* template<> struct blender::DefaultHash<TheType> {
- * uint32_t operator()(const TheType &value) const {
+ * uint64_t operator()(const TheType &value) const {
* return ...;
* }
* };
@@ -68,7 +68,7 @@
* table explicitly.
*
* struct MyCustomHash {
- * uint32_t operator()(const TheType &value) const {
+ * uint64_t operator()(const TheType &value) const {
* return ...;
* }
* };
@@ -91,7 +91,7 @@ namespace blender {
* that you have to implement a hash function using one of three strategies listed above.
*/
template<typename T> struct DefaultHash {
- uint32_t operator()(const T &value) const
+ uint64_t operator()(const T &value) const
{
return value.hash();
}
@@ -101,7 +101,7 @@ template<typename T> struct DefaultHash {
* Use the same hash function for const and non const variants of a type.
*/
template<typename T> struct DefaultHash<const T> {
- uint32_t operator()(const T &value) const
+ uint64_t operator()(const T &value) const
{
return DefaultHash<T>{}(value);
}
@@ -109,9 +109,9 @@ template<typename T> struct DefaultHash<const T> {
#define TRIVIAL_DEFAULT_INT_HASH(TYPE) \
template<> struct DefaultHash<TYPE> { \
- uint32_t operator()(TYPE value) const \
+ uint64_t operator()(TYPE value) const \
{ \
- return (uint32_t)value; \
+ return (uint64_t)value; \
} \
}
@@ -127,43 +127,29 @@ TRIVIAL_DEFAULT_INT_HASH(int16_t);
TRIVIAL_DEFAULT_INT_HASH(uint16_t);
TRIVIAL_DEFAULT_INT_HASH(int32_t);
TRIVIAL_DEFAULT_INT_HASH(uint32_t);
-
-template<> struct DefaultHash<uint64_t> {
- uint32_t operator()(uint64_t value) const
- {
- uint32_t low = (uint32_t)value;
- uint32_t high = (uint32_t)(value >> 32);
- return low ^ (high * 0x45d9f3b);
- }
-};
-
-template<> struct DefaultHash<int64_t> {
- uint32_t operator()(uint64_t value) const
- {
- return DefaultHash<uint64_t>{}((uint64_t)value);
- }
-};
+TRIVIAL_DEFAULT_INT_HASH(int64_t);
+TRIVIAL_DEFAULT_INT_HASH(uint64_t);
/**
* One should try to avoid using floats as keys in hash tables, but sometimes it is convenient.
*/
template<> struct DefaultHash<float> {
- uint32_t operator()(float value) const
+ uint64_t operator()(float value) const
{
return *(uint32_t *)&value;
}
};
template<> struct DefaultHash<bool> {
- uint32_t operator()(bool value) const
+ uint64_t operator()(bool value) const
{
- return (uint32_t)(value != false) * 1298191;
+ return (uint64_t)(value != false) * 1298191;
}
};
-inline uint32_t hash_string(StringRef str)
+inline uint64_t hash_string(StringRef str)
{
- uint32_t hash = 5381;
+ uint64_t hash = 5381;
for (char c : str) {
hash = hash * 33 + c;
}
@@ -175,21 +161,21 @@ template<> struct DefaultHash<std::string> {
* Take a #StringRef as parameter to support heterogeneous lookups in hash table implementations
* when std::string is used as key.
*/
- uint32_t operator()(StringRef value) const
+ uint64_t operator()(StringRef value) const
{
return hash_string(value);
}
};
template<> struct DefaultHash<StringRef> {
- uint32_t operator()(StringRef value) const
+ uint64_t operator()(StringRef value) const
{
return hash_string(value);
}
};
template<> struct DefaultHash<StringRefNull> {
- uint32_t operator()(StringRef value) const
+ uint64_t operator()(StringRef value) const
{
return hash_string(value);
}
@@ -199,26 +185,26 @@ template<> struct DefaultHash<StringRefNull> {
* While we cannot guarantee that the lower 4 bits of a pointer are zero, it is often the case.
*/
template<typename T> struct DefaultHash<T *> {
- uint32_t operator()(const T *value) const
+ uint64_t operator()(const T *value) const
{
uintptr_t ptr = (uintptr_t)value;
- uint32_t hash = (uint32_t)(ptr >> 4);
+ uint64_t hash = (uint64_t)(ptr >> 4);
return hash;
}
};
template<typename T> struct DefaultHash<std::unique_ptr<T>> {
- uint32_t operator()(const std::unique_ptr<T> &value) const
+ uint64_t operator()(const std::unique_ptr<T> &value) const
{
return DefaultHash<T *>{}(value.get());
}
};
template<typename T1, typename T2> struct DefaultHash<std::pair<T1, T2>> {
- uint32_t operator()(const std::pair<T1, T2> &value) const
+ uint64_t operator()(const std::pair<T1, T2> &value) const
{
- uint32_t hash1 = DefaultHash<T1>{}(value.first);
- uint32_t hash2 = DefaultHash<T2>{}(value.second);
+ uint64_t hash1 = DefaultHash<T1>{}(value.first);
+ uint64_t hash2 = DefaultHash<T2>{}(value.second);
return hash1 ^ (hash2 * 33);
}
};
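
(Sketch, not part of the commit: the widened DefaultHash API in use. All hashes are uint64_t now.)

  blender::DefaultHash<int32_t> int_hash;
  const uint64_t h1 = int_hash(42); /* Trivial integer hash, cast to uint64_t. */

  blender::DefaultHash<std::pair<int32_t, float>> pair_hash;
  const uint64_t h2 = pair_hash({42, 1.0f}); /* Combined as hash1 ^ (hash2 * 33). */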
diff --git a/source/blender/blenlib/BLI_hash_tables.hh b/source/blender/blenlib/BLI_hash_tables.hh
index aaed772071d..5d8f8862a09 100644
--- a/source/blender/blenlib/BLI_hash_tables.hh
+++ b/source/blender/blenlib/BLI_hash_tables.hh
@@ -42,59 +42,64 @@ namespace blender {
* Those should eventually be de-duplicated with functions in BLI_math_base.h.
* \{ */
-inline constexpr int is_power_of_2_i_constexpr(const int n)
+inline constexpr int64_t is_power_of_2_constexpr(const int64_t x)
{
- return (n & (n - 1)) == 0;
+ BLI_assert(x >= 0);
+ return (x & (x - 1)) == 0;
}
-inline constexpr uint32_t log2_floor_u_constexpr(const uint32_t x)
+inline constexpr int64_t log2_floor_constexpr(const int64_t x)
{
- return x <= 1 ? 0 : 1 + log2_floor_u_constexpr(x >> 1);
+ BLI_assert(x >= 0);
+ return x <= 1 ? 0 : 1 + log2_floor_constexpr(x >> 1);
}
-inline constexpr uint32_t log2_ceil_u_constexpr(const uint32_t x)
+inline constexpr int64_t log2_ceil_constexpr(const int64_t x)
{
- return (is_power_of_2_i_constexpr((int)x)) ? log2_floor_u_constexpr(x) :
- log2_floor_u_constexpr(x) + 1;
+ BLI_assert(x >= 0);
+ return (is_power_of_2_constexpr((int)x)) ? log2_floor_constexpr(x) : log2_floor_constexpr(x) + 1;
}
-inline constexpr uint32_t power_of_2_max_u_constexpr(const uint32_t x)
+inline constexpr int64_t power_of_2_max_constexpr(const int64_t x)
{
- return 1u << log2_ceil_u_constexpr(x);
+ BLI_assert(x >= 0);
+ return 1ll << log2_ceil_constexpr(x);
}
template<typename IntT> inline constexpr IntT ceil_division(const IntT x, const IntT y)
{
- BLI_STATIC_ASSERT(!std::is_signed_v<IntT>, "");
+ BLI_assert(x >= 0);
+ BLI_assert(y >= 0);
return x / y + ((x % y) != 0);
}
template<typename IntT> inline constexpr IntT floor_division(const IntT x, const IntT y)
{
- BLI_STATIC_ASSERT(!std::is_signed_v<IntT>, "");
+ BLI_assert(x >= 0);
+ BLI_assert(y >= 0);
return x / y;
}
-inline constexpr uint32_t ceil_division_by_fraction(const uint32_t x,
- const uint32_t numerator,
- const uint32_t denominator)
+inline constexpr int64_t ceil_division_by_fraction(const int64_t x,
+ const int64_t numerator,
+ const int64_t denominator)
{
- return (uint32_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator);
+ return (int64_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator);
}
-inline constexpr uint32_t floor_multiplication_with_fraction(const uint32_t x,
- const uint32_t numerator,
- const uint32_t denominator)
+inline constexpr int64_t floor_multiplication_with_fraction(const int64_t x,
+ const int64_t numerator,
+ const int64_t denominator)
{
- return (uint32_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator);
+ return (int64_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator);
}
-inline constexpr uint32_t total_slot_amount_for_usable_slots(
- const uint32_t min_usable_slots,
- const uint32_t max_load_factor_numerator,
- const uint32_t max_load_factor_denominator)
+inline constexpr int64_t total_slot_amount_for_usable_slots(
+ const int64_t min_usable_slots,
+ const int64_t max_load_factor_numerator,
+ const int64_t max_load_factor_denominator)
{
- return power_of_2_max_u_constexpr(ceil_division_by_fraction(
+ return power_of_2_max_constexpr(ceil_division_by_fraction(
min_usable_slots, max_load_factor_numerator, max_load_factor_denominator));
}
@@ -121,16 +126,16 @@ class LoadFactor {
BLI_assert(numerator < denominator);
}
- void compute_total_and_usable_slots(uint32_t min_total_slots,
- uint32_t min_usable_slots,
- uint32_t *r_total_slots,
- uint32_t *r_usable_slots) const
+ void compute_total_and_usable_slots(int64_t min_total_slots,
+ int64_t min_usable_slots,
+ int64_t *r_total_slots,
+ int64_t *r_usable_slots) const
{
BLI_assert(is_power_of_2_i((int)min_total_slots));
- uint32_t total_slots = this->compute_total_slots(min_usable_slots, numerator_, denominator_);
+ int64_t total_slots = this->compute_total_slots(min_usable_slots, numerator_, denominator_);
total_slots = std::max(total_slots, min_total_slots);
- const uint32_t usable_slots = floor_multiplication_with_fraction(
+ const int64_t usable_slots = floor_multiplication_with_fraction(
total_slots, numerator_, denominator_);
BLI_assert(min_usable_slots <= usable_slots);
@@ -138,9 +143,9 @@ class LoadFactor {
*r_usable_slots = usable_slots;
}
- static constexpr uint32_t compute_total_slots(uint32_t min_usable_slots,
- uint8_t numerator,
- uint8_t denominator)
+ static constexpr int64_t compute_total_slots(int64_t min_usable_slots,
+ uint8_t numerator,
+ uint8_t denominator)
{
return total_slot_amount_for_usable_slots(min_usable_slots, numerator, denominator);
}
@@ -262,27 +267,27 @@ template<typename Pointer> struct PointerKeyInfo {
class HashTableStats {
private:
- Vector<uint32_t> keys_by_collision_count_;
- uint32_t total_collisions_;
+ Vector<int64_t> keys_by_collision_count_;
+ int64_t total_collisions_;
float average_collisions_;
- uint32_t size_;
- uint32_t capacity_;
- uint32_t removed_amount_;
+ int64_t size_;
+ int64_t capacity_;
+ int64_t removed_amount_;
float load_factor_;
float removed_load_factor_;
- uint32_t size_per_element_;
- uint32_t size_in_bytes_;
+ int64_t size_per_element_;
+ int64_t size_in_bytes_;
const void *address_;
public:
/**
* Requires that the hash table has the following methods:
- * - count_collisions(key) -> uint32_t
- * - size() -> uint32_t
- * - capacity() -> uint32_t
- * - removed_amount() -> uint32_t
- * - size_per_element() -> uint32_t
- * - size_in_bytes() -> uint32_t
+ * - count_collisions(key) -> int64_t
+ * - size() -> int64_t
+ * - capacity() -> int64_t
+ * - removed_amount() -> int64_t
+ * - size_per_element() -> int64_t
+ * - size_in_bytes() -> int64_t
*/
template<typename HashTable, typename Keys>
HashTableStats(const HashTable &hash_table, const Keys &keys)
@@ -296,7 +301,7 @@ class HashTableStats {
address_ = (const void *)&hash_table;
for (const auto &key : keys) {
- uint32_t collisions = hash_table.count_collisions(key);
+ int64_t collisions = hash_table.count_collisions(key);
if (keys_by_collision_count_.size() <= collisions) {
keys_by_collision_count_.append_n_times(0,
collisions - keys_by_collision_count_.size() + 1);
@@ -325,7 +330,7 @@ class HashTableStats {
std::cout << " Size per Slot: " << size_per_element_ << " bytes\n";
std::cout << " Average Collisions: " << average_collisions_ << "\n";
- for (uint32_t collision_count : keys_by_collision_count_.index_range()) {
+ for (int64_t collision_count : keys_by_collision_count_.index_range()) {
std::cout << " " << collision_count
<< " Collisions: " << keys_by_collision_count_[collision_count] << "\n";
}
diff --git a/source/blender/blenlib/BLI_index_mask.hh b/source/blender/blenlib/BLI_index_mask.hh
index 93bbb269d30..ff271faa0c2 100644
--- a/source/blender/blenlib/BLI_index_mask.hh
+++ b/source/blender/blenlib/BLI_index_mask.hh
@@ -46,7 +46,7 @@ namespace blender {
class IndexMask {
private:
/* The underlying reference to sorted integers. */
- Span<uint> indices_;
+ Span<int64_t> indices_;
public:
/* Creates an IndexMask that contains no indices. */
@@ -57,10 +57,10 @@ class IndexMask {
* This constructor asserts that the given integers are in ascending order and that there are no
* duplicates.
*/
- IndexMask(Span<uint> indices) : indices_(indices)
+ IndexMask(Span<int64_t> indices) : indices_(indices)
{
#ifdef DEBUG
- for (uint i = 1; i < indices.size(); i++) {
+ for (int64_t i = 1; i < indices.size(); i++) {
BLI_assert(indices[i - 1] < indices[i]);
}
#endif
@@ -84,28 +84,28 @@ class IndexMask {
* Do this:
* do_something_with_an_index_mask({3, 4, 5});
*/
- IndexMask(const std::initializer_list<uint> &indices) : IndexMask(Span<uint>(indices))
+ IndexMask(const std::initializer_list<int64_t> &indices) : IndexMask(Span<int64_t>(indices))
{
}
/**
* Creates an IndexMask that references the indices [0, n-1].
*/
- explicit IndexMask(uint n) : IndexMask(IndexRange(n))
+ explicit IndexMask(int64_t n) : IndexMask(IndexRange(n))
{
}
- operator Span<uint>() const
+ operator Span<int64_t>() const
{
return indices_;
}
- const uint *begin() const
+ const int64_t *begin() const
{
return indices_.begin();
}
- const uint *end() const
+ const int64_t *end() const
{
return indices_.end();
}
@@ -114,7 +114,7 @@ class IndexMask {
* Returns the n-th index referenced by this IndexMask. The `index_mask` method returns an
* IndexRange containing all indices that can be used as parameter here.
*/
- uint operator[](uint n) const
+ int64_t operator[](int64_t n) const
{
return indices_[n];
}
@@ -123,7 +123,7 @@ class IndexMask {
* Returns the minimum size an array has to have, if the integers in this IndexMask are going to
* be used as indices in that array.
*/
- uint min_array_size() const
+ int64_t min_array_size() const
{
if (indices_.size() == 0) {
return 0;
@@ -133,7 +133,7 @@ class IndexMask {
}
}
- Span<uint> indices() const
+ Span<int64_t> indices() const
{
return indices_;
}
@@ -167,12 +167,12 @@ class IndexMask {
{
if (this->is_range()) {
IndexRange range = this->as_range();
- for (uint i : range) {
+ for (int64_t i : range) {
callback(i);
}
}
else {
- for (uint i : indices_) {
+ for (int64_t i : indices_) {
callback(i);
}
}
@@ -193,7 +193,7 @@ class IndexMask {
/**
* Returns the largest index that is referenced by this IndexMask.
*/
- uint last() const
+ int64_t last() const
{
return indices_.last();
}
@@ -201,7 +201,7 @@ class IndexMask {
/**
* Returns the number of indices referenced by this IndexMask.
*/
- uint size() const
+ int64_t size() const
{
return indices_.size();
}
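
(Sketch, not part of the commit: IndexMask usage with the signed indices introduced here. Values are arbitrary.)

  const blender::IndexMask mask({2, 3, 5}); /* Must be ascending, no duplicates. */
  for (const int64_t i : mask) {
    /* Visits 2, 3 and 5. */
  }
  const int64_t array_size = mask.min_array_size(); /* 6, i.e. mask.last() + 1. */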
diff --git a/source/blender/blenlib/BLI_index_range.hh b/source/blender/blenlib/BLI_index_range.hh
index 1ae08e834ae..7c813f58b2c 100644
--- a/source/blender/blenlib/BLI_index_range.hh
+++ b/source/blender/blenlib/BLI_index_range.hh
@@ -27,29 +27,29 @@
* I'd argue that the second loop is more readable and less error prone than the first one. That is
* not necessarily always the case, but often it is.
*
- * for (uint i = 0; i < 10; i++) {
- * for (uint j = 0; j < 20; j++) {
- * for (uint k = 0; k < 30; k++) {
+ * for (int64_t i = 0; i < 10; i++) {
+ * for (int64_t j = 0; j < 20; j++) {
+ * for (int64_t k = 0; k < 30; k++) {
*
- * for (uint i : IndexRange(10)) {
- * for (uint j : IndexRange(20)) {
- * for (uint k : IndexRange(30)) {
+ * for (int64_t i : IndexRange(10)) {
+ * for (int64_t j : IndexRange(20)) {
+ * for (int64_t k : IndexRange(30)) {
*
* Some containers like blender::Vector have an index_range() method. This will return the
* IndexRange that contains all indices that can be used to access the container. This is
* particularly useful when you want to iterate over the indices and the elements (much like
* Python's enumerate(), just worse). Again, I think the second example here is better:
*
- * for (uint i = 0; i < my_vector_with_a_long_name.size(); i++) {
+ * for (int64_t i = 0; i < my_vector_with_a_long_name.size(); i++) {
* do_something(i, my_vector_with_a_long_name[i]);
*
- * for (uint i : my_vector_with_a_long_name.index_range()) {
+ * for (int64_t i : my_vector_with_a_long_name.index_range()) {
* do_something(i, my_vector_with_a_long_name[i]);
*
* Ideally this could be even closer to Python's enumerate(). We might get that in the
* future with newer C++ versions.
*
- * One other important feature is the as_span method. This method returns an Span<uint>
+ * One other important feature is the as_span method. This method returns an Span<int64_t>
* that contains the interval as individual numbers.
*/
@@ -70,18 +70,21 @@ template<typename T> class Span;
class IndexRange {
private:
- uint start_ = 0;
- uint size_ = 0;
+ int64_t start_ = 0;
+ int64_t size_ = 0;
public:
IndexRange() = default;
- explicit IndexRange(uint size) : start_(0), size_(size)
+ explicit IndexRange(int64_t size) : start_(0), size_(size)
{
+ BLI_assert(size >= 0);
}
- IndexRange(uint start, uint size) : start_(start), size_(size)
+ IndexRange(int64_t start, int64_t size) : start_(start), size_(size)
{
+ BLI_assert(start >= 0);
+ BLI_assert(size >= 0);
}
template<typename T>
@@ -91,10 +94,10 @@ class IndexRange {
class Iterator {
private:
- uint current_;
+ int64_t current_;
public:
- Iterator(uint current) : current_(current)
+ Iterator(int64_t current) : current_(current)
{
}
@@ -109,7 +112,7 @@ class IndexRange {
return current_ != iterator.current_;
}
- uint operator*() const
+ int64_t operator*() const
{
return current_;
}
@@ -128,8 +131,9 @@ class IndexRange {
/**
* Access an element in the range.
*/
- uint operator[](uint index) const
+ int64_t operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
return start_ + index;
}
@@ -145,7 +149,7 @@ class IndexRange {
/**
* Get the amount of numbers in the range.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -153,16 +157,18 @@ class IndexRange {
/**
* Create a new range starting at the end of the current one.
*/
- IndexRange after(uint n) const
+ IndexRange after(int64_t n) const
{
+ BLI_assert(n >= 0);
return IndexRange(start_ + size_, n);
}
/**
* Create a new range that ends at the start of the current one.
*/
- IndexRange before(uint n) const
+ IndexRange before(int64_t n) const
{
+ BLI_assert(n >= 0);
return IndexRange(start_ - n, n);
}
@@ -170,7 +176,7 @@ class IndexRange {
* Get the first element in the range.
* Asserts when the range is empty.
*/
- uint first() const
+ int64_t first() const
{
BLI_assert(this->size() > 0);
return start_;
@@ -180,7 +186,7 @@ class IndexRange {
* Get the last element in the range.
* Asserts when the range is empty.
*/
- uint last() const
+ int64_t last() const
{
BLI_assert(this->size() > 0);
return start_ + size_ - 1;
@@ -189,7 +195,7 @@ class IndexRange {
/**
* Get the element one after the end. The returned value is undefined when the range is empty.
*/
- uint one_after_last() const
+ int64_t one_after_last() const
{
return start_ + size_;
}
@@ -197,7 +203,7 @@ class IndexRange {
/**
* Get the first element in the range. The returned value is undefined when the range is empty.
*/
- uint start() const
+ int64_t start() const
{
return start_;
}
@@ -205,7 +211,7 @@ class IndexRange {
/**
* Returns true when the range contains a certain number, otherwise false.
*/
- bool contains(uint value) const
+ bool contains(int64_t value) const
{
return value >= start_ && value < start_ + size_;
}
@@ -213,9 +219,11 @@ class IndexRange {
/**
* Returns a new range, that contains a sub-interval of the current one.
*/
- IndexRange slice(uint start, uint size) const
+ IndexRange slice(int64_t start, int64_t size) const
{
- uint new_start = start_ + start;
+ BLI_assert(start >= 0);
+ BLI_assert(size >= 0);
+ int64_t new_start = start_ + start;
BLI_assert(new_start + size <= start_ + size_ || size == 0);
return IndexRange(new_start, size);
}
@@ -227,7 +235,7 @@ class IndexRange {
/**
* Get read-only access to a memory buffer that contains the range as actual numbers.
*/
- Span<uint> as_span() const;
+ Span<int64_t> as_span() const;
friend std::ostream &operator<<(std::ostream &stream, IndexRange range)
{
diff --git a/source/blender/blenlib/BLI_linear_allocator.hh b/source/blender/blenlib/BLI_linear_allocator.hh
index b13d88d5b93..39a3ed27f42 100644
--- a/source/blender/blenlib/BLI_linear_allocator.hh
+++ b/source/blender/blenlib/BLI_linear_allocator.hh
@@ -39,10 +39,10 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
uintptr_t current_begin_;
uintptr_t current_end_;
- uint next_min_alloc_size_;
+ int64_t next_min_alloc_size_;
#ifdef DEBUG
- uint debug_allocated_amount_ = 0;
+ int64_t debug_allocated_amount_ = 0;
#endif
public:
@@ -66,8 +66,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*
* The alignment has to be a power of 2.
*/
- void *allocate(const uint size, const uint alignment)
+ void *allocate(const int64_t size, const int64_t alignment)
{
+ BLI_assert(size >= 0);
BLI_assert(alignment >= 1);
BLI_assert(is_power_of_2_i(alignment));
@@ -105,7 +106,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*
* This method only allocates memory and does not construct the instance.
*/
- template<typename T> MutableSpan<T> allocate_array(uint size)
+ template<typename T> MutableSpan<T> allocate_array(int64_t size)
{
return MutableSpan<T>((T *)this->allocate(sizeof(T) * size, alignof(T)), size);
}
@@ -141,22 +142,22 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
StringRefNull copy_string(StringRef str)
{
- const uint alloc_size = str.size() + 1;
+ const int64_t alloc_size = str.size() + 1;
char *buffer = (char *)this->allocate(alloc_size, 1);
str.copy(buffer, alloc_size);
return StringRefNull((const char *)buffer);
}
- MutableSpan<void *> allocate_elements_and_pointer_array(uint element_amount,
- uint element_size,
- uint element_alignment)
+ MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
+ int64_t element_size,
+ int64_t element_alignment)
{
void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *));
void *elements_buffer = this->allocate(element_amount * element_size, element_alignment);
MutableSpan<void *> pointers((void **)pointer_buffer, element_amount);
void *next_element_buffer = elements_buffer;
- for (uint i : IndexRange(element_amount)) {
+ for (int64_t i : IndexRange(element_amount)) {
pointers[i] = next_element_buffer;
next_element_buffer = POINTER_OFFSET(next_element_buffer, element_size);
}
@@ -165,13 +166,13 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
}
template<typename T, typename... Args>
- Span<T *> construct_elements_and_pointer_array(uint n, Args &&... args)
+ Span<T *> construct_elements_and_pointer_array(int64_t n, Args &&... args)
{
MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array(
n, sizeof(T), alignof(T));
MutableSpan<T *> pointers = void_pointers.cast<T *>();
- for (uint i : IndexRange(n)) {
+ for (int64_t i : IndexRange(n)) {
new ((void *)pointers[i]) T(std::forward<Args>(args)...);
}
@@ -194,9 +195,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
}
private:
- void allocate_new_buffer(uint min_allocation_size)
+ void allocate_new_buffer(int64_t min_allocation_size)
{
- for (uint i : unused_borrowed_buffers_.index_range()) {
+ for (int64_t i : unused_borrowed_buffers_.index_range()) {
Span<char> buffer = unused_borrowed_buffers_[i];
if (buffer.size() >= min_allocation_size) {
unused_borrowed_buffers_.remove_and_reorder(i);
@@ -206,7 +207,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
}
}
- const uint size_in_bytes = power_of_2_min_u(
+ const int64_t size_in_bytes = power_of_2_min_u(
std::max(min_allocation_size, next_min_alloc_size_));
next_min_alloc_size_ = size_in_bytes * 2;
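
(Sketch, not part of the commit: the allocator interface above with signed sizes. The alignment argument must be a power of 2.)

  blender::LinearAllocator<> allocator;
  void *buffer = allocator.allocate(128, 8);
  blender::MutableSpan<float> floats = allocator.allocate_array<float>(100);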
diff --git a/source/blender/blenlib/BLI_listbase_wrapper.hh b/source/blender/blenlib/BLI_listbase_wrapper.hh
index 047099eb36e..46f4a9d49fa 100644
--- a/source/blender/blenlib/BLI_listbase_wrapper.hh
+++ b/source/blender/blenlib/BLI_listbase_wrapper.hh
@@ -96,9 +96,9 @@ template<typename T> class ListBaseWrapper {
return (T *)ptr;
}
- uint index_of(const T *value) const
+ int64_t index_of(const T *value) const
{
- uint index = 0;
+ int64_t index = 0;
for (T *ptr : *this) {
if (ptr == value) {
return index;
@@ -106,7 +106,7 @@ template<typename T> class ListBaseWrapper {
index++;
}
BLI_assert(false);
- return 0;
+ return -1;
}
};
diff --git a/source/blender/blenlib/BLI_map.hh b/source/blender/blenlib/BLI_map.hh
index 6bbd4ee09db..2abaf814ec9 100644
--- a/source/blender/blenlib/BLI_map.hh
+++ b/source/blender/blenlib/BLI_map.hh
@@ -96,7 +96,7 @@ template<
* When Key or Value are large, the small buffer optimization is disabled by default to avoid
* large unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint32_t InlineBufferCapacity = (sizeof(Key) + sizeof(Value) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(Key) + sizeof(Value) < 100) ? 4 : 0,
/**
* The strategy used to deal with collisions. They are defined in BLI_probing_strategies.hh.
*/
@@ -129,20 +129,20 @@ class Map {
* Slots are either empty, occupied or removed. The number of occupied slots can be computed by
* subtracting the removed slots from the occupied-and-removed slots.
*/
- uint32_t removed_slots_;
- uint32_t occupied_and_removed_slots_;
+ int64_t removed_slots_;
+ int64_t occupied_and_removed_slots_;
/**
* The maximum number of slots that can be used (either occupied or removed) until the set has to
* grow. This is the total number of slots times the max load factor.
*/
- uint32_t usable_slots_;
+ int64_t usable_slots_;
/**
* The number of slots minus one. This is a bit mask that can be used to turn any integer into a
* valid slot index efficiently.
*/
- uint32_t slot_mask_;
+ uint64_t slot_mask_;
/** This is called to hash incoming keys. */
Hash hash_;
@@ -577,8 +577,8 @@ class Map {
*/
template<typename FuncT> void foreach_item(const FuncT &func) const
{
- uint32_t size = slots_.size();
- for (uint32_t i = 0; i < size; i++) {
+ int64_t size = slots_.size();
+ for (int64_t i = 0; i < size; i++) {
const Slot &slot = slots_[i];
if (slot.is_occupied()) {
const Key &key = *slot.key();
@@ -594,10 +594,10 @@ class Map {
*/
template<typename SubIterator> struct BaseIterator {
Slot *slots_;
- uint32_t total_slots_;
- uint32_t current_slot_;
+ int64_t total_slots_;
+ int64_t current_slot_;
- BaseIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ BaseIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: slots_(const_cast<Slot *>(slots)), total_slots_(total_slots), current_slot_(current_slot)
{
}
@@ -621,7 +621,7 @@ class Map {
SubIterator begin() const
{
- for (uint32_t i = 0; i < total_slots_; i++) {
+ for (int64_t i = 0; i < total_slots_; i++) {
if (slots_[i].is_occupied()) {
return SubIterator(slots_, total_slots_, i);
}
@@ -642,7 +642,7 @@ class Map {
class KeyIterator final : public BaseIterator<KeyIterator> {
public:
- KeyIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ KeyIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<KeyIterator>(slots, total_slots, current_slot)
{
}
@@ -655,7 +655,7 @@ class Map {
class ValueIterator final : public BaseIterator<ValueIterator> {
public:
- ValueIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ ValueIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<ValueIterator>(slots, total_slots, current_slot)
{
}
@@ -668,7 +668,7 @@ class Map {
class MutableValueIterator final : public BaseIterator<MutableValueIterator> {
public:
- MutableValueIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ MutableValueIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<MutableValueIterator>(slots, total_slots, current_slot)
{
}
@@ -696,7 +696,7 @@ class Map {
class ItemIterator final : public BaseIterator<ItemIterator> {
public:
- ItemIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ ItemIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<ItemIterator>(slots, total_slots, current_slot)
{
}
@@ -710,7 +710,7 @@ class Map {
class MutableItemIterator final : public BaseIterator<MutableItemIterator> {
public:
- MutableItemIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ MutableItemIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<MutableItemIterator>(slots, total_slots, current_slot)
{
}
@@ -783,7 +783,7 @@ class Map {
/**
* Return the number of key-value-pairs that are stored in the map.
*/
- uint32_t size() const
+ int64_t size() const
{
return occupied_and_removed_slots_ - removed_slots_;
}
@@ -801,7 +801,7 @@ class Map {
/**
* Returns the number of available slots. This is mostly for debugging purposes.
*/
- uint32_t capacity() const
+ int64_t capacity() const
{
return slots_.size();
}
@@ -809,7 +809,7 @@ class Map {
/**
* Returns the amount of removed slots in the set. This is mostly for debugging purposes.
*/
- uint32_t removed_amount() const
+ int64_t removed_amount() const
{
return removed_slots_;
}
@@ -817,7 +817,7 @@ class Map {
/**
* Returns the bytes required per element. This is mostly for debugging purposes.
*/
- uint32_t size_per_element() const
+ int64_t size_per_element() const
{
return sizeof(Slot);
}
@@ -826,16 +826,16 @@ class Map {
* Returns the approximate memory requirements of the map in bytes. This becomes more exact the
* larger the map becomes.
*/
- uint32_t size_in_bytes() const
+ int64_t size_in_bytes() const
{
- return (uint32_t)(sizeof(Slot) * slots_.size());
+ return (int64_t)(sizeof(Slot) * slots_.size());
}
/**
* Potentially resize the map such that the specified number of elements can be added without
* another grow operation.
*/
- void reserve(uint32_t n)
+ void reserve(int64_t n)
{
if (usable_slots_ < n) {
this->realloc_and_reinsert(n);
@@ -855,18 +855,19 @@ class Map {
* Get the number of collisions that the probing strategy has to go through to find the key or
* determine that it is not in the map.
*/
- uint32_t count_collisions(const Key &key) const
+ int64_t count_collisions(const Key &key) const
{
return this->count_collisions__impl(key, hash_(key));
}
private:
- BLI_NOINLINE void realloc_and_reinsert(uint32_t min_usable_slots)
+ BLI_NOINLINE void realloc_and_reinsert(int64_t min_usable_slots)
{
- uint32_t total_slots, usable_slots;
+ int64_t total_slots, usable_slots;
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
- uint32_t new_slot_mask = total_slots - 1;
+ BLI_assert(total_slots >= 1);
+ const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
/**
* Optimize the case when the map was empty beforehand. We can avoid some copies here.
@@ -901,9 +902,9 @@ class Map {
void add_after_grow_and_destruct_old(Slot &old_slot,
SlotArray &new_slots,
- uint32_t new_slot_mask)
+ uint64_t new_slot_mask)
{
- uint32_t hash = old_slot.get_hash(Hash());
+ uint64_t hash = old_slot.get_hash(Hash());
SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) {
Slot &slot = new_slots[slot_index];
if (slot.is_empty()) {
@@ -914,7 +915,7 @@ class Map {
SLOT_PROBING_END();
}
- template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint32_t hash) const
+ template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint64_t hash) const
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -928,7 +929,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- void add_new__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ void add_new__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
BLI_assert(!this->contains_as(key));
@@ -945,7 +946,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- bool add__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ bool add__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
this->ensure_can_add();
@@ -962,7 +963,7 @@ class Map {
MAP_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint32_t hash)
+ template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint64_t hash)
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -977,7 +978,7 @@ class Map {
MAP_SLOT_PROBING_END();
}
- template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint32_t hash)
+ template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint64_t hash)
{
BLI_assert(this->contains_as(key));
@@ -992,7 +993,7 @@ class Map {
MAP_SLOT_PROBING_END();
}
- template<typename ForwardKey> Value pop__impl(const ForwardKey &key, uint32_t hash)
+ template<typename ForwardKey> Value pop__impl(const ForwardKey &key, uint64_t hash)
{
BLI_assert(this->contains_as(key));
@@ -1009,7 +1010,7 @@ class Map {
}
template<typename ForwardKey>
- std::optional<Value> pop_try__impl(const ForwardKey &key, uint32_t hash)
+ std::optional<Value> pop_try__impl(const ForwardKey &key, uint64_t hash)
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -1026,7 +1027,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- Value pop_default__impl(const ForwardKey &key, ForwardValue &&default_value, uint32_t hash)
+ Value pop_default__impl(const ForwardKey &key, ForwardValue &&default_value, uint64_t hash)
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -1046,7 +1047,7 @@ class Map {
auto add_or_modify__impl(ForwardKey &&key,
const CreateValueF &create_value,
const ModifyValueF &modify_value,
- uint32_t hash) -> decltype(create_value(nullptr))
+ uint64_t hash) -> decltype(create_value(nullptr))
{
using CreateReturnT = decltype(create_value(nullptr));
using ModifyReturnT = decltype(modify_value(nullptr));
@@ -1071,7 +1072,7 @@ class Map {
}
template<typename ForwardKey, typename CreateValueF>
- Value &lookup_or_add_cb__impl(ForwardKey &&key, const CreateValueF &create_value, uint32_t hash)
+ Value &lookup_or_add_cb__impl(ForwardKey &&key, const CreateValueF &create_value, uint64_t hash)
{
this->ensure_can_add();
@@ -1089,7 +1090,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- Value &lookup_or_add__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ Value &lookup_or_add__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
this->ensure_can_add();
@@ -1107,7 +1108,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- bool add_overwrite__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ bool add_overwrite__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
auto create_func = [&](Value *ptr) {
new ((void *)ptr) Value(std::forward<ForwardValue>(value));
@@ -1122,7 +1123,7 @@ class Map {
}
template<typename ForwardKey>
- const Value *lookup_ptr__impl(const ForwardKey &key, uint32_t hash) const
+ const Value *lookup_ptr__impl(const ForwardKey &key, uint64_t hash) const
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -1136,9 +1137,9 @@ class Map {
}
template<typename ForwardKey>
- uint32_t count_collisions__impl(const ForwardKey &key, uint32_t hash) const
+ int64_t count_collisions__impl(const ForwardKey &key, uint64_t hash) const
{
- uint32_t collisions = 0;
+ int64_t collisions = 0;
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -1171,9 +1172,9 @@ template<typename Key, typename Value> class StdUnorderedMapWrapper {
MapType map_;
public:
- uint32_t size() const
+ int64_t size() const
{
- return (uint32_t)map_.size();
+ return (int64_t)map_.size();
}
bool is_empty() const
@@ -1181,7 +1182,7 @@ template<typename Key, typename Value> class StdUnorderedMapWrapper {
return map_.empty();
}
- void reserve(uint32_t n)
+ void reserve(int64_t n)
{
map_.reserve(n);
}
diff --git a/source/blender/blenlib/BLI_map_slots.hh b/source/blender/blenlib/BLI_map_slots.hh
index ff3ed34eb9d..b5360795a13 100644
--- a/source/blender/blenlib/BLI_map_slots.hh
+++ b/source/blender/blenlib/BLI_map_slots.hh
@@ -155,7 +155,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* Returns the hash of the currently stored key. In this simple map slot implementation, we just
* computed the hash here. Other implementations might store the hash in the slot instead.
*/
- template<typename Hash> uint32_t get_hash(const Hash &hash)
+ template<typename Hash> uint64_t get_hash(const Hash &hash)
{
BLI_assert(this->is_occupied());
return hash(*key_buffer_);
@@ -165,7 +165,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* Move the other slot into this slot and destruct it. We do destruction here, because this way
* we can avoid a comparison with the state, since we know the slot is occupied.
*/
- void relocate_occupied_here(SimpleMapSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(SimpleMapSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -181,7 +181,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* key. The hash can be used by other slot implementations to determine inequality faster.
*/
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const
{
if (state_ == Occupied) {
return is_equal(key, *key_buffer_);
@@ -194,7 +194,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* constructed by calling the constructor with the given key/value as parameter.
*/
template<typename ForwardKey, typename ForwardValue>
- void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ void occupy(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
BLI_assert(!this->is_occupied());
this->occupy_without_value(std::forward<ForwardKey>(key), hash);
@@ -205,7 +205,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* Change the state of this slot from empty/removed to occupied, but leave the value
* uninitialized. The caller is responsible to construct the value afterwards.
*/
- template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
state_ = Occupied;
@@ -292,13 +292,13 @@ template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot
return KeyInfo::is_empty(key_);
}
- template<typename Hash> uint32_t get_hash(const Hash &hash)
+ template<typename Hash> uint64_t get_hash(const Hash &hash)
{
BLI_assert(this->is_occupied());
return hash(key_);
}
- void relocate_occupied_here(IntrusiveMapSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(IntrusiveMapSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -309,14 +309,14 @@ template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot
}
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const
{
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
return is_equal(key, key_);
}
template<typename ForwardKey, typename ForwardValue>
- void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ void occupy(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
@@ -324,7 +324,7 @@ template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot
new (&value_buffer_) Value(std::forward<ForwardValue>(value));
}
- template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
diff --git a/source/blender/blenlib/BLI_memory_utils.hh b/source/blender/blenlib/BLI_memory_utils.hh
index b73e0e95312..5c692850017 100644
--- a/source/blender/blenlib/BLI_memory_utils.hh
+++ b/source/blender/blenlib/BLI_memory_utils.hh
@@ -43,8 +43,10 @@ namespace blender {
* After:
* ptr: uninitialized
*/
-template<typename T> void destruct_n(T *ptr, uint n)
+template<typename T> void destruct_n(T *ptr, int64_t n)
{
+ BLI_assert(n >= 0);
+
static_assert(std::is_nothrow_destructible_v<T>,
"This should be true for all types. Destructors are noexcept by default.");
@@ -54,7 +56,7 @@ template<typename T> void destruct_n(T *ptr, uint n)
return;
}
- for (uint i = 0; i < n; i++) {
+ for (int64_t i = 0; i < n; i++) {
ptr[i].~T();
}
}
@@ -70,15 +72,17 @@ template<typename T> void destruct_n(T *ptr, uint n)
* After:
* ptr: initialized
*/
-template<typename T> void default_construct_n(T *ptr, uint n)
+template<typename T> void default_construct_n(T *ptr, int64_t n)
{
+ BLI_assert(n >= 0);
+
/* This is not strictly necessary, because the loop below will be optimized away anyway. It is
* nice to make this behavior explicit, though. */
if (std::is_trivially_constructible_v<T>) {
return;
}
- uint current = 0;
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(ptr + current)) T;
@@ -102,9 +106,11 @@ template<typename T> void default_construct_n(T *ptr, uint n)
* src: initialized
* dst: initialized
*/
-template<typename T> void initialized_copy_n(const T *src, uint n, T *dst)
+template<typename T> void initialized_copy_n(const T *src, int64_t n, T *dst)
{
- for (uint i = 0; i < n; i++) {
+ BLI_assert(n >= 0);
+
+ for (int64_t i = 0; i < n; i++) {
dst[i] = src[i];
}
}
@@ -121,9 +127,11 @@ template<typename T> void initialized_copy_n(const T *src, uint n, T *dst)
* src: initialized
* dst: initialized
*/
-template<typename T> void uninitialized_copy_n(const T *src, uint n, T *dst)
+template<typename T> void uninitialized_copy_n(const T *src, int64_t n, T *dst)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(src[current]);
@@ -147,9 +155,12 @@ template<typename T> void uninitialized_copy_n(const T *src, uint n, T *dst)
* src: initialized
* dst: initialized
*/
-template<typename From, typename To> void uninitialized_convert_n(const From *src, uint n, To *dst)
+template<typename From, typename To>
+void uninitialized_convert_n(const From *src, int64_t n, To *dst)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) To((To)src[current]);
@@ -173,9 +184,11 @@ template<typename From, typename To> void uninitialized_convert_n(const From *sr
* src: initialized, moved-from
* dst: initialized
*/
-template<typename T> void initialized_move_n(T *src, uint n, T *dst)
+template<typename T> void initialized_move_n(T *src, int64_t n, T *dst)
{
- for (uint i = 0; i < n; i++) {
+ BLI_assert(n >= 0);
+
+ for (int64_t i = 0; i < n; i++) {
dst[i] = std::move(src[i]);
}
}
@@ -192,9 +205,11 @@ template<typename T> void initialized_move_n(T *src, uint n, T *dst)
* src: initialized, moved-from
* dst: initialized
*/
-template<typename T> void uninitialized_move_n(T *src, uint n, T *dst)
+template<typename T> void uninitialized_move_n(T *src, int64_t n, T *dst)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(std::move(src[current]));
@@ -219,8 +234,10 @@ template<typename T> void uninitialized_move_n(T *src, uint n, T *dst)
* src: uninitialized
* dst: initialized
*/
-template<typename T> void initialized_relocate_n(T *src, uint n, T *dst)
+template<typename T> void initialized_relocate_n(T *src, int64_t n, T *dst)
{
+ BLI_assert(n >= 0);
+
initialized_move_n(src, n, dst);
destruct_n(src, n);
}
@@ -238,8 +255,10 @@ template<typename T> void initialized_relocate_n(T *src, uint n, T *dst)
* src: uninitialized
* dst: initialized
*/
-template<typename T> void uninitialized_relocate_n(T *src, uint n, T *dst)
+template<typename T> void uninitialized_relocate_n(T *src, int64_t n, T *dst)
{
+ BLI_assert(n >= 0);
+
uninitialized_move_n(src, n, dst);
destruct_n(src, n);
}
@@ -254,9 +273,11 @@ template<typename T> void uninitialized_relocate_n(T *src, uint n, T *dst)
* After:
* dst: initialized
*/
-template<typename T> void initialized_fill_n(T *dst, uint n, const T &value)
+template<typename T> void initialized_fill_n(T *dst, int64_t n, const T &value)
{
- for (uint i = 0; i < n; i++) {
+ BLI_assert(n >= 0);
+
+ for (int64_t i = 0; i < n; i++) {
dst[i] = value;
}
}
@@ -271,9 +292,11 @@ template<typename T> void initialized_fill_n(T *dst, uint n, const T &value)
* After:
* dst: initialized
*/
-template<typename T> void uninitialized_fill_n(T *dst, uint n, const T &value)
+template<typename T> void uninitialized_fill_n(T *dst, int64_t n, const T &value)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(value);
@@ -334,9 +357,9 @@ template<size_t Size, size_t Alignment> class alignas(Alignment) AlignedBuffer {
* lifetime of the object they are embedded in. It's used by containers with small buffer
* optimization and hash table implementations.
*/
-template<typename T, size_t Size = 1> class TypedBuffer {
+template<typename T, int64_t Size = 1> class TypedBuffer {
private:
- AlignedBuffer<sizeof(T) * Size, alignof(T)> buffer_;
+ AlignedBuffer<sizeof(T) * (size_t)Size, alignof(T)> buffer_;
public:
operator T *()
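The helpers above compose into manual lifetime management. A minimal usage sketch, assuming these functions and TypedBuffer sit in namespace blender like the rest of blenlib (typed_buffer_demo is an illustrative name, not part of the header):

#include <string>

#include "BLI_memory_utils.hh"

/* Illustrative only: fill a TypedBuffer's raw memory with constructed
 * values, use them, then end their lifetimes by hand. */
static void typed_buffer_demo()
{
  blender::TypedBuffer<std::string, 4> buffer;
  std::string *ptr = buffer; /* TypedBuffer converts to T *. */
  blender::uninitialized_fill_n(ptr, 4, std::string("value"));
  /* ptr[0] through ptr[3] are now valid strings. */
  blender::destruct_n(ptr, 4); /* Must run before the buffer goes away. */
}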
diff --git a/source/blender/blenlib/BLI_probing_strategies.hh b/source/blender/blenlib/BLI_probing_strategies.hh
index d2b16ac3516..0e5338fa6ed 100644
--- a/source/blender/blenlib/BLI_probing_strategies.hh
+++ b/source/blender/blenlib/BLI_probing_strategies.hh
@@ -25,17 +25,17 @@
* values based on an initial hash value.
*
* A probing strategy has to implement the following methods:
- * - Constructor(uint32_t hash): Start a new probing sequence based on the given hash.
- * - get() const -> uint32_t: Get the current value in the sequence.
+ * - Constructor(uint64_t hash): Start a new probing sequence based on the given hash.
+ * - get() const -> uint64_t: Get the current value in the sequence.
* - next() -> void: Update the internal state, so that the next value can be accessed with get().
- * - linear_steps() -> uint32_t: Returns number of linear probing steps that should be done.
+ * - linear_steps() -> int64_t: Returns the number of linear probing steps that should be done.
*
* Using linear probing steps between larger jumps can result in better performance, due to
* improved cache usage. It's a way of getting the benefits of linear probing without the
* clustering issues. However, more linear steps can also make things slower when the initial hash
* produces many collisions.
*
- * Every probing strategy has to guarantee, that every possible uint32_t is returned eventually.
+ * Every probing strategy has to guarantee that every possible uint64_t is returned eventually.
* This is necessary for correctness. If this is not the case, empty slots might not be found.
*
* The SLOT_PROBING_BEGIN and SLOT_PROBING_END macros can be used to implement a loop that iterates
@@ -65,10 +65,10 @@ namespace blender {
*/
class LinearProbingStrategy {
private:
- uint32_t hash_;
+ uint64_t hash_;
public:
- LinearProbingStrategy(const uint32_t hash) : hash_(hash)
+ LinearProbingStrategy(const uint64_t hash) : hash_(hash)
{
}
@@ -77,12 +77,12 @@ class LinearProbingStrategy {
hash_++;
}
- uint32_t get() const
+ uint64_t get() const
{
return hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return UINT32_MAX;
}
@@ -101,12 +101,12 @@ class LinearProbingStrategy {
*/
class QuadraticProbingStrategy {
private:
- uint32_t original_hash_;
- uint32_t current_hash_;
- uint32_t iteration_;
+ uint64_t original_hash_;
+ uint64_t current_hash_;
+ uint64_t iteration_;
public:
- QuadraticProbingStrategy(const uint32_t hash)
+ QuadraticProbingStrategy(const uint64_t hash)
: original_hash_(hash), current_hash_(hash), iteration_(1)
{
}
@@ -117,12 +117,12 @@ class QuadraticProbingStrategy {
iteration_++;
}
- uint32_t get() const
+ uint64_t get() const
{
return current_hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return 1;
}
@@ -138,13 +138,13 @@ class QuadraticProbingStrategy {
* PreShuffle: When true, the initial call to next() will be done by the constructor. This can help
* when the hash function has put little information into the lower bits.
*/
-template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy {
+template<uint64_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy {
private:
- uint32_t hash_;
- uint32_t perturb_;
+ uint64_t hash_;
+ uint64_t perturb_;
public:
- PythonProbingStrategy(const uint32_t hash) : hash_(hash), perturb_(hash)
+ PythonProbingStrategy(const uint64_t hash) : hash_(hash), perturb_(hash)
{
if (PreShuffle) {
this->next();
@@ -157,12 +157,12 @@ template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingS
hash_ = 5 * hash_ + 1 + perturb_;
}
- uint32_t get() const
+ uint64_t get() const
{
return hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return LinearSteps;
}
@@ -173,13 +173,13 @@ template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingS
* method. This way more bits are taken into account earlier. After a couple of collisions (which
* should happen rarely), it will fall back to a sequence that hits every slot.
*/
-template<uint32_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy {
+template<uint64_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy {
private:
- uint32_t hash_;
- uint32_t perturb_;
+ uint64_t hash_;
+ uint64_t perturb_;
public:
- ShuffleProbingStrategy(const uint32_t hash) : hash_(hash), perturb_(hash)
+ ShuffleProbingStrategy(const uint64_t hash) : hash_(hash), perturb_(hash)
{
if (PreShuffle) {
this->next();
@@ -197,12 +197,12 @@ template<uint32_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbing
}
}
- uint32_t get() const
+ uint64_t get() const
{
return hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return LinearSteps;
}
@@ -233,10 +233,10 @@ using DefaultProbingStrategy = PythonProbingStrategy<>;
#define SLOT_PROBING_BEGIN(PROBING_STRATEGY, HASH, MASK, R_SLOT_INDEX) \
PROBING_STRATEGY probing_strategy(HASH); \
do { \
- uint32_t linear_offset = 0; \
- uint32_t current_hash = probing_strategy.get(); \
+ int64_t linear_offset = 0; \
+ uint64_t current_hash = probing_strategy.get(); \
do { \
- uint32_t R_SLOT_INDEX = (current_hash + linear_offset) & MASK;
+ int64_t R_SLOT_INDEX = (int64_t)((current_hash + (uint64_t)linear_offset) & MASK);
#define SLOT_PROBING_END() \
} while (++linear_offset < probing_strategy.linear_steps()); \
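Conceptually the two macros unroll into the loop below. A hedged sketch of the expansion (find_slot and is_target are illustrative names, not blenlib API):

/* Probe slot indices derived from the hash until the predicate accepts one.
 * Mirrors SLOT_PROBING_BEGIN/END: linear_steps() inner runs, next() jumps. */
template<typename Predicate>
static int64_t find_slot(const uint64_t hash, const uint64_t mask, const Predicate &is_target)
{
  blender::DefaultProbingStrategy probing_strategy(hash);
  while (true) {
    const uint64_t current_hash = probing_strategy.get();
    for (int64_t offset = 0; offset < probing_strategy.linear_steps(); offset++) {
      const int64_t slot_index = (int64_t)((current_hash + (uint64_t)offset) & mask);
      if (is_target(slot_index)) {
        return slot_index;
      }
    }
    probing_strategy.next();
  }
}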
diff --git a/source/blender/blenlib/BLI_rand.hh b/source/blender/blenlib/BLI_rand.hh
index bfc4d276165..612ac0bbe19 100644
--- a/source/blender/blenlib/BLI_rand.hh
+++ b/source/blender/blenlib/BLI_rand.hh
@@ -86,7 +86,7 @@ class RandomNumberGenerator {
/**
* Simulate getting \a n random values.
*/
- void skip(uint n)
+ void skip(int64_t n)
{
while (n--) {
this->step();
diff --git a/source/blender/blenlib/BLI_resource_collector.hh b/source/blender/blenlib/BLI_resource_collector.hh
index 672a1269962..10d610da618 100644
--- a/source/blender/blenlib/BLI_resource_collector.hh
+++ b/source/blender/blenlib/BLI_resource_collector.hh
@@ -51,7 +51,7 @@ class ResourceCollector : NonCopyable, NonMovable {
~ResourceCollector()
{
/* Free in reversed order. */
- for (uint i = m_resources.size(); i--;) {
+ for (int64_t i = m_resources.size(); i--;) {
ResourceData &data = m_resources[i];
data.free(data.data);
}
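The `i--` in the condition is the usual countdown idiom: the test reads i and then decrements, so the body sees size - 1 down to 0 and the loop stops once the test reads zero. For example:

/* Body sees i = 2, then 1, then 0; the loop then terminates. */
for (int64_t i = 3; i--;) {
  /* use i */
}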
diff --git a/source/blender/blenlib/BLI_set.hh b/source/blender/blenlib/BLI_set.hh
index c5096f84c80..80e4858bbb9 100644
--- a/source/blender/blenlib/BLI_set.hh
+++ b/source/blender/blenlib/BLI_set.hh
@@ -93,7 +93,7 @@ template<
* When Key is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint32_t InlineBufferCapacity = (sizeof(Key) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(Key) < 100) ? 4 : 0,
/**
* The strategy used to deal with collisions. They are defined in BLI_probing_strategies.hh.
*/
@@ -128,20 +128,20 @@ class Set {
* Slots are either empty, occupied or removed. The number of occupied slots can be computed by
* subtracting the removed slots from the occupied-and-removed slots.
*/
- uint32_t removed_slots_;
- uint32_t occupied_and_removed_slots_;
+ int64_t removed_slots_;
+ int64_t occupied_and_removed_slots_;
/**
* The maximum number of slots that can be used (either occupied or removed) until the set has to
* grow. This is the total number of slots times the max load factor.
*/
- uint32_t usable_slots_;
+ int64_t usable_slots_;
/**
* The number of slots minus one. This is a bit mask that can be used to turn any integer into a
* valid slot index efficiently.
*/
- uint32_t slot_mask_;
+ uint64_t slot_mask_;
/** This is called to hash incoming keys. */
Hash hash_;
@@ -384,11 +384,11 @@ class Set {
class Iterator {
private:
const Slot *slots_;
- uint32_t total_slots_;
- uint32_t current_slot_;
+ int64_t total_slots_;
+ int64_t current_slot_;
public:
- Iterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ Iterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: slots_(slots), total_slots_(total_slots), current_slot_(current_slot)
{
}
@@ -418,7 +418,7 @@ class Set {
Iterator begin() const
{
- for (uint32_t i = 0; i < slots_.size(); i++) {
+ for (int64_t i = 0; i < slots_.size(); i++) {
if (slots_[i].is_occupied()) {
return Iterator(slots_.data(), slots_.size(), i);
}
@@ -444,7 +444,7 @@ class Set {
* Get the number of collisions that the probing strategy has to go through to find the key or
* determine that it is not in the set.
*/
- uint32_t count_collisions(const Key &key) const
+ int64_t count_collisions(const Key &key) const
{
return this->count_collisions__impl(key, hash_(key));
}
@@ -470,7 +470,7 @@ class Set {
/**
* Returns the number of keys stored in the set.
*/
- uint32_t size() const
+ int64_t size() const
{
return occupied_and_removed_slots_ - removed_slots_;
}
@@ -486,7 +486,7 @@ class Set {
/**
* Returns the number of available slots. This is mostly for debugging purposes.
*/
- uint32_t capacity() const
+ int64_t capacity() const
{
return slots_.size();
}
@@ -494,7 +494,7 @@ class Set {
/**
* Returns the number of removed slots in the set. This is mostly for debugging purposes.
*/
- uint32_t removed_amount() const
+ int64_t removed_amount() const
{
return removed_slots_;
}
@@ -502,7 +502,7 @@ class Set {
/**
* Returns the bytes required per element. This is mostly for debugging purposes.
*/
- uint32_t size_per_element() const
+ int64_t size_per_element() const
{
return sizeof(Slot);
}
@@ -511,7 +511,7 @@ class Set {
* Returns the approximate memory requirements of the set in bytes. This is more correct for
* larger sets.
*/
- uint32_t size_in_bytes() const
+ int64_t size_in_bytes() const
{
return sizeof(Slot) * slots_.size();
}
@@ -520,7 +520,7 @@ class Set {
* Potentially resize the set such that it can hold the specified number of keys without another
* grow operation.
*/
- void reserve(const uint32_t n)
+ void reserve(const int64_t n)
{
if (usable_slots_ < n) {
this->realloc_and_reinsert(n);
@@ -554,12 +554,13 @@ class Set {
}
private:
- BLI_NOINLINE void realloc_and_reinsert(const uint32_t min_usable_slots)
+ BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots)
{
- uint32_t total_slots, usable_slots;
+ int64_t total_slots, usable_slots;
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
- const uint32_t new_slot_mask = total_slots - 1;
+ BLI_assert(total_slots >= 1);
+ const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
/**
* Optimize the case when the set was empty beforehand. We can avoid some copies here.
@@ -595,9 +596,9 @@ class Set {
void add_after_grow_and_destruct_old(Slot &old_slot,
SlotArray &new_slots,
- const uint32_t new_slot_mask)
+ const uint64_t new_slot_mask)
{
- const uint32_t hash = old_slot.get_hash(Hash());
+ const uint64_t hash = old_slot.get_hash(Hash());
SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) {
Slot &slot = new_slots[slot_index];
@@ -610,7 +611,7 @@ class Set {
}
template<typename ForwardKey>
- bool contains__impl(const ForwardKey &key, const uint32_t hash) const
+ bool contains__impl(const ForwardKey &key, const uint64_t hash) const
{
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -624,7 +625,7 @@ class Set {
}
template<typename ForwardKey>
- const Key &lookup_key__impl(const ForwardKey &key, const uint32_t hash) const
+ const Key &lookup_key__impl(const ForwardKey &key, const uint64_t hash) const
{
BLI_assert(this->contains_as(key));
@@ -637,7 +638,7 @@ class Set {
}
template<typename ForwardKey>
- const Key *lookup_key_ptr__impl(const ForwardKey &key, const uint32_t hash) const
+ const Key *lookup_key_ptr__impl(const ForwardKey &key, const uint64_t hash) const
{
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -650,7 +651,7 @@ class Set {
SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint64_t hash)
{
BLI_assert(!this->contains_as(key));
@@ -666,7 +667,7 @@ class Set {
SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint64_t hash)
{
this->ensure_can_add();
@@ -683,7 +684,7 @@ class Set {
SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint32_t hash)
+ template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint64_t hash)
{
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -699,7 +700,7 @@ class Set {
}
template<typename ForwardKey>
- void remove_contained__impl(const ForwardKey &key, const uint32_t hash)
+ void remove_contained__impl(const ForwardKey &key, const uint64_t hash)
{
BLI_assert(this->contains_as(key));
removed_slots_++;
@@ -714,9 +715,9 @@ class Set {
}
template<typename ForwardKey>
- uint32_t count_collisions__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t count_collisions__impl(const ForwardKey &key, const uint64_t hash) const
{
- uint32_t collisions = 0;
+ int64_t collisions = 0;
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -749,9 +750,9 @@ template<typename Key> class StdUnorderedSetWrapper {
SetType set_;
public:
- uint32_t size() const
+ int64_t size() const
{
- return (uint32_t)set_.size();
+ return (int64_t)set_.size();
}
bool is_empty() const
@@ -759,7 +760,7 @@ template<typename Key> class StdUnorderedSetWrapper {
return set_.empty();
}
- void reserve(uint32_t n)
+ void reserve(int64_t n)
{
set_.reserve(n);
}
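Because the slot count is always a power of two, masking with slot_mask_ is equivalent to a modulo by the slot count but compiles to a single AND. A small self-contained illustration (all names are illustrative):

#include <cstdint>

constexpr uint64_t slot_count = 64;            /* always a power of two */
constexpr uint64_t slot_mask = slot_count - 1; /* low six bits set */

/* Maps any 64-bit hash into [0, slot_count); same result as hash % slot_count. */
static int64_t slot_index_of(const uint64_t hash)
{
  return (int64_t)(hash & slot_mask);
}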
diff --git a/source/blender/blenlib/BLI_set_slots.hh b/source/blender/blenlib/BLI_set_slots.hh
index d3891e78b52..b78ed37f534 100644
--- a/source/blender/blenlib/BLI_set_slots.hh
+++ b/source/blender/blenlib/BLI_set_slots.hh
@@ -133,7 +133,7 @@ template<typename Key> class SimpleSetSlot {
* Return the hash of the currently stored key. In this simple set slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
- template<typename Hash> uint32_t get_hash(const Hash &hash) const
+ template<typename Hash> uint64_t get_hash(const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(*key_buffer_);
@@ -143,7 +143,7 @@ template<typename Key> class SimpleSetSlot {
* Move the other slot into this slot and destruct it. We do destruction here, because this way
* we can avoid a comparison with the state, since we know the slot is occupied.
*/
- void relocate_occupied_here(SimpleSetSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(SimpleSetSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -157,7 +157,7 @@ template<typename Key> class SimpleSetSlot {
* key. The hash is used by other slot implementations to determine inequality faster.
*/
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const
{
if (state_ == Occupied) {
return is_equal(key, *key_buffer_);
@@ -169,7 +169,7 @@ template<typename Key> class SimpleSetSlot {
* Change the state of this slot from empty/removed to occupied. The key has to be constructed
* by calling the constructor with the given key as parameter.
*/
- template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy(ForwardKey &&key, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
state_ = Occupied;
@@ -199,7 +199,7 @@ template<typename Key> class HashedSetSlot {
Removed = 2,
};
- uint32_t hash_;
+ uint64_t hash_;
State state_;
TypedBuffer<Key> key_buffer_;
@@ -254,13 +254,13 @@ template<typename Key> class HashedSetSlot {
return state_ == Empty;
}
- template<typename Hash> uint32_t get_hash(const Hash &UNUSED(hash)) const
+ template<typename Hash> uint64_t get_hash(const Hash &UNUSED(hash)) const
{
BLI_assert(this->is_occupied());
return hash_;
}
- void relocate_occupied_here(HashedSetSlot &other, const uint32_t hash)
+ void relocate_occupied_here(HashedSetSlot &other, const uint64_t hash)
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -271,7 +271,7 @@ template<typename Key> class HashedSetSlot {
}
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint32_t hash) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint64_t hash) const
{
/* hash_ might be uninitialized here, but that is ok. */
if (hash_ == hash) {
@@ -282,7 +282,7 @@ template<typename Key> class HashedSetSlot {
return false;
}
- template<typename ForwardKey> void occupy(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> void occupy(ForwardKey &&key, const uint64_t hash)
{
BLI_assert(!this->is_occupied());
state_ = Occupied;
@@ -336,13 +336,13 @@ template<typename Key, typename KeyInfo> class IntrusiveSetSlot {
return KeyInfo::is_empty(key_);
}
- template<typename Hash> uint32_t get_hash(const Hash &hash) const
+ template<typename Hash> uint64_t get_hash(const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(key_);
}
- void relocate_occupied_here(IntrusiveSetSlot &other, const uint32_t UNUSED(hash))
+ void relocate_occupied_here(IntrusiveSetSlot &other, const uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -351,13 +351,13 @@ template<typename Key, typename KeyInfo> class IntrusiveSetSlot {
}
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint64_t UNUSED(hash)) const
{
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
return is_equal(key_, key);
}
- template<typename ForwardKey> void occupy(ForwardKey &&key, const uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy(ForwardKey &&key, const uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
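The hash stored in HashedSetSlot is a pre-filter: the key comparison only runs when the full 64-bit hashes already match, which is what the `if (hash_ == hash)` early-out above implements. A hedged sketch of the effect with std::string keys (slot_matches is an illustrative name):

#include <cstdint>
#include <string>

/* One integer compare rejects nearly all non-matching slots before the
 * potentially expensive key comparison runs. */
static bool slot_matches(const uint64_t stored_hash, const std::string &stored_key,
                         const uint64_t query_hash, const std::string &query_key)
{
  if (stored_hash != query_hash) {
    return false; /* Fast path. */
  }
  return stored_key == query_key; /* Rarely reached unless it is a real match. */
}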
diff --git a/source/blender/blenlib/BLI_span.hh b/source/blender/blenlib/BLI_span.hh
index 57ef9ce9eb6..67bb32304de 100644
--- a/source/blender/blenlib/BLI_span.hh
+++ b/source/blender/blenlib/BLI_span.hh
@@ -88,7 +88,7 @@ namespace blender {
template<typename T> class Span {
private:
const T *start_ = nullptr;
- uint size_ = 0;
+ int64_t size_ = 0;
public:
/**
@@ -96,13 +96,15 @@ template<typename T> class Span {
*/
Span() = default;
- Span(const T *start, uint size) : start_(start), size_(size)
+ Span(const T *start, int64_t size) : start_(start), size_(size)
{
+ BLI_assert(size >= 0);
}
template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr>
- Span(const U *start, uint size) : start_((const T *)start), size_(size)
+ Span(const U *start, int64_t size) : start_((const T *)start), size_(size)
{
+ BLI_assert(size >= 0);
}
/**
@@ -116,11 +118,11 @@ template<typename T> class Span {
* Span<int> span = {1, 2, 3, 4};
* call_function_with_array(span);
*/
- Span(const std::initializer_list<T> &list) : Span(list.begin(), (uint)list.size())
+ Span(const std::initializer_list<T> &list) : Span(list.begin(), (int64_t)list.size())
{
}
- Span(const std::vector<T> &vector) : Span(vector.data(), (uint)vector.size())
+ Span(const std::vector<T> &vector) : Span(vector.data(), (int64_t)vector.size())
{
}
@@ -142,8 +144,10 @@ template<typename T> class Span {
* Returns a contiguous part of the array. This invokes undefined behavior when the slice does
* not stay within the bounds of the array.
*/
- Span slice(uint start, uint size) const
+ Span slice(int64_t start, int64_t size) const
{
+ BLI_assert(start >= 0);
+ BLI_assert(size >= 0);
BLI_assert(start + size <= this->size() || size == 0);
return Span(start_ + start, size);
}
@@ -157,8 +161,9 @@ template<typename T> class Span {
* Returns a new Span with n elements removed from the beginning. This invokes undefined
* behavior when the array is too small.
*/
- Span drop_front(uint n) const
+ Span drop_front(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
}
@@ -167,8 +172,9 @@ template<typename T> class Span {
* Returns a new Span with n elements removed from the end. This invokes undefined
* behavior when the array is too small.
*/
- Span drop_back(uint n) const
+ Span drop_back(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
}
@@ -177,8 +183,9 @@ template<typename T> class Span {
* Returns a new Span that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
- Span take_front(uint n) const
+ Span take_front(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(0, n);
}
@@ -187,8 +194,9 @@ template<typename T> class Span {
* Returns a new Span that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
- Span take_back(uint n) const
+ Span take_back(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(this->size() - n, n);
}
@@ -216,8 +224,9 @@ template<typename T> class Span {
* Access an element in the array. This invokes undefined behavior when the index is out of
* bounds.
*/
- const T &operator[](uint index) const
+ const T &operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return start_[index];
}
@@ -225,7 +234,7 @@ template<typename T> class Span {
/**
* Returns the number of elements in the referenced array.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -241,7 +250,7 @@ template<typename T> class Span {
/**
* Returns the number of bytes referenced by this Span.
*/
- uint size_in_bytes() const
+ int64_t size_in_bytes() const
{
return sizeof(T) * size_;
}
@@ -273,9 +282,9 @@ template<typename T> class Span {
* Does a linear search to count how often the value is in the array.
* Returns the number of occurrences.
*/
- uint count(const T &value) const
+ int64_t count(const T &value) const
{
- uint counter = 0;
+ int64_t counter = 0;
for (const T &element : *this) {
if (element == value) {
counter++;
@@ -308,9 +317,9 @@ template<typename T> class Span {
* Returns the element at the given index. If the index is out of range, return the fallback
* value.
*/
- T get(uint index, const T &fallback) const
+ T get(int64_t index, const T &fallback) const
{
- if (index < size_) {
+ if (index < size_ && index >= 0) {
return start_[index];
}
return fallback;
@@ -326,9 +335,9 @@ template<typename T> class Span {
* changed. */
BLI_assert(size_ < 1000);
- for (uint i = 0; i < size_; i++) {
+ for (int64_t i = 0; i < size_; i++) {
const T &value = start_[i];
- for (uint j = i + 1; j < size_; j++) {
+ for (int64_t j = i + 1; j < size_; j++) {
if (value == start_[j]) {
return true;
}
@@ -348,7 +357,7 @@ template<typename T> class Span {
* changed. */
BLI_assert(size_ < 1000);
- for (uint i = 0; i < size_; i++) {
+ for (int64_t i = 0; i < size_; i++) {
const T &value = start_[i];
if (other.contains(value)) {
return true;
@@ -361,19 +370,19 @@ template<typename T> class Span {
* Returns the index of the first occurrence of the given value. This invokes undefined behavior
* when the value is not in the array.
*/
- uint first_index(const T &search_value) const
+ int64_t first_index(const T &search_value) const
{
- const int index = this->first_index_try(search_value);
+ const int64_t index = this->first_index_try(search_value);
BLI_assert(index >= 0);
- return (uint)index;
+ return index;
}
/**
* Returns the index of the first occurrence of the given value or -1 if it does not exist.
*/
- int first_index_try(const T &search_value) const
+ int64_t first_index_try(const T &search_value) const
{
- for (uint i = 0; i < size_; i++) {
+ for (int64_t i = 0; i < size_; i++) {
if (start_[i] == search_value) {
return i;
}
@@ -396,7 +405,7 @@ template<typename T> class Span {
template<typename NewT> Span<NewT> cast() const
{
BLI_assert((size_ * sizeof(T)) % sizeof(NewT) == 0);
- uint new_size = size_ * sizeof(T) / sizeof(NewT);
+ int64_t new_size = size_ * sizeof(T) / sizeof(NewT);
return Span<NewT>(reinterpret_cast<const NewT *>(start_), new_size);
}
@@ -431,12 +440,12 @@ template<typename T> class Span {
template<typename T> class MutableSpan {
private:
T *start_;
- uint size_;
+ int64_t size_;
public:
MutableSpan() = default;
- MutableSpan(T *start, const uint size) : start_(start), size_(size)
+ MutableSpan(T *start, const int64_t size) : start_(start), size_(size)
{
}
@@ -456,7 +465,7 @@ template<typename T> class MutableSpan {
/**
* Returns the number of elements in the array.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -473,9 +482,9 @@ template<typename T> class MutableSpan {
* Replace a subset of all elements with the given value. This invokes undefined behavior when
* one of the indices is out of bounds.
*/
- void fill_indices(Span<uint> indices, const T &value)
+ void fill_indices(Span<int64_t> indices, const T &value)
{
- for (uint i : indices) {
+ for (int64_t i : indices) {
BLI_assert(i < size_);
start_[i] = value;
}
@@ -500,7 +509,7 @@ template<typename T> class MutableSpan {
return start_ + size_;
}
- T &operator[](const uint index) const
+ T &operator[](const int64_t index) const
{
BLI_assert(index < this->size());
return start_[index];
@@ -510,7 +519,7 @@ template<typename T> class MutableSpan {
* Returns a contiguous part of the array. This invokes undefined behavior when the slice would
* go out of bounds.
*/
- MutableSpan slice(const uint start, const uint length) const
+ MutableSpan slice(const int64_t start, const int64_t length) const
{
BLI_assert(start + length <= this->size());
return MutableSpan(start_ + start, length);
@@ -520,7 +529,7 @@ template<typename T> class MutableSpan {
* Returns a new MutableSpan with n elements removed from the beginning. This invokes
* undefined behavior when the array is too small.
*/
- MutableSpan drop_front(const uint n) const
+ MutableSpan drop_front(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
@@ -530,7 +539,7 @@ template<typename T> class MutableSpan {
* Returns a new MutableSpan with n elements removed from the end. This invokes undefined
* behavior when the array is too small.
*/
- MutableSpan drop_back(const uint n) const
+ MutableSpan drop_back(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
@@ -540,7 +549,7 @@ template<typename T> class MutableSpan {
* Returns a new MutableSpan that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
- MutableSpan take_front(const uint n) const
+ MutableSpan take_front(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(0, n);
@@ -550,7 +559,7 @@ template<typename T> class MutableSpan {
* Return a new MutableSpan that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
- MutableSpan take_back(const uint n) const
+ MutableSpan take_back(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(this->size() - n, n);
@@ -590,7 +599,7 @@ template<typename T> class MutableSpan {
template<typename NewT> MutableSpan<NewT> cast() const
{
BLI_assert((size_ * sizeof(T)) % sizeof(NewT) == 0);
- uint new_size = size_ * sizeof(T) / sizeof(NewT);
+ int64_t new_size = size_ * sizeof(T) / sizeof(NewT);
return MutableSpan<NewT>(reinterpret_cast<NewT *>(start_), new_size);
}
};
@@ -602,7 +611,7 @@ template<typename T1, typename T2> void assert_same_size(const T1 &v1, const T2
{
UNUSED_VARS_NDEBUG(v1, v2);
#ifdef DEBUG
- uint size = v1.size();
+ int64_t size = v1.size();
BLI_assert(size == v1.size());
BLI_assert(size == v2.size());
#endif
@@ -613,7 +622,7 @@ void assert_same_size(const T1 &v1, const T2 &v2, const T3 &v3)
{
UNUSED_VARS_NDEBUG(v1, v2, v3);
#ifdef DEBUG
- uint size = v1.size();
+ int64_t size = v1.size();
BLI_assert(size == v1.size());
BLI_assert(size == v2.size());
BLI_assert(size == v3.size());
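The slicing helpers compose naturally, since each is a thin wrapper over slice(). A brief usage sketch (span_demo is an illustrative name):

#include "BLI_span.hh"

static void span_demo()
{
  const int values[] = {1, 2, 3, 4, 5, 6};
  blender::Span<int> span(values, 6);

  blender::Span<int> middle = span.drop_front(1).drop_back(1); /* 2, 3, 4, 5 */
  blender::Span<int> head = span.take_front(2);                /* 1, 2 */
  const int missing = span.get(100, -1);                       /* -1 fallback */
  (void)middle; (void)head; (void)missing;
}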
diff --git a/source/blender/blenlib/BLI_stack.hh b/source/blender/blenlib/BLI_stack.hh
index a5a95186e37..422931862a8 100644
--- a/source/blender/blenlib/BLI_stack.hh
+++ b/source/blender/blenlib/BLI_stack.hh
@@ -60,7 +60,7 @@ template<typename T> struct StackChunk {
/** Pointer to one element past the end of the referenced buffer. */
T *capacity_end;
- uint capacity() const
+ int64_t capacity() const
{
return capacity_end - begin;
}
@@ -77,7 +77,7 @@ template<
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this stack. Should rarely be changed, except when you don't want that
* MEM_* is used internally.
@@ -103,7 +103,7 @@ class Stack {
/**
* Number of elements in the entire stack. The sum of initialized element counts in the chunks.
*/
- uint size_;
+ int64_t size_;
/** The buffer used to implement small object optimization. */
TypedBuffer<T, InlineBufferCapacity> inline_buffer_;
@@ -298,8 +298,8 @@ class Stack {
this->activate_next_chunk(remaining_values.size());
}
- const uint remaining_capacity = top_chunk_->capacity_end - top_;
- const uint amount = std::min(remaining_values.size(), remaining_capacity);
+ const int64_t remaining_capacity = top_chunk_->capacity_end - top_;
+ const int64_t amount = std::min(remaining_values.size(), remaining_capacity);
uninitialized_copy_n(remaining_values.data(), amount, top_);
top_ += amount;
@@ -320,7 +320,7 @@ class Stack {
/**
* Returns the number of elements in the stack.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -344,11 +344,11 @@ class Stack {
*
* This invokes undefined behavior when the currently active chunk is not full.
*/
- void activate_next_chunk(const uint size_hint)
+ void activate_next_chunk(const int64_t size_hint)
{
BLI_assert(top_ == top_chunk_->capacity_end);
if (top_chunk_->above == nullptr) {
- const uint new_capacity = std::max(size_hint, top_chunk_->capacity() * 2 + 10);
+ const int64_t new_capacity = std::max(size_hint, top_chunk_->capacity() * 2 + 10);
/* Do a single memory allocation for the Chunk and the array it references. */
void *buffer = allocator_.allocate(
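The `capacity() * 2 + 10` rule makes chunk capacities grow geometrically, so pushing n elements allocates only O(log n) chunks. Starting from an inline capacity of 4, the progression is:

/* Illustrative: successive chunk capacities under new_cap = cap * 2 + 10. */
int64_t capacity = 4; /* inline buffer */
for (int i = 0; i < 4; i++) {
  capacity = capacity * 2 + 10; /* 18, 46, 102, 214, ... */
}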
diff --git a/source/blender/blenlib/BLI_string_ref.hh b/source/blender/blenlib/BLI_string_ref.hh
index 5b555b8cd1d..06fc66f6b55 100644
--- a/source/blender/blenlib/BLI_string_ref.hh
+++ b/source/blender/blenlib/BLI_string_ref.hh
@@ -60,9 +60,9 @@ class StringRef;
class StringRefBase {
protected:
const char *data_;
- uint size_;
+ int64_t size_;
- StringRefBase(const char *data, const uint size) : data_(data), size_(size)
+ StringRefBase(const char *data, const int64_t size) : data_(data), size_(size)
{
}
@@ -70,7 +70,7 @@ class StringRefBase {
/**
* Return the (byte-)length of the referenced string, without any null-terminator.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -94,7 +94,7 @@ class StringRefBase {
*/
operator std::string() const
{
- return std::string(data_, size_);
+ return std::string(data_, (size_t)size_);
}
const char *begin() const
@@ -114,7 +114,7 @@ class StringRefBase {
*/
void unsafe_copy(char *dst) const
{
- memcpy(dst, data_, size_);
+ memcpy(dst, data_, (size_t)size_);
dst[size_] = '\0';
}
@@ -122,7 +122,7 @@ class StringRefBase {
* Copy the string into a buffer. The copied string will be null-terminated. This invokes
* undefined behavior when dst_size is too small. (Should we define the behavior?)
*/
- void copy(char *dst, const uint dst_size) const
+ void copy(char *dst, const int64_t dst_size) const
{
if (size_ < dst_size) {
this->unsafe_copy(dst);
@@ -137,7 +137,7 @@ class StringRefBase {
* Copy the string into a char array. The copied string will be null-terminated. This invokes
* undefined behavior when dst is too small.
*/
- template<uint N> void copy(char (&dst)[N])
+ template<size_t N> void copy(char (&dst)[N])
{
this->copy(dst, N);
}
@@ -152,7 +152,7 @@ class StringRefBase {
*/
bool endswith(StringRef suffix) const;
- StringRef substr(uint start, const uint size) const;
+ StringRef substr(int64_t start, const int64_t size) const;
};
/**
@@ -168,7 +168,7 @@ class StringRefNull : public StringRefBase {
/**
* Construct a StringRefNull from a null terminated c-string. The pointer must not point to NULL.
*/
- StringRefNull(const char *str) : StringRefBase(str, (uint)strlen(str))
+ StringRefNull(const char *str) : StringRefBase(str, (int64_t)strlen(str))
{
BLI_assert(str != NULL);
BLI_assert(data_[size_] == '\0');
@@ -178,9 +178,9 @@ class StringRefNull : public StringRefBase {
* Construct a StringRefNull from a null terminated c-string. This invokes undefined behavior
* when the given size is not the correct size of the string.
*/
- StringRefNull(const char *str, const uint size) : StringRefBase(str, size)
+ StringRefNull(const char *str, const int64_t size) : StringRefBase(str, size)
{
- BLI_assert((uint)strlen(str) == size);
+ BLI_assert((int64_t)strlen(str) == size);
}
/**
@@ -194,8 +194,9 @@ class StringRefNull : public StringRefBase {
/**
* Get the char at the given index.
*/
- char operator[](const uint index) const
+ char operator[](const int64_t index) const
{
+ BLI_assert(index >= 0);
/* Use '<=' instead of just '<', so that the null character can be accessed as well. */
BLI_assert(index <= size_);
return data_[index];
@@ -231,11 +232,11 @@ class StringRef : public StringRefBase {
/**
* Create a StringRef from a null-terminated c-string.
*/
- StringRef(const char *str) : StringRefBase(str, str ? (uint)strlen(str) : 0)
+ StringRef(const char *str) : StringRefBase(str, str ? (int64_t)strlen(str) : 0)
{
}
- StringRef(const char *str, const uint length) : StringRefBase(str, length)
+ StringRef(const char *str, const int64_t length) : StringRefBase(str, length)
{
}
@@ -244,7 +245,7 @@ class StringRef : public StringRefBase {
* second pointer points to a smaller address than the first one.
*/
StringRef(const char *begin, const char *one_after_end)
- : StringRefBase(begin, (uint)(one_after_end - begin))
+ : StringRefBase(begin, (int64_t)(one_after_end - begin))
{
BLI_assert(begin <= one_after_end);
}
@@ -253,15 +254,16 @@ class StringRef : public StringRefBase {
* Reference a std::string. Remember that when the std::string is destructed, the StringRef
* will point to freed memory.
*/
- StringRef(const std::string &str) : StringRefBase(str.data(), (uint)str.size())
+ StringRef(const std::string &str) : StringRefBase(str.data(), (int64_t)str.size())
{
}
/**
* Return a new StringRef that does not contain the first n chars.
*/
- StringRef drop_prefix(const uint n) const
+ StringRef drop_prefix(const int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= size_);
return StringRef(data_ + n, size_ - n);
}
@@ -279,8 +281,9 @@ class StringRef : public StringRefBase {
/**
* Get the char at the given index.
*/
- char operator[](uint index) const
+ char operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return data_[index];
}
@@ -297,7 +300,7 @@ inline std::ostream &operator<<(std::ostream &stream, StringRef ref)
inline std::ostream &operator<<(std::ostream &stream, StringRefNull ref)
{
- stream << std::string(ref.data(), ref.size());
+ stream << std::string(ref.data(), (size_t)ref.size());
return stream;
}
@@ -315,7 +318,7 @@ inline bool operator==(StringRef a, StringRef b)
if (a.size() != b.size()) {
return false;
}
- return STREQLEN(a.data(), b.data(), a.size());
+ return STREQLEN(a.data(), b.data(), (size_t)a.size());
}
inline bool operator!=(StringRef a, StringRef b)
@@ -331,7 +334,7 @@ inline bool StringRefBase::startswith(StringRef prefix) const
if (size_ < prefix.size_) {
return false;
}
- for (uint i = 0; i < prefix.size_; i++) {
+ for (int64_t i = 0; i < prefix.size_; i++) {
if (data_[i] != prefix.data_[i]) {
return false;
}
@@ -347,8 +350,8 @@ inline bool StringRefBase::endswith(StringRef suffix) const
if (size_ < suffix.size_) {
return false;
}
- const uint offset = size_ - suffix.size_;
- for (uint i = 0; i < suffix.size_; i++) {
+ const int64_t offset = size_ - suffix.size_;
+ for (int64_t i = 0; i < suffix.size_; i++) {
if (data_[offset + i] != suffix.data_[i]) {
return false;
}
@@ -359,8 +362,10 @@ inline bool StringRefBase::endswith(StringRef suffix) const
/**
* Return a new #StringRef containing only a sub-string of the original string.
*/
-inline StringRef StringRefBase::substr(const uint start, const uint size) const
+inline StringRef StringRefBase::substr(const int64_t start, const int64_t size) const
{
+ BLI_assert(size >= 0);
+ BLI_assert(start >= 0);
BLI_assert(start + size <= size_);
return StringRef(data_ + start, size);
}
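A short usage sketch of the methods above; all of them scan the referenced bytes in O(n) and never allocate (string_ref_demo is an illustrative name):

#include "BLI_string_ref.hh"

static void string_ref_demo()
{
  blender::StringRef ref("blender/blenlib");
  const bool has_prefix = ref.startswith("blender"); /* true */
  const bool has_suffix = ref.endswith("lib");       /* true */
  blender::StringRef name = ref.substr(8, 7);        /* "blenlib" */
  (void)has_prefix; (void)has_suffix; (void)name;
}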
diff --git a/source/blender/blenlib/BLI_vector.hh b/source/blender/blenlib/BLI_vector.hh
index 1fe38464ad0..66de8d2fbd1 100644
--- a/source/blender/blenlib/BLI_vector.hh
+++ b/source/blender/blenlib/BLI_vector.hh
@@ -70,7 +70,7 @@ template<
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this vector. Should rarely be changed, except when you don't want that
* MEM_* is used internally.
@@ -100,8 +100,8 @@ class Vector {
* annoying. Knowing the size of a vector is often quite essential when debugging some code.
*/
#ifndef NDEBUG
- uint debug_size_;
-# define UPDATE_VECTOR_SIZE(ptr) (ptr)->debug_size_ = (uint)((ptr)->end_ - (ptr)->begin_)
+ int64_t debug_size_;
+# define UPDATE_VECTOR_SIZE(ptr) (ptr)->debug_size_ = (int64_t)((ptr)->end_ - (ptr)->begin_)
#else
# define UPDATE_VECTOR_SIZE(ptr) ((void)0)
#endif
@@ -110,7 +110,7 @@ class Vector {
* Be a friend with other vector instantiations. This is necessary to implement some memory
* management logic.
*/
- template<typename OtherT, uint OtherInlineBufferCapacity, typename OtherAllocator>
+ template<typename OtherT, int64_t OtherInlineBufferCapacity, typename OtherAllocator>
friend class Vector;
public:
@@ -131,7 +131,7 @@ class Vector {
* The elements will be default constructed.
* If T is trivially constructible, the elements in the vector are not touched.
*/
- explicit Vector(uint size) : Vector()
+ explicit Vector(int64_t size) : Vector()
{
this->resize(size);
}
@@ -139,7 +139,7 @@ class Vector {
/**
* Create a vector filled with a specific value.
*/
- Vector(uint size, const T &value) : Vector()
+ Vector(int64_t size, const T &value) : Vector()
{
this->resize(size, value);
}
@@ -150,7 +150,7 @@ class Vector {
template<typename U, typename std::enable_if_t<std::is_convertible_v<U, T>> * = nullptr>
Vector(Span<U> values, Allocator allocator = {}) : Vector(allocator)
{
- const uint size = values.size();
+ const int64_t size = values.size();
this->reserve(size);
this->increase_size_by_unchecked(size);
uninitialized_convert_n<U, T>(values.data(), size, begin_);
@@ -217,7 +217,7 @@ class Vector {
* Create a copy of a vector with a different InlineBufferCapacity. This needs to be handled
* separately, so that the other one is a valid copy constructor.
*/
- template<uint OtherInlineBufferCapacity>
+ template<int64_t OtherInlineBufferCapacity>
Vector(const Vector<T, OtherInlineBufferCapacity, Allocator> &other)
: Vector(other.as_span(), other.allocator_)
{
@@ -227,11 +227,11 @@ class Vector {
* Steal the elements from another vector. This does not do an allocation. The other vector will
* have zero elements afterwards.
*/
- template<uint OtherInlineBufferCapacity>
+ template<int64_t OtherInlineBufferCapacity>
Vector(Vector<T, OtherInlineBufferCapacity, Allocator> &&other) noexcept
: allocator_(other.allocator_)
{
- const uint size = other.size();
+ const int64_t size = other.size();
if (other.is_inline()) {
if (size <= InlineBufferCapacity) {
@@ -243,8 +243,8 @@ class Vector {
}
else {
/* Relocate from inline buffer to newly allocated buffer. */
- const uint capacity = size;
- begin_ = (T *)allocator_.allocate(sizeof(T) * capacity, alignof(T), AT);
+ const int64_t capacity = size;
+ begin_ = (T *)allocator_.allocate(sizeof(T) * (size_t)capacity, alignof(T), AT);
end_ = begin_ + size;
capacity_end_ = begin_ + capacity;
uninitialized_relocate_n(other.begin_, size, begin_);
@@ -302,14 +302,16 @@ class Vector {
* Get the value at the given index. This invokes undefined behavior when the index is out of
* bounds.
*/
- const T &operator[](uint index) const
+ const T &operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
return begin_[index];
}
- T &operator[](uint index)
+ T &operator[](int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
return begin_[index];
}
@@ -351,7 +353,7 @@ class Vector {
* This won't necessarily make an allocation when min_capacity is small.
* The actual size of the vector does not change.
*/
- void reserve(const uint min_capacity)
+ void reserve(const int64_t min_capacity)
{
if (min_capacity > this->capacity()) {
this->realloc_to_at_least(min_capacity);
@@ -364,9 +366,10 @@ class Vector {
* destructed. If new_size is larger than the old size, the new elements at the end are default
* constructed. If T is trivially constructible, the memory is not touched by this function.
*/
- void resize(const uint new_size)
+ void resize(const int64_t new_size)
{
- const uint old_size = this->size();
+ BLI_assert(new_size >= 0);
+ const int64_t old_size = this->size();
if (new_size > old_size) {
this->reserve(new_size);
default_construct_n(begin_ + old_size, new_size - old_size);
@@ -384,9 +387,10 @@ class Vector {
* destructed. If new_size is larger than the old size, the new elements will be copy constructed
* from the given value.
*/
- void resize(const uint new_size, const T &value)
+ void resize(const int64_t new_size, const T &value)
{
- const uint old_size = this->size();
+ BLI_assert(new_size >= 0);
+ const int64_t old_size = this->size();
if (new_size > old_size) {
this->reserve(new_size);
uninitialized_fill_n(begin_ + old_size, new_size - old_size, value);
@@ -447,9 +451,9 @@ class Vector {
* Append the value to the vector and return the index that can be used to access the newly
* added value.
*/
- uint append_and_get_index(const T &value)
+ int64_t append_and_get_index(const T &value)
{
- const uint index = this->size();
+ const int64_t index = this->size();
this->append(value);
return index;
}
@@ -490,8 +494,9 @@ class Vector {
* Insert the same element n times at the end of the vector.
* This might result in a reallocation internally.
*/
- void append_n_times(const T &value, const uint n)
+ void append_n_times(const T &value, const int64_t n)
{
+ BLI_assert(n >= 0);
this->reserve(this->size() + n);
blender::uninitialized_fill_n(end_, n, value);
this->increase_size_by_unchecked(n);
@@ -503,7 +508,7 @@ class Vector {
* useful when you want to call constructors in the vector yourself. This should only be done in
* very rare cases and has to be justified every time.
*/
- void increase_size_by_unchecked(const uint n)
+ void increase_size_by_unchecked(const int64_t n)
{
BLI_assert(end_ + n <= capacity_end_);
end_ += n;
@@ -519,7 +524,7 @@ class Vector {
{
this->extend(array.data(), array.size());
}
- void extend(const T *start, uint amount)
+ void extend(const T *start, int64_t amount)
{
this->reserve(this->size() + amount);
this->extend_unchecked(start, amount);
@@ -545,8 +550,9 @@ class Vector {
{
this->extend_unchecked(array.data(), array.size());
}
- void extend_unchecked(const T *start, uint amount)
+ void extend_unchecked(const T *start, int64_t amount)
{
+ BLI_assert(amount >= 0);
BLI_assert(begin_ + amount <= capacity_end_);
blender::uninitialized_copy_n(start, amount, end_);
end_ += amount;
@@ -569,28 +575,12 @@ class Vector {
}
/**
- * Replace every element with a new value.
- */
- void fill(const T &value)
- {
- initialized_fill_n(begin_, this->size(), value);
- }
-
- /**
- * Copy the value to all positions specified by the indices array.
- */
- void fill_indices(Span<uint> indices, const T &value)
- {
- MutableSpan<T>(*this).fill_indices(indices, value);
- }
-
- /**
* Return how many values are currently stored in the vector.
*/
- uint size() const
+ int64_t size() const
{
- BLI_assert(debug_size_ == (uint)(end_ - begin_));
- return (uint)(end_ - begin_);
+ BLI_assert(debug_size_ == (int64_t)(end_ - begin_));
+ return (int64_t)(end_ - begin_);
}
/**
@@ -635,8 +625,9 @@ class Vector {
* Delete any element in the vector. The empty space will be filled by the previously last
* element. This takes O(1) time.
*/
- void remove_and_reorder(const uint index)
+ void remove_and_reorder(const int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
T *element_to_remove = begin_ + index;
end_--;
@@ -653,8 +644,8 @@ class Vector {
*/
void remove_first_occurrence_and_reorder(const T &value)
{
- const uint index = this->first_index_of(value);
- this->remove_and_reorder((uint)index);
+ const int64_t index = this->first_index_of(value);
+ this->remove_and_reorder(index);
}
/**
@@ -664,11 +655,12 @@ class Vector {
*
* This is similar to std::vector::erase.
*/
- void remove(const uint index)
+ void remove(const int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
- const uint last_index = this->size() - 1;
- for (uint i = index; i < last_index; i++) {
+ const int64_t last_index = this->size() - 1;
+ for (int64_t i = index; i < last_index; i++) {
begin_[i] = std::move(begin_[i + 1]);
}
begin_[last_index].~T();
@@ -680,11 +672,11 @@ class Vector {
* Do a linear search to find the value in the vector.
* When found, return the first index, otherwise return -1.
*/
- int first_index_of_try(const T &value) const
+ int64_t first_index_of_try(const T &value) const
{
for (const T *current = begin_; current != end_; current++) {
if (*current == value) {
- return (int)(current - begin_);
+ return (int64_t)(current - begin_);
}
}
return -1;
@@ -694,11 +686,11 @@ class Vector {
* Do a linear search to find the value in the vector and return the found index. This invokes
* undefined behavior when the value is not in the vector.
*/
- uint first_index_of(const T &value) const
+ int64_t first_index_of(const T &value) const
{
- const int index = this->first_index_of_try(value);
+ const int64_t index = this->first_index_of_try(value);
BLI_assert(index >= 0);
- return (uint)index;
+ return index;
}
/**
@@ -748,9 +740,9 @@ class Vector {
* Get the current capacity of the vector, i.e. the maximum number of elements the vector can
* hold, before it has to reallocate.
*/
- uint capacity() const
+ int64_t capacity() const
{
- return (uint)(capacity_end_ - begin_);
+ return (int64_t)(capacity_end_ - begin_);
}
/**
@@ -758,7 +750,7 @@ class Vector {
* Obviously, this should only be used when you actually need the index in the loop.
*
* Example:
- * for (uint i : myvector.index_range()) {
+ * for (int64_t i : myvector.index_range()) {
* do_something(i, my_vector[i]);
* }
*/
@@ -796,7 +788,7 @@ class Vector {
}
}
- BLI_NOINLINE void realloc_to_at_least(const uint min_capacity)
+ BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity)
{
if (this->capacity() >= min_capacity) {
return;
@@ -804,12 +796,12 @@ class Vector {
/* At least double the size of the previous allocation. Otherwise consecutive calls to grow can
* cause a reallocation every time even though min_capacity only increments. */
- const uint min_new_capacity = this->capacity() * 2;
+ const int64_t min_new_capacity = this->capacity() * 2;
- const uint new_capacity = std::max(min_capacity, min_new_capacity);
- const uint size = this->size();
+ const int64_t new_capacity = std::max(min_capacity, min_new_capacity);
+ const int64_t size = this->size();
- T *new_array = (T *)allocator_.allocate(new_capacity * (uint)sizeof(T), alignof(T), AT);
+ T *new_array = (T *)allocator_.allocate((size_t)new_capacity * sizeof(T), alignof(T), AT);
uninitialized_relocate_n(begin_, size, new_array);
if (!this->is_inline()) {
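The two removal flavors trade ordering for speed: remove() shifts every later element down in O(n), while remove_and_reorder() swaps the last element into the hole in O(1). A hedged usage sketch (vector_demo is an illustrative name):

#include "BLI_vector.hh"

static void vector_demo()
{
  blender::Vector<int> vec;
  vec.append(1);
  vec.append(2);
  vec.append(3);
  vec.append(4);

  vec.remove_and_reorder(0); /* O(1), breaks order: now 4, 2, 3 */
  vec.remove(1);             /* O(n), keeps order:  now 4, 3 */
}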
diff --git a/source/blender/blenlib/BLI_vector_set.hh b/source/blender/blenlib/BLI_vector_set.hh
index dd1b17653c0..7573b77cdf7 100644
--- a/source/blender/blenlib/BLI_vector_set.hh
+++ b/source/blender/blenlib/BLI_vector_set.hh
@@ -106,20 +106,20 @@ class VectorSet {
* Slots are either empty, occupied or removed. The number of occupied slots can be computed by
* subtracting the removed slots from the occupied-and-removed slots.
*/
- uint32_t removed_slots_;
- uint32_t occupied_and_removed_slots_;
+ int64_t removed_slots_;
+ int64_t occupied_and_removed_slots_;
/**
* The maximum number of slots that can be used (either occupied or removed) until the set has to
* grow. This is the total number of slots times the max load factor.
*/
- uint32_t usable_slots_;
+ int64_t usable_slots_;
/**
* The number of slots minus one. This is a bit mask that can be used to turn any integer into a
* valid slot index efficiently.
*/
- uint32_t slot_mask_;
+ uint64_t slot_mask_;
/** This is called to hash incoming keys. */
Hash hash_;
@@ -238,8 +238,9 @@ class VectorSet {
/**
* Get the key stored at the given position in the vector.
*/
- const Key &operator[](const uint32_t index) const
+ const Key &operator[](const int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
return keys_[index];
}
@@ -362,11 +363,11 @@ class VectorSet {
* Return the location of the key in the vector. It is assumed that the key is in the vector
* set. If this is not necessarily the case, use `index_of_try`.
*/
- uint32_t index_of(const Key &key) const
+ int64_t index_of(const Key &key) const
{
return this->index_of_as(key);
}
- template<typename ForwardKey> uint32_t index_of_as(const ForwardKey &key) const
+ template<typename ForwardKey> int64_t index_of_as(const ForwardKey &key) const
{
return this->index_of__impl(key, hash_(key));
}
@@ -375,11 +376,11 @@ class VectorSet {
* Return the location of the key in the vector. If the key is not in the set, -1 is returned.
* If you know for sure that the key is in the set, it is better to use `index_of` instead.
*/
- int32_t index_of_try(const Key &key) const
+ int64_t index_of_try(const Key &key) const
{
- return (int32_t)this->index_of_try_as(key);
+ return this->index_of_try_as(key);
}
- template<typename ForwardKey> int32_t index_of_try_as(const ForwardKey &key) const
+ template<typename ForwardKey> int64_t index_of_try_as(const ForwardKey &key) const
{
return this->index_of_try__impl(key, hash_(key));
}
@@ -414,7 +415,7 @@ class VectorSet {
/**
* Returns the number of keys stored in the vector set.
*/
- uint32_t size() const
+ int64_t size() const
{
return occupied_and_removed_slots_ - removed_slots_;
}
@@ -430,7 +431,7 @@ class VectorSet {
/**
* Returns the number of available slots. This is mostly for debugging purposes.
*/
- uint32_t capacity() const
+ int64_t capacity() const
{
return slots_.size();
}
@@ -438,7 +439,7 @@ class VectorSet {
/**
* Returns the number of removed slots in the set. This is mostly for debugging purposes.
*/
- uint32_t removed_amount() const
+ int64_t removed_amount() const
{
return removed_slots_;
}
@@ -446,7 +447,7 @@ class VectorSet {
/**
* Returns the bytes required per element. This is mostly for debugging purposes.
*/
- uint32_t size_per_element() const
+ int64_t size_per_element() const
{
return sizeof(Slot) + sizeof(Key);
}
@@ -455,15 +456,15 @@ class VectorSet {
* Returns the approximate memory requirements of the set in bytes. This is more correct for
* larger sets.
*/
- uint32_t size_in_bytes() const
+ int64_t size_in_bytes() const
{
- return (uint32_t)(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
+ return (int64_t)(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
}
/**
* Potentially resize the vector set such that it can hold n elements without doing another grow.
*/
- void reserve(const uint32_t n)
+ void reserve(const int64_t n)
{
if (usable_slots_ < n) {
this->realloc_and_reinsert(n);
@@ -474,18 +475,19 @@ class VectorSet {
* Get the number of collisions that the probing strategy has to go through to find the key or
* determine that it is not in the set.
*/
- uint32_t count_collisions(const Key &key) const
+ int64_t count_collisions(const Key &key) const
{
return this->count_collisions__impl(key, hash_(key));
}
private:
- BLI_NOINLINE void realloc_and_reinsert(const uint32_t min_usable_slots)
+ BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots)
{
- uint32_t total_slots, usable_slots;
+ int64_t total_slots, usable_slots;
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
- const uint32_t new_slot_mask = total_slots - 1;
+ BLI_assert(total_slots >= 1);
+ const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
/* Optimize the case when the set was empty beforehand. We can avoid some copies here. */
if (this->size() == 0) {
@@ -524,10 +526,10 @@ class VectorSet {
void add_after_grow_and_destruct_old(Slot &old_slot,
SlotArray &new_slots,
- const uint32_t new_slot_mask)
+ const uint64_t new_slot_mask)
{
const Key &key = keys_[old_slot.index()];
- const uint32_t hash = old_slot.get_hash(key, Hash());
+ const uint64_t hash = old_slot.get_hash(key, Hash());
SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) {
Slot &slot = new_slots[slot_index];
@@ -540,7 +542,7 @@ class VectorSet {
}
template<typename ForwardKey>
- bool contains__impl(const ForwardKey &key, const uint32_t hash) const
+ bool contains__impl(const ForwardKey &key, const uint64_t hash) const
{
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -553,7 +555,7 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint64_t hash)
{
BLI_assert(!this->contains_as(key));
@@ -561,7 +563,7 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
- uint32_t index = this->size();
+ int64_t index = this->size();
new (keys_ + index) Key(std::forward<ForwardKey>(key));
slot.occupy(index, hash);
occupied_and_removed_slots_++;
@@ -571,13 +573,13 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint64_t hash)
{
this->ensure_can_add();
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
- uint32_t index = this->size();
+ int64_t index = this->size();
new (keys_ + index) Key(std::forward<ForwardKey>(key));
occupied_and_removed_slots_++;
slot.occupy(index, hash);
@@ -591,7 +593,7 @@ class VectorSet {
}
template<typename ForwardKey>
- uint32_t index_of__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t index_of__impl(const ForwardKey &key, const uint64_t hash) const
{
BLI_assert(this->contains_as(key));
@@ -604,11 +606,11 @@ class VectorSet {
}
template<typename ForwardKey>
- int32_t index_of_try__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t index_of_try__impl(const ForwardKey &key, const uint64_t hash) const
{
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash, keys_)) {
- return (int32_t)slot.index();
+ return slot.index();
}
if (slot.is_empty()) {
return -1;
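Widening the return type from int32_t to int64_t removes the cast on the found
path while keeping -1 available as the "not found" sentinel, which is exactly
what the switch to signed indices buys. A usage sketch, assuming the public
index_of_try() wrapper that pairs with index_of_try__impl above:

    #include <string>
    #include "BLI_vector_set.hh"

    static void lookup_example()
    {
      blender::VectorSet<std::string> names;
      names.add("Cube");
      const int64_t i = names.index_of_try("Suzanne");
      if (i == -1) {
        /* Key is absent; contrast with index_of(), which would assert. */
      }
    }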
@@ -621,10 +623,10 @@ class VectorSet {
{
BLI_assert(this->size() > 0);
- const uint32_t index_to_pop = this->size() - 1;
+ const int64_t index_to_pop = this->size() - 1;
Key key = std::move(keys_[index_to_pop]);
keys_[index_to_pop].~Key();
- const uint32_t hash = hash_(key);
+ const uint64_t hash = hash_(key);
removed_slots_++;
@@ -637,7 +639,7 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint32_t hash)
+ template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint64_t hash)
{
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash, keys_)) {
@@ -652,7 +654,7 @@ class VectorSet {
}
template<typename ForwardKey>
- void remove_contained__impl(const ForwardKey &key, const uint32_t hash)
+ void remove_contained__impl(const ForwardKey &key, const uint64_t hash)
{
BLI_assert(this->contains_as(key));
@@ -667,9 +669,9 @@ class VectorSet {
void remove_key_internal(Slot &slot)
{
- uint32_t index_to_remove = slot.index();
- uint32_t size = this->size();
- uint32_t last_element_index = size - 1;
+ int64_t index_to_remove = slot.index();
+ int64_t size = this->size();
+ int64_t last_element_index = size - 1;
if (index_to_remove < last_element_index) {
keys_[index_to_remove] = std::move(keys_[last_element_index]);
@@ -682,9 +684,9 @@ class VectorSet {
return;
}
- void update_slot_index(const Key &key, const uint32_t old_index, const uint32_t new_index)
+ void update_slot_index(const Key &key, const int64_t old_index, const int64_t new_index)
{
- uint32_t hash = hash_(key);
+ uint64_t hash = hash_(key);
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.has_index(old_index)) {
slot.update_index(new_index);
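remove_key_internal() is a swap-remove on the dense key array: the last key is
moved into the hole left by the removed key, and update_slot_index() then walks
the moved key's probe chain to repair the single slot that still points at the
old position. Removal stays O(1) at the cost of not preserving insertion order.
A self-contained sketch of the swap-remove half on a plain vector:

    #include <cstdint>
    #include <utility>
    #include <vector>

    /* O(1) removal that does not preserve element order. VectorSet additionally
     * has to repair the hash slot that indexed the moved element. */
    template<typename T> static void swap_remove(std::vector<T> &values, const int64_t index)
    {
      const int64_t last = (int64_t)values.size() - 1;
      if (index < last) {
        values[index] = std::move(values[last]);
      }
      values.pop_back();
    }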
@@ -695,9 +697,9 @@ class VectorSet {
}
template<typename ForwardKey>
- uint32_t count_collisions__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t count_collisions__impl(const ForwardKey &key, const uint64_t hash) const
{
- uint32_t collisions = 0;
+ int64_t collisions = 0;
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash, keys_)) {
@@ -719,9 +721,9 @@ class VectorSet {
}
}
- Key *allocate_keys_array(const uint32_t size)
+ Key *allocate_keys_array(const int64_t size)
{
- return (Key *)slots_.allocator().allocate((uint32_t)sizeof(Key) * size, alignof(Key), AT);
+ return (Key *)slots_.allocator().allocate(sizeof(Key) * (size_t)size, alignof(Key), AT);
}
void deallocate_keys_array(Key *keys)
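The interesting part of allocate_keys_array() is where the cast moved: the
multiplication now happens in size_t, whereas the old code computed it in
uint32_t, which silently wraps once the key array exceeds 4 GiB. A minimal
illustration of the difference (the sizes are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      const int64_t size = 300000000; /* 300 million keys. */
      const int64_t key_size = 16;    /* Stand-in for sizeof(Key). */

      /* Old style: the product wraps modulo 2^32. */
      const uint32_t wrapped = (uint32_t)key_size * (uint32_t)size;
      /* New style: exact on 64-bit platforms. */
      const size_t exact = (size_t)key_size * (size_t)size;

      printf("wrapped: %u, exact: %zu\n", wrapped, exact);
      return 0;
    }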
diff --git a/source/blender/blenlib/BLI_vector_set_slots.hh b/source/blender/blenlib/BLI_vector_set_slots.hh
index e43b892b3f7..49e6d4daedb 100644
--- a/source/blender/blenlib/BLI_vector_set_slots.hh
+++ b/source/blender/blenlib/BLI_vector_set_slots.hh
@@ -53,7 +53,7 @@ template<typename Key> class SimpleVectorSetSlot {
/**
* After the default constructor has run, the slot has to be in the empty state.
*/
- int32_t state_ = s_is_empty;
+ int64_t state_ = s_is_empty;
public:
/**
@@ -75,10 +75,10 @@ template<typename Key> class SimpleVectorSetSlot {
/**
* Return the stored index. It is assumed that the slot is occupied.
*/
- uint32_t index() const
+ int64_t index() const
{
BLI_assert(this->is_occupied());
- return (uint32_t)state_;
+ return state_;
}
/**
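SimpleVectorSetSlot packs its entire state into one signed integer: non-negative
values are indices into the key array, and negative sentinels mark the empty and
removed states. Widening state_ to int64_t is what lets index() return the value
with no cast and no truncation once a set can exceed 2^31 keys. A sketch of the
encoding (the sentinel values mirror the pattern, not necessarily the exact
constants):

    #include <cstdint>

    class ToySlot {
     private:
      static constexpr int64_t s_is_empty = -1;
      static constexpr int64_t s_is_removed = -2;
      int64_t state_ = s_is_empty;

     public:
      bool is_occupied() const
      {
        return state_ >= 0; /* Any non-negative state is a key index. */
      }
      void occupy(const int64_t index)
      {
        state_ = index;
      }
      int64_t index() const
      {
        return state_;
      }
    };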
@@ -88,7 +88,7 @@ template<typename Key> class SimpleVectorSetSlot {
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key,
const IsEqual &is_equal,
- uint32_t UNUSED(hash),
+ uint64_t UNUSED(hash),
const Key *keys) const
{
if (state_ >= 0) {
@@ -102,7 +102,7 @@ template<typename Key> class SimpleVectorSetSlot {
* we can avoid a comparison with the state, since we know the slot is occupied. For this
* specific slot implementation, this does not make a difference.
*/
- void relocate_occupied_here(SimpleVectorSetSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(SimpleVectorSetSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -113,20 +113,20 @@ template<typename Key> class SimpleVectorSetSlot {
* Change the state of this slot from empty/removed to occupied. The hash can be used by other
* slot implementations.
*/
- void occupy(uint32_t index, uint32_t UNUSED(hash))
+ void occupy(int64_t index, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
- state_ = (int32_t)index;
+ state_ = index;
}
/**
* The key has changed its position in the vector, so the index has to be updated. This method
* can assume that the slot is currently occupied.
*/
- void update_index(uint32_t index)
+ void update_index(int64_t index)
{
BLI_assert(this->is_occupied());
- state_ = (int32_t)index;
+ state_ = index;
}
/**
@@ -141,16 +141,16 @@ template<typename Key> class SimpleVectorSetSlot {
/**
* Return true if this slot is currently occupied and its corresponding key has the given index.
*/
- bool has_index(uint32_t index) const
+ bool has_index(int64_t index) const
{
- return (uint32_t)state_ == index;
+ return state_ == index;
}
/**
* Return the hash of the currently stored key. In this simple set slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
- template<typename Hash> uint32_t get_hash(const Key &key, const Hash &hash) const
+ template<typename Hash> uint64_t get_hash(const Key &key, const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(key);
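Note the trade-off this simple slot makes: get_hash() recomputes hash(key) on
every grow rather than caching the now 64-bit hash in the slot, which keeps the
slot at a single int64_t. A hypothetical caching variant (not a slot type from
this diff) would trade eight extra bytes per slot for cheaper rehashing:

    #include <cstdint>

    /* Hypothetical: stores the hash so growing never calls the hash function. */
    class ToyHashedSlot {
     private:
      int64_t state_ = -1; /* -1: empty; otherwise a key index. */
      uint64_t hash_ = 0;

     public:
      void occupy(const int64_t index, const uint64_t hash)
      {
        state_ = index;
        hash_ = hash;
      }
      template<typename Key, typename Hash>
      uint64_t get_hash(const Key & /*key*/, const Hash & /*hash*/) const
      {
        return hash_; /* Cached: no rehash on grow. */
      }
    };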
diff --git a/source/blender/blenlib/intern/BLI_index_range.cc b/source/blender/blenlib/intern/BLI_index_range.cc
index 9fa19143f91..a906416b491 100644
--- a/source/blender/blenlib/intern/BLI_index_range.cc
+++ b/source/blender/blenlib/intern/BLI_index_range.cc
@@ -24,28 +24,28 @@
namespace blender {
-static Vector<Array<uint, 0, RawAllocator>, 1, RawAllocator> arrays;
-static uint current_array_size = 0;
-static uint *current_array = nullptr;
+static Vector<Array<int64_t, 0, RawAllocator>, 1, RawAllocator> arrays;
+static int64_t current_array_size = 0;
+static int64_t *current_array = nullptr;
static std::mutex current_array_mutex;
-Span<uint> IndexRange::as_span() const
+Span<int64_t> IndexRange::as_span() const
{
- uint min_required_size = start_ + size_;
+ int64_t min_required_size = start_ + size_;
if (min_required_size <= current_array_size) {
- return Span<uint>(current_array + start_, size_);
+ return Span<int64_t>(current_array + start_, size_);
}
std::lock_guard<std::mutex> lock(current_array_mutex);
if (min_required_size <= current_array_size) {
- return Span<uint>(current_array + start_, size_);
+ return Span<int64_t>(current_array + start_, size_);
}
- uint new_size = std::max<uint>(1000, power_of_2_max_u(min_required_size));
- Array<uint, 0, RawAllocator> new_array(new_size);
- for (uint i = 0; i < new_size; i++) {
+ int64_t new_size = std::max<int64_t>(1000, power_of_2_max_u(min_required_size));
+ Array<int64_t, 0, RawAllocator> new_array(new_size);
+ for (int64_t i = 0; i < new_size; i++) {
new_array[i] = i;
}
arrays.append(std::move(new_array));
@@ -54,7 +54,7 @@ Span<uint> IndexRange::as_span() const
std::atomic_thread_fence(std::memory_order_seq_cst);
current_array_size = new_size;
- return Span<uint>(current_array + start_, size_);
+ return Span<int64_t>(current_array + start_, size_);
}
} // namespace blender
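For context, IndexRange::as_span() serves every range out of a process-wide
identity array (current_array[i] == i) that only ever grows. The pattern is
double-checked locking: test the size without the lock, take the mutex, test
again, and only then build a bigger array; the seq_cst fence publishes the new
pointer before the new size, so lock-free readers never see a size that outruns
the data. A stripped-down sketch of the pattern, assuming a plain new[] in place
of RawAllocator (old arrays are intentionally kept alive, here by leaking):

    #include <algorithm>
    #include <atomic>
    #include <cstdint>
    #include <mutex>

    static int64_t current_size = 0;
    static int64_t *current_data = nullptr;
    static std::mutex growth_mutex;

    /* Return an identity array holding at least min_size elements. */
    static const int64_t *identity_array(const int64_t min_size)
    {
      if (min_size <= current_size) {
        return current_data; /* Fast path: no lock taken. */
      }
      std::lock_guard<std::mutex> lock(growth_mutex);
      if (min_size <= current_size) {
        return current_data; /* Another thread grew the array meanwhile. */
      }
      const int64_t new_size = std::max<int64_t>(1000, min_size);
      int64_t *new_data = new int64_t[new_size]; /* Never freed, by design. */
      for (int64_t i = 0; i < new_size; i++) {
        new_data[i] = i;
      }
      current_data = new_data;
      /* Publish the data before the size. */
      std::atomic_thread_fence(std::memory_order_seq_cst);
      current_size = new_size;
      return new_data;
    }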
diff --git a/source/blender/blenlib/intern/dot_export.cc b/source/blender/blenlib/intern/dot_export.cc
index 0f60ea6fd1b..48b6dc826d0 100644
--- a/source/blender/blenlib/intern/dot_export.cc
+++ b/source/blender/blenlib/intern/dot_export.cc
@@ -263,8 +263,8 @@ NodeWithSocketsRef::NodeWithSocketsRef(Node &node,
ss << "</b></td></tr>";
/* Sockets */
- uint socket_max_amount = std::max(input_names.size(), output_names.size());
- for (uint i = 0; i < socket_max_amount; i++) {
+ int socket_max_amount = std::max(input_names.size(), output_names.size());
+ for (int i = 0; i < socket_max_amount; i++) {
ss << "<tr>";
if (i < input_names.size()) {
StringRef name = input_names[i];
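A small point worth spelling out here: with Span::size() now returning a signed
type, making the loop counter a plain int keeps the i < input_names.size()
comparisons signed-versus-signed, avoiding sign-compare warnings and surprise
wraparound; socket counts are far too small for the narrower int to matter.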
diff --git a/source/blender/blenlib/intern/rand.cc b/source/blender/blenlib/intern/rand.cc
index 279682ea171..9bafc422db5 100644
--- a/source/blender/blenlib/intern/rand.cc
+++ b/source/blender/blenlib/intern/rand.cc
@@ -93,8 +93,7 @@ void BLI_rng_srandom(RNG *rng, unsigned int seed)
void BLI_rng_get_char_n(RNG *rng, char *bytes, size_t bytes_len)
{
- BLI_assert(bytes_len > UINT32_MAX);
- rng->rng.get_bytes(blender::MutableSpan(bytes, (uint32_t)bytes_len));
+ rng->rng.get_bytes(blender::MutableSpan(bytes, (int64_t)bytes_len));
}
int BLI_rng_get_int(RNG *rng)
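Worth noting about the deleted line: the old assert was inverted. It read
BLI_assert(bytes_len > UINT32_MAX), which fires for every reasonable size and
passes only for the oversized inputs it presumably meant to reject (it looks
like it intended <=). Since MutableSpan now takes an int64_t size, the guard is
moot on 64-bit platforms and the commit drops it rather than fixing it.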
@@ -428,11 +427,11 @@ float2 RandomNumberGenerator::get_triangle_sample(float2 v1, float2 v2, float2 v
void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
{
- constexpr uint mask_bytes = 2;
- constexpr uint rand_stride = (uint)sizeof(x_) - mask_bytes;
+ constexpr int64_t mask_bytes = 2;
+ constexpr int64_t rand_stride = (int64_t)sizeof(x_) - mask_bytes;
- uint last_len = 0;
- uint trim_len = r_bytes.size();
+ int64_t last_len = 0;
+ int64_t trim_len = r_bytes.size();
if (trim_len > rand_stride) {
last_len = trim_len % rand_stride;
@@ -444,13 +443,13 @@ void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
}
const char *data_src = (const char *)&x_;
- uint i = 0;
+ int64_t i = 0;
while (i != trim_len) {
BLI_assert(i < trim_len);
#ifdef __BIG_ENDIAN__
- for (uint j = (rand_stride + mask_bytes) - 1; j != mask_bytes - 1; j--)
+ for (int64_t j = (rand_stride + mask_bytes) - 1; j != mask_bytes - 1; j--)
#else
- for (uint j = 0; j != rand_stride; j++)
+ for (int64_t j = 0; j != rand_stride; j++)
#endif
{
r_bytes[i++] = data_src[j];
@@ -458,7 +457,7 @@ void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
this->step();
}
if (last_len) {
- for (uint j = 0; j != last_len; j++) {
+ for (int64_t j = 0; j != last_len; j++) {
r_bytes[i++] = data_src[j];
}
}
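The loop structure survives the retype unchanged: each step() refreshes the
generator state, the top mask_bytes (2) bytes are skipped because this
drand48-style generator keeps only 48 bits of state (the high two bytes are
always zero), and the remaining rand_stride bytes are copied out per step, with
a final partial copy of last_len bytes. A usage sketch, assuming the
RandomNumberGenerator interface from BLI_rand.hh:

    #include "BLI_rand.hh"
    #include "BLI_span.hh"

    static void fill_buffer_example()
    {
      blender::RandomNumberGenerator rng(42); /* Seed constructor assumed. */
      char buffer[37]; /* Deliberately not a multiple of the stride. */
      rng.get_bytes(blender::MutableSpan<char>(buffer, (int64_t)sizeof(buffer)));
      /* buffer now holds 37 pseudo-random bytes; the tail shorter than
       * rand_stride is filled by the last_len branch above. */
    }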