git.blender.org/blender.git
Diffstat (limited to 'source')

 source/blender/blenlib/BLI_allocator.hh | 2
 source/blender/blenlib/BLI_array.hh | 45
 source/blender/blenlib/BLI_color.hh | 16
 source/blender/blenlib/BLI_disjoint_set.hh | 27
 source/blender/blenlib/BLI_dot_export.hh | 4
 source/blender/blenlib/BLI_float3.hh | 8
 source/blender/blenlib/BLI_float4x4.hh | 6
 source/blender/blenlib/BLI_hash.hh | 62
 source/blender/blenlib/BLI_hash_tables.hh | 101
 source/blender/blenlib/BLI_index_mask.hh | 30
 source/blender/blenlib/BLI_index_range.hh | 64
 source/blender/blenlib/BLI_linear_allocator.hh | 29
 source/blender/blenlib/BLI_listbase_wrapper.hh | 6
 source/blender/blenlib/BLI_map.hh | 95
 source/blender/blenlib/BLI_map_slots.hh | 20
 source/blender/blenlib/BLI_memory_utils.hh | 67
 source/blender/blenlib/BLI_probing_strategies.hh | 58
 source/blender/blenlib/BLI_rand.hh | 2
 source/blender/blenlib/BLI_resource_collector.hh | 2
 source/blender/blenlib/BLI_set.hh | 67
 source/blender/blenlib/BLI_set_slots.hh | 26
 source/blender/blenlib/BLI_span.hh | 89
 source/blender/blenlib/BLI_stack.hh | 16
 source/blender/blenlib/BLI_string_ref.hh | 53
 source/blender/blenlib/BLI_vector.hh | 118
 source/blender/blenlib/BLI_vector_set.hh | 90
 source/blender/blenlib/BLI_vector_set_slots.hh | 24
 source/blender/blenlib/intern/BLI_index_range.cc | 22
 source/blender/blenlib/intern/dot_export.cc | 4
 source/blender/blenlib/intern/rand.cc | 19
 source/blender/depsgraph/intern/builder/deg_builder_cache.cc | 4
 source/blender/depsgraph/intern/builder/deg_builder_cache.h | 2
 source/blender/depsgraph/intern/depsgraph_registry.cc | 2
 source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.cc | 2
 source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.h | 2
 source/blender/depsgraph/intern/node/deg_node_component.cc | 2
 source/blender/depsgraph/intern/node/deg_node_component.h | 2
 source/blender/depsgraph/intern/node/deg_node_id.cc | 2
 source/blender/depsgraph/intern/node/deg_node_id.h | 2
 source/blender/functions/FN_array_spans.hh | 22
 source/blender/functions/FN_attributes_ref.hh | 52
 source/blender/functions/FN_cpp_type.hh | 92
 source/blender/functions/FN_generic_vector_array.hh | 46
 source/blender/functions/FN_multi_function.hh | 10
 source/blender/functions/FN_multi_function_builder.hh | 18
 source/blender/functions/FN_multi_function_data_type.hh | 4
 source/blender/functions/FN_multi_function_network.hh | 70
 source/blender/functions/FN_multi_function_params.hh | 58
 source/blender/functions/FN_multi_function_signature.hh | 12
 source/blender/functions/FN_spans.hh | 40
 source/blender/functions/intern/attributes_ref.cc | 6
 source/blender/functions/intern/multi_function_builder.cc | 10
 source/blender/functions/intern/multi_function_network.cc | 20
 source/blender/functions/intern/multi_function_network_evaluation.cc | 20
 source/blender/functions/intern/multi_function_network_optimization.cc | 46
 source/blender/nodes/NOD_derived_node_tree.hh | 44
 source/blender/nodes/NOD_node_tree_multi_function.hh | 6
 source/blender/nodes/NOD_node_tree_ref.hh | 26
 source/blender/nodes/intern/derived_node_tree.cc | 18
 source/blender/nodes/intern/node_tree_multi_function.cc | 4
 source/blender/nodes/shader/nodes/node_shader_map_range.cc | 4
 source/blender/nodes/shader/nodes/node_shader_sepcombRGB.cc | 2
 source/blender/nodes/shader/nodes/node_shader_sepcombXYZ.cc | 2
 source/blender/nodes/shader/nodes/node_shader_valToRgb.cc | 2
 source/blender/simulation/intern/particle_allocator.cc | 14
 source/blender/simulation/intern/particle_allocator.hh | 16
 source/blender/simulation/intern/particle_function.cc | 14
 source/blender/simulation/intern/particle_function.hh | 8
 source/blender/simulation/intern/simulation_collect_influences.cc | 14
 source/blender/simulation/intern/simulation_solver.cc | 31

 70 files changed, 976 insertions(+), 947 deletions(-)
diff --git a/source/blender/blenlib/BLI_allocator.hh b/source/blender/blenlib/BLI_allocator.hh
index d57703f71bc..ec82e5ab71c 100644
--- a/source/blender/blenlib/BLI_allocator.hh
+++ b/source/blender/blenlib/BLI_allocator.hh
@@ -84,7 +84,7 @@ class RawAllocator {
void *ptr = malloc(size + alignment + sizeof(MemHead));
void *used_ptr = (void *)((uintptr_t)POINTER_OFFSET(ptr, alignment + sizeof(MemHead)) &
~((uintptr_t)alignment - 1));
- uint offset = (uint)((uintptr_t)used_ptr - (uintptr_t)ptr);
+ int offset = (int)((uintptr_t)used_ptr - (uintptr_t)ptr);
BLI_assert(offset >= sizeof(MemHead));
((MemHead *)used_ptr - 1)->offset = (int)offset;
return used_ptr;
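
The hunk above only changes the offset variable's type, but the surrounding trick is worth spelling out: RawAllocator over-allocates, rounds the pointer up to the requested alignment, and stores the offset in a MemHead directly before the returned pointer so that free() can recover the original allocation. A minimal standalone sketch of the same pattern (illustrative names, not Blender's actual code):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>

struct MemHead {
  int offset; /* Distance from the original malloc pointer to the aligned one. */
};

void *aligned_malloc(size_t size, size_t alignment)
{
  assert((alignment & (alignment - 1)) == 0); /* Alignment must be a power of two. */
  /* Over-allocate: worst-case alignment shift plus room for the header. */
  void *ptr = malloc(size + alignment + sizeof(MemHead));
  uintptr_t raw = (uintptr_t)ptr + sizeof(MemHead);
  uintptr_t aligned = (raw + alignment - 1) & ~((uintptr_t)alignment - 1);
  /* Stash the offset just before the aligned block. */
  ((MemHead *)aligned - 1)->offset = (int)(aligned - (uintptr_t)ptr);
  return (void *)aligned;
}

void aligned_free(void *ptr)
{
  /* Recover the original pointer from the header stored just before it. */
  free((char *)ptr - ((MemHead *)ptr - 1)->offset);
}
```
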
diff --git a/source/blender/blenlib/BLI_array.hh b/source/blender/blenlib/BLI_array.hh
index c411fc50f15..c30893f1337 100644
--- a/source/blender/blenlib/BLI_array.hh
+++ b/source/blender/blenlib/BLI_array.hh
@@ -13,6 +13,7 @@
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+
#ifndef __BLI_ARRAY_HH__
#define __BLI_ARRAY_HH__
@@ -56,7 +57,7 @@ template<
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this array. Should rarely be changed, except when you don't want that
* MEM_* functions are used internally.
@@ -68,7 +69,7 @@ class Array {
T *data_;
/** Number of elements in the array. */
- uint size_;
+ int64_t size_;
/** Used for allocations when the inline buffer is too small. */
Allocator allocator_;
@@ -117,7 +118,7 @@ class Array {
* even for non-trivial types. This should not be the default though, because one can easily mess
* up when dealing with uninitialized memory.
*/
- explicit Array(uint size)
+ explicit Array(int64_t size)
{
size_ = size;
data_ = this->get_buffer_for_size(size);
@@ -128,8 +129,9 @@ class Array {
* Create a new array with the given size. All values will be initialized by copying the given
* default.
*/
- Array(uint size, const T &value)
+ Array(int64_t size, const T &value)
{
+ BLI_assert(size >= 0);
size_ = size;
data_ = this->get_buffer_for_size(size);
uninitialized_fill_n(data_, size_, value);
@@ -147,8 +149,9 @@ class Array {
* Usage:
* Array<std::string> my_strings(10, NoInitialization());
*/
- Array(uint size, NoInitialization)
+ Array(int64_t size, NoInitialization)
{
+ BLI_assert(size >= 0);
size_ = size;
data_ = this->get_buffer_for_size(size);
}
@@ -203,14 +206,16 @@ class Array {
return *this;
}
- T &operator[](uint index)
+ T &operator[](int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return data_[index];
}
- const T &operator[](uint index) const
+ const T &operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return data_[index];
}
@@ -250,7 +255,7 @@ class Array {
/**
* Returns the number of elements in the array.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -264,22 +269,6 @@ class Array {
}
/**
- * Copies the value to all indices in the array.
- */
- void fill(const T &value)
- {
- initialized_fill_n(data_, size_, value);
- }
-
- /**
- * Copies the value to the given indices in the array.
- */
- void fill_indices(Span<uint> indices, const T &value)
- {
- MutableSpan<T>(*this).fill_indices(indices, value);
- }
-
- /**
* Get a pointer to the beginning of the array.
*/
const T *data() const
@@ -340,13 +329,13 @@ class Array {
* Get the value of the InlineBufferCapacity template argument. This is the number of elements
* that can be stored without doing an allocation.
*/
- static uint inline_buffer_capacity()
+ static int64_t inline_buffer_capacity()
{
return InlineBufferCapacity;
}
private:
- T *get_buffer_for_size(uint size)
+ T *get_buffer_for_size(int64_t size)
{
if (size <= InlineBufferCapacity) {
return inline_buffer_;
@@ -356,9 +345,9 @@ class Array {
}
}
- T *allocate(uint size)
+ T *allocate(int64_t size)
{
- return (T *)allocator_.allocate(size * sizeof(T), alignof(T), AT);
+ return (T *)allocator_.allocate((size_t)size * sizeof(T), alignof(T), AT);
}
bool uses_inline_buffer() const
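
To make the new signatures concrete, here is a usage sketch of Array based on the constructors shown above (a sketch assuming the documented blenlib API; index_range() is the method mentioned later in this commit's BLI_index_range.hh comments):

```cpp
#include <new>
#include <string>

#include "BLI_array.hh"

void array_example()
{
  /* Sizes and indices are signed 64-bit integers now. */
  blender::Array<int> values(10, 0); /* Ten elements, all initialized to 0. */
  for (int64_t i : values.index_range()) {
    values[i] = (int)i; /* operator[] asserts 0 <= i < size(). */
  }

  /* Defer construction when every element is written before being read. */
  blender::Array<std::string> names(10, blender::NoInitialization());
  for (int64_t i = 0; i < names.size(); i++) {
    new (static_cast<void *>(&names[i])) std::string("name");
  }
}
```
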
diff --git a/source/blender/blenlib/BLI_color.hh b/source/blender/blenlib/BLI_color.hh
index 265013c0013..72caa5b1118 100644
--- a/source/blender/blenlib/BLI_color.hh
+++ b/source/blender/blenlib/BLI_color.hh
@@ -62,12 +62,12 @@ struct Color4f {
return !(a == b);
}
- uint32_t hash() const
+ uint64_t hash() const
{
- uint32_t x1 = *(uint32_t *)&r;
- uint32_t x2 = *(uint32_t *)&g;
- uint32_t x3 = *(uint32_t *)&b;
- uint32_t x4 = *(uint32_t *)&a;
+ uint64_t x1 = *(uint32_t *)&r;
+ uint64_t x2 = *(uint32_t *)&g;
+ uint64_t x3 = *(uint32_t *)&b;
+ uint64_t x4 = *(uint32_t *)&a;
return (x1 * 1283591) ^ (x2 * 850177) ^ (x3 * 735391) ^ (x4 * 442319);
}
};
@@ -119,10 +119,10 @@ struct Color4b {
return !(a == b);
}
- uint32_t hash() const
+ uint64_t hash() const
{
- return ((uint32_t)r * 1283591) ^ ((uint32_t)g * 850177) ^ ((uint32_t)b * 735391) ^
- ((uint32_t)a * 442319);
+ return ((uint64_t)r * 1283591) ^ ((uint64_t)g * 850177) ^ ((uint64_t)b * 735391) ^
+ ((uint64_t)a * 442319);
}
};
diff --git a/source/blender/blenlib/BLI_disjoint_set.hh b/source/blender/blenlib/BLI_disjoint_set.hh
index 3b8453669aa..e0580709a44 100644
--- a/source/blender/blenlib/BLI_disjoint_set.hh
+++ b/source/blender/blenlib/BLI_disjoint_set.hh
@@ -29,16 +29,17 @@ namespace blender {
class DisjointSet {
private:
- Array<uint> parents_;
- Array<uint> ranks_;
+ Array<int64_t> parents_;
+ Array<int64_t> ranks_;
public:
/**
* Create a new disjoint set with the given size. Initially, every element is in a separate set.
*/
- DisjointSet(uint size) : parents_(size), ranks_(size, 0)
+ DisjointSet(int64_t size) : parents_(size), ranks_(size, 0)
{
- for (uint i = 0; i < size; i++) {
+ BLI_assert(size >= 0);
+ for (int64_t i = 0; i < size; i++) {
parents_[i] = i;
}
}
@@ -47,10 +48,10 @@ class DisjointSet {
* Join the sets containing elements x and y. Nothing happens when they have been in the same set
* before.
*/
- void join(uint x, uint y)
+ void join(int64_t x, int64_t y)
{
- uint root1 = this->find_root(x);
- uint root2 = this->find_root(y);
+ int64_t root1 = this->find_root(x);
+ int64_t root2 = this->find_root(y);
/* x and y are in the same set already. */
if (root1 == root2) {
@@ -71,27 +72,27 @@ class DisjointSet {
/**
* Return true when x and y are in the same set.
*/
- bool in_same_set(uint x, uint y)
+ bool in_same_set(int64_t x, int64_t y)
{
- uint root1 = this->find_root(x);
- uint root2 = this->find_root(y);
+ int64_t root1 = this->find_root(x);
+ int64_t root2 = this->find_root(y);
return root1 == root2;
}
/**
* Find the element that represents the set containing x currently.
*/
- uint find_root(uint x)
+ int64_t find_root(int64_t x)
{
/* Find root by following parents. */
- uint root = x;
+ int64_t root = x;
while (parents_[root] != root) {
root = parents_[root];
}
/* Compress path. */
while (parents_[x] != root) {
- uint parent = parents_[x];
+ int64_t parent = parents_[x];
parents_[x] = root;
x = parent;
}
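
A short usage sketch of the DisjointSet API shown in the hunks above (illustrative only):

```cpp
#include "BLI_disjoint_set.hh"

void disjoint_set_example()
{
  /* Six singleton sets: {0} {1} {2} {3} {4} {5}. */
  blender::DisjointSet disjoint_set(6);

  disjoint_set.join(1, 2);
  disjoint_set.join(2, 5);

  const bool a = disjoint_set.in_same_set(1, 5); /* true: joined transitively. */
  const bool b = disjoint_set.in_same_set(0, 1); /* false: never joined. */
  (void)a;
  (void)b;
}
```
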
diff --git a/source/blender/blenlib/BLI_dot_export.hh b/source/blender/blenlib/BLI_dot_export.hh
index a7c5f1436d1..0870d8c4c30 100644
--- a/source/blender/blenlib/BLI_dot_export.hh
+++ b/source/blender/blenlib/BLI_dot_export.hh
@@ -274,13 +274,13 @@ class NodeWithSocketsRef {
return *node_;
}
- NodePort input(uint index) const
+ NodePort input(int index) const
{
std::string port = "\"in" + std::to_string(index) + "\"";
return NodePort(*node_, port);
}
- NodePort output(uint index) const
+ NodePort output(int index) const
{
std::string port = "\"out" + std::to_string(index) + "\"";
return NodePort(*node_, port);
diff --git a/source/blender/blenlib/BLI_float3.hh b/source/blender/blenlib/BLI_float3.hh
index a36cedad41d..b2633985ac7 100644
--- a/source/blender/blenlib/BLI_float3.hh
+++ b/source/blender/blenlib/BLI_float3.hh
@@ -188,11 +188,11 @@ struct float3 {
z = -z;
}
- uint32_t hash() const
+ uint64_t hash() const
{
- uint32_t x1 = *(uint32_t *)&x;
- uint32_t x2 = *(uint32_t *)&y;
- uint32_t x3 = *(uint32_t *)&z;
+ uint64_t x1 = *(uint32_t *)&x;
+ uint64_t x2 = *(uint32_t *)&y;
+ uint64_t x3 = *(uint32_t *)&z;
return (x1 * 435109) ^ (x2 * 380867) ^ (x3 * 1059217);
}
diff --git a/source/blender/blenlib/BLI_float4x4.hh b/source/blender/blenlib/BLI_float4x4.hh
index ef83f9ffc19..185cffd13ac 100644
--- a/source/blender/blenlib/BLI_float4x4.hh
+++ b/source/blender/blenlib/BLI_float4x4.hh
@@ -109,10 +109,10 @@ struct float4x4 {
return result;
}
- uint32_t hash() const
+ uint64_t hash() const
{
- uint32_t h = 435109;
- for (uint i = 0; i < 16; i++) {
+ uint64_t h = 435109;
+ for (int i = 0; i < 16; i++) {
float value = ((const float *)this)[i];
h = h * 33 + (*(uint32_t *)&value);
}
diff --git a/source/blender/blenlib/BLI_hash.hh b/source/blender/blenlib/BLI_hash.hh
index 5cd4ce3c1a9..b14a4ca933c 100644
--- a/source/blender/blenlib/BLI_hash.hh
+++ b/source/blender/blenlib/BLI_hash.hh
@@ -38,7 +38,7 @@
* multiple `operator()` in a specialization of #DefaultHash. All those methods have to compute the
* same hash for values that compare equal.
*
- * The computed hash is an unsigned 32 bit integer. Ideally, the hash function would generate
+ * The computed hash is an unsigned 64 bit integer. Ideally, the hash function would generate
* uniformly random hash values for a set of keys. However, in many cases trivial hash functions
* are faster and produce a good enough distribution. In general it is better when more information
* is in the lower bits of the hash. By choosing a good probing strategy, the effects of a bad hash
@@ -49,7 +49,7 @@
* There are three main ways to provide a hash table implementation with a custom hash function.
*
* - When you want to provide a default hash function for your own custom type: Add a `hash`
- * member function to it. The function should return `uint32_t` and take no arguments. This
+ * member function to it. The function should return `uint64_t` and take no arguments. This
* method will be called by the default implementation of #DefaultHash. It will automatically be
* used by hash table implementations.
*
@@ -58,7 +58,7 @@
* either global or BLI namespace.
*
* template<> struct blender::DefaultHash<TheType> {
- * uint32_t operator()(const TheType &value) const {
+ * uint64_t operator()(const TheType &value) const {
* return ...;
* }
* };
@@ -68,7 +68,7 @@
* table explicitly.
*
* struct MyCustomHash {
- * uint32_t operator()(const TheType &value) const {
+ * uint64_t operator()(const TheType &value) const {
* return ...;
* }
* };
@@ -91,7 +91,7 @@ namespace blender {
* that you have to implement a hash function using one of three strategies listed above.
*/
template<typename T> struct DefaultHash {
- uint32_t operator()(const T &value) const
+ uint64_t operator()(const T &value) const
{
return value.hash();
}
@@ -101,7 +101,7 @@ template<typename T> struct DefaultHash {
* Use the same hash function for const and non const variants of a type.
*/
template<typename T> struct DefaultHash<const T> {
- uint32_t operator()(const T &value) const
+ uint64_t operator()(const T &value) const
{
return DefaultHash<T>{}(value);
}
@@ -109,9 +109,9 @@ template<typename T> struct DefaultHash<const T> {
#define TRIVIAL_DEFAULT_INT_HASH(TYPE) \
template<> struct DefaultHash<TYPE> { \
- uint32_t operator()(TYPE value) const \
+ uint64_t operator()(TYPE value) const \
{ \
- return (uint32_t)value; \
+ return (uint64_t)value; \
} \
}
@@ -127,43 +127,29 @@ TRIVIAL_DEFAULT_INT_HASH(int16_t);
TRIVIAL_DEFAULT_INT_HASH(uint16_t);
TRIVIAL_DEFAULT_INT_HASH(int32_t);
TRIVIAL_DEFAULT_INT_HASH(uint32_t);
-
-template<> struct DefaultHash<uint64_t> {
- uint32_t operator()(uint64_t value) const
- {
- uint32_t low = (uint32_t)value;
- uint32_t high = (uint32_t)(value >> 32);
- return low ^ (high * 0x45d9f3b);
- }
-};
-
-template<> struct DefaultHash<int64_t> {
- uint32_t operator()(uint64_t value) const
- {
- return DefaultHash<uint64_t>{}((uint64_t)value);
- }
-};
+TRIVIAL_DEFAULT_INT_HASH(int64_t);
+TRIVIAL_DEFAULT_INT_HASH(uint64_t);
/**
* One should try to avoid using floats as keys in hash tables, but sometimes it is convenient.
*/
template<> struct DefaultHash<float> {
- uint32_t operator()(float value) const
+ uint64_t operator()(float value) const
{
return *(uint32_t *)&value;
}
};
template<> struct DefaultHash<bool> {
- uint32_t operator()(bool value) const
+ uint64_t operator()(bool value) const
{
- return (uint32_t)(value != false) * 1298191;
+ return (uint64_t)(value != false) * 1298191;
}
};
-inline uint32_t hash_string(StringRef str)
+inline uint64_t hash_string(StringRef str)
{
- uint32_t hash = 5381;
+ uint64_t hash = 5381;
for (char c : str) {
hash = hash * 33 + c;
}
@@ -175,21 +161,21 @@ template<> struct DefaultHash<std::string> {
* Take a #StringRef as parameter to support heterogeneous lookups in hash table implementations
* when std::string is used as key.
*/
- uint32_t operator()(StringRef value) const
+ uint64_t operator()(StringRef value) const
{
return hash_string(value);
}
};
template<> struct DefaultHash<StringRef> {
- uint32_t operator()(StringRef value) const
+ uint64_t operator()(StringRef value) const
{
return hash_string(value);
}
};
template<> struct DefaultHash<StringRefNull> {
- uint32_t operator()(StringRef value) const
+ uint64_t operator()(StringRef value) const
{
return hash_string(value);
}
@@ -199,26 +185,26 @@ template<> struct DefaultHash<StringRefNull> {
* While we cannot guarantee that the lower 4 bits of a pointer are zero, it is often the case.
*/
template<typename T> struct DefaultHash<T *> {
- uint32_t operator()(const T *value) const
+ uint64_t operator()(const T *value) const
{
uintptr_t ptr = (uintptr_t)value;
- uint32_t hash = (uint32_t)(ptr >> 4);
+ uint64_t hash = (uint64_t)(ptr >> 4);
return hash;
}
};
template<typename T> struct DefaultHash<std::unique_ptr<T>> {
- uint32_t operator()(const std::unique_ptr<T> &value) const
+ uint64_t operator()(const std::unique_ptr<T> &value) const
{
return DefaultHash<T *>{}(value.get());
}
};
template<typename T1, typename T2> struct DefaultHash<std::pair<T1, T2>> {
- uint32_t operator()(const std::pair<T1, T2> &value) const
+ uint64_t operator()(const std::pair<T1, T2> &value) const
{
- uint32_t hash1 = DefaultHash<T1>{}(value.first);
- uint32_t hash2 = DefaultHash<T2>{}(value.second);
+ uint64_t hash1 = DefaultHash<T1>{}(value.first);
+ uint64_t hash2 = DefaultHash<T2>{}(value.second);
return hash1 ^ (hash2 * 33);
}
};
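
The header comments above describe three ways to hook a type into the hash tables; with this commit all of them must return uint64_t instead of uint32_t. A compact sketch of the three strategies (MyType, MyOtherType, and MyCustomHash are made-up names):

```cpp
#include <cstdint>

#include "BLI_hash.hh"

/* 1) A hash() member function, picked up by the generic DefaultHash<T>. */
struct MyType {
  int a;
  int b;

  uint64_t hash() const
  {
    return ((uint64_t)a * 33) ^ ((uint64_t)b * 435109);
  }
};

/* 2) A specialization of blender::DefaultHash (global or blender namespace). */
struct MyOtherType {
  int value;
};

template<> struct blender::DefaultHash<MyOtherType> {
  uint64_t operator()(const MyOtherType &v) const
  {
    return (uint64_t)v.value;
  }
};

/* 3) An explicit hash functor, passed as the Hash template parameter of a
 * specific Set/Map instantiation when the default should not be used. */
struct MyCustomHash {
  uint64_t operator()(const MyType &v) const
  {
    return (uint64_t)v.a ^ ((uint64_t)v.b * 33);
  }
};
```
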
diff --git a/source/blender/blenlib/BLI_hash_tables.hh b/source/blender/blenlib/BLI_hash_tables.hh
index aaed772071d..5d8f8862a09 100644
--- a/source/blender/blenlib/BLI_hash_tables.hh
+++ b/source/blender/blenlib/BLI_hash_tables.hh
@@ -42,59 +42,64 @@ namespace blender {
* Those should eventually be de-duplicated with functions in BLI_math_base.h.
* \{ */
-inline constexpr int is_power_of_2_i_constexpr(const int n)
+inline constexpr int64_t is_power_of_2_constexpr(const int64_t x)
{
- return (n & (n - 1)) == 0;
+ BLI_assert(x >= 0);
+ return (x & (x - 1)) == 0;
}
-inline constexpr uint32_t log2_floor_u_constexpr(const uint32_t x)
+inline constexpr int64_t log2_floor_constexpr(const int64_t x)
{
- return x <= 1 ? 0 : 1 + log2_floor_u_constexpr(x >> 1);
+ BLI_assert(x >= 0);
+ return x <= 1 ? 0 : 1 + log2_floor_constexpr(x >> 1);
}
-inline constexpr uint32_t log2_ceil_u_constexpr(const uint32_t x)
+inline constexpr int64_t log2_ceil_constexpr(const int64_t x)
{
- return (is_power_of_2_i_constexpr((int)x)) ? log2_floor_u_constexpr(x) :
- log2_floor_u_constexpr(x) + 1;
+ BLI_assert(x >= 0);
+ return (is_power_of_2_constexpr(x)) ? log2_floor_constexpr(x) : log2_floor_constexpr(x) + 1;
}
-inline constexpr uint32_t power_of_2_max_u_constexpr(const uint32_t x)
+inline constexpr int64_t power_of_2_max_constexpr(const int64_t x)
{
- return 1u << log2_ceil_u_constexpr(x);
+ BLI_assert(x >= 0);
+ return 1ll << log2_ceil_constexpr(x);
}
template<typename IntT> inline constexpr IntT ceil_division(const IntT x, const IntT y)
{
- BLI_STATIC_ASSERT(!std::is_signed_v<IntT>, "");
+ BLI_assert(x >= 0);
+ BLI_assert(y >= 0);
return x / y + ((x % y) != 0);
}
template<typename IntT> inline constexpr IntT floor_division(const IntT x, const IntT y)
{
- BLI_STATIC_ASSERT(!std::is_signed_v<IntT>, "");
+ BLI_assert(x >= 0);
+ BLI_assert(y >= 0);
return x / y;
}
-inline constexpr uint32_t ceil_division_by_fraction(const uint32_t x,
- const uint32_t numerator,
- const uint32_t denominator)
+inline constexpr int64_t ceil_division_by_fraction(const int64_t x,
+ const int64_t numerator,
+ const int64_t denominator)
{
- return (uint32_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator);
+ return (int64_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator);
}
-inline constexpr uint32_t floor_multiplication_with_fraction(const uint32_t x,
- const uint32_t numerator,
- const uint32_t denominator)
+inline constexpr int64_t floor_multiplication_with_fraction(const int64_t x,
+ const int64_t numerator,
+ const int64_t denominator)
{
- return (uint32_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator);
+ return (int64_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator);
}
-inline constexpr uint32_t total_slot_amount_for_usable_slots(
- const uint32_t min_usable_slots,
- const uint32_t max_load_factor_numerator,
- const uint32_t max_load_factor_denominator)
+inline constexpr int64_t total_slot_amount_for_usable_slots(
+ const int64_t min_usable_slots,
+ const int64_t max_load_factor_numerator,
+ const int64_t max_load_factor_denominator)
{
- return power_of_2_max_u_constexpr(ceil_division_by_fraction(
+ return power_of_2_max_constexpr(ceil_division_by_fraction(
min_usable_slots, max_load_factor_numerator, max_load_factor_denominator));
}
@@ -121,16 +126,16 @@ class LoadFactor {
BLI_assert(numerator < denominator);
}
- void compute_total_and_usable_slots(uint32_t min_total_slots,
- uint32_t min_usable_slots,
- uint32_t *r_total_slots,
- uint32_t *r_usable_slots) const
+ void compute_total_and_usable_slots(int64_t min_total_slots,
+ int64_t min_usable_slots,
+ int64_t *r_total_slots,
+ int64_t *r_usable_slots) const
{
BLI_assert(is_power_of_2_i((int)min_total_slots));
- uint32_t total_slots = this->compute_total_slots(min_usable_slots, numerator_, denominator_);
+ int64_t total_slots = this->compute_total_slots(min_usable_slots, numerator_, denominator_);
total_slots = std::max(total_slots, min_total_slots);
- const uint32_t usable_slots = floor_multiplication_with_fraction(
+ const int64_t usable_slots = floor_multiplication_with_fraction(
total_slots, numerator_, denominator_);
BLI_assert(min_usable_slots <= usable_slots);
@@ -138,9 +143,9 @@ class LoadFactor {
*r_usable_slots = usable_slots;
}
- static constexpr uint32_t compute_total_slots(uint32_t min_usable_slots,
- uint8_t numerator,
- uint8_t denominator)
+ static constexpr int64_t compute_total_slots(int64_t min_usable_slots,
+ uint8_t numerator,
+ uint8_t denominator)
{
return total_slot_amount_for_usable_slots(min_usable_slots, numerator, denominator);
}
@@ -262,27 +267,27 @@ template<typename Pointer> struct PointerKeyInfo {
class HashTableStats {
private:
- Vector<uint32_t> keys_by_collision_count_;
- uint32_t total_collisions_;
+ Vector<int64_t> keys_by_collision_count_;
+ int64_t total_collisions_;
float average_collisions_;
- uint32_t size_;
- uint32_t capacity_;
- uint32_t removed_amount_;
+ int64_t size_;
+ int64_t capacity_;
+ int64_t removed_amount_;
float load_factor_;
float removed_load_factor_;
- uint32_t size_per_element_;
- uint32_t size_in_bytes_;
+ int64_t size_per_element_;
+ int64_t size_in_bytes_;
const void *address_;
public:
/**
* Requires that the hash table has the following methods:
- * - count_collisions(key) -> uint32_t
- * - size() -> uint32_t
- * - capacity() -> uint32_t
- * - removed_amount() -> uint32_t
- * - size_per_element() -> uint32_t
- * - size_in_bytes() -> uint32_t
+ * - count_collisions(key) -> int64_t
+ * - size() -> int64_t
+ * - capacity() -> int64_t
+ * - removed_amount() -> int64_t
+ * - size_per_element() -> int64_t
+ * - size_in_bytes() -> int64_t
*/
template<typename HashTable, typename Keys>
HashTableStats(const HashTable &hash_table, const Keys &keys)
@@ -296,7 +301,7 @@ class HashTableStats {
address_ = (const void *)&hash_table;
for (const auto &key : keys) {
- uint32_t collisions = hash_table.count_collisions(key);
+ int64_t collisions = hash_table.count_collisions(key);
if (keys_by_collision_count_.size() <= collisions) {
keys_by_collision_count_.append_n_times(0,
collisions - keys_by_collision_count_.size() + 1);
@@ -325,7 +330,7 @@ class HashTableStats {
std::cout << " Size per Slot: " << size_per_element_ << " bytes\n";
std::cout << " Average Collisions: " << average_collisions_ << "\n";
- for (uint32_t collision_count : keys_by_collision_count_.index_range()) {
+ for (int64_t collision_count : keys_by_collision_count_.index_range()) {
std::cout << " " << collision_count
<< " Collisions: " << keys_by_collision_count_[collision_count] << "\n";
}
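
As a worked example of the slot arithmetic above (values chosen for illustration, assuming a 1/2 max load factor):

```cpp
#include <cstdio>

#include "BLI_hash_tables.hh"

void load_factor_example()
{
  /* ceil_division_by_fraction(5, 1, 2) = ceil(5 * 2 / 1) = 10, which
   * power_of_2_max_constexpr rounds up to the next power of two: 16. */
  const int64_t total = blender::total_slot_amount_for_usable_slots(5, 1, 2);

  /* Of those 16 slots, only the load-factor fraction may be occupied or
   * removed before the table grows: 16 * 1 / 2 = 8. */
  const int64_t usable = blender::floor_multiplication_with_fraction(total, 1, 2);

  std::printf("total: %lld, usable: %lld\n", (long long)total, (long long)usable);
}
```
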
diff --git a/source/blender/blenlib/BLI_index_mask.hh b/source/blender/blenlib/BLI_index_mask.hh
index 93bbb269d30..ff271faa0c2 100644
--- a/source/blender/blenlib/BLI_index_mask.hh
+++ b/source/blender/blenlib/BLI_index_mask.hh
@@ -46,7 +46,7 @@ namespace blender {
class IndexMask {
private:
/* The underlying reference to sorted integers. */
- Span<uint> indices_;
+ Span<int64_t> indices_;
public:
/* Creates an IndexMask that contains no indices. */
@@ -57,10 +57,10 @@ class IndexMask {
* This constructor asserts that the given integers are in ascending order and that there are no
* duplicates.
*/
- IndexMask(Span<uint> indices) : indices_(indices)
+ IndexMask(Span<int64_t> indices) : indices_(indices)
{
#ifdef DEBUG
- for (uint i = 1; i < indices.size(); i++) {
+ for (int64_t i = 1; i < indices.size(); i++) {
BLI_assert(indices[i - 1] < indices[i]);
}
#endif
@@ -84,28 +84,28 @@ class IndexMask {
* Do this:
* do_something_with_an_index_mask({3, 4, 5});
*/
- IndexMask(const std::initializer_list<uint> &indices) : IndexMask(Span<uint>(indices))
+ IndexMask(const std::initializer_list<int64_t> &indices) : IndexMask(Span<int64_t>(indices))
{
}
/**
* Creates an IndexMask that references the indices [0, n-1].
*/
- explicit IndexMask(uint n) : IndexMask(IndexRange(n))
+ explicit IndexMask(int64_t n) : IndexMask(IndexRange(n))
{
}
- operator Span<uint>() const
+ operator Span<int64_t>() const
{
return indices_;
}
- const uint *begin() const
+ const int64_t *begin() const
{
return indices_.begin();
}
- const uint *end() const
+ const int64_t *end() const
{
return indices_.end();
}
@@ -114,7 +114,7 @@ class IndexMask {
* Returns the n-th index referenced by this IndexMask. The `index_range` method returns an
* IndexRange containing all indices that can be used as parameter here.
*/
- uint operator[](uint n) const
+ int64_t operator[](int64_t n) const
{
return indices_[n];
}
@@ -123,7 +123,7 @@ class IndexMask {
* Returns the minimum size an array has to have, if the integers in this IndexMask are going to
* be used as indices in that array.
*/
- uint min_array_size() const
+ int64_t min_array_size() const
{
if (indices_.size() == 0) {
return 0;
@@ -133,7 +133,7 @@ class IndexMask {
}
}
- Span<uint> indices() const
+ Span<int64_t> indices() const
{
return indices_;
}
@@ -167,12 +167,12 @@ class IndexMask {
{
if (this->is_range()) {
IndexRange range = this->as_range();
- for (uint i : range) {
+ for (int64_t i : range) {
callback(i);
}
}
else {
- for (uint i : indices_) {
+ for (int64_t i : indices_) {
callback(i);
}
}
@@ -193,7 +193,7 @@ class IndexMask {
/**
* Returns the largest index that is referenced by this IndexMask.
*/
- uint last() const
+ int64_t last() const
{
return indices_.last();
}
@@ -201,7 +201,7 @@ class IndexMask {
/**
* Returns the number of indices referenced by this IndexMask.
*/
- uint size() const
+ int64_t size() const
{
return indices_.size();
}
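
A usage sketch following the pattern recommended in the comments above: the mask is passed into a function rather than stored, because an initializer list only lives for the duration of the call.

```cpp
#include "BLI_index_mask.hh"

void do_something_with_an_index_mask(blender::IndexMask mask)
{
  /* foreach_index handles both the contiguous-range case and explicit indices. */
  mask.foreach_index([](int64_t i) {
    /* ... process index i ... */
    (void)i;
  });
}

void index_mask_example()
{
  /* Explicit indices: must be ascending and free of duplicates. */
  do_something_with_an_index_mask({3, 4, 7});

  /* Every index in [0, n): IndexMask(10) references 0..9. */
  do_something_with_an_index_mask(blender::IndexMask(10));
}
```
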
diff --git a/source/blender/blenlib/BLI_index_range.hh b/source/blender/blenlib/BLI_index_range.hh
index 1ae08e834ae..7c813f58b2c 100644
--- a/source/blender/blenlib/BLI_index_range.hh
+++ b/source/blender/blenlib/BLI_index_range.hh
@@ -27,29 +27,29 @@
* I'd argue that the second loop is more readable and less error prone than the first one. That is
* not necessarily always the case, but often it is.
*
- * for (uint i = 0; i < 10; i++) {
- * for (uint j = 0; j < 20; j++) {
- * for (uint k = 0; k < 30; k++) {
+ * for (int64_t i = 0; i < 10; i++) {
+ * for (int64_t j = 0; j < 20; j++) {
+ * for (int64_t k = 0; k < 30; k++) {
*
- * for (uint i : IndexRange(10)) {
- * for (uint j : IndexRange(20)) {
- * for (uint k : IndexRange(30)) {
+ * for (int64_t i : IndexRange(10)) {
+ * for (int64_t j : IndexRange(20)) {
+ * for (int64_t k : IndexRange(30)) {
*
* Some containers like blender::Vector have an index_range() method. This will return the
* IndexRange that contains all indices that can be used to access the container. This is
* particularly useful when you want to iterate over the indices and the elements (much like
* Python's enumerate(), just worse). Again, I think the second example here is better:
*
- * for (uint i = 0; i < my_vector_with_a_long_name.size(); i++) {
+ * for (int64_t i = 0; i < my_vector_with_a_long_name.size(); i++) {
* do_something(i, my_vector_with_a_long_name[i]);
*
- * for (uint i : my_vector_with_a_long_name.index_range()) {
+ * for (int64_t i : my_vector_with_a_long_name.index_range()) {
* do_something(i, my_vector_with_a_long_name[i]);
*
* Ideally this could be even closer to Python's enumerate(). We might get that in the
* future with newer C++ versions.
*
- * One other important feature is the as_span method. This method returns an Span<uint>
+ * One other important feature is the as_span method. This method returns a Span<int64_t>
* that contains the interval as individual numbers.
*/
@@ -70,18 +70,21 @@ template<typename T> class Span;
class IndexRange {
private:
- uint start_ = 0;
- uint size_ = 0;
+ int64_t start_ = 0;
+ int64_t size_ = 0;
public:
IndexRange() = default;
- explicit IndexRange(uint size) : start_(0), size_(size)
+ explicit IndexRange(int64_t size) : start_(0), size_(size)
{
+ BLI_assert(size >= 0);
}
- IndexRange(uint start, uint size) : start_(start), size_(size)
+ IndexRange(int64_t start, int64_t size) : start_(start), size_(size)
{
+ BLI_assert(start >= 0);
+ BLI_assert(size >= 0);
}
template<typename T>
@@ -91,10 +94,10 @@ class IndexRange {
class Iterator {
private:
- uint current_;
+ int64_t current_;
public:
- Iterator(uint current) : current_(current)
+ Iterator(int64_t current) : current_(current)
{
}
@@ -109,7 +112,7 @@ class IndexRange {
return current_ != iterator.current_;
}
- uint operator*() const
+ int64_t operator*() const
{
return current_;
}
@@ -128,8 +131,9 @@ class IndexRange {
/**
* Access an element in the range.
*/
- uint operator[](uint index) const
+ int64_t operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
return start_ + index;
}
@@ -145,7 +149,7 @@ class IndexRange {
/**
* Get the number of values in the range.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -153,16 +157,18 @@ class IndexRange {
/**
* Create a new range starting at the end of the current one.
*/
- IndexRange after(uint n) const
+ IndexRange after(int64_t n) const
{
+ BLI_assert(n >= 0);
return IndexRange(start_ + size_, n);
}
/**
* Create a new range that ends at the start of the current one.
*/
- IndexRange before(uint n) const
+ IndexRange before(int64_t n) const
{
+ BLI_assert(n >= 0);
return IndexRange(start_ - n, n);
}
@@ -170,7 +176,7 @@ class IndexRange {
* Get the first element in the range.
* Asserts when the range is empty.
*/
- uint first() const
+ int64_t first() const
{
BLI_assert(this->size() > 0);
return start_;
@@ -180,7 +186,7 @@ class IndexRange {
* Get the last element in the range.
* Asserts when the range is empty.
*/
- uint last() const
+ int64_t last() const
{
BLI_assert(this->size() > 0);
return start_ + size_ - 1;
@@ -189,7 +195,7 @@ class IndexRange {
/**
* Get the element one after the end. The returned value is undefined when the range is empty.
*/
- uint one_after_last() const
+ int64_t one_after_last() const
{
return start_ + size_;
}
@@ -197,7 +203,7 @@ class IndexRange {
/**
* Get the first element in the range. The returned value is undefined when the range is empty.
*/
- uint start() const
+ int64_t start() const
{
return start_;
}
@@ -205,7 +211,7 @@ class IndexRange {
/**
* Returns true when the range contains a certain number, otherwise false.
*/
- bool contains(uint value) const
+ bool contains(int64_t value) const
{
return value >= start_ && value < start_ + size_;
}
@@ -213,9 +219,11 @@ class IndexRange {
/**
* Returns a new range, that contains a sub-interval of the current one.
*/
- IndexRange slice(uint start, uint size) const
+ IndexRange slice(int64_t start, int64_t size) const
{
- uint new_start = start_ + start;
+ BLI_assert(start >= 0);
+ BLI_assert(size >= 0);
+ int64_t new_start = start_ + start;
BLI_assert(new_start + size <= start_ + size_ || size == 0);
return IndexRange(new_start, size);
}
@@ -227,7 +235,7 @@ class IndexRange {
/**
* Get read-only access to a memory buffer that contains the range as actual numbers.
*/
- Span<uint> as_span() const;
+ Span<int64_t> as_span() const;
friend std::ostream &operator<<(std::ostream &stream, IndexRange range)
{
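
A small sketch of the IndexRange operations whose signatures change above:

```cpp
#include "BLI_index_range.hh"

void index_range_example()
{
  const blender::IndexRange range(10, 5); /* 10, 11, 12, 13, 14 */

  const int64_t first = range.first();               /* 10 */
  const int64_t last = range.last();                 /* 14 */
  const blender::IndexRange sub = range.slice(1, 3); /* 11, 12, 13 */
  const blender::IndexRange next = range.after(2);   /* 15, 16 */

  for (const int64_t i : sub) {
    (void)i; /* ... */
  }
  (void)first;
  (void)last;
  (void)next;
}
```
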
diff --git a/source/blender/blenlib/BLI_linear_allocator.hh b/source/blender/blenlib/BLI_linear_allocator.hh
index b13d88d5b93..39a3ed27f42 100644
--- a/source/blender/blenlib/BLI_linear_allocator.hh
+++ b/source/blender/blenlib/BLI_linear_allocator.hh
@@ -39,10 +39,10 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
uintptr_t current_begin_;
uintptr_t current_end_;
- uint next_min_alloc_size_;
+ int64_t next_min_alloc_size_;
#ifdef DEBUG
- uint debug_allocated_amount_ = 0;
+ int64_t debug_allocated_amount_ = 0;
#endif
public:
@@ -66,8 +66,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*
* The alignment has to be a power of 2.
*/
- void *allocate(const uint size, const uint alignment)
+ void *allocate(const int64_t size, const int64_t alignment)
{
+ BLI_assert(size >= 0);
BLI_assert(alignment >= 1);
BLI_assert(is_power_of_2_i(alignment));
@@ -105,7 +106,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*
* This method only allocates memory and does not construct the instance.
*/
- template<typename T> MutableSpan<T> allocate_array(uint size)
+ template<typename T> MutableSpan<T> allocate_array(int64_t size)
{
return MutableSpan<T>((T *)this->allocate(sizeof(T) * size, alignof(T)), size);
}
@@ -141,22 +142,22 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
StringRefNull copy_string(StringRef str)
{
- const uint alloc_size = str.size() + 1;
+ const int64_t alloc_size = str.size() + 1;
char *buffer = (char *)this->allocate(alloc_size, 1);
str.copy(buffer, alloc_size);
return StringRefNull((const char *)buffer);
}
- MutableSpan<void *> allocate_elements_and_pointer_array(uint element_amount,
- uint element_size,
- uint element_alignment)
+ MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
+ int64_t element_size,
+ int64_t element_alignment)
{
void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *));
void *elements_buffer = this->allocate(element_amount * element_size, element_alignment);
MutableSpan<void *> pointers((void **)pointer_buffer, element_amount);
void *next_element_buffer = elements_buffer;
- for (uint i : IndexRange(element_amount)) {
+ for (int64_t i : IndexRange(element_amount)) {
pointers[i] = next_element_buffer;
next_element_buffer = POINTER_OFFSET(next_element_buffer, element_size);
}
@@ -165,13 +166,13 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
}
template<typename T, typename... Args>
- Span<T *> construct_elements_and_pointer_array(uint n, Args &&... args)
+ Span<T *> construct_elements_and_pointer_array(int64_t n, Args &&... args)
{
MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array(
n, sizeof(T), alignof(T));
MutableSpan<T *> pointers = void_pointers.cast<T *>();
- for (uint i : IndexRange(n)) {
+ for (int64_t i : IndexRange(n)) {
new ((void *)pointers[i]) T(std::forward<Args>(args)...);
}
@@ -194,9 +195,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
}
private:
- void allocate_new_buffer(uint min_allocation_size)
+ void allocate_new_buffer(int64_t min_allocation_size)
{
- for (uint i : unused_borrowed_buffers_.index_range()) {
+ for (int64_t i : unused_borrowed_buffers_.index_range()) {
Span<char> buffer = unused_borrowed_buffers_[i];
if (buffer.size() >= min_allocation_size) {
unused_borrowed_buffers_.remove_and_reorder(i);
@@ -206,7 +207,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
}
}
- const uint size_in_bytes = power_of_2_min_u(
+ const int64_t size_in_bytes = power_of_2_min_u(
std::max(min_allocation_size, next_min_alloc_size_));
next_min_alloc_size_ = size_in_bytes * 2;
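
A usage sketch of the LinearAllocator API touched above (MutableSpan::fill() is an assumption here, based on the fill helpers this commit references in BLI_array.hh):

```cpp
#include "BLI_linear_allocator.hh"

void linear_allocator_example()
{
  blender::LinearAllocator<> allocator;

  /* Raw allocation: both size and alignment are int64_t now, and the
   * alignment must be a power of two. */
  void *buffer = allocator.allocate(128, 8);
  (void)buffer;

  /* Typed helper: allocates storage for 100 ints without constructing them
   * (fine for trivial types). */
  blender::MutableSpan<int> values = allocator.allocate_array<int>(100);
  values.fill(0);

  /* All buffers are released at once when the allocator goes out of scope. */
}
```
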
diff --git a/source/blender/blenlib/BLI_listbase_wrapper.hh b/source/blender/blenlib/BLI_listbase_wrapper.hh
index 047099eb36e..46f4a9d49fa 100644
--- a/source/blender/blenlib/BLI_listbase_wrapper.hh
+++ b/source/blender/blenlib/BLI_listbase_wrapper.hh
@@ -96,9 +96,9 @@ template<typename T> class ListBaseWrapper {
return (T *)ptr;
}
- uint index_of(const T *value) const
+ int64_t index_of(const T *value) const
{
- uint index = 0;
+ int64_t index = 0;
for (T *ptr : *this) {
if (ptr == value) {
return index;
@@ -106,7 +106,7 @@ template<typename T> class ListBaseWrapper {
index++;
}
BLI_assert(false);
- return 0;
+ return -1;
}
};
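
Note the behavioral fix at the end of index_of(): BLI_assert compiles away in release builds, so returning -1 instead of 0 gives callers a sentinel that cannot be confused with a valid match at the front of the list. A hypothetical caller-side sketch:

```cpp
#include "BLI_listbase_wrapper.hh"

/* Hypothetical helper showing how the -1 sentinel can be consumed. */
template<typename T>
bool try_find_index(const blender::ListBaseWrapper<T> &list, const T *value, int64_t *r_index)
{
  const int64_t index = list.index_of(value);
  if (index == -1) {
    return false; /* Not in the list; previously indistinguishable from index 0. */
  }
  *r_index = index;
  return true;
}
```
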
diff --git a/source/blender/blenlib/BLI_map.hh b/source/blender/blenlib/BLI_map.hh
index 6bbd4ee09db..2abaf814ec9 100644
--- a/source/blender/blenlib/BLI_map.hh
+++ b/source/blender/blenlib/BLI_map.hh
@@ -96,7 +96,7 @@ template<
* When Key or Value are large, the small buffer optimization is disabled by default to avoid
* large unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint32_t InlineBufferCapacity = (sizeof(Key) + sizeof(Value) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(Key) + sizeof(Value) < 100) ? 4 : 0,
/**
* The strategy used to deal with collisions. They are defined in BLI_probing_strategies.hh.
*/
@@ -129,20 +129,20 @@ class Map {
* Slots are either empty, occupied or removed. The number of occupied slots can be computed by
* subtracting the removed slots from the occupied-and-removed slots.
*/
- uint32_t removed_slots_;
- uint32_t occupied_and_removed_slots_;
+ int64_t removed_slots_;
+ int64_t occupied_and_removed_slots_;
/**
* The maximum number of slots that can be used (either occupied or removed) until the map has to
* grow. This is the total number of slots times the max load factor.
*/
- uint32_t usable_slots_;
+ int64_t usable_slots_;
/**
* The number of slots minus one. This is a bit mask that can be used to turn any integer into a
* valid slot index efficiently.
*/
- uint32_t slot_mask_;
+ uint64_t slot_mask_;
/** This is called to hash incoming keys. */
Hash hash_;
@@ -577,8 +577,8 @@ class Map {
*/
template<typename FuncT> void foreach_item(const FuncT &func) const
{
- uint32_t size = slots_.size();
- for (uint32_t i = 0; i < size; i++) {
+ int64_t size = slots_.size();
+ for (int64_t i = 0; i < size; i++) {
const Slot &slot = slots_[i];
if (slot.is_occupied()) {
const Key &key = *slot.key();
@@ -594,10 +594,10 @@ class Map {
*/
template<typename SubIterator> struct BaseIterator {
Slot *slots_;
- uint32_t total_slots_;
- uint32_t current_slot_;
+ int64_t total_slots_;
+ int64_t current_slot_;
- BaseIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ BaseIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: slots_(const_cast<Slot *>(slots)), total_slots_(total_slots), current_slot_(current_slot)
{
}
@@ -621,7 +621,7 @@ class Map {
SubIterator begin() const
{
- for (uint32_t i = 0; i < total_slots_; i++) {
+ for (int64_t i = 0; i < total_slots_; i++) {
if (slots_[i].is_occupied()) {
return SubIterator(slots_, total_slots_, i);
}
@@ -642,7 +642,7 @@ class Map {
class KeyIterator final : public BaseIterator<KeyIterator> {
public:
- KeyIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ KeyIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<KeyIterator>(slots, total_slots, current_slot)
{
}
@@ -655,7 +655,7 @@ class Map {
class ValueIterator final : public BaseIterator<ValueIterator> {
public:
- ValueIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ ValueIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<ValueIterator>(slots, total_slots, current_slot)
{
}
@@ -668,7 +668,7 @@ class Map {
class MutableValueIterator final : public BaseIterator<MutableValueIterator> {
public:
- MutableValueIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ MutableValueIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<MutableValueIterator>(slots, total_slots, current_slot)
{
}
@@ -696,7 +696,7 @@ class Map {
class ItemIterator final : public BaseIterator<ItemIterator> {
public:
- ItemIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ ItemIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<ItemIterator>(slots, total_slots, current_slot)
{
}
@@ -710,7 +710,7 @@ class Map {
class MutableItemIterator final : public BaseIterator<MutableItemIterator> {
public:
- MutableItemIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ MutableItemIterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: BaseIterator<MutableItemIterator>(slots, total_slots, current_slot)
{
}
@@ -783,7 +783,7 @@ class Map {
/**
* Return the number of key-value-pairs that are stored in the map.
*/
- uint32_t size() const
+ int64_t size() const
{
return occupied_and_removed_slots_ - removed_slots_;
}
@@ -801,7 +801,7 @@ class Map {
/**
* Returns the number of available slots. This is mostly for debugging purposes.
*/
- uint32_t capacity() const
+ int64_t capacity() const
{
return slots_.size();
}
@@ -809,7 +809,7 @@ class Map {
/**
* Returns the number of removed slots in the map. This is mostly for debugging purposes.
*/
- uint32_t removed_amount() const
+ int64_t removed_amount() const
{
return removed_slots_;
}
@@ -817,7 +817,7 @@ class Map {
/**
* Returns the bytes required per element. This is mostly for debugging purposes.
*/
- uint32_t size_per_element() const
+ int64_t size_per_element() const
{
return sizeof(Slot);
}
@@ -826,16 +826,16 @@ class Map {
* Returns the approximate memory requirements of the map in bytes. This becomes more exact the
* larger the map becomes.
*/
- uint32_t size_in_bytes() const
+ int64_t size_in_bytes() const
{
- return (uint32_t)(sizeof(Slot) * slots_.size());
+ return (int64_t)(sizeof(Slot) * slots_.size());
}
/**
* Potentially resize the map such that the specified number of elements can be added without
* another grow operation.
*/
- void reserve(uint32_t n)
+ void reserve(int64_t n)
{
if (usable_slots_ < n) {
this->realloc_and_reinsert(n);
@@ -855,18 +855,19 @@ class Map {
* Get the number of collisions that the probing strategy has to go through to find the key or
* determine that it is not in the map.
*/
- uint32_t count_collisions(const Key &key) const
+ int64_t count_collisions(const Key &key) const
{
return this->count_collisions__impl(key, hash_(key));
}
private:
- BLI_NOINLINE void realloc_and_reinsert(uint32_t min_usable_slots)
+ BLI_NOINLINE void realloc_and_reinsert(int64_t min_usable_slots)
{
- uint32_t total_slots, usable_slots;
+ int64_t total_slots, usable_slots;
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
- uint32_t new_slot_mask = total_slots - 1;
+ BLI_assert(total_slots >= 1);
+ const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
/**
* Optimize the case when the map was empty beforehand. We can avoid some copies here.
@@ -901,9 +902,9 @@ class Map {
void add_after_grow_and_destruct_old(Slot &old_slot,
SlotArray &new_slots,
- uint32_t new_slot_mask)
+ uint64_t new_slot_mask)
{
- uint32_t hash = old_slot.get_hash(Hash());
+ uint64_t hash = old_slot.get_hash(Hash());
SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) {
Slot &slot = new_slots[slot_index];
if (slot.is_empty()) {
@@ -914,7 +915,7 @@ class Map {
SLOT_PROBING_END();
}
- template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint32_t hash) const
+ template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint64_t hash) const
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -928,7 +929,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- void add_new__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ void add_new__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
BLI_assert(!this->contains_as(key));
@@ -945,7 +946,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- bool add__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ bool add__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
this->ensure_can_add();
@@ -962,7 +963,7 @@ class Map {
MAP_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint32_t hash)
+ template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint64_t hash)
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -977,7 +978,7 @@ class Map {
MAP_SLOT_PROBING_END();
}
- template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint32_t hash)
+ template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint64_t hash)
{
BLI_assert(this->contains_as(key));
@@ -992,7 +993,7 @@ class Map {
MAP_SLOT_PROBING_END();
}
- template<typename ForwardKey> Value pop__impl(const ForwardKey &key, uint32_t hash)
+ template<typename ForwardKey> Value pop__impl(const ForwardKey &key, uint64_t hash)
{
BLI_assert(this->contains_as(key));
@@ -1009,7 +1010,7 @@ class Map {
}
template<typename ForwardKey>
- std::optional<Value> pop_try__impl(const ForwardKey &key, uint32_t hash)
+ std::optional<Value> pop_try__impl(const ForwardKey &key, uint64_t hash)
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -1026,7 +1027,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- Value pop_default__impl(const ForwardKey &key, ForwardValue &&default_value, uint32_t hash)
+ Value pop_default__impl(const ForwardKey &key, ForwardValue &&default_value, uint64_t hash)
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -1046,7 +1047,7 @@ class Map {
auto add_or_modify__impl(ForwardKey &&key,
const CreateValueF &create_value,
const ModifyValueF &modify_value,
- uint32_t hash) -> decltype(create_value(nullptr))
+ uint64_t hash) -> decltype(create_value(nullptr))
{
using CreateReturnT = decltype(create_value(nullptr));
using ModifyReturnT = decltype(modify_value(nullptr));
@@ -1071,7 +1072,7 @@ class Map {
}
template<typename ForwardKey, typename CreateValueF>
- Value &lookup_or_add_cb__impl(ForwardKey &&key, const CreateValueF &create_value, uint32_t hash)
+ Value &lookup_or_add_cb__impl(ForwardKey &&key, const CreateValueF &create_value, uint64_t hash)
{
this->ensure_can_add();
@@ -1089,7 +1090,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- Value &lookup_or_add__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ Value &lookup_or_add__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
this->ensure_can_add();
@@ -1107,7 +1108,7 @@ class Map {
}
template<typename ForwardKey, typename ForwardValue>
- bool add_overwrite__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ bool add_overwrite__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
auto create_func = [&](Value *ptr) {
new ((void *)ptr) Value(std::forward<ForwardValue>(value));
@@ -1122,7 +1123,7 @@ class Map {
}
template<typename ForwardKey>
- const Value *lookup_ptr__impl(const ForwardKey &key, uint32_t hash) const
+ const Value *lookup_ptr__impl(const ForwardKey &key, uint64_t hash) const
{
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -1136,9 +1137,9 @@ class Map {
}
template<typename ForwardKey>
- uint32_t count_collisions__impl(const ForwardKey &key, uint32_t hash) const
+ int64_t count_collisions__impl(const ForwardKey &key, uint64_t hash) const
{
- uint32_t collisions = 0;
+ int64_t collisions = 0;
MAP_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -1171,9 +1172,9 @@ template<typename Key, typename Value> class StdUnorderedMapWrapper {
MapType map_;
public:
- uint32_t size() const
+ int64_t size() const
{
- return (uint32_t)map_.size();
+ return (int64_t)map_.size();
}
bool is_empty() const
@@ -1181,7 +1182,7 @@ template<typename Key, typename Value> class StdUnorderedMapWrapper {
return map_.empty();
}
- void reserve(uint32_t n)
+ void reserve(int64_t n)
{
map_.reserve(n);
}
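
A usage sketch of the Map operations whose implementations appear above (add_new, add, add_overwrite, lookup_or_add, size):

```cpp
#include <string>

#include "BLI_map.hh"

void map_example()
{
  blender::Map<std::string, int> map;

  map.add_new("a", 1);       /* The key must not exist yet. */
  map.add("a", 2);           /* Returns false; the stored value stays 1. */
  map.add_overwrite("a", 2); /* The stored value becomes 2. */

  /* Inserts 10 only when the key is missing, then returns a reference. */
  int &value = map.lookup_or_add("b", 10);
  value += 1;

  const int64_t size = map.size(); /* 2, as an int64_t now. */
  (void)size;
}
```
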
diff --git a/source/blender/blenlib/BLI_map_slots.hh b/source/blender/blenlib/BLI_map_slots.hh
index ff3ed34eb9d..b5360795a13 100644
--- a/source/blender/blenlib/BLI_map_slots.hh
+++ b/source/blender/blenlib/BLI_map_slots.hh
@@ -155,7 +155,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* Returns the hash of the currently stored key. In this simple map slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
- template<typename Hash> uint32_t get_hash(const Hash &hash)
+ template<typename Hash> uint64_t get_hash(const Hash &hash)
{
BLI_assert(this->is_occupied());
return hash(*key_buffer_);
@@ -165,7 +165,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* Move the other slot into this slot and destruct it. We do destruction here, because this way
* we can avoid a comparison with the state, since we know the slot is occupied.
*/
- void relocate_occupied_here(SimpleMapSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(SimpleMapSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -181,7 +181,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* key. The hash can be used by other slot implementations to determine inequality faster.
*/
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const
{
if (state_ == Occupied) {
return is_equal(key, *key_buffer_);
@@ -194,7 +194,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* constructed by calling the constructor with the given key/value as parameter.
*/
template<typename ForwardKey, typename ForwardValue>
- void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ void occupy(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
BLI_assert(!this->is_occupied());
this->occupy_without_value(std::forward<ForwardKey>(key), hash);
@@ -205,7 +205,7 @@ template<typename Key, typename Value> class SimpleMapSlot {
* Change the state of this slot from empty/removed to occupied, but leave the value
* uninitialized. The caller is responsible for constructing the value afterwards.
*/
- template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
state_ = Occupied;
@@ -292,13 +292,13 @@ template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot
return KeyInfo::is_empty(key_);
}
- template<typename Hash> uint32_t get_hash(const Hash &hash)
+ template<typename Hash> uint64_t get_hash(const Hash &hash)
{
BLI_assert(this->is_occupied());
return hash(key_);
}
- void relocate_occupied_here(IntrusiveMapSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(IntrusiveMapSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -309,14 +309,14 @@ template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot
}
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const
{
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
return is_equal(key, key_);
}
template<typename ForwardKey, typename ForwardValue>
- void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
+ void occupy(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
@@ -324,7 +324,7 @@ template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot
new (&value_buffer_) Value(std::forward<ForwardValue>(value));
}
- template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
diff --git a/source/blender/blenlib/BLI_memory_utils.hh b/source/blender/blenlib/BLI_memory_utils.hh
index b73e0e95312..5c692850017 100644
--- a/source/blender/blenlib/BLI_memory_utils.hh
+++ b/source/blender/blenlib/BLI_memory_utils.hh
@@ -43,8 +43,10 @@ namespace blender {
* After:
* ptr: uninitialized
*/
-template<typename T> void destruct_n(T *ptr, uint n)
+template<typename T> void destruct_n(T *ptr, int64_t n)
{
+ BLI_assert(n >= 0);
+
static_assert(std::is_nothrow_destructible_v<T>,
"This should be true for all types. Destructors are noexcept by default.");
@@ -54,7 +56,7 @@ template<typename T> void destruct_n(T *ptr, uint n)
return;
}
- for (uint i = 0; i < n; i++) {
+ for (int64_t i = 0; i < n; i++) {
ptr[i].~T();
}
}
@@ -70,15 +72,17 @@ template<typename T> void destruct_n(T *ptr, uint n)
* After:
* ptr: initialized
*/
-template<typename T> void default_construct_n(T *ptr, uint n)
+template<typename T> void default_construct_n(T *ptr, int64_t n)
{
+ BLI_assert(n >= 0);
+
/* This is not strictly necessary, because the loop below will be optimized away anyway. It is
* nice to make this behavior explicit, though. */
if (std::is_trivially_constructible_v<T>) {
return;
}
- uint current = 0;
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(ptr + current)) T;
@@ -102,9 +106,11 @@ template<typename T> void default_construct_n(T *ptr, uint n)
* src: initialized
* dst: initialized
*/
-template<typename T> void initialized_copy_n(const T *src, uint n, T *dst)
+template<typename T> void initialized_copy_n(const T *src, int64_t n, T *dst)
{
- for (uint i = 0; i < n; i++) {
+ BLI_assert(n >= 0);
+
+ for (int64_t i = 0; i < n; i++) {
dst[i] = src[i];
}
}
@@ -121,9 +127,11 @@ template<typename T> void initialized_copy_n(const T *src, uint n, T *dst)
* src: initialized
* dst: initialized
*/
-template<typename T> void uninitialized_copy_n(const T *src, uint n, T *dst)
+template<typename T> void uninitialized_copy_n(const T *src, int64_t n, T *dst)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(src[current]);
@@ -147,9 +155,12 @@ template<typename T> void uninitialized_copy_n(const T *src, uint n, T *dst)
* src: initialized
* dst: initialized
*/
-template<typename From, typename To> void uninitialized_convert_n(const From *src, uint n, To *dst)
+template<typename From, typename To>
+void uninitialized_convert_n(const From *src, int64_t n, To *dst)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) To((To)src[current]);
@@ -173,9 +184,11 @@ template<typename From, typename To> void uninitialized_convert_n(const From *src, uint n, To *dst)
* src: initialized, moved-from
* dst: initialized
*/
-template<typename T> void initialized_move_n(T *src, uint n, T *dst)
+template<typename T> void initialized_move_n(T *src, int64_t n, T *dst)
{
- for (uint i = 0; i < n; i++) {
+ BLI_assert(n >= 0);
+
+ for (int64_t i = 0; i < n; i++) {
dst[i] = std::move(src[i]);
}
}
@@ -192,9 +205,11 @@ template<typename T> void initialized_move_n(T *src, uint n, T *dst)
* src: initialized, moved-from
* dst: initialized
*/
-template<typename T> void uninitialized_move_n(T *src, uint n, T *dst)
+template<typename T> void uninitialized_move_n(T *src, int64_t n, T *dst)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(std::move(src[current]));
@@ -219,8 +234,10 @@ template<typename T> void uninitialized_move_n(T *src, uint n, T *dst)
* src: uninitialized
* dst: initialized
*/
-template<typename T> void initialized_relocate_n(T *src, uint n, T *dst)
+template<typename T> void initialized_relocate_n(T *src, int64_t n, T *dst)
{
+ BLI_assert(n >= 0);
+
initialized_move_n(src, n, dst);
destruct_n(src, n);
}
@@ -238,8 +255,10 @@ template<typename T> void initialized_relocate_n(T *src, uint n, T *dst)
* src: uninitialized
* dst: initialized
*/
-template<typename T> void uninitialized_relocate_n(T *src, uint n, T *dst)
+template<typename T> void uninitialized_relocate_n(T *src, int64_t n, T *dst)
{
+ BLI_assert(n >= 0);
+
uninitialized_move_n(src, n, dst);
destruct_n(src, n);
}
@@ -254,9 +273,11 @@ template<typename T> void uninitialized_relocate_n(T *src, uint n, T *dst)
* After:
* dst: initialized
*/
-template<typename T> void initialized_fill_n(T *dst, uint n, const T &value)
+template<typename T> void initialized_fill_n(T *dst, int64_t n, const T &value)
{
- for (uint i = 0; i < n; i++) {
+ BLI_assert(n >= 0);
+
+ for (int64_t i = 0; i < n; i++) {
dst[i] = value;
}
}
@@ -271,9 +292,11 @@ template<typename T> void initialized_fill_n(T *dst, uint n, const T &value)
* After:
* dst: initialized
*/
-template<typename T> void uninitialized_fill_n(T *dst, uint n, const T &value)
+template<typename T> void uninitialized_fill_n(T *dst, int64_t n, const T &value)
{
- uint current = 0;
+ BLI_assert(n >= 0);
+
+ int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(value);
@@ -334,9 +357,9 @@ template<size_t Size, size_t Alignment> class alignas(Alignment) AlignedBuffer {
* lifetime of the object they are embedded in. It's used by containers with small buffer
* optimization and hash table implementations.
*/
-template<typename T, size_t Size = 1> class TypedBuffer {
+template<typename T, int64_t Size = 1> class TypedBuffer {
private:
- AlignedBuffer<sizeof(T) * Size, alignof(T)> buffer_;
+ AlignedBuffer<sizeof(T) * (size_t)Size, alignof(T)> buffer_;
public:
operator T *()
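
The helpers above all follow the same pattern: the comment documents the initialization state of src and dst before and after the call, the count is now a signed int64_t asserted non-negative, and the uninitialized_* variants destruct any partially constructed elements if a constructor throws. A minimal sketch (not part of this patch) of a container grow step composed from them, assuming BLI_memory_utils.hh is available; `GrowBuffer` is a hypothetical illustration type:

#include <cstddef>
#include <cstdint>
#include <new>

#include "BLI_memory_utils.hh"

template<typename T> struct GrowBuffer {
  T *data = nullptr;
  int64_t size = 0;

  void grow_to(const int64_t new_capacity)
  {
    /* Allocate raw, uninitialized storage for the new buffer. */
    T *new_data = static_cast<T *>(
        ::operator new(sizeof(T) * (std::size_t)new_capacity, std::align_val_t(alignof(T))));
    /* Move-construct the old elements into it and destruct the originals;
     * uninitialized_relocate_n() is exactly this pair of operations. */
    blender::uninitialized_relocate_n(data, size, new_data);
    ::operator delete(data, std::align_val_t(alignof(T)));
    data = new_data;
  }
};
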
diff --git a/source/blender/blenlib/BLI_probing_strategies.hh b/source/blender/blenlib/BLI_probing_strategies.hh
index d2b16ac3516..0e5338fa6ed 100644
--- a/source/blender/blenlib/BLI_probing_strategies.hh
+++ b/source/blender/blenlib/BLI_probing_strategies.hh
@@ -25,17 +25,17 @@
* values based on an initial hash value.
*
* A probing strategy has to implement the following methods:
- * - Constructor(uint32_t hash): Start a new probing sequence based on the given hash.
- * - get() const -> uint32_t: Get the current value in the sequence.
+ * - Constructor(uint64_t hash): Start a new probing sequence based on the given hash.
+ * - get() const -> uint64_t: Get the current value in the sequence.
* - next() -> void: Update the internal state, so that the next value can be accessed with get().
- * - linear_steps() -> uint32_t: Returns number of linear probing steps that should be done.
+ * - linear_steps() -> int64_t: Returns number of linear probing steps that should be done.
*
* Using linear probing steps between larger jumps can result in better performance, due to
* improved cache usage. It's a way of getting the benefits of linear probing without the
* clustering issues. However, more linear steps can also make things slower when the initial hash
* produces many collisions.
*
- * Every probing strategy has to guarantee, that every possible uint32_t is returned eventually.
+ * Every probing strategy has to guarantee that every possible uint64_t is returned eventually.
* This is necessary for correctness. If this is not the case, empty slots might not be found.
*
* The SLOT_PROBING_BEGIN and SLOT_PROBING_END macros can be used to implement a loop that iterates
@@ -65,10 +65,10 @@ namespace blender {
*/
class LinearProbingStrategy {
private:
- uint32_t hash_;
+ uint64_t hash_;
public:
- LinearProbingStrategy(const uint32_t hash) : hash_(hash)
+ LinearProbingStrategy(const uint64_t hash) : hash_(hash)
{
}
@@ -77,12 +77,12 @@ class LinearProbingStrategy {
hash_++;
}
- uint32_t get() const
+ uint64_t get() const
{
return hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return UINT32_MAX;
}
@@ -101,12 +101,12 @@ class LinearProbingStrategy {
*/
class QuadraticProbingStrategy {
private:
- uint32_t original_hash_;
- uint32_t current_hash_;
- uint32_t iteration_;
+ uint64_t original_hash_;
+ uint64_t current_hash_;
+ uint64_t iteration_;
public:
- QuadraticProbingStrategy(const uint32_t hash)
+ QuadraticProbingStrategy(const uint64_t hash)
: original_hash_(hash), current_hash_(hash), iteration_(1)
{
}
@@ -117,12 +117,12 @@ class QuadraticProbingStrategy {
iteration_++;
}
- uint32_t get() const
+ uint64_t get() const
{
return current_hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return 1;
}
@@ -138,13 +138,13 @@ class QuadraticProbingStrategy {
* PreShuffle: When true, the initial call to next() will be done by the constructor. This can help
* when the hash function has put little information into the lower bits.
*/
-template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy {
+template<uint64_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy {
private:
- uint32_t hash_;
- uint32_t perturb_;
+ uint64_t hash_;
+ uint64_t perturb_;
public:
- PythonProbingStrategy(const uint32_t hash) : hash_(hash), perturb_(hash)
+ PythonProbingStrategy(const uint64_t hash) : hash_(hash), perturb_(hash)
{
if (PreShuffle) {
this->next();
@@ -157,12 +157,12 @@ template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy {
hash_ = 5 * hash_ + 1 + perturb_;
}
- uint32_t get() const
+ uint64_t get() const
{
return hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return LinearSteps;
}
@@ -173,13 +173,13 @@ template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingS
* method. This way more bits are taken into account earlier. After a couple of collisions (that
* should happen rarely), it will fall back to a sequence that hits every slot.
*/
-template<uint32_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy {
+template<uint64_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy {
private:
- uint32_t hash_;
- uint32_t perturb_;
+ uint64_t hash_;
+ uint64_t perturb_;
public:
- ShuffleProbingStrategy(const uint32_t hash) : hash_(hash), perturb_(hash)
+ ShuffleProbingStrategy(const uint64_t hash) : hash_(hash), perturb_(hash)
{
if (PreShuffle) {
this->next();
@@ -197,12 +197,12 @@ template<uint32_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy {
}
}
- uint32_t get() const
+ uint64_t get() const
{
return hash_;
}
- uint32_t linear_steps() const
+ int64_t linear_steps() const
{
return LinearSteps;
}
@@ -233,10 +233,10 @@ using DefaultProbingStrategy = PythonProbingStrategy<>;
#define SLOT_PROBING_BEGIN(PROBING_STRATEGY, HASH, MASK, R_SLOT_INDEX) \
PROBING_STRATEGY probing_strategy(HASH); \
do { \
- uint32_t linear_offset = 0; \
- uint32_t current_hash = probing_strategy.get(); \
+ int64_t linear_offset = 0; \
+ uint64_t current_hash = probing_strategy.get(); \
do { \
- uint32_t R_SLOT_INDEX = (current_hash + linear_offset) & MASK;
+ int64_t R_SLOT_INDEX = (int64_t)((current_hash + (uint64_t)linear_offset) & MASK);
#define SLOT_PROBING_END() \
} while (++linear_offset < probing_strategy.linear_steps()); \
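
The interface documented at the top of this file is deliberately small: a constructor taking the hash, get(), next(), and linear_steps(). A hedged sketch of a conforming strategy (`StepProbingStrategy` is hypothetical and not part of this patch); an odd additive step is coprime to any power-of-two table size, so the sequence eventually visits every uint64_t, as the correctness requirement above demands:

#include <cstdint>

class StepProbingStrategy {
 private:
  uint64_t hash_;

 public:
  StepProbingStrategy(const uint64_t hash) : hash_(hash)
  {
  }

  void next()
  {
    /* Adding an odd constant cycles through all 2^64 values. */
    hash_ += 7;
  }

  uint64_t get() const
  {
    return hash_;
  }

  int64_t linear_steps() const
  {
    return 2;
  }
};

Such a strategy could be passed to SLOT_PROBING_BEGIN via the PROBING_STRATEGY parameter, just like the built-in ones.
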
diff --git a/source/blender/blenlib/BLI_rand.hh b/source/blender/blenlib/BLI_rand.hh
index bfc4d276165..612ac0bbe19 100644
--- a/source/blender/blenlib/BLI_rand.hh
+++ b/source/blender/blenlib/BLI_rand.hh
@@ -86,7 +86,7 @@ class RandomNumberGenerator {
/**
* Simulate getting \a n random values.
*/
- void skip(uint n)
+ void skip(int64_t n)
{
while (n--) {
this->step();
diff --git a/source/blender/blenlib/BLI_resource_collector.hh b/source/blender/blenlib/BLI_resource_collector.hh
index 672a1269962..10d610da618 100644
--- a/source/blender/blenlib/BLI_resource_collector.hh
+++ b/source/blender/blenlib/BLI_resource_collector.hh
@@ -51,7 +51,7 @@ class ResourceCollector : NonCopyable, NonMovable {
~ResourceCollector()
{
/* Free in reversed order. */
- for (uint i = m_resources.size(); i--;) {
+ for (int64_t i = m_resources.size(); i--;) {
ResourceData &data = m_resources[i];
data.free(data.data);
}
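
The reversed loop above relies on the postfix decrement: the condition `i--` tests the old value and then decrements, so the body sees size() - 1 down to 0 and the loop exits once the tested value reaches 0. With the new signed int64_t index this stays well defined at both ends. A standalone sketch of the idiom:

#include <cstdint>
#include <cstdio>

int main()
{
  const int64_t size = 3;
  /* The body runs for i = 2, 1, 0; the test consumes the value 3 first. */
  for (int64_t i = size; i--;) {
    std::printf("%lld\n", (long long)i);
  }
  return 0;
}
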
diff --git a/source/blender/blenlib/BLI_set.hh b/source/blender/blenlib/BLI_set.hh
index c5096f84c80..80e4858bbb9 100644
--- a/source/blender/blenlib/BLI_set.hh
+++ b/source/blender/blenlib/BLI_set.hh
@@ -93,7 +93,7 @@ template<
* When Key is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint32_t InlineBufferCapacity = (sizeof(Key) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(Key) < 100) ? 4 : 0,
/**
* The strategy used to deal with collisions. They are defined in BLI_probing_strategies.hh.
*/
@@ -128,20 +128,20 @@ class Set {
* Slots are either empty, occupied or removed. The number of occupied slots can be computed by
* subtracting the removed slots from the occupied-and-removed slots.
*/
- uint32_t removed_slots_;
- uint32_t occupied_and_removed_slots_;
+ int64_t removed_slots_;
+ int64_t occupied_and_removed_slots_;
/**
* The maximum number of slots that can be used (either occupied or removed) until the set has to
* grow. This is the total number of slots times the max load factor.
*/
- uint32_t usable_slots_;
+ int64_t usable_slots_;
/**
* The number of slots minus one. This is a bit mask that can be used to turn any integer into a
* valid slot index efficiently.
*/
- uint32_t slot_mask_;
+ uint64_t slot_mask_;
/** This is called to hash incoming keys. */
Hash hash_;
@@ -384,11 +384,11 @@ class Set {
class Iterator {
private:
const Slot *slots_;
- uint32_t total_slots_;
- uint32_t current_slot_;
+ int64_t total_slots_;
+ int64_t current_slot_;
public:
- Iterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot)
+ Iterator(const Slot *slots, int64_t total_slots, int64_t current_slot)
: slots_(slots), total_slots_(total_slots), current_slot_(current_slot)
{
}
@@ -418,7 +418,7 @@ class Set {
Iterator begin() const
{
- for (uint32_t i = 0; i < slots_.size(); i++) {
+ for (int64_t i = 0; i < slots_.size(); i++) {
if (slots_[i].is_occupied()) {
return Iterator(slots_.data(), slots_.size(), i);
}
@@ -444,7 +444,7 @@ class Set {
* Get the number of collisions that the probing strategy has to go through to find the key or
* determine that it is not in the set.
*/
- uint32_t count_collisions(const Key &key) const
+ int64_t count_collisions(const Key &key) const
{
return this->count_collisions__impl(key, hash_(key));
}
@@ -470,7 +470,7 @@ class Set {
/**
* Returns the number of keys stored in the set.
*/
- uint32_t size() const
+ int64_t size() const
{
return occupied_and_removed_slots_ - removed_slots_;
}
@@ -486,7 +486,7 @@ class Set {
/**
* Returns the number of available slots. This is mostly for debugging purposes.
*/
- uint32_t capacity() const
+ int64_t capacity() const
{
return slots_.size();
}
@@ -494,7 +494,7 @@ class Set {
/**
* Returns the amount of removed slots in the set. This is mostly for debugging purposes.
*/
- uint32_t removed_amount() const
+ int64_t removed_amount() const
{
return removed_slots_;
}
@@ -502,7 +502,7 @@ class Set {
/**
* Returns the bytes required per element. This is mostly for debugging purposes.
*/
- uint32_t size_per_element() const
+ int64_t size_per_element() const
{
return sizeof(Slot);
}
@@ -511,7 +511,7 @@ class Set {
* Returns the approximate memory requirements of the set in bytes. This is more accurate for
* larger sets.
*/
- uint32_t size_in_bytes() const
+ int64_t size_in_bytes() const
{
return sizeof(Slot) * slots_.size();
}
@@ -520,7 +520,7 @@ class Set {
* Potentially resize the set such that it can hold the specified number of keys without another
* grow operation.
*/
- void reserve(const uint32_t n)
+ void reserve(const int64_t n)
{
if (usable_slots_ < n) {
this->realloc_and_reinsert(n);
@@ -554,12 +554,13 @@ class Set {
}
private:
- BLI_NOINLINE void realloc_and_reinsert(const uint32_t min_usable_slots)
+ BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots)
{
- uint32_t total_slots, usable_slots;
+ int64_t total_slots, usable_slots;
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
- const uint32_t new_slot_mask = total_slots - 1;
+ BLI_assert(total_slots >= 1);
+ const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
/**
* Optimize the case when the set was empty beforehand. We can avoid some copies here.
@@ -595,9 +596,9 @@ class Set {
void add_after_grow_and_destruct_old(Slot &old_slot,
SlotArray &new_slots,
- const uint32_t new_slot_mask)
+ const uint64_t new_slot_mask)
{
- const uint32_t hash = old_slot.get_hash(Hash());
+ const uint64_t hash = old_slot.get_hash(Hash());
SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) {
Slot &slot = new_slots[slot_index];
@@ -610,7 +611,7 @@ class Set {
}
template<typename ForwardKey>
- bool contains__impl(const ForwardKey &key, const uint32_t hash) const
+ bool contains__impl(const ForwardKey &key, const uint64_t hash) const
{
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -624,7 +625,7 @@ class Set {
}
template<typename ForwardKey>
- const Key &lookup_key__impl(const ForwardKey &key, const uint32_t hash) const
+ const Key &lookup_key__impl(const ForwardKey &key, const uint64_t hash) const
{
BLI_assert(this->contains_as(key));
@@ -637,7 +638,7 @@ class Set {
}
template<typename ForwardKey>
- const Key *lookup_key_ptr__impl(const ForwardKey &key, const uint32_t hash) const
+ const Key *lookup_key_ptr__impl(const ForwardKey &key, const uint64_t hash) const
{
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -650,7 +651,7 @@ class Set {
SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint64_t hash)
{
BLI_assert(!this->contains_as(key));
@@ -666,7 +667,7 @@ class Set {
SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint64_t hash)
{
this->ensure_can_add();
@@ -683,7 +684,7 @@ class Set {
SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint32_t hash)
+ template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint64_t hash)
{
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -699,7 +700,7 @@ class Set {
}
template<typename ForwardKey>
- void remove_contained__impl(const ForwardKey &key, const uint32_t hash)
+ void remove_contained__impl(const ForwardKey &key, const uint64_t hash)
{
BLI_assert(this->contains_as(key));
removed_slots_++;
@@ -714,9 +715,9 @@ class Set {
}
template<typename ForwardKey>
- uint32_t count_collisions__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t count_collisions__impl(const ForwardKey &key, const uint64_t hash) const
{
- uint32_t collisions = 0;
+ int64_t collisions = 0;
SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash)) {
@@ -749,9 +750,9 @@ template<typename Key> class StdUnorderedSetWrapper {
SetType set_;
public:
- uint32_t size() const
+ int64_t size() const
{
- return (uint32_t)set_.size();
+ return (int64_t)set_.size();
}
bool is_empty() const
@@ -759,7 +760,7 @@ template<typename Key> class StdUnorderedSetWrapper {
return set_.empty();
}
- void reserve(uint32_t n)
+ void reserve(int64_t n)
{
set_.reserve(n);
}
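
With this change the whole Set API trades in signed sizes: size(), capacity(), reserve() and the collision counters all use int64_t, while slot_mask_ alone stays unsigned because it is a bit mask. A small usage sketch (assuming BLI_set.hh is available):

#include <cstdint>
#include "BLI_set.hh"

static void set_example()
{
  blender::Set<int> values;
  values.reserve(100);                    /* reserve() now takes int64_t. */
  values.add(3);                          /* true: newly added. */
  const bool added_again = values.add(3); /* false: already present. */
  const int64_t count = values.size();    /* 1, as int64_t. */
  (void)added_again;
  (void)count;
}
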
diff --git a/source/blender/blenlib/BLI_set_slots.hh b/source/blender/blenlib/BLI_set_slots.hh
index d3891e78b52..b78ed37f534 100644
--- a/source/blender/blenlib/BLI_set_slots.hh
+++ b/source/blender/blenlib/BLI_set_slots.hh
@@ -133,7 +133,7 @@ template<typename Key> class SimpleSetSlot {
* Return the hash of the currently stored key. In this simple set slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
- template<typename Hash> uint32_t get_hash(const Hash &hash) const
+ template<typename Hash> uint64_t get_hash(const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(*key_buffer_);
@@ -143,7 +143,7 @@ template<typename Key> class SimpleSetSlot {
* Move the other slot into this slot and destruct it. We do destruction here because this way
* we can avoid a comparison with the state, since we know the slot is occupied.
*/
- void relocate_occupied_here(SimpleSetSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(SimpleSetSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -157,7 +157,7 @@ template<typename Key> class SimpleSetSlot {
* key. The hash is used by other slot implementations to determine inequality faster.
*/
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const
{
if (state_ == Occupied) {
return is_equal(key, *key_buffer_);
@@ -169,7 +169,7 @@ template<typename Key> class SimpleSetSlot {
* Change the state of this slot from empty/removed to occupied. The key has to be constructed
* by calling the constructor with the given key as a parameter.
*/
- template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy(ForwardKey &&key, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
state_ = Occupied;
@@ -199,7 +199,7 @@ template<typename Key> class HashedSetSlot {
Removed = 2,
};
- uint32_t hash_;
+ uint64_t hash_;
State state_;
TypedBuffer<Key> key_buffer_;
@@ -254,13 +254,13 @@ template<typename Key> class HashedSetSlot {
return state_ == Empty;
}
- template<typename Hash> uint32_t get_hash(const Hash &UNUSED(hash)) const
+ template<typename Hash> uint64_t get_hash(const Hash &UNUSED(hash)) const
{
BLI_assert(this->is_occupied());
return hash_;
}
- void relocate_occupied_here(HashedSetSlot &other, const uint32_t hash)
+ void relocate_occupied_here(HashedSetSlot &other, const uint64_t hash)
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -271,7 +271,7 @@ template<typename Key> class HashedSetSlot {
}
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint32_t hash) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint64_t hash) const
{
/* hash_ might be uninitialized here, but that is ok. */
if (hash_ == hash) {
@@ -282,7 +282,7 @@ template<typename Key> class HashedSetSlot {
return false;
}
- template<typename ForwardKey> void occupy(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> void occupy(ForwardKey &&key, const uint64_t hash)
{
BLI_assert(!this->is_occupied());
state_ = Occupied;
@@ -336,13 +336,13 @@ template<typename Key, typename KeyInfo> class IntrusiveSetSlot {
return KeyInfo::is_empty(key_);
}
- template<typename Hash> uint32_t get_hash(const Hash &hash) const
+ template<typename Hash> uint64_t get_hash(const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(key_);
}
- void relocate_occupied_here(IntrusiveSetSlot &other, const uint32_t UNUSED(hash))
+ void relocate_occupied_here(IntrusiveSetSlot &other, const uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -351,13 +351,13 @@ template<typename Key, typename KeyInfo> class IntrusiveSetSlot {
}
template<typename ForwardKey, typename IsEqual>
- bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint32_t UNUSED(hash)) const
+ bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint64_t UNUSED(hash)) const
{
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
return is_equal(key_, key);
}
- template<typename ForwardKey> void occupy(ForwardKey &&key, const uint32_t UNUSED(hash))
+ template<typename ForwardKey> void occupy(ForwardKey &&key, const uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
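
IntrusiveSetSlot stores the key directly and delegates the empty/removed encoding to a KeyInfo policy. Only is_empty() and is_not_empty_or_removed() are visible in the calls above, so the remaining member names in this sketch are assumptions about the full contract. A hedged sketch for pointer keys, using low sentinel addresses for the two special states:

#include <cstdint>

template<typename T> struct PointerKeyInfoSketch {
  static T *get_empty()
  {
    return (T *)0;
  }
  static bool is_empty(T *key)
  {
    return key == (T *)0;
  }
  static bool is_not_empty_or_removed(T *key)
  {
    /* Addresses 0 and 1 are reserved as sentinels. */
    return (uintptr_t)key > 1;
  }
  static void remove(T *&key)
  {
    key = (T *)1; /* Sentinel for the removed state. */
  }
};
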
diff --git a/source/blender/blenlib/BLI_span.hh b/source/blender/blenlib/BLI_span.hh
index 57ef9ce9eb6..67bb32304de 100644
--- a/source/blender/blenlib/BLI_span.hh
+++ b/source/blender/blenlib/BLI_span.hh
@@ -88,7 +88,7 @@ namespace blender {
template<typename T> class Span {
private:
const T *start_ = nullptr;
- uint size_ = 0;
+ int64_t size_ = 0;
public:
/**
@@ -96,13 +96,15 @@ template<typename T> class Span {
*/
Span() = default;
- Span(const T *start, uint size) : start_(start), size_(size)
+ Span(const T *start, int64_t size) : start_(start), size_(size)
{
+ BLI_assert(size >= 0);
}
template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr>
- Span(const U *start, uint size) : start_((const T *)start), size_(size)
+ Span(const U *start, int64_t size) : start_((const T *)start), size_(size)
{
+ BLI_assert(size >= 0);
}
/**
@@ -116,11 +118,11 @@ template<typename T> class Span {
* Span<int> span = {1, 2, 3, 4};
* call_function_with_array(span);
*/
- Span(const std::initializer_list<T> &list) : Span(list.begin(), (uint)list.size())
+ Span(const std::initializer_list<T> &list) : Span(list.begin(), (int64_t)list.size())
{
}
- Span(const std::vector<T> &vector) : Span(vector.data(), (uint)vector.size())
+ Span(const std::vector<T> &vector) : Span(vector.data(), (int64_t)vector.size())
{
}
@@ -142,8 +144,10 @@ template<typename T> class Span {
* Returns a contiguous part of the array. This invokes undefined behavior when the slice does
* not stay within the bounds of the array.
*/
- Span slice(uint start, uint size) const
+ Span slice(int64_t start, int64_t size) const
{
+ BLI_assert(start >= 0);
+ BLI_assert(size >= 0);
BLI_assert(start + size <= this->size() || size == 0);
return Span(start_ + start, size);
}
@@ -157,8 +161,9 @@ template<typename T> class Span {
* Returns a new Span with n elements removed from the beginning. This invokes undefined
* behavior when the array is too small.
*/
- Span drop_front(uint n) const
+ Span drop_front(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
}
@@ -167,8 +172,9 @@ template<typename T> class Span {
* Returns a new Span with n elements removed from the end. This invokes undefined
* behavior when the array is too small.
*/
- Span drop_back(uint n) const
+ Span drop_back(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
}
@@ -177,8 +183,9 @@ template<typename T> class Span {
* Returns a new Span that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
- Span take_front(uint n) const
+ Span take_front(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(0, n);
}
@@ -187,8 +194,9 @@ template<typename T> class Span {
* Returns a new Span that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
- Span take_back(uint n) const
+ Span take_back(int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= this->size());
return this->slice(this->size() - n, n);
}
@@ -216,8 +224,9 @@ template<typename T> class Span {
* Access an element in the array. This invokes undefined behavior when the index is out of
* bounds.
*/
- const T &operator[](uint index) const
+ const T &operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return start_[index];
}
@@ -225,7 +234,7 @@ template<typename T> class Span {
/**
* Returns the number of elements in the referenced array.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -241,7 +250,7 @@ template<typename T> class Span {
/**
* Returns the number of bytes referenced by this Span.
*/
- uint size_in_bytes() const
+ int64_t size_in_bytes() const
{
return sizeof(T) * size_;
}
@@ -273,9 +282,9 @@ template<typename T> class Span {
* Does a linear search to count how often the value is in the array.
* Returns the number of occurrences.
*/
- uint count(const T &value) const
+ int64_t count(const T &value) const
{
- uint counter = 0;
+ int64_t counter = 0;
for (const T &element : *this) {
if (element == value) {
counter++;
@@ -308,9 +317,9 @@ template<typename T> class Span {
* Returns the element at the given index. If the index is out of range, return the fallback
* value.
*/
- T get(uint index, const T &fallback) const
+ T get(int64_t index, const T &fallback) const
{
- if (index < size_) {
+ if (index < size_ && index >= 0) {
return start_[index];
}
return fallback;
@@ -326,9 +335,9 @@ template<typename T> class Span {
* changed. */
BLI_assert(size_ < 1000);
- for (uint i = 0; i < size_; i++) {
+ for (int64_t i = 0; i < size_; i++) {
const T &value = start_[i];
- for (uint j = i + 1; j < size_; j++) {
+ for (int64_t j = i + 1; j < size_; j++) {
if (value == start_[j]) {
return true;
}
@@ -348,7 +357,7 @@ template<typename T> class Span {
* changed. */
BLI_assert(size_ < 1000);
- for (uint i = 0; i < size_; i++) {
+ for (int64_t i = 0; i < size_; i++) {
const T &value = start_[i];
if (other.contains(value)) {
return true;
@@ -361,19 +370,19 @@ template<typename T> class Span {
* Returns the index of the first occurrence of the given value. This invokes undefined behavior
* when the value is not in the array.
*/
- uint first_index(const T &search_value) const
+ int64_t first_index(const T &search_value) const
{
- const int index = this->first_index_try(search_value);
+ const int64_t index = this->first_index_try(search_value);
BLI_assert(index >= 0);
- return (uint)index;
+ return (int64_t)index;
}
/**
* Returns the index of the first occurrence of the given value or -1 if it does not exist.
*/
- int first_index_try(const T &search_value) const
+ int64_t first_index_try(const T &search_value) const
{
- for (uint i = 0; i < size_; i++) {
+ for (int64_t i = 0; i < size_; i++) {
if (start_[i] == search_value) {
return i;
}
@@ -396,7 +405,7 @@ template<typename T> class Span {
template<typename NewT> Span<NewT> cast() const
{
BLI_assert((size_ * sizeof(T)) % sizeof(NewT) == 0);
- uint new_size = size_ * sizeof(T) / sizeof(NewT);
+ int64_t new_size = size_ * sizeof(T) / sizeof(NewT);
return Span<NewT>(reinterpret_cast<const NewT *>(start_), new_size);
}
@@ -431,12 +440,12 @@ template<typename T> class Span {
template<typename T> class MutableSpan {
private:
T *start_;
- uint size_;
+ int64_t size_;
public:
MutableSpan() = default;
- MutableSpan(T *start, const uint size) : start_(start), size_(size)
+ MutableSpan(T *start, const int64_t size) : start_(start), size_(size)
{
}
@@ -456,7 +465,7 @@ template<typename T> class MutableSpan {
/**
* Returns the number of elements in the array.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -473,9 +482,9 @@ template<typename T> class MutableSpan {
* Replace a subset of all elements with the given value. This invokes undefined behavior when
* one of the indices is out of bounds.
*/
- void fill_indices(Span<uint> indices, const T &value)
+ void fill_indices(Span<int64_t> indices, const T &value)
{
- for (uint i : indices) {
+ for (int64_t i : indices) {
BLI_assert(i < size_);
start_[i] = value;
}
@@ -500,7 +509,7 @@ template<typename T> class MutableSpan {
return start_ + size_;
}
- T &operator[](const uint index) const
+ T &operator[](const int64_t index) const
{
BLI_assert(index < this->size());
return start_[index];
@@ -510,7 +519,7 @@ template<typename T> class MutableSpan {
* Returns a contiguous part of the array. This invokes undefined behavior when the slice would
* go out of bounds.
*/
- MutableSpan slice(const uint start, const uint length) const
+ MutableSpan slice(const int64_t start, const int64_t length) const
{
BLI_assert(start + length <= this->size());
return MutableSpan(start_ + start, length);
@@ -520,7 +529,7 @@ template<typename T> class MutableSpan {
* Returns a new MutableSpan with n elements removed from the beginning. This invokes
* undefined behavior when the array is too small.
*/
- MutableSpan drop_front(const uint n) const
+ MutableSpan drop_front(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
@@ -530,7 +539,7 @@ template<typename T> class MutableSpan {
* Returns a new MutableSpan with n elements removed from the end. This invokes undefined
* behavior when the array is too small.
*/
- MutableSpan drop_back(const uint n) const
+ MutableSpan drop_back(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
@@ -540,7 +549,7 @@ template<typename T> class MutableSpan {
* Returns a new MutableSpan that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
- MutableSpan take_front(const uint n) const
+ MutableSpan take_front(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(0, n);
@@ -550,7 +559,7 @@ template<typename T> class MutableSpan {
* Return a new MutableSpan that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
- MutableSpan take_back(const uint n) const
+ MutableSpan take_back(const int64_t n) const
{
BLI_assert(n <= this->size());
return this->slice(this->size() - n, n);
@@ -590,7 +599,7 @@ template<typename T> class MutableSpan {
template<typename NewT> MutableSpan<NewT> cast() const
{
BLI_assert((size_ * sizeof(T)) % sizeof(NewT) == 0);
- uint new_size = size_ * sizeof(T) / sizeof(NewT);
+ int64_t new_size = size_ * sizeof(T) / sizeof(NewT);
return MutableSpan<NewT>(reinterpret_cast<NewT *>(start_), new_size);
}
};
@@ -602,7 +611,7 @@ template<typename T1, typename T2> void assert_same_size(const T1 &v1, const T2 &v2)
{
UNUSED_VARS_NDEBUG(v1, v2);
#ifdef DEBUG
- uint size = v1.size();
+ int64_t size = v1.size();
BLI_assert(size == v1.size());
BLI_assert(size == v2.size());
#endif
@@ -613,7 +622,7 @@ void assert_same_size(const T1 &v1, const T2 &v2, const T3 &v3)
{
UNUSED_VARS_NDEBUG(v1, v2, v3);
#ifdef DEBUG
- uint size = v1.size();
+ int64_t size = v1.size();
BLI_assert(size == v1.size());
BLI_assert(size == v2.size());
BLI_assert(size == v3.size());
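
Span methods now take and return int64_t and assert non-negative arguments on top of the existing bounds asserts. A small usage sketch (assuming BLI_span.hh is available):

#include <cstdint>
#include "BLI_span.hh"

static void span_example()
{
  const int data[5] = {1, 2, 3, 4, 5};
  blender::Span<int> span(data, 5);
  blender::Span<int> middle = span.drop_front(1).take_front(3); /* {2, 3, 4} */
  const int64_t found = middle.first_index_try(3);   /* 1 */
  const int64_t missing = middle.first_index_try(9); /* -1: not found. */
  (void)found;
  (void)missing;
}
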
diff --git a/source/blender/blenlib/BLI_stack.hh b/source/blender/blenlib/BLI_stack.hh
index a5a95186e37..422931862a8 100644
--- a/source/blender/blenlib/BLI_stack.hh
+++ b/source/blender/blenlib/BLI_stack.hh
@@ -60,7 +60,7 @@ template<typename T> struct StackChunk {
/** Pointer to one element past the end of the referenced buffer. */
T *capacity_end;
- uint capacity() const
+ int64_t capacity() const
{
return capacity_end - begin;
}
@@ -77,7 +77,7 @@ template<
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this stack. Should rarely be changed, except when you don't want that
* MEM_* is used internally.
@@ -103,7 +103,7 @@ class Stack {
/**
* Number of elements in the entire stack. The sum of initialized element counts in the chunks.
*/
- uint size_;
+ int64_t size_;
/** The buffer used to implement small object optimization. */
TypedBuffer<T, InlineBufferCapacity> inline_buffer_;
@@ -298,8 +298,8 @@ class Stack {
this->activate_next_chunk(remaining_values.size());
}
- const uint remaining_capacity = top_chunk_->capacity_end - top_;
- const uint amount = std::min(remaining_values.size(), remaining_capacity);
+ const int64_t remaining_capacity = top_chunk_->capacity_end - top_;
+ const int64_t amount = std::min(remaining_values.size(), remaining_capacity);
uninitialized_copy_n(remaining_values.data(), amount, top_);
top_ += amount;
@@ -320,7 +320,7 @@ class Stack {
/**
* Returns the number of elements in the stack.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -344,11 +344,11 @@ class Stack {
*
* This invokes undefined behavior when the currently active chunk is not full.
*/
- void activate_next_chunk(const uint size_hint)
+ void activate_next_chunk(const int64_t size_hint)
{
BLI_assert(top_ == top_chunk_->capacity_end);
if (top_chunk_->above == nullptr) {
- const uint new_capacity = std::max(size_hint, top_chunk_->capacity() * 2 + 10);
+ const int64_t new_capacity = std::max(size_hint, top_chunk_->capacity() * 2 + 10);
/* Do a single memory allocation for the Chunk and the array it references. */
void *buffer = allocator_.allocate(
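
activate_next_chunk() at least doubles the top chunk's capacity (capacity() * 2 + 10), so a sequence of pushes performs O(log n) allocations; and because chunks are linked rather than reallocated, existing elements never move. A small usage sketch (assuming BLI_stack.hh is available):

#include <cstdint>
#include "BLI_stack.hh"

static void stack_example()
{
  blender::Stack<int> stack;
  stack.push(1);
  stack.push(2);
  const int64_t count = stack.size(); /* 2, as int64_t. */
  const int top = stack.pop();        /* 2; the stack now holds only 1. */
  (void)count;
  (void)top;
}
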
diff --git a/source/blender/blenlib/BLI_string_ref.hh b/source/blender/blenlib/BLI_string_ref.hh
index 5b555b8cd1d..06fc66f6b55 100644
--- a/source/blender/blenlib/BLI_string_ref.hh
+++ b/source/blender/blenlib/BLI_string_ref.hh
@@ -60,9 +60,9 @@ class StringRef;
class StringRefBase {
protected:
const char *data_;
- uint size_;
+ int64_t size_;
- StringRefBase(const char *data, const uint size) : data_(data), size_(size)
+ StringRefBase(const char *data, const int64_t size) : data_(data), size_(size)
{
}
@@ -70,7 +70,7 @@ class StringRefBase {
/**
* Return the (byte-)length of the referenced string, without any null-terminator.
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -94,7 +94,7 @@ class StringRefBase {
*/
operator std::string() const
{
- return std::string(data_, size_);
+ return std::string(data_, (size_t)size_);
}
const char *begin() const
@@ -114,7 +114,7 @@ class StringRefBase {
*/
void unsafe_copy(char *dst) const
{
- memcpy(dst, data_, size_);
+ memcpy(dst, data_, (size_t)size_);
dst[size_] = '\0';
}
@@ -122,7 +122,7 @@ class StringRefBase {
* Copy the string into a buffer. The copied string will be null-terminated. This invokes
* undefined behavior when dst_size is too small. (Should we define the behavior?)
*/
- void copy(char *dst, const uint dst_size) const
+ void copy(char *dst, const int64_t dst_size) const
{
if (size_ < dst_size) {
this->unsafe_copy(dst);
@@ -137,7 +137,7 @@ class StringRefBase {
* Copy the string into a char array. The copied string will be null-terminated. This invokes
* undefined behavior when dst is too small.
*/
- template<uint N> void copy(char (&dst)[N])
+ template<size_t N> void copy(char (&dst)[N])
{
this->copy(dst, N);
}
@@ -152,7 +152,7 @@ class StringRefBase {
*/
bool endswith(StringRef suffix) const;
- StringRef substr(uint start, const uint size) const;
+ StringRef substr(int64_t start, const int64_t size) const;
};
/**
@@ -168,7 +168,7 @@ class StringRefNull : public StringRefBase {
/**
* Construct a StringRefNull from a null terminated c-string. The pointer must not point to NULL.
*/
- StringRefNull(const char *str) : StringRefBase(str, (uint)strlen(str))
+ StringRefNull(const char *str) : StringRefBase(str, (int64_t)strlen(str))
{
BLI_assert(str != NULL);
BLI_assert(data_[size_] == '\0');
@@ -178,9 +178,9 @@ class StringRefNull : public StringRefBase {
* Construct a StringRefNull from a null terminated c-string. This invokes undefined behavior
* when the given size is not the correct size of the string.
*/
- StringRefNull(const char *str, const uint size) : StringRefBase(str, size)
+ StringRefNull(const char *str, const int64_t size) : StringRefBase(str, size)
{
- BLI_assert((uint)strlen(str) == size);
+ BLI_assert((int64_t)strlen(str) == size);
}
/**
@@ -194,8 +194,9 @@ class StringRefNull : public StringRefBase {
/**
* Get the char at the given index.
*/
- char operator[](const uint index) const
+ char operator[](const int64_t index) const
{
+ BLI_assert(index >= 0);
/* Use '<=' instead of just '<', so that the null character can be accessed as well. */
BLI_assert(index <= size_);
return data_[index];
@@ -231,11 +232,11 @@ class StringRef : public StringRefBase {
/**
* Create a StringRef from a null-terminated c-string.
*/
- StringRef(const char *str) : StringRefBase(str, str ? (uint)strlen(str) : 0)
+ StringRef(const char *str) : StringRefBase(str, str ? (int64_t)strlen(str) : 0)
{
}
- StringRef(const char *str, const uint length) : StringRefBase(str, length)
+ StringRef(const char *str, const int64_t length) : StringRefBase(str, length)
{
}
@@ -244,7 +245,7 @@ class StringRef : public StringRefBase {
* second pointer points to a smaller address than the first one.
*/
StringRef(const char *begin, const char *one_after_end)
- : StringRefBase(begin, (uint)(one_after_end - begin))
+ : StringRefBase(begin, (int64_t)(one_after_end - begin))
{
BLI_assert(begin <= one_after_end);
}
@@ -253,15 +254,16 @@ class StringRef : public StringRefBase {
* Reference a std::string. Remember that when the std::string is destructed, the StringRef
* will point to freed memory.
*/
- StringRef(const std::string &str) : StringRefBase(str.data(), (uint)str.size())
+ StringRef(const std::string &str) : StringRefBase(str.data(), (int64_t)str.size())
{
}
/**
* Return a new StringRef that does not contain the first n chars.
*/
- StringRef drop_prefix(const uint n) const
+ StringRef drop_prefix(const int64_t n) const
{
+ BLI_assert(n >= 0);
BLI_assert(n <= size_);
return StringRef(data_ + n, size_ - n);
}
@@ -279,8 +281,9 @@ class StringRef : public StringRefBase {
/**
* Get the char at the given index.
*/
- char operator[](uint index) const
+ char operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < size_);
return data_[index];
}
@@ -297,7 +300,7 @@ inline std::ostream &operator<<(std::ostream &stream, StringRef ref)
inline std::ostream &operator<<(std::ostream &stream, StringRefNull ref)
{
- stream << std::string(ref.data(), ref.size());
+ stream << std::string(ref.data(), (size_t)ref.size());
return stream;
}
@@ -315,7 +318,7 @@ inline bool operator==(StringRef a, StringRef b)
if (a.size() != b.size()) {
return false;
}
- return STREQLEN(a.data(), b.data(), a.size());
+ return STREQLEN(a.data(), b.data(), (size_t)a.size());
}
inline bool operator!=(StringRef a, StringRef b)
@@ -331,7 +334,7 @@ inline bool StringRefBase::startswith(StringRef prefix) const
if (size_ < prefix.size_) {
return false;
}
- for (uint i = 0; i < prefix.size_; i++) {
+ for (int64_t i = 0; i < prefix.size_; i++) {
if (data_[i] != prefix.data_[i]) {
return false;
}
@@ -347,8 +350,8 @@ inline bool StringRefBase::endswith(StringRef suffix) const
if (size_ < suffix.size_) {
return false;
}
- const uint offset = size_ - suffix.size_;
- for (uint i = 0; i < suffix.size_; i++) {
+ const int64_t offset = size_ - suffix.size_;
+ for (int64_t i = 0; i < suffix.size_; i++) {
if (data_[offset + i] != suffix.data_[i]) {
return false;
}
@@ -359,8 +362,10 @@ inline bool StringRefBase::endswith(StringRef suffix) const
/**
* Return a new #StringRef containing only a sub-string of the original string.
*/
-inline StringRef StringRefBase::substr(const uint start, const uint size) const
+inline StringRef StringRefBase::substr(const int64_t start, const int64_t size) const
{
+ BLI_assert(size >= 0);
+ BLI_assert(start >= 0);
BLI_assert(start + size <= size_);
return StringRef(data_ + start, size);
}
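
StringRef sizes are signed now as well; substr() asserts that start and size are non-negative before the existing bounds check. A small usage sketch (assuming BLI_string_ref.hh is available):

#include <cstdint>
#include "BLI_string_ref.hh"

static void string_ref_example()
{
  blender::StringRef ref("blender");
  const bool has_prefix = ref.startswith("blend"); /* true */
  blender::StringRef sub = ref.substr(1, 3);       /* "len" */
  const int64_t length = sub.size();               /* 3, as int64_t. */
  (void)has_prefix;
  (void)length;
}
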
diff --git a/source/blender/blenlib/BLI_vector.hh b/source/blender/blenlib/BLI_vector.hh
index 1fe38464ad0..66de8d2fbd1 100644
--- a/source/blender/blenlib/BLI_vector.hh
+++ b/source/blender/blenlib/BLI_vector.hh
@@ -70,7 +70,7 @@ template<
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
- uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
+ int64_t InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this vector. Should rarely be changed, except when you don't want that
* MEM_* is used internally.
@@ -100,8 +100,8 @@ class Vector {
* annoying. Knowing the size of a vector is often quite essential when debugging some code.
*/
#ifndef NDEBUG
- uint debug_size_;
-# define UPDATE_VECTOR_SIZE(ptr) (ptr)->debug_size_ = (uint)((ptr)->end_ - (ptr)->begin_)
+ int64_t debug_size_;
+# define UPDATE_VECTOR_SIZE(ptr) (ptr)->debug_size_ = (int64_t)((ptr)->end_ - (ptr)->begin_)
#else
# define UPDATE_VECTOR_SIZE(ptr) ((void)0)
#endif
@@ -110,7 +110,7 @@ class Vector {
* Be a friend with other vector instantiations. This is necessary to implement some memory
* management logic.
*/
- template<typename OtherT, uint OtherInlineBufferCapacity, typename OtherAllocator>
+ template<typename OtherT, int64_t OtherInlineBufferCapacity, typename OtherAllocator>
friend class Vector;
public:
@@ -131,7 +131,7 @@ class Vector {
* The elements will be default constructed.
* If T is trivially constructible, the elements in the vector are not touched.
*/
- explicit Vector(uint size) : Vector()
+ explicit Vector(int64_t size) : Vector()
{
this->resize(size);
}
@@ -139,7 +139,7 @@ class Vector {
/**
* Create a vector filled with a specific value.
*/
- Vector(uint size, const T &value) : Vector()
+ Vector(int64_t size, const T &value) : Vector()
{
this->resize(size, value);
}
@@ -150,7 +150,7 @@ class Vector {
template<typename U, typename std::enable_if_t<std::is_convertible_v<U, T>> * = nullptr>
Vector(Span<U> values, Allocator allocator = {}) : Vector(allocator)
{
- const uint size = values.size();
+ const int64_t size = values.size();
this->reserve(size);
this->increase_size_by_unchecked(size);
uninitialized_convert_n<U, T>(values.data(), size, begin_);
@@ -217,7 +217,7 @@ class Vector {
* Create a copy of a vector with a different InlineBufferCapacity. This needs to be handled
* separately, so that the other one is a valid copy constructor.
*/
- template<uint OtherInlineBufferCapacity>
+ template<int64_t OtherInlineBufferCapacity>
Vector(const Vector<T, OtherInlineBufferCapacity, Allocator> &other)
: Vector(other.as_span(), other.allocator_)
{
@@ -227,11 +227,11 @@ class Vector {
* Steal the elements from another vector. This does not do an allocation. The other vector will
* have zero elements afterwards.
*/
- template<uint OtherInlineBufferCapacity>
+ template<int64_t OtherInlineBufferCapacity>
Vector(Vector<T, OtherInlineBufferCapacity, Allocator> &&other) noexcept
: allocator_(other.allocator_)
{
- const uint size = other.size();
+ const int64_t size = other.size();
if (other.is_inline()) {
if (size <= InlineBufferCapacity) {
@@ -243,8 +243,8 @@ class Vector {
}
else {
/* Copy from inline buffer to newly allocated buffer. */
- const uint capacity = size;
- begin_ = (T *)allocator_.allocate(sizeof(T) * capacity, alignof(T), AT);
+ const int64_t capacity = size;
+ begin_ = (T *)allocator_.allocate(sizeof(T) * (size_t)capacity, alignof(T), AT);
end_ = begin_ + size;
capacity_end_ = begin_ + capacity;
uninitialized_relocate_n(other.begin_, size, begin_);
@@ -302,14 +302,16 @@ class Vector {
* Get the value at the given index. This invokes undefined behavior when the index is out of
* bounds.
*/
- const T &operator[](uint index) const
+ const T &operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
return begin_[index];
}
- T &operator[](uint index)
+ T &operator[](int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
return begin_[index];
}
@@ -351,7 +353,7 @@ class Vector {
* This won't necessarily make an allocation when min_capacity is small.
* The actual size of the vector does not change.
*/
- void reserve(const uint min_capacity)
+ void reserve(const int64_t min_capacity)
{
if (min_capacity > this->capacity()) {
this->realloc_to_at_least(min_capacity);
@@ -364,9 +366,10 @@ class Vector {
* destructed. If new_size is larger than the old size, the new elements at the end are default
* constructed. If T is trivially constructible, the memory is not touched by this function.
*/
- void resize(const uint new_size)
+ void resize(const int64_t new_size)
{
- const uint old_size = this->size();
+ BLI_assert(new_size >= 0);
+ const int64_t old_size = this->size();
if (new_size > old_size) {
this->reserve(new_size);
default_construct_n(begin_ + old_size, new_size - old_size);
@@ -384,9 +387,10 @@ class Vector {
* destructed. If new_size is larger than the old size, the new elements will be copy constructed
* from the given value.
*/
- void resize(const uint new_size, const T &value)
+ void resize(const int64_t new_size, const T &value)
{
- const uint old_size = this->size();
+ BLI_assert(new_size >= 0);
+ const int64_t old_size = this->size();
if (new_size > old_size) {
this->reserve(new_size);
uninitialized_fill_n(begin_ + old_size, new_size - old_size, value);
@@ -447,9 +451,9 @@ class Vector {
* Append the value to the vector and return the index that can be used to access the newly
* added value.
*/
- uint append_and_get_index(const T &value)
+ int64_t append_and_get_index(const T &value)
{
- const uint index = this->size();
+ const int64_t index = this->size();
this->append(value);
return index;
}
@@ -490,8 +494,9 @@ class Vector {
* Insert the same element n times at the end of the vector.
* This might result in a reallocation internally.
*/
- void append_n_times(const T &value, const uint n)
+ void append_n_times(const T &value, const int64_t n)
{
+ BLI_assert(n >= 0);
this->reserve(this->size() + n);
blender::uninitialized_fill_n(end_, n, value);
this->increase_size_by_unchecked(n);
@@ -503,7 +508,7 @@ class Vector {
* useful when you want to call constructors in the vector yourself. This should only be done in
* very rare cases and has to be justified every time.
*/
- void increase_size_by_unchecked(const uint n)
+ void increase_size_by_unchecked(const int64_t n)
{
BLI_assert(end_ + n <= capacity_end_);
end_ += n;
@@ -519,7 +524,7 @@ class Vector {
{
this->extend(array.data(), array.size());
}
- void extend(const T *start, uint amount)
+ void extend(const T *start, int64_t amount)
{
this->reserve(this->size() + amount);
this->extend_unchecked(start, amount);
@@ -545,8 +550,9 @@ class Vector {
{
this->extend_unchecked(array.data(), array.size());
}
- void extend_unchecked(const T *start, uint amount)
+ void extend_unchecked(const T *start, int64_t amount)
{
+ BLI_assert(amount >= 0);
BLI_assert(begin_ + amount <= capacity_end_);
blender::uninitialized_copy_n(start, amount, end_);
end_ += amount;
@@ -569,28 +575,12 @@ class Vector {
}
/**
- * Replace every element with a new value.
- */
- void fill(const T &value)
- {
- initialized_fill_n(begin_, this->size(), value);
- }
-
- /**
- * Copy the value to all positions specified by the indices array.
- */
- void fill_indices(Span<uint> indices, const T &value)
- {
- MutableSpan<T>(*this).fill_indices(indices, value);
- }
-
- /**
* Return how many values are currently stored in the vector.
*/
- uint size() const
+ int64_t size() const
{
- BLI_assert(debug_size_ == (uint)(end_ - begin_));
- return (uint)(end_ - begin_);
+ BLI_assert(debug_size_ == (int64_t)(end_ - begin_));
+ return (int64_t)(end_ - begin_);
}
/**
@@ -635,8 +625,9 @@ class Vector {
* Delete any element in the vector. The empty space will be filled by the previously last
* element. This takes O(1) time.
*/
- void remove_and_reorder(const uint index)
+ void remove_and_reorder(const int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
T *element_to_remove = begin_ + index;
end_--;
@@ -653,8 +644,8 @@ class Vector {
*/
void remove_first_occurrence_and_reorder(const T &value)
{
- const uint index = this->first_index_of(value);
- this->remove_and_reorder((uint)index);
+ const int64_t index = this->first_index_of(value);
+ this->remove_and_reorder(index);
}
/**
@@ -664,11 +655,12 @@ class Vector {
*
* This is similar to std::vector::erase.
*/
- void remove(const uint index)
+ void remove(const int64_t index)
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->size());
- const uint last_index = this->size() - 1;
- for (uint i = index; i < last_index; i++) {
+ const int64_t last_index = this->size() - 1;
+ for (int64_t i = index; i < last_index; i++) {
begin_[i] = std::move(begin_[i + 1]);
}
begin_[last_index].~T();
@@ -680,11 +672,11 @@ class Vector {
* Do a linear search to find the value in the vector.
* When found, return the first index, otherwise return -1.
*/
- int first_index_of_try(const T &value) const
+ int64_t first_index_of_try(const T &value) const
{
for (const T *current = begin_; current != end_; current++) {
if (*current == value) {
- return (int)(current - begin_);
+ return (int64_t)(current - begin_);
}
}
return -1;
@@ -694,11 +686,11 @@ class Vector {
* Do a linear search to find the value in the vector and return the found index. This invokes
* undefined behavior when the value is not in the vector.
*/
- uint first_index_of(const T &value) const
+ int64_t first_index_of(const T &value) const
{
- const int index = this->first_index_of_try(value);
+ const int64_t index = this->first_index_of_try(value);
BLI_assert(index >= 0);
- return (uint)index;
+ return index;
}
/**
@@ -748,9 +740,9 @@ class Vector {
* Get the current capacity of the vector, i.e. the maximum number of elements the vector can
* hold, before it has to reallocate.
*/
- uint capacity() const
+ int64_t capacity() const
{
- return (uint)(capacity_end_ - begin_);
+ return (int64_t)(capacity_end_ - begin_);
}
/**
@@ -758,7 +750,7 @@ class Vector {
* Obviously, this should only be used when you actually need the index in the loop.
*
* Example:
- * for (uint i : myvector.index_range()) {
+ * for (int64_t i : myvector.index_range()) {
* do_something(i, my_vector[i]);
* }
*/
@@ -796,7 +788,7 @@ class Vector {
}
}
- BLI_NOINLINE void realloc_to_at_least(const uint min_capacity)
+ BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity)
{
if (this->capacity() >= min_capacity) {
return;
@@ -804,12 +796,12 @@ class Vector {
/* At least double the size of the previous allocation. Otherwise consecutive calls to grow can
* cause a reallocation every time even though min_capacity only increments. */
- const uint min_new_capacity = this->capacity() * 2;
+ const int64_t min_new_capacity = this->capacity() * 2;
- const uint new_capacity = std::max(min_capacity, min_new_capacity);
- const uint size = this->size();
+ const int64_t new_capacity = std::max(min_capacity, min_new_capacity);
+ const int64_t size = this->size();
- T *new_array = (T *)allocator_.allocate(new_capacity * (uint)sizeof(T), alignof(T), AT);
+ T *new_array = (T *)allocator_.allocate((size_t)new_capacity * sizeof(T), alignof(T), AT);
uninitialized_relocate_n(begin_, size, new_array);
if (!this->is_inline()) {
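
realloc_to_at_least() keeps appends amortized O(1) by growing to at least double the old capacity, and after this change the cast to size_t happens on the final byte count rather than on sizeof(T). A small usage sketch of the signed-size Vector API (assuming BLI_vector.hh is available):

#include <cstdint>
#include "BLI_vector.hh"

static void vector_example()
{
  blender::Vector<int> vec;
  vec.append(1);
  vec.extend({2, 3, 4}); /* vec is now {1, 2, 3, 4}. */
  for (const int64_t i : vec.index_range()) {
    vec[i] *= 2; /* operator[] takes int64_t and asserts i >= 0. */
  }
  const int64_t index = vec.first_index_of_try(8); /* 3 */
  (void)index;
}
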
diff --git a/source/blender/blenlib/BLI_vector_set.hh b/source/blender/blenlib/BLI_vector_set.hh
index dd1b17653c0..7573b77cdf7 100644
--- a/source/blender/blenlib/BLI_vector_set.hh
+++ b/source/blender/blenlib/BLI_vector_set.hh
@@ -106,20 +106,20 @@ class VectorSet {
* Slots are either empty, occupied or removed. The number of occupied slots can be computed by
* subtracting the removed slots from the occupied-and-removed slots.
*/
- uint32_t removed_slots_;
- uint32_t occupied_and_removed_slots_;
+ int64_t removed_slots_;
+ int64_t occupied_and_removed_slots_;
/**
* The maximum number of slots that can be used (either occupied or removed) until the set has to
* grow. This is the total number of slots times the max load factor.
*/
- uint32_t usable_slots_;
+ int64_t usable_slots_;
/**
* The number of slots minus one. This is a bit mask that can be used to turn any integer into a
* valid slot index efficiently.
*/
- uint32_t slot_mask_;
+ uint64_t slot_mask_;
/** This is called to hash incoming keys. */
Hash hash_;
@@ -238,8 +238,9 @@ class VectorSet {
/**
* Get the key stored at the given position in the vector.
*/
- const Key &operator[](const uint32_t index) const
+ const Key &operator[](const int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index <= this->size());
return keys_[index];
}
@@ -362,11 +363,11 @@ class VectorSet {
* Return the location of the key in the vector. It is assumed that the key is in the vector
* set. If this is not necessarily the case, use `index_of_try`.
*/
- uint32_t index_of(const Key &key) const
+ int64_t index_of(const Key &key) const
{
return this->index_of_as(key);
}
- template<typename ForwardKey> uint32_t index_of_as(const ForwardKey &key) const
+ template<typename ForwardKey> int64_t index_of_as(const ForwardKey &key) const
{
return this->index_of__impl(key, hash_(key));
}
@@ -375,11 +376,11 @@ class VectorSet {
* Return the location of the key in the vector. If the key is not in the set, -1 is returned.
* If you know for sure that the key is in the set, it is better to use `index_of` instead.
*/
- int32_t index_of_try(const Key &key) const
+ int64_t index_of_try(const Key &key) const
{
- return (int32_t)this->index_of_try_as(key);
+ return this->index_of_try_as(key);
}
- template<typename ForwardKey> int32_t index_of_try_as(const ForwardKey &key) const
+ template<typename ForwardKey> int64_t index_of_try_as(const ForwardKey &key) const
{
return this->index_of_try__impl(key, hash_(key));
}
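
index_of_try() now returns int64_t directly instead of narrowing the slot index to int32_t; -1 still signals a missing key. A small usage sketch (assuming BLI_vector_set.hh is available):

#include <cstdint>
#include "BLI_vector_set.hh"

static void vector_set_example()
{
  blender::VectorSet<int> set;
  set.add(10);
  set.add(20);
  const int64_t present = set.index_of_try(20); /* 1: insertion order is kept. */
  const int64_t missing = set.index_of_try(99); /* -1: not in the set. */
  (void)present;
  (void)missing;
}
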
@@ -414,7 +415,7 @@ class VectorSet {
/**
* Returns the number of keys stored in the vector set.
*/
- uint32_t size() const
+ int64_t size() const
{
return occupied_and_removed_slots_ - removed_slots_;
}
@@ -430,7 +431,7 @@ class VectorSet {
/**
* Returns the number of available slots. This is mostly for debugging purposes.
*/
- uint32_t capacity() const
+ int64_t capacity() const
{
return slots_.size();
}
@@ -438,7 +439,7 @@ class VectorSet {
/**
* Returns the number of removed slots in the set. This is mostly for debugging purposes.
*/
- uint32_t removed_amount() const
+ int64_t removed_amount() const
{
return removed_slots_;
}
@@ -446,7 +447,7 @@ class VectorSet {
/**
* Returns the bytes required per element. This is mostly for debugging purposes.
*/
- uint32_t size_per_element() const
+ int64_t size_per_element() const
{
return sizeof(Slot) + sizeof(Key);
}
@@ -455,15 +456,15 @@ class VectorSet {
* Returns the approximate memory requirements of the set in bytes. This is more accurate for
* larger sets.
*/
- uint32_t size_in_bytes() const
+ int64_t size_in_bytes() const
{
- return (uint32_t)(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
+ return (int64_t)(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
}
/**
* Potentially resize the vector set such that it can hold n elements without doing another grow.
*/
- void reserve(const uint32_t n)
+ void reserve(const int64_t n)
{
if (usable_slots_ < n) {
this->realloc_and_reinsert(n);
@@ -474,18 +475,19 @@ class VectorSet {
* Get the number of collisions that the probing strategy has to go through to find the key or
* determine that it is not in the set.
*/
- uint32_t count_collisions(const Key &key) const
+ int64_t count_collisions(const Key &key) const
{
return this->count_collisions__impl(key, hash_(key));
}
private:
- BLI_NOINLINE void realloc_and_reinsert(const uint32_t min_usable_slots)
+ BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots)
{
- uint32_t total_slots, usable_slots;
+ int64_t total_slots, usable_slots;
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
- const uint32_t new_slot_mask = total_slots - 1;
+ BLI_assert(total_slots >= 1);
+ const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
/* Optimize the case when the set was empty beforehand. We can avoid some copies here. */
if (this->size() == 0) {
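The slot count stays a power of two, so the probing code can wrap a hash into the slot array with a bitwise AND instead of a modulo; the new assert documents that `total_slots - 1` cannot underflow now that the mask is computed in 64 bits. The pattern, in isolation:

/* hash & (total_slots - 1) == hash % total_slots, when total_slots is 2^k. */
const uint64_t total_slots = 1024;
const uint64_t mask = total_slots - 1;                    /* 0b1111111111 */
const uint64_t slot_index = 0x9e3779b97f4a7c15ull & mask; /* Always < 1024. */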
@@ -524,10 +526,10 @@ class VectorSet {
void add_after_grow_and_destruct_old(Slot &old_slot,
SlotArray &new_slots,
- const uint32_t new_slot_mask)
+ const uint64_t new_slot_mask)
{
const Key &key = keys_[old_slot.index()];
- const uint32_t hash = old_slot.get_hash(key, Hash());
+ const uint64_t hash = old_slot.get_hash(key, Hash());
SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) {
Slot &slot = new_slots[slot_index];
@@ -540,7 +542,7 @@ class VectorSet {
}
template<typename ForwardKey>
- bool contains__impl(const ForwardKey &key, const uint32_t hash) const
+ bool contains__impl(const ForwardKey &key, const uint64_t hash) const
{
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
@@ -553,7 +555,7 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint64_t hash)
{
BLI_assert(!this->contains_as(key));
@@ -561,7 +563,7 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
- uint32_t index = this->size();
+ int64_t index = this->size();
new (keys_ + index) Key(std::forward<ForwardKey>(key));
slot.occupy(index, hash);
occupied_and_removed_slots_++;
@@ -571,13 +573,13 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint32_t hash)
+ template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint64_t hash)
{
this->ensure_can_add();
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.is_empty()) {
- uint32_t index = this->size();
+ int64_t index = this->size();
new (keys_ + index) Key(std::forward<ForwardKey>(key));
occupied_and_removed_slots_++;
slot.occupy(index, hash);
@@ -591,7 +593,7 @@ class VectorSet {
}
template<typename ForwardKey>
- uint32_t index_of__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t index_of__impl(const ForwardKey &key, const uint64_t hash) const
{
BLI_assert(this->contains_as(key));
@@ -604,11 +606,11 @@ class VectorSet {
}
template<typename ForwardKey>
- int32_t index_of_try__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t index_of_try__impl(const ForwardKey &key, const uint64_t hash) const
{
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash, keys_)) {
- return (int32_t)slot.index();
+ return slot.index();
}
if (slot.is_empty()) {
return -1;
@@ -621,10 +623,10 @@ class VectorSet {
{
BLI_assert(this->size() > 0);
- const uint32_t index_to_pop = this->size() - 1;
+ const int64_t index_to_pop = this->size() - 1;
Key key = std::move(keys_[index_to_pop]);
keys_[index_to_pop].~Key();
- const uint32_t hash = hash_(key);
+ const uint64_t hash = hash_(key);
removed_slots_++;
@@ -637,7 +639,7 @@ class VectorSet {
VECTOR_SET_SLOT_PROBING_END();
}
- template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint32_t hash)
+ template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint64_t hash)
{
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash, keys_)) {
@@ -652,7 +654,7 @@ class VectorSet {
}
template<typename ForwardKey>
- void remove_contained__impl(const ForwardKey &key, const uint32_t hash)
+ void remove_contained__impl(const ForwardKey &key, const uint64_t hash)
{
BLI_assert(this->contains_as(key));
@@ -667,9 +669,9 @@ class VectorSet {
void remove_key_internal(Slot &slot)
{
- uint32_t index_to_remove = slot.index();
- uint32_t size = this->size();
- uint32_t last_element_index = size - 1;
+ int64_t index_to_remove = slot.index();
+ int64_t size = this->size();
+ int64_t last_element_index = size - 1;
if (index_to_remove < last_element_index) {
keys_[index_to_remove] = std::move(keys_[last_element_index]);
@@ -682,9 +684,9 @@ class VectorSet {
return;
}
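`remove_key_internal` keeps the key array dense with a swap-remove: the last key is moved into the freed position and `update_slot_index` then repairs the slot of the moved key. The cost is that removal does not preserve the insertion order of that last key. The same pattern on a plain `std::vector`, as a sketch:

#include <utility>
#include <vector>

template<typename T> void swap_remove(std::vector<T> &vec, const int64_t index)
{
  const int64_t last = (int64_t)vec.size() - 1;
  if (index < last) {
    /* Fill the hole with the last element instead of shifting everything. */
    vec[index] = std::move(vec[last]);
  }
  vec.pop_back(); /* O(1) removal; element order is not preserved. */
}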
- void update_slot_index(const Key &key, const uint32_t old_index, const uint32_t new_index)
+ void update_slot_index(const Key &key, const int64_t old_index, const int64_t new_index)
{
- uint32_t hash = hash_(key);
+ uint64_t hash = hash_(key);
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.has_index(old_index)) {
slot.update_index(new_index);
@@ -695,9 +697,9 @@ class VectorSet {
}
template<typename ForwardKey>
- uint32_t count_collisions__impl(const ForwardKey &key, const uint32_t hash) const
+ int64_t count_collisions__impl(const ForwardKey &key, const uint64_t hash) const
{
- uint32_t collisions = 0;
+ int64_t collisions = 0;
VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) {
if (slot.contains(key, is_equal_, hash, keys_)) {
@@ -719,9 +721,9 @@ class VectorSet {
}
}
- Key *allocate_keys_array(const uint32_t size)
+ Key *allocate_keys_array(const int64_t size)
{
- return (Key *)slots_.allocator().allocate((uint32_t)sizeof(Key) * size, alignof(Key), AT);
+ return (Key *)slots_.allocator().allocate(sizeof(Key) * (size_t)size, alignof(Key), AT);
}
void deallocate_keys_array(Key *keys)
diff --git a/source/blender/blenlib/BLI_vector_set_slots.hh b/source/blender/blenlib/BLI_vector_set_slots.hh
index e43b892b3f7..49e6d4daedb 100644
--- a/source/blender/blenlib/BLI_vector_set_slots.hh
+++ b/source/blender/blenlib/BLI_vector_set_slots.hh
@@ -53,7 +53,7 @@ template<typename Key> class SimpleVectorSetSlot {
/**
* After the default constructor has run, the slot has to be in the empty state.
*/
- int32_t state_ = s_is_empty;
+ int64_t state_ = s_is_empty;
public:
/**
@@ -75,10 +75,10 @@ template<typename Key> class SimpleVectorSetSlot {
/**
* Return the stored index. It is assumed that the slot is occupied.
*/
- uint32_t index() const
+ int64_t index() const
{
BLI_assert(this->is_occupied());
- return (uint32_t)state_;
+ return state_;
}
/**
@@ -88,7 +88,7 @@ template<typename Key> class SimpleVectorSetSlot {
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key,
const IsEqual &is_equal,
- uint32_t UNUSED(hash),
+ uint64_t UNUSED(hash),
const Key *keys) const
{
if (state_ >= 0) {
@@ -102,7 +102,7 @@ template<typename Key> class SimpleVectorSetSlot {
* we can avoid a comparison with the state, since we know the slot is occupied. For this
* specific slot implementation, this does not make a difference.
*/
- void relocate_occupied_here(SimpleVectorSetSlot &other, uint32_t UNUSED(hash))
+ void relocate_occupied_here(SimpleVectorSetSlot &other, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
@@ -113,20 +113,20 @@ template<typename Key> class SimpleVectorSetSlot {
* Change the state of this slot from empty/removed to occupied. The hash can be used by other
* slot implementations.
*/
- void occupy(uint32_t index, uint32_t UNUSED(hash))
+ void occupy(int64_t index, uint64_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
- state_ = (int32_t)index;
+ state_ = index;
}
/**
* The key has changed its position in the vector, so the index has to be updated. This method
* can assume that the slot is currently occupied.
*/
- void update_index(uint32_t index)
+ void update_index(int64_t index)
{
BLI_assert(this->is_occupied());
- state_ = (int32_t)index;
+ state_ = index;
}
/**
@@ -141,16 +141,16 @@ template<typename Key> class SimpleVectorSetSlot {
/**
* Return true if this slot is currently occupied and its corresponding key has the given index.
*/
- bool has_index(uint32_t index) const
+ bool has_index(int64_t index) const
{
- return (uint32_t)state_ == index;
+ return state_ == index;
}
/**
* Return the hash of the currently stored key. In this simple set slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
- template<typename Hash> uint32_t get_hash(const Key &key, const Hash &hash) const
+ template<typename Hash> uint64_t get_hash(const Key &key, const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(key);
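The whole slot state fits into one signed integer: a non-negative value is an index into `keys_`, and the empty/removed states are negative sentinels (their exact values are assumed below; they are defined elsewhere in this header). That encoding is why widening `state_` to `int64_t` is sufficient and why `contains` can simply test `state_ >= 0`:

/* Sketch of the sentinel encoding; the sentinel values are assumptions. */
static constexpr int64_t s_is_empty = -1;
static constexpr int64_t s_is_removed = -2;

inline bool slot_is_occupied(const int64_t state)
{
  return state >= 0; /* Any non-negative state is a valid key index. */
}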
diff --git a/source/blender/blenlib/intern/BLI_index_range.cc b/source/blender/blenlib/intern/BLI_index_range.cc
index 9fa19143f91..a906416b491 100644
--- a/source/blender/blenlib/intern/BLI_index_range.cc
+++ b/source/blender/blenlib/intern/BLI_index_range.cc
@@ -24,28 +24,28 @@
namespace blender {
-static Vector<Array<uint, 0, RawAllocator>, 1, RawAllocator> arrays;
-static uint current_array_size = 0;
-static uint *current_array = nullptr;
+static Vector<Array<int64_t, 0, RawAllocator>, 1, RawAllocator> arrays;
+static int64_t current_array_size = 0;
+static int64_t *current_array = nullptr;
static std::mutex current_array_mutex;
-Span<uint> IndexRange::as_span() const
+Span<int64_t> IndexRange::as_span() const
{
- uint min_required_size = start_ + size_;
+ int64_t min_required_size = start_ + size_;
if (min_required_size <= current_array_size) {
- return Span<uint>(current_array + start_, size_);
+ return Span<int64_t>(current_array + start_, size_);
}
std::lock_guard<std::mutex> lock(current_array_mutex);
if (min_required_size <= current_array_size) {
- return Span<uint>(current_array + start_, size_);
+ return Span<int64_t>(current_array + start_, size_);
}
- uint new_size = std::max<uint>(1000, power_of_2_max_u(min_required_size));
- Array<uint, 0, RawAllocator> new_array(new_size);
- for (uint i = 0; i < new_size; i++) {
+ int64_t new_size = std::max<int64_t>(1000, power_of_2_max_u(min_required_size));
+ Array<int64_t, 0, RawAllocator> new_array(new_size);
+ for (int64_t i = 0; i < new_size; i++) {
new_array[i] = i;
}
arrays.append(std::move(new_array));
@@ -54,7 +54,7 @@ Span<uint> IndexRange::as_span() const
std::atomic_thread_fence(std::memory_order_seq_cst);
current_array_size = new_size;
- return Span<uint>(current_array + start_, size_);
+ return Span<int64_t>(current_array + start_, size_);
}
} // namespace blender
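`as_span` maps an `IndexRange` onto a shared, lazily grown identity array `{0, 1, 2, ...}`. The repeated `min_required_size <= current_array_size` test is double-checked locking: the fast path avoids the mutex, and the fence publishes the new array pointer before the new size becomes visible. Usage sketch, assuming `BLI_index_range.hh`:

blender::IndexRange range(10, 4); /* start = 10, size = 4 */
blender::Span<int64_t> span = range.as_span();
/* span now views {10, 11, 12, 13} inside the shared identity array. */
BLI_assert(span[0] == 10 && span.size() == 4);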
diff --git a/source/blender/blenlib/intern/dot_export.cc b/source/blender/blenlib/intern/dot_export.cc
index 0f60ea6fd1b..48b6dc826d0 100644
--- a/source/blender/blenlib/intern/dot_export.cc
+++ b/source/blender/blenlib/intern/dot_export.cc
@@ -263,8 +263,8 @@ NodeWithSocketsRef::NodeWithSocketsRef(Node &node,
ss << "</b></td></tr>";
/* Sockets */
- uint socket_max_amount = std::max(input_names.size(), output_names.size());
- for (uint i = 0; i < socket_max_amount; i++) {
+ int socket_max_amount = std::max(input_names.size(), output_names.size());
+ for (int i = 0; i < socket_max_amount; i++) {
ss << "<tr>";
if (i < input_names.size()) {
StringRef name = input_names[i];
diff --git a/source/blender/blenlib/intern/rand.cc b/source/blender/blenlib/intern/rand.cc
index 279682ea171..9bafc422db5 100644
--- a/source/blender/blenlib/intern/rand.cc
+++ b/source/blender/blenlib/intern/rand.cc
@@ -93,8 +93,7 @@ void BLI_rng_srandom(RNG *rng, unsigned int seed)
void BLI_rng_get_char_n(RNG *rng, char *bytes, size_t bytes_len)
{
- BLI_assert(bytes_len > UINT32_MAX);
- rng->rng.get_bytes(blender::MutableSpan(bytes, (uint32_t)bytes_len));
+ rng->rng.get_bytes(blender::MutableSpan(bytes, (int64_t)bytes_len));
}
int BLI_rng_get_int(RNG *rng)
@@ -428,11 +427,11 @@ float2 RandomNumberGenerator::get_triangle_sample(float2 v1, float2 v2, float2 v
void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
{
- constexpr uint mask_bytes = 2;
- constexpr uint rand_stride = (uint)sizeof(x_) - mask_bytes;
+ constexpr int64_t mask_bytes = 2;
+ constexpr int64_t rand_stride = (int64_t)sizeof(x_) - mask_bytes;
- uint last_len = 0;
- uint trim_len = r_bytes.size();
+ int64_t last_len = 0;
+ int64_t trim_len = r_bytes.size();
if (trim_len > rand_stride) {
last_len = trim_len % rand_stride;
@@ -444,13 +443,13 @@ void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
}
const char *data_src = (const char *)&x_;
- uint i = 0;
+ int64_t i = 0;
while (i != trim_len) {
BLI_assert(i < trim_len);
#ifdef __BIG_ENDIAN__
- for (uint j = (rand_stride + mask_bytes) - 1; j != mask_bytes - 1; j--)
+ for (int64_t j = (rand_stride + mask_bytes) - 1; j != mask_bytes - 1; j--)
#else
- for (uint j = 0; j != rand_stride; j++)
+ for (int64_t j = 0; j != rand_stride; j++)
#endif
{
r_bytes[i++] = data_src[j];
@@ -458,7 +457,7 @@ void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
this->step();
}
if (last_len) {
- for (uint j = 0; j != last_len; j++) {
+ for (int64_t j = 0; j != last_len; j++) {
r_bytes[i++] = data_src[j];
}
}
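For reference, the byte budget in `get_bytes`: only `rand_stride = sizeof(x_) - mask_bytes` bytes of the state are used per `step()` (the top `mask_bytes` are discarded). Assuming a 64-bit `x_`, the arithmetic works out as:

/* rand_stride = 8 - 2 = 6 usable bytes per step. For r_bytes.size() == 20:
 *   last_len = 20 % 6 = 2   (bytes copied after the final step)
 *   trim_len = 20 - 2 = 18  (three full 6-byte strides)
 * so the loop runs three steps and the tail copies the remaining 2 bytes. */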
diff --git a/source/blender/depsgraph/intern/builder/deg_builder_cache.cc b/source/blender/depsgraph/intern/builder/deg_builder_cache.cc
index 1095905c570..750bccf0e52 100644
--- a/source/blender/depsgraph/intern/builder/deg_builder_cache.cc
+++ b/source/blender/depsgraph/intern/builder/deg_builder_cache.cc
@@ -72,11 +72,11 @@ bool operator==(const AnimatedPropertyID &a, const AnimatedPropertyID &b)
return a.data == b.data && a.property_rna == b.property_rna;
}
-uint32_t AnimatedPropertyID::hash() const
+uint64_t AnimatedPropertyID::hash() const
{
uintptr_t ptr1 = (uintptr_t)data;
uintptr_t ptr2 = (uintptr_t)property_rna;
- return (uint32_t)(((ptr1 >> 4) * 33) ^ (ptr2 >> 4));
+ return (uint64_t)(((ptr1 >> 4) * 33) ^ (ptr2 >> 4));
}
namespace {
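The hash drops the low four pointer bits (heap allocations are at least 16-byte aligned, so those bits carry little entropy) and now keeps the full 64-bit mix instead of truncating it. The same pattern in isolation, as a sketch:

#include <cstdint>

static uint64_t hash_two_pointers(const void *a, const void *b)
{
  const uintptr_t pa = (uintptr_t)a >> 4; /* Alignment bits carry no entropy. */
  const uintptr_t pb = (uintptr_t)b >> 4;
  return (uint64_t)((pa * 33) ^ pb); /* Cheap multiply-xor mix. */
}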
diff --git a/source/blender/depsgraph/intern/builder/deg_builder_cache.h b/source/blender/depsgraph/intern/builder/deg_builder_cache.h
index 43348e3bf23..e04ae3a3727 100644
--- a/source/blender/depsgraph/intern/builder/deg_builder_cache.h
+++ b/source/blender/depsgraph/intern/builder/deg_builder_cache.h
@@ -47,7 +47,7 @@ class AnimatedPropertyID {
AnimatedPropertyID(ID *id, StructRNA *type, const char *property_name);
AnimatedPropertyID(ID *id, StructRNA *type, void *data, const char *property_name);
- uint32_t hash() const;
+ uint64_t hash() const;
friend bool operator==(const AnimatedPropertyID &a, const AnimatedPropertyID &b);
/* Corresponds to PointerRNA.data. */
diff --git a/source/blender/depsgraph/intern/depsgraph_registry.cc b/source/blender/depsgraph/intern/depsgraph_registry.cc
index 6bfd2e881cc..bc3b85f9be5 100644
--- a/source/blender/depsgraph/intern/depsgraph_registry.cc
+++ b/source/blender/depsgraph/intern/depsgraph_registry.cc
@@ -30,6 +30,8 @@
namespace blender {
namespace deg {
+/* TODO: Static variables should use RawAllocator to avoid false positives in Blender's memory
+ * leak detector. */
static Map<Main *, VectorSet<Depsgraph *>> g_graph_registry;
void register_graph(Depsgraph *depsgraph)
diff --git a/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.cc b/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.cc
index f2d9a87ca9d..934403674a9 100644
--- a/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.cc
+++ b/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.cc
@@ -41,7 +41,7 @@ bool operator==(const ModifierDataBackupID &a, const ModifierDataBackupID &b)
return a.modifier_data == b.modifier_data && a.type == b.type;
}
-uint32_t ModifierDataBackupID::hash() const
+uint64_t ModifierDataBackupID::hash() const
{
uintptr_t ptr = (uintptr_t)modifier_data;
return (ptr >> 4) ^ (uintptr_t)type;
diff --git a/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.h b/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.h
index dc16bdcc1b8..a5bdf2359ee 100644
--- a/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.h
+++ b/source/blender/depsgraph/intern/eval/deg_eval_runtime_backup_modifier.h
@@ -49,7 +49,7 @@ class ModifierDataBackupID {
friend bool operator==(const ModifierDataBackupID &a, const ModifierDataBackupID &b);
- uint32_t hash() const;
+ uint64_t hash() const;
ModifierData *modifier_data;
ModifierType type;
diff --git a/source/blender/depsgraph/intern/node/deg_node_component.cc b/source/blender/depsgraph/intern/node/deg_node_component.cc
index 34514ba9bd7..cd82b7be050 100644
--- a/source/blender/depsgraph/intern/node/deg_node_component.cc
+++ b/source/blender/depsgraph/intern/node/deg_node_component.cc
@@ -72,7 +72,7 @@ bool ComponentNode::OperationIDKey::operator==(const OperationIDKey &other) cons
return (opcode == other.opcode) && (STREQ(name, other.name)) && (name_tag == other.name_tag);
}
-uint32_t ComponentNode::OperationIDKey::hash() const
+uint64_t ComponentNode::OperationIDKey::hash() const
{
const int opcode_as_int = static_cast<int>(opcode);
return BLI_ghashutil_combine_hash(
diff --git a/source/blender/depsgraph/intern/node/deg_node_component.h b/source/blender/depsgraph/intern/node/deg_node_component.h
index 3757a1dea5b..06582c88d8b 100644
--- a/source/blender/depsgraph/intern/node/deg_node_component.h
+++ b/source/blender/depsgraph/intern/node/deg_node_component.h
@@ -54,7 +54,7 @@ struct ComponentNode : public Node {
string identifier() const;
bool operator==(const OperationIDKey &other) const;
- uint32_t hash() const;
+ uint64_t hash() const;
};
/* Typedef for container of operations */
diff --git a/source/blender/depsgraph/intern/node/deg_node_id.cc b/source/blender/depsgraph/intern/node/deg_node_id.cc
index d0c23f326ce..8d19949adc8 100644
--- a/source/blender/depsgraph/intern/node/deg_node_id.cc
+++ b/source/blender/depsgraph/intern/node/deg_node_id.cc
@@ -67,7 +67,7 @@ bool IDNode::ComponentIDKey::operator==(const ComponentIDKey &other) const
return type == other.type && STREQ(name, other.name);
}
-uint32_t IDNode::ComponentIDKey::hash() const
+uint64_t IDNode::ComponentIDKey::hash() const
{
const int type_as_int = static_cast<int>(type);
return BLI_ghashutil_combine_hash(BLI_ghashutil_uinthash(type_as_int),
diff --git a/source/blender/depsgraph/intern/node/deg_node_id.h b/source/blender/depsgraph/intern/node/deg_node_id.h
index 9bd6130bbdc..04a9006ac10 100644
--- a/source/blender/depsgraph/intern/node/deg_node_id.h
+++ b/source/blender/depsgraph/intern/node/deg_node_id.h
@@ -51,7 +51,7 @@ const char *linkedStateAsString(eDepsNode_LinkedState_Type linked_state);
struct IDNode : public Node {
struct ComponentIDKey {
ComponentIDKey(NodeType type, const char *name = "");
- uint32_t hash() const;
+ uint64_t hash() const;
bool operator==(const ComponentIDKey &other) const;
NodeType type;
diff --git a/source/blender/functions/FN_array_spans.hh b/source/blender/functions/FN_array_spans.hh
index 3f71b36c49c..c362fef3630 100644
--- a/source/blender/functions/FN_array_spans.hh
+++ b/source/blender/functions/FN_array_spans.hh
@@ -39,17 +39,17 @@ enum class VArraySpanCategory {
template<typename T> class VArraySpanBase {
protected:
- uint virtual_size_;
+ int64_t virtual_size_;
VArraySpanCategory category_;
union {
struct {
const T *start;
- uint size;
+ int64_t size;
} single_array;
struct {
const T *const *starts;
- const uint *sizes;
+ const int64_t *sizes;
} starts_and_sizes;
} data_;
@@ -71,7 +71,7 @@ template<typename T> class VArraySpanBase {
return this->virtual_size_ == 0;
}
- uint size() const
+ int64_t size() const
{
return this->virtual_size_;
}
@@ -99,15 +99,16 @@ template<typename T> class VArraySpan : public VArraySpanBase<T> {
this->data_.starts_and_sizes.sizes = nullptr;
}
- VArraySpan(Span<T> span, uint virtual_size)
+ VArraySpan(Span<T> span, int64_t virtual_size)
{
+ BLI_assert(virtual_size >= 0);
this->virtual_size_ = virtual_size;
this->category_ = VArraySpanCategory::SingleArray;
this->data_.single_array.start = span.data();
this->data_.single_array.size = span.size();
}
- VArraySpan(Span<const T *> starts, Span<uint> sizes)
+ VArraySpan(Span<const T *> starts, Span<int64_t> sizes)
{
BLI_assert(starts.size() == sizes.size());
this->virtual_size_ = starts.size();
@@ -116,8 +117,9 @@ template<typename T> class VArraySpan : public VArraySpanBase<T> {
this->data_.starts_and_sizes.sizes = sizes.begin();
}
- VSpan<T> operator[](uint index) const
+ VSpan<T> operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->virtual_size_);
switch (this->category_) {
case VArraySpanCategory::SingleArray:
@@ -151,7 +153,7 @@ class GVArraySpan : public VArraySpanBase<void> {
this->data_.starts_and_sizes.sizes = nullptr;
}
- GVArraySpan(GSpan array, uint virtual_size)
+ GVArraySpan(GSpan array, int64_t virtual_size)
{
this->type_ = &array.type();
this->virtual_size_ = virtual_size;
@@ -160,7 +162,7 @@ class GVArraySpan : public VArraySpanBase<void> {
this->data_.single_array.size = array.size();
}
- GVArraySpan(const CPPType &type, Span<const void *> starts, Span<uint> sizes)
+ GVArraySpan(const CPPType &type, Span<const void *> starts, Span<int64_t> sizes)
{
BLI_assert(starts.size() == sizes.size());
this->type_ = &type;
@@ -187,7 +189,7 @@ class GVArraySpan : public VArraySpanBase<void> {
return VArraySpan<T>(*this);
}
- GVSpan operator[](uint index) const
+ GVSpan operator[](int64_t index) const
{
BLI_assert(index < virtual_size_);
switch (category_) {
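A `VArraySpan` is a virtual span of spans with two storage categories: `SingleArray` repeats one span for every virtual index, while `StartsAndSizes` stores a `(start, size)` pair per index; `operator[]` dispatches on the category. Construction sketch, assuming `FN_array_spans.hh` as changed above:

const int data[3] = {1, 2, 3};

/* SingleArray: the same 3-element span for each of the 100 virtual indices. */
blender::fn::VArraySpan<int> repeated(blender::Span<int>(data, 3), 100);

/* StartsAndSizes: per-index spans of differing sizes (a jagged array view). */
const int *starts[2] = {data, data + 1};
const int64_t sizes[2] = {1, 2};
blender::fn::VArraySpan<int> jagged(blender::Span<const int *>(starts, 2),
                                    blender::Span<int64_t>(sizes, 2));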
diff --git a/source/blender/functions/FN_attributes_ref.hh b/source/blender/functions/FN_attributes_ref.hh
index cf5ef2af8a6..2af35317827 100644
--- a/source/blender/functions/FN_attributes_ref.hh
+++ b/source/blender/functions/FN_attributes_ref.hh
@@ -65,7 +65,7 @@ class AttributesInfoBuilder : NonCopyable, NonMovable {
class AttributesInfo : NonCopyable, NonMovable {
private:
LinearAllocator<> allocator_;
- Map<StringRefNull, uint> index_by_name_;
+ Map<StringRefNull, int> index_by_name_;
Vector<StringRefNull> name_by_index_;
Vector<const CPPType *> type_by_index_;
Vector<void *> defaults_;
@@ -75,7 +75,7 @@ class AttributesInfo : NonCopyable, NonMovable {
AttributesInfo(const AttributesInfoBuilder &builder);
~AttributesInfo();
- uint size() const
+ int size() const
{
return name_by_index_.size();
}
@@ -85,17 +85,17 @@ class AttributesInfo : NonCopyable, NonMovable {
return name_by_index_.index_range();
}
- StringRefNull name_of(uint index) const
+ StringRefNull name_of(int index) const
{
return name_by_index_[index];
}
- uint index_of(StringRef name) const
+ int index_of(StringRef name) const
{
return index_by_name_.lookup_as(name);
}
- const void *default_of(uint index) const
+ const void *default_of(int index) const
{
return defaults_[index];
}
@@ -105,7 +105,7 @@ class AttributesInfo : NonCopyable, NonMovable {
return this->default_of(this->index_of(name));
}
- template<typename T> const T &default_of(uint index) const
+ template<typename T> const T &default_of(int index) const
{
BLI_assert(type_by_index_[index]->is<T>());
return *(T *)defaults_[index];
@@ -116,7 +116,7 @@ class AttributesInfo : NonCopyable, NonMovable {
return this->default_of<T>(this->index_of(name));
}
- const CPPType &type_of(uint index) const
+ const CPPType &type_of(int index) const
{
return *type_by_index_[index];
}
@@ -133,7 +133,7 @@ class AttributesInfo : NonCopyable, NonMovable {
int try_index_of(StringRef name) const
{
- return (int)index_by_name_.lookup_default_as(name, -1);
+ return index_by_name_.lookup_default_as(name, -1);
}
int try_index_of(StringRef name, const CPPType &type) const
@@ -142,7 +142,7 @@ class AttributesInfo : NonCopyable, NonMovable {
if (index == -1) {
return -1;
}
- else if (this->type_of((uint)index) == type) {
+ else if (this->type_of(index) == type) {
return index;
}
else {
@@ -164,7 +164,7 @@ class MutableAttributesRef {
friend class AttributesRef;
public:
- MutableAttributesRef(const AttributesInfo &info, Span<void *> buffers, uint size)
+ MutableAttributesRef(const AttributesInfo &info, Span<void *> buffers, int64_t size)
: MutableAttributesRef(info, buffers, IndexRange(size))
{
}
@@ -174,7 +174,7 @@ class MutableAttributesRef {
{
}
- uint size() const
+ int64_t size() const
{
return range_.size();
}
@@ -184,7 +184,7 @@ class MutableAttributesRef {
return *info_;
}
- GMutableSpan get(uint index) const
+ GMutableSpan get(int index) const
{
const CPPType &type = info_->type_of(index);
void *ptr = POINTER_OFFSET(buffers_[index], type.size() * range_.start());
@@ -196,7 +196,7 @@ class MutableAttributesRef {
return this->get(info_->index_of(name));
}
- template<typename T> MutableSpan<T> get(uint index) const
+ template<typename T> MutableSpan<T> get(int index) const
{
BLI_assert(info_->type_of(index).is<T>());
return MutableSpan<T>((T *)buffers_[index] + range_.start(), range_.size());
@@ -214,7 +214,7 @@ class MutableAttributesRef {
return {};
}
else {
- return this->get((uint)index);
+ return this->get(index);
}
}
@@ -224,8 +224,8 @@ class MutableAttributesRef {
if (index == -1) {
return {};
}
- else if (info_->type_of((uint)index).is<T>()) {
- return this->get<T>((uint)index);
+ else if (info_->type_of(index).is<T>()) {
+ return this->get<T>(index);
}
else {
return {};
@@ -237,7 +237,7 @@ class MutableAttributesRef {
return this->slice(range.start(), range.size());
}
- MutableAttributesRef slice(uint start, uint size) const
+ MutableAttributesRef slice(int64_t start, int64_t size) const
{
return MutableAttributesRef(*info_, buffers_, range_.slice(start, size));
}
@@ -250,7 +250,7 @@ class AttributesRef {
IndexRange range_;
public:
- AttributesRef(const AttributesInfo &info, Span<const void *> buffers, uint size)
+ AttributesRef(const AttributesInfo &info, Span<const void *> buffers, int64_t size)
: AttributesRef(info, buffers, IndexRange(size))
{
}
@@ -265,7 +265,7 @@ class AttributesRef {
{
}
- uint size() const
+ int64_t size() const
{
return range_.size();
}
@@ -275,7 +275,7 @@ class AttributesRef {
return *info_;
}
- GSpan get(uint index) const
+ GSpan get(int index) const
{
const CPPType &type = info_->type_of(index);
const void *ptr = POINTER_OFFSET(buffers_[index], type.size() * range_.start());
@@ -287,7 +287,7 @@ class AttributesRef {
return this->get(info_->index_of(name));
}
- template<typename T> Span<T> get(uint index) const
+ template<typename T> Span<T> get(int index) const
{
BLI_assert(info_->type_of(index).is<T>());
return Span<T>((T *)buffers_[index] + range_.start(), range_.size());
@@ -300,12 +300,12 @@ class AttributesRef {
std::optional<GSpan> try_get(StringRef name, const CPPType &type) const
{
- int index = info_->try_index_of(name, type);
+ int64_t index = info_->try_index_of(name, type);
if (index == -1) {
return {};
}
else {
- return this->get((uint)index);
+ return this->get(index);
}
}
@@ -315,8 +315,8 @@ class AttributesRef {
if (index == -1) {
return {};
}
- else if (info_->type_of((uint)index).is<T>()) {
- return this->get<T>((uint)index);
+ else if (info_->type_of(index).is<T>()) {
+ return this->get<T>(index);
}
else {
return {};
@@ -328,7 +328,7 @@ class AttributesRef {
return this->slice(range.start(), range.size());
}
- AttributesRef slice(uint start, uint size) const
+ AttributesRef slice(int64_t start, int64_t size) const
{
return AttributesRef(*info_, buffers_, range_.slice(start, size));
}
diff --git a/source/blender/functions/FN_cpp_type.hh b/source/blender/functions/FN_cpp_type.hh
index ce02bfd05be..594890e353a 100644
--- a/source/blender/functions/FN_cpp_type.hh
+++ b/source/blender/functions/FN_cpp_type.hh
@@ -38,7 +38,7 @@
* methods come in three variants. Using the construct-default methods as an example:
* - construct_default(void *ptr):
* Constructs a single instance of that type at the given pointer.
- * - construct_default_n(void *ptr, uint n):
+ * - construct_default_n(void *ptr, int64_t n):
* Constructs n instances of that type in an array that starts at the given pointer.
* - construct_default_indices(void *ptr, IndexMask mask):
* Constructs multiple instances of that type in an array that starts at the given pointer.
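The three variants side by side, as a sketch; `CPPType::get<float>()` and the `IndexMask`-from-`IndexRange` construction are assumptions based on the surrounding headers, not shown in this diff:

const blender::fn::CPPType &type = blender::fn::CPPType::get<float>();
void *buffer = MEM_malloc_arrayN(10, type.size(), __func__);

/* Each call default-constructs floats in uninitialized memory; in real code
 * only one of the three shapes would be used per buffer. */
type.construct_default(buffer);                    /* Element 0 only. */
type.construct_default_n(buffer, 10);              /* Elements [0, 10). */
type.construct_default_indices(buffer, blender::IndexMask(blender::IndexRange(10)));

type.destruct_n(buffer, 10);
MEM_freeN(buffer);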
@@ -77,42 +77,42 @@ namespace blender::fn {
class CPPType : NonCopyable, NonMovable {
public:
using ConstructDefaultF = void (*)(void *ptr);
- using ConstructDefaultNF = void (*)(void *ptr, uint n);
+ using ConstructDefaultNF = void (*)(void *ptr, int64_t n);
using ConstructDefaultIndicesF = void (*)(void *ptr, IndexMask mask);
using DestructF = void (*)(void *ptr);
- using DestructNF = void (*)(void *ptr, uint n);
+ using DestructNF = void (*)(void *ptr, int64_t n);
using DestructIndicesF = void (*)(void *ptr, IndexMask mask);
using CopyToInitializedF = void (*)(const void *src, void *dst);
- using CopyToInitializedNF = void (*)(const void *src, void *dst, uint n);
+ using CopyToInitializedNF = void (*)(const void *src, void *dst, int64_t n);
using CopyToInitializedIndicesF = void (*)(const void *src, void *dst, IndexMask mask);
using CopyToUninitializedF = void (*)(const void *src, void *dst);
- using CopyToUninitializedNF = void (*)(const void *src, void *dst, uint n);
+ using CopyToUninitializedNF = void (*)(const void *src, void *dst, int64_t n);
using CopyToUninitializedIndicesF = void (*)(const void *src, void *dst, IndexMask mask);
using RelocateToInitializedF = void (*)(void *src, void *dst);
- using RelocateToInitializedNF = void (*)(void *src, void *dst, uint n);
+ using RelocateToInitializedNF = void (*)(void *src, void *dst, int64_t n);
using RelocateToInitializedIndicesF = void (*)(void *src, void *dst, IndexMask mask);
using RelocateToUninitializedF = void (*)(void *src, void *dst);
- using RelocateToUninitializedNF = void (*)(void *src, void *dst, uint n);
+ using RelocateToUninitializedNF = void (*)(void *src, void *dst, int64_t n);
using RelocateToUninitializedIndicesF = void (*)(void *src, void *dst, IndexMask mask);
- using FillInitializedF = void (*)(const void *value, void *dst, uint n);
+ using FillInitializedF = void (*)(const void *value, void *dst, int64_t n);
using FillInitializedIndicesF = void (*)(const void *value, void *dst, IndexMask mask);
- using FillUninitializedF = void (*)(const void *value, void *dst, uint n);
+ using FillUninitializedF = void (*)(const void *value, void *dst, int64_t n);
using FillUninitializedIndicesF = void (*)(const void *value, void *dst, IndexMask mask);
using DebugPrintF = void (*)(const void *value, std::stringstream &ss);
using IsEqualF = bool (*)(const void *a, const void *b);
- using HashF = uint32_t (*)(const void *value);
+ using HashF = uint64_t (*)(const void *value);
private:
- uint size_;
- uint alignment_;
+ int64_t size_;
+ int64_t alignment_;
uintptr_t alignment_mask_;
bool is_trivially_destructible_;
@@ -155,8 +155,8 @@ class CPPType : NonCopyable, NonMovable {
public:
CPPType(std::string name,
- uint size,
- uint alignment,
+ int64_t size,
+ int64_t alignment,
bool is_trivially_destructible,
ConstructDefaultF construct_default,
ConstructDefaultNF construct_default_n,
@@ -250,7 +250,7 @@ class CPPType : NonCopyable, NonMovable {
* C++ equivalent:
* sizeof(T);
*/
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -261,7 +261,7 @@ class CPPType : NonCopyable, NonMovable {
* C++ equivalent:
* alignof(T);
*/
- uint alignment() const
+ int64_t alignment() const
{
return alignment_;
}
@@ -306,7 +306,7 @@ class CPPType : NonCopyable, NonMovable {
construct_default_(ptr);
}
- void construct_default_n(void *ptr, uint n) const
+ void construct_default_n(void *ptr, int64_t n) const
{
BLI_assert(n == 0 || this->pointer_can_point_to_instance(ptr));
@@ -335,7 +335,7 @@ class CPPType : NonCopyable, NonMovable {
destruct_(ptr);
}
- void destruct_n(void *ptr, uint n) const
+ void destruct_n(void *ptr, int64_t n) const
{
BLI_assert(n == 0 || this->pointer_can_point_to_instance(ptr));
@@ -369,7 +369,7 @@ class CPPType : NonCopyable, NonMovable {
copy_to_initialized_(src, dst);
}
- void copy_to_initialized_n(const void *src, void *dst, uint n) const
+ void copy_to_initialized_n(const void *src, void *dst, int64_t n) const
{
BLI_assert(src != dst);
BLI_assert(n == 0 || this->pointer_can_point_to_instance(src));
@@ -404,7 +404,7 @@ class CPPType : NonCopyable, NonMovable {
copy_to_uninitialized_(src, dst);
}
- void copy_to_uninitialized_n(const void *src, void *dst, uint n) const
+ void copy_to_uninitialized_n(const void *src, void *dst, int64_t n) const
{
BLI_assert(src != dst);
BLI_assert(n == 0 || this->pointer_can_point_to_instance(src));
@@ -439,7 +439,7 @@ class CPPType : NonCopyable, NonMovable {
relocate_to_initialized_(src, dst);
}
- void relocate_to_initialized_n(void *src, void *dst, uint n) const
+ void relocate_to_initialized_n(void *src, void *dst, int64_t n) const
{
BLI_assert(src != dst);
BLI_assert(n == 0 || this->pointer_can_point_to_instance(src));
@@ -474,7 +474,7 @@ class CPPType : NonCopyable, NonMovable {
relocate_to_uninitialized_(src, dst);
}
- void relocate_to_uninitialized_n(void *src, void *dst, uint n) const
+ void relocate_to_uninitialized_n(void *src, void *dst, int64_t n) const
{
BLI_assert(src != dst);
BLI_assert(n == 0 || this->pointer_can_point_to_instance(src));
@@ -497,7 +497,7 @@ class CPPType : NonCopyable, NonMovable {
*
* Other instances of the same type should live in the array before this method is called.
*/
- void fill_initialized(const void *value, void *dst, uint n) const
+ void fill_initialized(const void *value, void *dst, int64_t n) const
{
BLI_assert(n == 0 || this->pointer_can_point_to_instance(value));
BLI_assert(n == 0 || this->pointer_can_point_to_instance(dst));
@@ -518,7 +518,7 @@ class CPPType : NonCopyable, NonMovable {
*
* The array should be uninitialized before this method is called.
*/
- void fill_uninitialized(const void *value, void *dst, uint n) const
+ void fill_uninitialized(const void *value, void *dst, int64_t n) const
{
BLI_assert(n == 0 || this->pointer_can_point_to_instance(value));
BLI_assert(n == 0 || this->pointer_can_point_to_instance(dst));
@@ -547,7 +547,7 @@ class CPPType : NonCopyable, NonMovable {
return is_equal_(a, b);
}
- uint32_t hash(const void *value) const
+ uint64_t hash(const void *value) const
{
BLI_assert(this->pointer_can_point_to_instance(value));
return hash_(value);
@@ -562,7 +562,7 @@ class CPPType : NonCopyable, NonMovable {
return default_value_;
}
- uint32_t hash() const
+ uint64_t hash() const
{
return DefaultHash<const CPPType *>{}(this);
}
@@ -583,39 +583,39 @@ template<typename T> void construct_default_cb(void *ptr)
{
new (ptr) T;
}
-template<typename T> void construct_default_n_cb(void *ptr, uint n)
+template<typename T> void construct_default_n_cb(void *ptr, int64_t n)
{
blender::default_construct_n((T *)ptr, n);
}
template<typename T> void construct_default_indices_cb(void *ptr, IndexMask mask)
{
- mask.foreach_index([&](uint i) { new ((T *)ptr + i) T; });
+ mask.foreach_index([&](int64_t i) { new ((T *)ptr + i) T; });
}
template<typename T> void destruct_cb(void *ptr)
{
((T *)ptr)->~T();
}
-template<typename T> void destruct_n_cb(void *ptr, uint n)
+template<typename T> void destruct_n_cb(void *ptr, int64_t n)
{
blender::destruct_n((T *)ptr, n);
}
template<typename T> void destruct_indices_cb(void *ptr, IndexMask mask)
{
T *ptr_ = (T *)ptr;
- mask.foreach_index([&](uint i) { ptr_[i].~T(); });
+ mask.foreach_index([&](int64_t i) { ptr_[i].~T(); });
}
template<typename T> void copy_to_initialized_cb(const void *src, void *dst)
{
*(T *)dst = *(T *)src;
}
-template<typename T> void copy_to_initialized_n_cb(const void *src, void *dst, uint n)
+template<typename T> void copy_to_initialized_n_cb(const void *src, void *dst, int64_t n)
{
const T *src_ = (const T *)src;
T *dst_ = (T *)dst;
- for (uint i = 0; i < n; i++) {
+ for (int64_t i = 0; i < n; i++) {
dst_[i] = src_[i];
}
}
@@ -625,14 +625,14 @@ void copy_to_initialized_indices_cb(const void *src, void *dst, IndexMask mask)
const T *src_ = (const T *)src;
T *dst_ = (T *)dst;
- mask.foreach_index([&](uint i) { dst_[i] = src_[i]; });
+ mask.foreach_index([&](int64_t i) { dst_[i] = src_[i]; });
}
template<typename T> void copy_to_uninitialized_cb(const void *src, void *dst)
{
blender::uninitialized_copy_n((T *)src, 1, (T *)dst);
}
-template<typename T> void copy_to_uninitialized_n_cb(const void *src, void *dst, uint n)
+template<typename T> void copy_to_uninitialized_n_cb(const void *src, void *dst, int64_t n)
{
blender::uninitialized_copy_n((T *)src, n, (T *)dst);
}
@@ -642,7 +642,7 @@ void copy_to_uninitialized_indices_cb(const void *src, void *dst, IndexMask mask
const T *src_ = (const T *)src;
T *dst_ = (T *)dst;
- mask.foreach_index([&](uint i) { new (dst_ + i) T(src_[i]); });
+ mask.foreach_index([&](int64_t i) { new (dst_ + i) T(src_[i]); });
}
template<typename T> void relocate_to_initialized_cb(void *src, void *dst)
@@ -653,7 +653,7 @@ template<typename T> void relocate_to_initialized_cb(void *src, void *dst)
*dst_ = std::move(*src_);
src_->~T();
}
-template<typename T> void relocate_to_initialized_n_cb(void *src, void *dst, uint n)
+template<typename T> void relocate_to_initialized_n_cb(void *src, void *dst, int64_t n)
{
blender::initialized_relocate_n((T *)src, n, (T *)dst);
}
@@ -662,7 +662,7 @@ template<typename T> void relocate_to_initialized_indices_cb(void *src, void *ds
T *src_ = (T *)src;
T *dst_ = (T *)dst;
- mask.foreach_index([&](uint i) {
+ mask.foreach_index([&](int64_t i) {
dst_[i] = std::move(src_[i]);
src_[i].~T();
});
@@ -676,7 +676,7 @@ template<typename T> void relocate_to_uninitialized_cb(void *src, void *dst)
new (dst_) T(std::move(*src_));
src_->~T();
}
-template<typename T> void relocate_to_uninitialized_n_cb(void *src, void *dst, uint n)
+template<typename T> void relocate_to_uninitialized_n_cb(void *src, void *dst, int64_t n)
{
blender::uninitialized_relocate_n((T *)src, n, (T *)dst);
}
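Throughout this header, "relocate" means: move into the destination, then destruct the source, leaving the source memory uninitialized. The single-element case, as a standalone sketch:

#include <new>
#include <utility>

template<typename T> void relocate_to_uninitialized_one(T *src, T *dst)
{
  new (dst) T(std::move(*src)); /* dst was raw memory before this line. */
  src->~T();                    /* src is raw memory after this line. */
}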
@@ -686,18 +686,18 @@ void relocate_to_uninitialized_indices_cb(void *src, void *dst, IndexMask mask)
T *src_ = (T *)src;
T *dst_ = (T *)dst;
- mask.foreach_index([&](uint i) {
+ mask.foreach_index([&](int64_t i) {
new (dst_ + i) T(std::move(src_[i]));
src_[i].~T();
});
}
-template<typename T> void fill_initialized_cb(const void *value, void *dst, uint n)
+template<typename T> void fill_initialized_cb(const void *value, void *dst, int64_t n)
{
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
- for (uint i = 0; i < n; i++) {
+ for (int64_t i = 0; i < n; i++) {
dst_[i] = value_;
}
}
@@ -706,15 +706,15 @@ template<typename T> void fill_initialized_indices_cb(const void *value, void *d
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
- mask.foreach_index([&](uint i) { dst_[i] = value_; });
+ mask.foreach_index([&](int64_t i) { dst_[i] = value_; });
}
-template<typename T> void fill_uninitialized_cb(const void *value, void *dst, uint n)
+template<typename T> void fill_uninitialized_cb(const void *value, void *dst, int64_t n)
{
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
- for (uint i = 0; i < n; i++) {
+ for (int64_t i = 0; i < n; i++) {
new (dst_ + i) T(value_);
}
}
@@ -724,7 +724,7 @@ void fill_uninitialized_indices_cb(const void *value, void *dst, IndexMask mask)
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
- mask.foreach_index([&](uint i) { new (dst_ + i) T(value_); });
+ mask.foreach_index([&](int64_t i) { new (dst_ + i) T(value_); });
}
template<typename T> void debug_print_cb(const void *value, std::stringstream &ss)
@@ -740,7 +740,7 @@ template<typename T> bool is_equal_cb(const void *a, const void *b)
return a_ == b_;
}
-template<typename T> uint32_t hash_cb(const void *value)
+template<typename T> uint64_t hash_cb(const void *value)
{
const T &value_ = *(const T *)value;
return DefaultHash<T>{}(value_);
diff --git a/source/blender/functions/FN_generic_vector_array.hh b/source/blender/functions/FN_generic_vector_array.hh
index 2672484c184..ee67db000e5 100644
--- a/source/blender/functions/FN_generic_vector_array.hh
+++ b/source/blender/functions/FN_generic_vector_array.hh
@@ -42,10 +42,10 @@ template<typename T> class GVectorArrayRef;
class GVectorArray : NonCopyable, NonMovable {
private:
const CPPType &type_;
- uint element_size_;
+ int64_t element_size_;
Array<void *, 1> starts_;
- Array<uint, 1> lengths_;
- Array<uint, 1> capacities_;
+ Array<int64_t, 1> lengths_;
+ Array<int64_t, 1> capacities_;
LinearAllocator<> allocator_;
template<typename T> friend class GVectorArrayRef;
@@ -53,16 +53,16 @@ class GVectorArray : NonCopyable, NonMovable {
public:
GVectorArray() = delete;
- GVectorArray(const CPPType &type, uint array_size)
+ GVectorArray(const CPPType &type, int64_t array_size)
: type_(type),
element_size_(type.size()),
starts_(array_size),
lengths_(array_size),
capacities_(array_size)
{
- starts_.fill(nullptr);
- lengths_.fill(0);
- capacities_.fill(0);
+ starts_.as_mutable_span().fill(nullptr);
+ lengths_.as_mutable_span().fill(0);
+ capacities_.as_mutable_span().fill(0);
}
~GVectorArray()
@@ -71,7 +71,7 @@ class GVectorArray : NonCopyable, NonMovable {
return;
}
- for (uint i : starts_.index_range()) {
+ for (int64_t i : starts_.index_range()) {
type_.destruct_n(starts_[i], lengths_[i]);
}
}
@@ -86,7 +86,7 @@ class GVectorArray : NonCopyable, NonMovable {
return starts_.size() == 0;
}
- uint size() const
+ int64_t size() const
{
return starts_.size();
}
@@ -101,14 +101,14 @@ class GVectorArray : NonCopyable, NonMovable {
return starts_;
}
- Span<uint> lengths() const
+ Span<int64_t> lengths() const
{
return lengths_;
}
- void append(uint index, const void *src)
+ void append(int64_t index, const void *src)
{
- uint old_length = lengths_[index];
+ int64_t old_length = lengths_[index];
if (old_length == capacities_[index]) {
this->grow_at_least_one(index);
}
@@ -118,10 +118,10 @@ class GVectorArray : NonCopyable, NonMovable {
lengths_[index]++;
}
- void extend(uint index, GVSpan span)
+ void extend(int64_t index, GVSpan span)
{
BLI_assert(type_ == span.type());
- for (uint i = 0; i < span.size(); i++) {
+ for (int64_t i = 0; i < span.size(); i++) {
this->append(index, span[i]);
}
}
@@ -130,12 +130,12 @@ class GVectorArray : NonCopyable, NonMovable {
{
BLI_assert(type_ == array_span.type());
BLI_assert(mask.min_array_size() <= array_span.size());
- for (uint i : mask) {
+ for (int64_t i : mask) {
this->extend(i, array_span[i]);
}
}
- GMutableSpan operator[](uint index)
+ GMutableSpan operator[](int64_t index)
{
BLI_assert(index < starts_.size());
return GMutableSpan(type_, starts_[index], lengths_[index]);
@@ -146,10 +146,10 @@ class GVectorArray : NonCopyable, NonMovable {
}
private:
- void grow_at_least_one(uint index)
+ void grow_at_least_one(int64_t index)
{
BLI_assert(lengths_[index] == capacities_[index]);
- uint new_capacity = lengths_[index] * 2 + 1;
+ int64_t new_capacity = lengths_[index] * 2 + 1;
void *new_buffer = allocator_.allocate(element_size_ * new_capacity, type_.alignment());
type_.relocate_to_uninitialized_n(starts_[index], new_buffer, lengths_[index]);
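The per-vector growth is geometric (`new_capacity = 2 * n + 1`), so repeated appends stay amortized O(1); the `+ 1` bootstraps vectors whose capacity starts at zero:

/* Capacity sequence starting from an empty vector:
 *   0 -> 1 -> 3 -> 7 -> 15 -> ... (2^k - 1)
 * m appends therefore trigger O(log m) reallocations and O(m) element
 * relocations in total. */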
@@ -169,28 +169,28 @@ template<typename T> class GVectorArrayRef {
BLI_assert(vector_array.type_.is<T>());
}
- void append(uint index, const T &value)
+ void append(int64_t index, const T &value)
{
vector_array_->append(index, &value);
}
- void extend(uint index, Span<T> values)
+ void extend(int64_t index, Span<T> values)
{
vector_array_->extend(index, values);
}
- void extend(uint index, VSpan<T> values)
+ void extend(int64_t index, VSpan<T> values)
{
vector_array_->extend(index, GVSpan(values));
}
- MutableSpan<T> operator[](uint index)
+ MutableSpan<T> operator[](int64_t index)
{
BLI_assert(index < vector_array_->starts_.size());
return MutableSpan<T>((T *)vector_array_->starts_[index], vector_array_->lengths_[index]);
}
- uint size() const
+ int64_t size() const
{
return vector_array_->size();
}
diff --git a/source/blender/functions/FN_multi_function.hh b/source/blender/functions/FN_multi_function.hh
index 35f144368ac..77ab2749377 100644
--- a/source/blender/functions/FN_multi_function.hh
+++ b/source/blender/functions/FN_multi_function.hh
@@ -63,7 +63,7 @@ class MultiFunction {
virtual void call(IndexMask mask, MFParams params, MFContext context) const = 0;
- virtual uint32_t hash() const
+ virtual uint64_t hash() const
{
return DefaultHash<const MultiFunction *>{}(this);
}
@@ -73,7 +73,7 @@ class MultiFunction {
return false;
}
- uint param_amount() const
+ int param_amount() const
{
return signature_.param_types.size();
}
@@ -83,12 +83,12 @@ class MultiFunction {
return signature_.param_types.index_range();
}
- MFParamType param_type(uint param_index) const
+ MFParamType param_type(int param_index) const
{
return signature_.param_types[param_index];
}
- StringRefNull param_name(uint param_index) const
+ StringRefNull param_name(int param_index) const
{
return signature_.param_names[param_index];
}
@@ -111,7 +111,7 @@ class MultiFunction {
}
};
-inline MFParamsBuilder::MFParamsBuilder(const class MultiFunction &fn, uint min_array_size)
+inline MFParamsBuilder::MFParamsBuilder(const class MultiFunction &fn, int64_t min_array_size)
: MFParamsBuilder(fn.signature(), min_array_size)
{
}
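End-to-end, the builder is used roughly as below. This is a sketch: the `CustomMF_SI_SO` lambda constructor, `add_uninitialized_single_output`, and `MFContextBuilder` are assumed from the surrounding headers rather than shown in this diff.

blender::fn::CustomMF_SI_SO<int, int> add_one("add one", [](int v) { return v + 1; });

blender::Array<int> inputs = {1, 2, 3};
blender::Array<int> outputs(3);

blender::fn::MFParamsBuilder params(add_one, /*min_array_size=*/3);
params.add_readonly_single_input(inputs.as_span());
params.add_uninitialized_single_output(outputs.as_mutable_span());

blender::fn::MFContextBuilder context;
add_one.call(blender::IndexRange(3), params, context);
/* outputs now holds {2, 3, 4}. */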
diff --git a/source/blender/functions/FN_multi_function_builder.hh b/source/blender/functions/FN_multi_function_builder.hh
index c2c95f7c355..95e216558e7 100644
--- a/source/blender/functions/FN_multi_function_builder.hh
+++ b/source/blender/functions/FN_multi_function_builder.hh
@@ -59,7 +59,7 @@ template<typename In1, typename Out1> class CustomMF_SI_SO : public MultiFunctio
template<typename ElementFuncT> static FunctionT create_function(ElementFuncT element_fn)
{
return [=](IndexMask mask, VSpan<In1> in1, MutableSpan<Out1> out1) {
- mask.foreach_index([&](uint i) { new ((void *)&out1[i]) Out1(element_fn(in1[i])); });
+ mask.foreach_index([&](int i) { new ((void *)&out1[i]) Out1(element_fn(in1[i])); });
};
}
@@ -101,7 +101,7 @@ class CustomMF_SI_SI_SO : public MultiFunction {
template<typename ElementFuncT> static FunctionT create_function(ElementFuncT element_fn)
{
return [=](IndexMask mask, VSpan<In1> in1, VSpan<In2> in2, MutableSpan<Out1> out1) {
- mask.foreach_index([&](uint i) { new ((void *)&out1[i]) Out1(element_fn(in1[i], in2[i])); });
+ mask.foreach_index([&](int i) { new ((void *)&out1[i]) Out1(element_fn(in1[i], in2[i])); });
};
}
@@ -152,7 +152,7 @@ class CustomMF_SI_SI_SI_SO : public MultiFunction {
VSpan<In3> in3,
MutableSpan<Out1> out1) {
mask.foreach_index(
- [&](uint i) { new ((void *)&out1[i]) Out1(element_fn(in1[i], in2[i], in3[i])); });
+ [&](int i) { new ((void *)&out1[i]) Out1(element_fn(in1[i], in2[i], in3[i])); });
};
}
@@ -191,7 +191,7 @@ template<typename Mut1> class CustomMF_SM : public MultiFunction {
template<typename ElementFuncT> static FunctionT create_function(ElementFuncT element_fn)
{
return [=](IndexMask mask, MutableSpan<Mut1> mut1) {
- mask.foreach_index([&](uint i) { element_fn(mut1[i]); });
+ mask.foreach_index([&](int i) { element_fn(mut1[i]); });
};
}
@@ -220,7 +220,7 @@ template<typename From, typename To> class CustomMF_Convert : public MultiFuncti
VSpan<From> inputs = params.readonly_single_input<From>(0);
MutableSpan<To> outputs = params.uninitialized_single_output<To>(1);
- for (uint i : mask) {
+ for (int64_t i : mask) {
new ((void *)&outputs[i]) To(inputs[i]);
}
}
@@ -240,7 +240,7 @@ class CustomMF_GenericConstant : public MultiFunction {
public:
CustomMF_GenericConstant(const CPPType &type, const void *value);
void call(IndexMask mask, MFParams params, MFContext context) const override;
- uint32_t hash() const override;
+ uint64_t hash() const override;
bool equals(const MultiFunction &other) const override;
};
@@ -276,10 +276,10 @@ template<typename T> class CustomMF_Constant : public MultiFunction {
void call(IndexMask mask, MFParams params, MFContext UNUSED(context)) const override
{
MutableSpan<T> output = params.uninitialized_single_output<T>(0);
- mask.foreach_index([&](uint i) { new (&output[i]) T(value_); });
+ mask.foreach_index([&](int i) { new (&output[i]) T(value_); });
}
- uint32_t hash() const override
+ uint64_t hash() const override
{
return DefaultHash<T>{}(value_);
}
@@ -304,7 +304,7 @@ template<typename T> class CustomMF_Constant : public MultiFunction {
class CustomMF_DefaultOutput : public MultiFunction {
private:
- uint output_amount_;
+ int output_amount_;
public:
CustomMF_DefaultOutput(StringRef name,
diff --git a/source/blender/functions/FN_multi_function_data_type.hh b/source/blender/functions/FN_multi_function_data_type.hh
index 57aea046006..23a1c0e5680 100644
--- a/source/blender/functions/FN_multi_function_data_type.hh
+++ b/source/blender/functions/FN_multi_function_data_type.hh
@@ -109,9 +109,9 @@ class MFDataType {
return "";
}
- uint hash() const
+ uint64_t hash() const
{
- return DefaultHash<CPPType>{}(*type_) + (uint32_t)category_;
+ return DefaultHash<CPPType>{}(*type_) + (uint64_t)category_;
}
};
diff --git a/source/blender/functions/FN_multi_function_network.hh b/source/blender/functions/FN_multi_function_network.hh
index 91eb5bb65dc..f6d6c7417e7 100644
--- a/source/blender/functions/FN_multi_function_network.hh
+++ b/source/blender/functions/FN_multi_function_network.hh
@@ -62,14 +62,14 @@ class MFNode : NonCopyable, NonMovable {
Span<MFInputSocket *> inputs_;
Span<MFOutputSocket *> outputs_;
bool is_dummy_;
- uint id_;
+ int id_;
friend MFNetwork;
public:
StringRefNull name() const;
- uint id() const;
+ int id() const;
MFNetwork &network();
const MFNetwork &network() const;
@@ -83,11 +83,11 @@ class MFNode : NonCopyable, NonMovable {
MFFunctionNode &as_function();
const MFFunctionNode &as_function() const;
- MFInputSocket &input(uint index);
- const MFInputSocket &input(uint index) const;
+ MFInputSocket &input(int index);
+ const MFInputSocket &input(int index) const;
- MFOutputSocket &output(uint index);
- const MFOutputSocket &output(uint index) const;
+ MFOutputSocket &output(int index);
+ const MFOutputSocket &output(int index) const;
Span<MFInputSocket *> inputs();
Span<const MFInputSocket *> inputs() const;
@@ -104,8 +104,8 @@ class MFNode : NonCopyable, NonMovable {
class MFFunctionNode : public MFNode {
private:
const MultiFunction *function_;
- Span<uint> input_param_indices_;
- Span<uint> output_param_indices_;
+ Span<int> input_param_indices_;
+ Span<int> output_param_indices_;
friend MFNetwork;
@@ -114,8 +114,8 @@ class MFFunctionNode : public MFNode {
const MultiFunction &function() const;
- const MFInputSocket &input_for_param(uint param_index) const;
- const MFOutputSocket &output_for_param(uint param_index) const;
+ const MFInputSocket &input_for_param(int param_index) const;
+ const MFOutputSocket &output_for_param(int param_index) const;
};
class MFDummyNode : public MFNode {
@@ -137,9 +137,9 @@ class MFSocket : NonCopyable, NonMovable {
protected:
MFNode *node_;
bool is_output_;
- uint index_;
+ int index_;
MFDataType data_type_;
- uint id_;
+ int id_;
StringRefNull name_;
friend MFNetwork;
@@ -147,8 +147,8 @@ class MFSocket : NonCopyable, NonMovable {
public:
StringRefNull name() const;
- uint id() const;
- uint index() const;
+ int id() const;
+ int index() const;
const MFDataType &data_type() const;
@@ -217,17 +217,17 @@ class MFNetwork : NonCopyable, NonMovable {
void remove(MFNode &node);
void remove(Span<MFNode *> nodes);
- uint socket_id_amount() const;
- uint node_id_amount() const;
+ int socket_id_amount() const;
+ int node_id_amount() const;
Span<MFDummyNode *> dummy_nodes();
Span<MFFunctionNode *> function_nodes();
- MFNode *node_or_null_by_id(uint id);
- const MFNode *node_or_null_by_id(uint id) const;
+ MFNode *node_or_null_by_id(int id);
+ const MFNode *node_or_null_by_id(int id) const;
- MFSocket *socket_or_null_by_id(uint id);
- const MFSocket *socket_or_null_by_id(uint id) const;
+ MFSocket *socket_or_null_by_id(int id);
+ const MFSocket *socket_or_null_by_id(int id) const;
void find_dependencies(Span<const MFInputSocket *> sockets,
VectorSet<const MFOutputSocket *> &r_dummy_sockets,
@@ -252,7 +252,7 @@ inline StringRefNull MFNode::name() const
}
}
-inline uint MFNode::id() const
+inline int MFNode::id() const
{
return id_;
}
@@ -301,22 +301,22 @@ inline const MFFunctionNode &MFNode::as_function() const
return *(const MFFunctionNode *)this;
}
-inline MFInputSocket &MFNode::input(uint index)
+inline MFInputSocket &MFNode::input(int index)
{
return *inputs_[index];
}
-inline const MFInputSocket &MFNode::input(uint index) const
+inline const MFInputSocket &MFNode::input(int index) const
{
return *inputs_[index];
}
-inline MFOutputSocket &MFNode::output(uint index)
+inline MFOutputSocket &MFNode::output(int index)
{
return *outputs_[index];
}
-inline const MFOutputSocket &MFNode::output(uint index) const
+inline const MFOutputSocket &MFNode::output(int index) const
{
return *outputs_[index];
}
@@ -365,12 +365,12 @@ inline const MultiFunction &MFFunctionNode::function() const
return *function_;
}
-inline const MFInputSocket &MFFunctionNode::input_for_param(uint param_index) const
+inline const MFInputSocket &MFFunctionNode::input_for_param(int param_index) const
{
return this->input(input_param_indices_.first_index(param_index));
}
-inline const MFOutputSocket &MFFunctionNode::output_for_param(uint param_index) const
+inline const MFOutputSocket &MFFunctionNode::output_for_param(int param_index) const
{
return this->output(output_param_indices_.first_index(param_index));
}
@@ -403,12 +403,12 @@ inline StringRefNull MFSocket::name() const
return name_;
}
-inline uint MFSocket::id() const
+inline int MFSocket::id() const
{
return id_;
}
-inline uint MFSocket::index() const
+inline int MFSocket::index() const
{
return index_;
}
@@ -504,32 +504,32 @@ inline Span<MFFunctionNode *> MFNetwork::function_nodes()
return function_nodes_;
}
-inline MFNode *MFNetwork::node_or_null_by_id(uint id)
+inline MFNode *MFNetwork::node_or_null_by_id(int id)
{
return node_or_null_by_id_[id];
}
-inline const MFNode *MFNetwork::node_or_null_by_id(uint id) const
+inline const MFNode *MFNetwork::node_or_null_by_id(int id) const
{
return node_or_null_by_id_[id];
}
-inline MFSocket *MFNetwork::socket_or_null_by_id(uint id)
+inline MFSocket *MFNetwork::socket_or_null_by_id(int id)
{
return socket_or_null_by_id_[id];
}
-inline const MFSocket *MFNetwork::socket_or_null_by_id(uint id) const
+inline const MFSocket *MFNetwork::socket_or_null_by_id(int id) const
{
return socket_or_null_by_id_[id];
}
-inline uint MFNetwork::socket_id_amount() const
+inline int MFNetwork::socket_id_amount() const
{
return socket_or_null_by_id_.size();
}
-inline uint MFNetwork::node_id_amount() const
+inline int MFNetwork::node_id_amount() const
{
return node_or_null_by_id_.size();
}
diff --git a/source/blender/functions/FN_multi_function_params.hh b/source/blender/functions/FN_multi_function_params.hh
index e6683693a0a..9cce8bb7401 100644
--- a/source/blender/functions/FN_multi_function_params.hh
+++ b/source/blender/functions/FN_multi_function_params.hh
@@ -34,7 +34,7 @@ namespace blender::fn {
class MFParamsBuilder {
private:
const MFSignature *signature_;
- uint min_array_size_;
+ int64_t min_array_size_;
Vector<GVSpan> virtual_spans_;
Vector<GMutableSpan> mutable_spans_;
Vector<GVArraySpan> virtual_array_spans_;
@@ -43,12 +43,12 @@ class MFParamsBuilder {
friend class MFParams;
public:
- MFParamsBuilder(const MFSignature &signature, uint min_array_size)
+ MFParamsBuilder(const MFSignature &signature, int64_t min_array_size)
: signature_(&signature), min_array_size_(min_array_size)
{
}
- MFParamsBuilder(const class MultiFunction &fn, uint min_array_size);
+ MFParamsBuilder(const class MultiFunction &fn, int64_t min_array_size);
template<typename T> void add_readonly_single_input(const T *value)
{
@@ -96,21 +96,21 @@ class MFParamsBuilder {
vector_arrays_.append(&vector_array);
}
- GMutableSpan computed_array(uint param_index)
+ GMutableSpan computed_array(int param_index)
{
BLI_assert(ELEM(signature_->param_types[param_index].category(),
MFParamType::SingleOutput,
MFParamType::SingleMutable));
- uint data_index = signature_->data_index(param_index);
+ int data_index = signature_->data_index(param_index);
return mutable_spans_[data_index];
}
- GVectorArray &computed_vector_array(uint param_index)
+ GVectorArray &computed_vector_array(int param_index)
{
BLI_assert(ELEM(signature_->param_types[param_index].category(),
MFParamType::VectorOutput,
MFParamType::VectorMutable));
- uint data_index = signature_->data_index(param_index);
+ int data_index = signature_->data_index(param_index);
return *vector_arrays_[data_index];
}
@@ -119,13 +119,13 @@ class MFParamsBuilder {
{
UNUSED_VARS_NDEBUG(param_type);
#ifdef DEBUG
- uint param_index = this->current_param_index();
+ int param_index = this->current_param_index();
MFParamType expected_type = signature_->param_types[param_index];
BLI_assert(expected_type == param_type);
#endif
}
- uint current_param_index() const
+ int current_param_index() const
{
return virtual_spans_.size() + mutable_spans_.size() + virtual_array_spans_.size() +
vector_arrays_.size();
@@ -141,75 +141,75 @@ class MFParams {
{
}
- template<typename T> VSpan<T> readonly_single_input(uint param_index, StringRef name = "")
+ template<typename T> VSpan<T> readonly_single_input(int param_index, StringRef name = "")
{
return this->readonly_single_input(param_index, name).typed<T>();
}
- GVSpan readonly_single_input(uint param_index, StringRef name = "")
+ GVSpan readonly_single_input(int param_index, StringRef name = "")
{
this->assert_correct_param(param_index, name, MFParamType::SingleInput);
- uint data_index = builder_->signature_->data_index(param_index);
+ int data_index = builder_->signature_->data_index(param_index);
return builder_->virtual_spans_[data_index];
}
template<typename T>
- MutableSpan<T> uninitialized_single_output(uint param_index, StringRef name = "")
+ MutableSpan<T> uninitialized_single_output(int param_index, StringRef name = "")
{
return this->uninitialized_single_output(param_index, name).typed<T>();
}
- GMutableSpan uninitialized_single_output(uint param_index, StringRef name = "")
+ GMutableSpan uninitialized_single_output(int param_index, StringRef name = "")
{
this->assert_correct_param(param_index, name, MFParamType::SingleOutput);
- uint data_index = builder_->signature_->data_index(param_index);
+ int data_index = builder_->signature_->data_index(param_index);
return builder_->mutable_spans_[data_index];
}
- template<typename T> VArraySpan<T> readonly_vector_input(uint param_index, StringRef name = "")
+ template<typename T> VArraySpan<T> readonly_vector_input(int param_index, StringRef name = "")
{
return this->readonly_vector_input(param_index, name).typed<T>();
}
- GVArraySpan readonly_vector_input(uint param_index, StringRef name = "")
+ GVArraySpan readonly_vector_input(int param_index, StringRef name = "")
{
this->assert_correct_param(param_index, name, MFParamType::VectorInput);
- uint data_index = builder_->signature_->data_index(param_index);
+ int data_index = builder_->signature_->data_index(param_index);
return builder_->virtual_array_spans_[data_index];
}
- template<typename T> GVectorArrayRef<T> vector_output(uint param_index, StringRef name = "")
+ template<typename T> GVectorArrayRef<T> vector_output(int param_index, StringRef name = "")
{
return this->vector_output(param_index, name).typed<T>();
}
- GVectorArray &vector_output(uint param_index, StringRef name = "")
+ GVectorArray &vector_output(int param_index, StringRef name = "")
{
this->assert_correct_param(param_index, name, MFParamType::VectorOutput);
- uint data_index = builder_->signature_->data_index(param_index);
+ int data_index = builder_->signature_->data_index(param_index);
return *builder_->vector_arrays_[data_index];
}
- template<typename T> MutableSpan<T> single_mutable(uint param_index, StringRef name = "")
+ template<typename T> MutableSpan<T> single_mutable(int param_index, StringRef name = "")
{
return this->single_mutable(param_index, name).typed<T>();
}
- GMutableSpan single_mutable(uint param_index, StringRef name = "")
+ GMutableSpan single_mutable(int param_index, StringRef name = "")
{
this->assert_correct_param(param_index, name, MFParamType::SingleMutable);
- uint data_index = builder_->signature_->data_index(param_index);
+ int data_index = builder_->signature_->data_index(param_index);
return builder_->mutable_spans_[data_index];
}
- template<typename T> GVectorArrayRef<T> vector_mutable(uint param_index, StringRef name = "")
+ template<typename T> GVectorArrayRef<T> vector_mutable(int param_index, StringRef name = "")
{
return this->vector_mutable(param_index, name).typed<T>();
}
- GVectorArray &vector_mutable(uint param_index, StringRef name = "")
+ GVectorArray &vector_mutable(int param_index, StringRef name = "")
{
this->assert_correct_param(param_index, name, MFParamType::VectorMutable);
- uint data_index = builder_->signature_->data_index(param_index);
+ int data_index = builder_->signature_->data_index(param_index);
return *builder_->vector_arrays_[data_index];
}
private:
- void assert_correct_param(uint param_index, StringRef name, MFParamType param_type)
+ void assert_correct_param(int param_index, StringRef name, MFParamType param_type)
{
UNUSED_VARS_NDEBUG(param_index, name, param_type);
#ifdef DEBUG
@@ -220,7 +220,7 @@ class MFParams {
#endif
}
- void assert_correct_param(uint param_index, StringRef name, MFParamType::Category category)
+ void assert_correct_param(int param_index, StringRef name, MFParamType::Category category)
{
UNUSED_VARS_NDEBUG(param_index, name, category);
#ifdef DEBUG
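
For context, a minimal usage sketch of the builder/params pair changed above, assuming a MultiFunction fn with one single input and one single output, plus the MFContextBuilder and call() API from FN_multi_function.hh (values are hypothetical):

    const int64_t size = 3;
    float input_value = 2.0f;
    blender::Array<float> results(size);

    blender::fn::MFParamsBuilder params{fn, size};
    params.add_readonly_single_input(&input_value);
    params.add_uninitialized_single_output(results.as_mutable_span());

    blender::fn::MFContextBuilder context;
    fn.call(blender::IndexRange(size), params, context);

The int param_index accessors (computed_array, computed_vector_array) then address the filled buffers by their position in the signature.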
diff --git a/source/blender/functions/FN_multi_function_signature.hh b/source/blender/functions/FN_multi_function_signature.hh
index e2cf783753e..4ccceb39503 100644
--- a/source/blender/functions/FN_multi_function_signature.hh
+++ b/source/blender/functions/FN_multi_function_signature.hh
@@ -35,9 +35,9 @@ struct MFSignature {
/* Use RawAllocator so that a MultiFunction can have static storage duration. */
Vector<std::string, 4, RawAllocator> param_names;
Vector<MFParamType, 4, RawAllocator> param_types;
- Vector<uint, 4, RawAllocator> param_data_indices;
+ Vector<int, 4, RawAllocator> param_data_indices;
- uint data_index(uint param_index) const
+ int data_index(int param_index) const
{
return param_data_indices[param_index];
}
@@ -46,10 +46,10 @@ struct MFSignature {
class MFSignatureBuilder {
private:
MFSignature &data_;
- uint span_count_ = 0;
- uint virtual_span_count_ = 0;
- uint virtual_array_span_count_ = 0;
- uint vector_array_count_ = 0;
+ int span_count_ = 0;
+ int virtual_span_count_ = 0;
+ int virtual_array_span_count_ = 0;
+ int vector_array_count_ = 0;
public:
MFSignatureBuilder(MFSignature &data) : data_(data)
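
The RawAllocator comment above exists because MultiFunction objects are commonly created as function-local statics, and vectors backed by the guarded allocator would be flagged as leaks or destructed after the allocator's teardown. A sketch of the pattern this enables, assuming the CustomMF_SI_SI_SO helper from FN_multi_function_builder.hh (hypothetical function):

    const blender::fn::MultiFunction &get_add_function()
    {
      /* Static storage duration is safe because MFSignature's vectors
       * use RawAllocator internally. */
      static blender::fn::CustomMF_SI_SI_SO<float, float, float> fn{
          "Add", [](float a, float b) { return a + b; }};
      return fn;
    }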
diff --git a/source/blender/functions/FN_spans.hh b/source/blender/functions/FN_spans.hh
index c8c98d66628..d8b381199cc 100644
--- a/source/blender/functions/FN_spans.hh
+++ b/source/blender/functions/FN_spans.hh
@@ -52,12 +52,13 @@ class GSpan {
private:
const CPPType *type_;
const void *buffer_;
- uint size_;
+ int64_t size_;
public:
- GSpan(const CPPType &type, const void *buffer, uint size)
+ GSpan(const CPPType &type, const void *buffer, int64_t size)
: type_(&type), buffer_(buffer), size_(size)
{
+ BLI_assert(size >= 0);
BLI_assert(buffer != nullptr || size == 0);
BLI_assert(type.pointer_has_valid_alignment(buffer));
}
@@ -81,7 +82,7 @@ class GSpan {
return size_ == 0;
}
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -91,7 +92,7 @@ class GSpan {
return buffer_;
}
- const void *operator[](uint index) const
+ const void *operator[](int64_t index) const
{
BLI_assert(index < size_);
return POINTER_OFFSET(buffer_, type_->size() * index);
@@ -112,12 +113,13 @@ class GMutableSpan {
private:
const CPPType *type_;
void *buffer_;
- uint size_;
+ int64_t size_;
public:
- GMutableSpan(const CPPType &type, void *buffer, uint size)
+ GMutableSpan(const CPPType &type, void *buffer, int64_t size)
: type_(&type), buffer_(buffer), size_(size)
{
+ BLI_assert(size >= 0);
BLI_assert(buffer != nullptr || size == 0);
BLI_assert(type.pointer_has_valid_alignment(buffer));
}
@@ -147,7 +149,7 @@ class GMutableSpan {
return size_ == 0;
}
- uint size() const
+ int64_t size() const
{
return size_;
}
@@ -157,7 +159,7 @@ class GMutableSpan {
return buffer_;
}
- void *operator[](uint index)
+ void *operator[](int64_t index)
{
BLI_assert(index < size_);
return POINTER_OFFSET(buffer_, type_->size() * index);
@@ -178,7 +180,7 @@ enum class VSpanCategory {
template<typename T> struct VSpanBase {
protected:
- uint virtual_size_;
+ int64_t virtual_size_;
VSpanCategory category_;
union {
struct {
@@ -212,7 +214,7 @@ template<typename T> struct VSpanBase {
return this->virtual_size_ == 0;
}
- uint size() const
+ int64_t size() const
{
return this->virtual_size_;
}
@@ -259,7 +261,7 @@ template<typename T> class VSpan : public VSpanBase<T> {
this->data_.full_pointer_array.data = values.begin();
}
- static VSpan FromSingle(const T *value, uint virtual_size)
+ static VSpan FromSingle(const T *value, int64_t virtual_size)
{
VSpan ref;
ref.virtual_size_ = virtual_size;
@@ -268,8 +270,9 @@ template<typename T> class VSpan : public VSpanBase<T> {
return ref;
}
- const T &operator[](uint index) const
+ const T &operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->virtual_size_);
switch (this->category_) {
case VSpanCategory::Single:
@@ -329,7 +332,7 @@ class GVSpan : public VSpanBase<void> {
{
}
- static GVSpan FromSingle(const CPPType &type, const void *value, uint virtual_size)
+ static GVSpan FromSingle(const CPPType &type, const void *value, int64_t virtual_size)
{
GVSpan ref;
ref.type_ = &type;
@@ -341,7 +344,7 @@ class GVSpan : public VSpanBase<void> {
static GVSpan FromSingleWithMaxSize(const CPPType &type, const void *value)
{
- return GVSpan::FromSingle(type, value, UINT32_MAX);
+ return GVSpan::FromSingle(type, value, INT64_MAX);
}
static GVSpan FromDefault(const CPPType &type)
@@ -349,7 +352,7 @@ class GVSpan : public VSpanBase<void> {
return GVSpan::FromSingleWithMaxSize(type, type.default_value());
}
- static GVSpan FromFullPointerArray(const CPPType &type, const void *const *values, uint size)
+ static GVSpan FromFullPointerArray(const CPPType &type, const void *const *values, int64_t size)
{
GVSpan ref;
ref.type_ = &type;
@@ -364,8 +367,9 @@ class GVSpan : public VSpanBase<void> {
return *this->type_;
}
- const void *operator[](uint index) const
+ const void *operator[](int64_t index) const
{
+ BLI_assert(index >= 0);
BLI_assert(index < this->virtual_size_);
switch (this->category_) {
case VSpanCategory::Single:
@@ -400,8 +404,8 @@ class GVSpan : public VSpanBase<void> {
{
BLI_assert(this->size() >= mask.min_array_size());
- uint element_size = type_->size();
- for (uint i : mask) {
+ int64_t element_size = type_->size();
+ for (int64_t i : mask) {
type_->copy_to_uninitialized((*this)[i], POINTER_OFFSET(dst, element_size * i));
}
}
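
GSpan and GMutableSpan pair a CPPType with a raw buffer, so indexing strides by type.size() at runtime. With the signed int64_t size, the new assert rejects negative sizes at construction. A minimal usage sketch, assuming CPPType::get from FN_cpp_type.hh (hypothetical values):

    float values[3] = {1.0f, 2.0f, 3.0f};
    const blender::fn::CPPType &type = blender::fn::CPPType::get<float>();

    blender::fn::GSpan span(type, values, 3); /* asserts size >= 0 */

    /* operator[] offsets by type.size() * index and returns const void *. */
    const float *second = static_cast<const float *>(span[1]);

GVSpan::FromSingleWithMaxSize now uses INT64_MAX instead of UINT32_MAX as the virtual size, matching the widened int64_t size fields while still meaning "effectively unbounded".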
diff --git a/source/blender/functions/intern/attributes_ref.cc b/source/blender/functions/intern/attributes_ref.cc
index f61a9165c47..7bfcc69671a 100644
--- a/source/blender/functions/intern/attributes_ref.cc
+++ b/source/blender/functions/intern/attributes_ref.cc
@@ -20,7 +20,7 @@ namespace blender::fn {
AttributesInfoBuilder::~AttributesInfoBuilder()
{
- for (uint i : defaults_.index_range()) {
+ for (int i : defaults_.index_range()) {
types_[i]->destruct(defaults_[i]);
}
}
@@ -45,7 +45,7 @@ void AttributesInfoBuilder::add(StringRef name, const CPPType &type, const void
AttributesInfo::AttributesInfo(const AttributesInfoBuilder &builder)
{
- for (uint i : builder.types_.index_range()) {
+ for (int i : builder.types_.index_range()) {
StringRefNull name = allocator_.copy_string(builder.names_[i]);
const CPPType &type = *builder.types_[i];
const void *default_value = builder.defaults_[i];
@@ -62,7 +62,7 @@ AttributesInfo::AttributesInfo(const AttributesInfoBuilder &builder)
AttributesInfo::~AttributesInfo()
{
- for (uint i : defaults_.index_range()) {
+ for (int i : defaults_.index_range()) {
type_by_index_[i]->destruct(defaults_[i]);
}
}
diff --git a/source/blender/functions/intern/multi_function_builder.cc b/source/blender/functions/intern/multi_function_builder.cc
index 7797c19d563..06084247e66 100644
--- a/source/blender/functions/intern/multi_function_builder.cc
+++ b/source/blender/functions/intern/multi_function_builder.cc
@@ -37,7 +37,7 @@ void CustomMF_GenericConstant::call(IndexMask mask,
type_.fill_uninitialized_indices(value_, output.buffer(), mask);
}
-uint CustomMF_GenericConstant::hash() const
+uint64_t CustomMF_GenericConstant::hash() const
{
return type_.hash(value_);
}
@@ -58,8 +58,8 @@ static std::string gspan_to_string(GSpan array)
{
std::stringstream ss;
ss << "[";
- uint max_amount = 5;
- for (uint i : IndexRange(std::min(max_amount, array.size()))) {
+ const int64_t max_amount = 5;
+ for (int64_t i : IndexRange(std::min(max_amount, array.size()))) {
array.type().debug_print(array[i], ss);
ss << ", ";
}
@@ -82,7 +82,7 @@ void CustomMF_GenericConstantArray::call(IndexMask mask,
MFContext UNUSED(context)) const
{
GVectorArray &vectors = params.vector_output(0);
- for (uint i : mask) {
+ for (int64_t i : mask) {
vectors.extend(i, array_);
}
}
@@ -102,7 +102,7 @@ CustomMF_DefaultOutput::CustomMF_DefaultOutput(StringRef name,
}
void CustomMF_DefaultOutput::call(IndexMask mask, MFParams params, MFContext UNUSED(context)) const
{
- for (uint param_index : this->param_indices()) {
+ for (int param_index : this->param_indices()) {
MFParamType param_type = this->param_type(param_index);
if (!param_type.is_output()) {
continue;
diff --git a/source/blender/functions/intern/multi_function_network.cc b/source/blender/functions/intern/multi_function_network.cc
index 1d3d3a8b5f2..cd3c38dd09f 100644
--- a/source/blender/functions/intern/multi_function_network.cc
+++ b/source/blender/functions/intern/multi_function_network.cc
@@ -50,9 +50,9 @@ void MFNode::destruct_sockets()
*/
MFFunctionNode &MFNetwork::add_function(const MultiFunction &function)
{
- Vector<uint, 16> input_param_indices, output_param_indices;
+ Vector<int, 16> input_param_indices, output_param_indices;
- for (uint param_index : function.param_indices()) {
+ for (int param_index : function.param_indices()) {
switch (function.param_type(param_index).interface_type()) {
case MFParamType::Input: {
input_param_indices.append(param_index);
@@ -77,16 +77,16 @@ MFFunctionNode &MFNetwork::add_function(const MultiFunction &function)
node.is_dummy_ = false;
node.id_ = node_or_null_by_id_.append_and_get_index(&node);
node.function_ = &function;
- node.input_param_indices_ = allocator_.construct_array_copy<uint>(input_param_indices);
- node.output_param_indices_ = allocator_.construct_array_copy<uint>(output_param_indices);
+ node.input_param_indices_ = allocator_.construct_array_copy<int>(input_param_indices);
+ node.output_param_indices_ = allocator_.construct_array_copy<int>(output_param_indices);
node.inputs_ = allocator_.construct_elements_and_pointer_array<MFInputSocket>(
input_param_indices.size());
node.outputs_ = allocator_.construct_elements_and_pointer_array<MFOutputSocket>(
output_param_indices.size());
- for (uint i : input_param_indices.index_range()) {
- uint param_index = input_param_indices[i];
+ for (int i : input_param_indices.index_range()) {
+ int param_index = input_param_indices[i];
MFParamType param = function.param_type(param_index);
BLI_assert(param.is_input_or_mutable());
@@ -100,8 +100,8 @@ MFFunctionNode &MFNetwork::add_function(const MultiFunction &function)
socket.id_ = socket_or_null_by_id_.append_and_get_index(&socket);
}
- for (uint i : output_param_indices.index_range()) {
- uint param_index = output_param_indices[i];
+ for (int i : output_param_indices.index_range()) {
+ int param_index = output_param_indices[i];
MFParamType param = function.param_type(param_index);
BLI_assert(param.is_output_or_mutable());
@@ -145,7 +145,7 @@ MFDummyNode &MFNetwork::add_dummy(StringRef name,
node.input_names_ = allocator_.allocate_array<StringRefNull>(input_types.size());
node.output_names_ = allocator_.allocate_array<StringRefNull>(output_types.size());
- for (uint i : input_types.index_range()) {
+ for (int i : input_types.index_range()) {
MFInputSocket &socket = *node.inputs_[i];
socket.data_type_ = input_types[i];
socket.node_ = &node;
@@ -156,7 +156,7 @@ MFDummyNode &MFNetwork::add_dummy(StringRef name,
node.input_names_[i] = socket.name_;
}
- for (uint i : output_types.index_range()) {
+ for (int i : output_types.index_range()) {
MFOutputSocket &socket = *node.outputs_[i];
socket.data_type_ = output_types[i];
socket.node_ = &node;
diff --git a/source/blender/functions/intern/multi_function_network_evaluation.cc b/source/blender/functions/intern/multi_function_network_evaluation.cc
index b59cbc6a1a2..a7e1a2f42af 100644
--- a/source/blender/functions/intern/multi_function_network_evaluation.cc
+++ b/source/blender/functions/intern/multi_function_network_evaluation.cc
@@ -55,10 +55,10 @@ class MFNetworkEvaluationStorage {
LinearAllocator<> allocator_;
IndexMask mask_;
Array<Value *> value_per_output_id_;
- uint min_array_size_;
+ int64_t min_array_size_;
public:
- MFNetworkEvaluationStorage(IndexMask mask, uint socket_id_amount);
+ MFNetworkEvaluationStorage(IndexMask mask, int socket_id_amount);
~MFNetworkEvaluationStorage();
/* Add the values that have been provided by the caller of the multi-function network. */
@@ -155,8 +155,8 @@ void MFNetworkEvaluator::call(IndexMask mask, MFParams params, MFContext context
BLI_NOINLINE void MFNetworkEvaluator::copy_inputs_to_storage(MFParams params,
Storage &storage) const
{
- for (uint input_index : inputs_.index_range()) {
- uint param_index = input_index + 0;
+ for (int input_index : inputs_.index_range()) {
+ int param_index = input_index + 0;
const MFOutputSocket &socket = *inputs_[input_index];
switch (socket.data_type().category()) {
case MFDataType::Single: {
@@ -178,8 +178,8 @@ BLI_NOINLINE void MFNetworkEvaluator::copy_outputs_to_storage(
Storage &storage,
Vector<const MFInputSocket *> &outputs_to_initialize_in_the_end) const
{
- for (uint output_index : outputs_.index_range()) {
- uint param_index = output_index + inputs_.size();
+ for (int output_index : outputs_.index_range()) {
+ int param_index = output_index + inputs_.size();
const MFInputSocket &socket = *outputs_[output_index];
const MFOutputSocket &origin = *socket.origin();
@@ -263,7 +263,7 @@ BLI_NOINLINE void MFNetworkEvaluator::evaluate_function(MFContext &global_contex
* function only on a single element. This can avoid many duplicate computations. */
MFParamsBuilder params{function, 1};
- for (uint param_index : function.param_indices()) {
+ for (int param_index : function.param_indices()) {
MFParamType param_type = function.param_type(param_index);
switch (param_type.category()) {
case MFParamType::SingleInput: {
@@ -312,7 +312,7 @@ BLI_NOINLINE void MFNetworkEvaluator::evaluate_function(MFContext &global_contex
else {
MFParamsBuilder params{function, storage.mask().min_array_size()};
- for (uint param_index : function.param_indices()) {
+ for (int param_index : function.param_indices()) {
MFParamType param_type = function.param_type(param_index);
switch (param_type.category()) {
case MFParamType::SingleInput: {
@@ -384,7 +384,7 @@ BLI_NOINLINE void MFNetworkEvaluator::initialize_remaining_outputs(
MFParams params, Storage &storage, Span<const MFInputSocket *> remaining_outputs) const
{
for (const MFInputSocket *socket : remaining_outputs) {
- uint param_index = inputs_.size() + outputs_.first_index_of(socket);
+ int param_index = inputs_.size() + outputs_.first_index_of(socket);
switch (socket->data_type().category()) {
case MFDataType::Single: {
@@ -506,7 +506,7 @@ struct OwnVectorValue : public Value {
/** \name Storage methods
* \{ */
-MFNetworkEvaluationStorage::MFNetworkEvaluationStorage(IndexMask mask, uint socket_id_amount)
+MFNetworkEvaluationStorage::MFNetworkEvaluationStorage(IndexMask mask, int socket_id_amount)
: mask_(mask),
value_per_output_id_(socket_id_amount, nullptr),
min_array_size_(mask.min_array_size())
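
The single-element shortcut mentioned in the comment above (MFParamsBuilder params{function, 1}) relies on virtual spans: a function whose inputs are all single values is evaluated once at index 0, and that one result is broadcast to every masked index. A sketch of the broadcasting step, reusing GVSpan::FromSingleWithMaxSize from FN_spans.hh (hypothetical value):

    /* One computed float, made to look like an arbitrarily large span. */
    float single_result = 42.0f;
    blender::fn::GVSpan broadcast = blender::fn::GVSpan::FromSingleWithMaxSize(
        blender::fn::CPPType::get<float>(), &single_result);
    /* broadcast[i] now yields &single_result for any valid index i. */

This is also why min_array_size_ is a minimum rather than an exact size: virtual spans can report a size far larger than the data they wrap.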
diff --git a/source/blender/functions/intern/multi_function_network_optimization.cc b/source/blender/functions/intern/multi_function_network_optimization.cc
index 849b24a318f..e76b2f51a15 100644
--- a/source/blender/functions/intern/multi_function_network_optimization.cc
+++ b/source/blender/functions/intern/multi_function_network_optimization.cc
@@ -107,7 +107,7 @@ static Vector<MFNode *> find_nodes_based_on_mask(MFNetwork &network,
bool mask_value)
{
Vector<MFNode *> nodes;
- for (uint id : id_mask.index_range()) {
+ for (int id : id_mask.index_range()) {
if (id_mask[id] == mask_value) {
MFNode *node = network.node_or_null_by_id(id);
if (node != nullptr) {
@@ -212,7 +212,7 @@ static void prepare_params_for_constant_folding(const MultiFunction &network_fn,
MFParamsBuilder &params,
ResourceCollector &resources)
{
- for (uint param_index : network_fn.param_indices()) {
+ for (int param_index : network_fn.param_indices()) {
MFParamType param_type = network_fn.param_type(param_index);
MFDataType data_type = param_type.data_type();
@@ -244,7 +244,7 @@ static Array<MFOutputSocket *> add_constant_folded_sockets(const MultiFunction &
{
Array<MFOutputSocket *> folded_sockets{network_fn.param_indices().size(), nullptr};
- for (uint param_index : network_fn.param_indices()) {
+ for (int param_index : network_fn.param_indices()) {
MFParamType param_type = network_fn.param_type(param_index);
MFDataType data_type = param_type.data_type();
@@ -302,7 +302,7 @@ void constant_folding(MFNetwork &network, ResourceCollector &resources)
Array<MFOutputSocket *> folded_sockets = compute_constant_sockets_and_add_folded_nodes(
network, inputs_to_fold, resources);
- for (uint i : inputs_to_fold.index_range()) {
+ for (int i : inputs_to_fold.index_range()) {
MFOutputSocket &original_socket = *inputs_to_fold[i]->origin();
network.relink(original_socket, *folded_sockets[i]);
}
@@ -317,12 +317,12 @@ void constant_folding(MFNetwork &network, ResourceCollector &resources)
*
* \{ */
-static uint32_t compute_node_hash(MFFunctionNode &node, RNG *rng, Span<uint32_t> node_hashes)
+static uint64_t compute_node_hash(MFFunctionNode &node, RNG *rng, Span<uint64_t> node_hashes)
{
- uint32_t combined_inputs_hash = 394659347u;
+ uint64_t combined_inputs_hash = 394659347u;
for (MFInputSocket *input_socket : node.inputs()) {
MFOutputSocket *origin_socket = input_socket->origin();
- uint32_t input_hash;
+ uint64_t input_hash;
if (origin_socket == nullptr) {
input_hash = BLI_rng_get_uint(rng);
}
@@ -333,8 +333,8 @@ static uint32_t compute_node_hash(MFFunctionNode &node, RNG *rng, Span<uint32_t>
combined_inputs_hash = BLI_ghashutil_combine_hash(combined_inputs_hash, input_hash);
}
- uint32_t function_hash = node.function().hash();
- uint32_t node_hash = BLI_ghashutil_combine_hash(combined_inputs_hash, function_hash);
+ uint64_t function_hash = node.function().hash();
+ uint64_t node_hash = BLI_ghashutil_combine_hash(combined_inputs_hash, function_hash);
return node_hash;
}
@@ -342,15 +342,15 @@ static uint32_t compute_node_hash(MFFunctionNode &node, RNG *rng, Span<uint32_t>
* Produces a hash for every node. Two nodes with the same hash should have a high probability of
* outputting the same values.
*/
-static Array<uint32_t> compute_node_hashes(MFNetwork &network)
+static Array<uint64_t> compute_node_hashes(MFNetwork &network)
{
RNG *rng = BLI_rng_new(0);
- Array<uint32_t> node_hashes(network.node_id_amount());
+ Array<uint64_t> node_hashes(network.node_id_amount());
Array<bool> node_is_hashed(network.node_id_amount(), false);
/* Dummy nodes are not assumed to output the same values. */

for (MFDummyNode *node : network.dummy_nodes()) {
- uint32_t node_hash = BLI_rng_get_uint(rng);
+ uint64_t node_hash = BLI_rng_get_uint(rng);
node_hashes[node->id()] = node_hash;
node_is_hashed[node->id()] = true;
}
@@ -381,7 +381,7 @@ static Array<uint32_t> compute_node_hashes(MFNetwork &network)
continue;
}
- uint32_t node_hash = compute_node_hash(node, rng, node_hashes);
+ uint64_t node_hash = compute_node_hash(node, rng, node_hashes);
node_hashes[node.id()] = node_hash;
node_is_hashed[node.id()] = true;
nodes_to_check.pop();
@@ -391,14 +391,14 @@ static Array<uint32_t> compute_node_hashes(MFNetwork &network)
return node_hashes;
}
-static Map<uint32_t, Vector<MFNode *, 1>> group_nodes_by_hash(MFNetwork &network,
- Span<uint32_t> node_hashes)
+static Map<uint64_t, Vector<MFNode *, 1>> group_nodes_by_hash(MFNetwork &network,
+ Span<uint64_t> node_hashes)
{
- Map<uint32_t, Vector<MFNode *, 1>> nodes_by_hash;
- for (uint id : IndexRange(network.node_id_amount())) {
+ Map<uint64_t, Vector<MFNode *, 1>> nodes_by_hash;
+ for (int id : IndexRange(network.node_id_amount())) {
MFNode *node = network.node_or_null_by_id(id);
if (node != nullptr) {
- uint32_t node_hash = node_hashes[id];
+ uint64_t node_hash = node_hashes[id];
nodes_by_hash.lookup_or_add_default(node_hash).append(node);
}
}
@@ -428,7 +428,7 @@ static bool nodes_output_same_values(DisjointSet &cache, const MFNode &a, const
if (!functions_are_equal(a.as_function().function(), b.as_function().function())) {
return false;
}
- for (uint i : a.inputs().index_range()) {
+ for (int i : a.inputs().index_range()) {
const MFOutputSocket *origin_a = a.input(i).origin();
const MFOutputSocket *origin_b = b.input(i).origin();
if (origin_a == nullptr || origin_b == nullptr) {
@@ -444,7 +444,7 @@ static bool nodes_output_same_values(DisjointSet &cache, const MFNode &a, const
}
static void relink_duplicate_nodes(MFNetwork &network,
- Map<uint32_t, Vector<MFNode *, 1>> &nodes_by_hash)
+ Map<uint64_t, Vector<MFNode *, 1>> &nodes_by_hash)
{
DisjointSet same_node_cache{network.node_id_amount()};
@@ -462,7 +462,7 @@ static void relink_duplicate_nodes(MFNetwork &network,
/* This is true with fairly high probability, but hash collisions can happen. So we have to
* check if the nodes actually output the same values. */
if (nodes_output_same_values(same_node_cache, deduplicated_node, *node)) {
- for (uint i : deduplicated_node.outputs().index_range()) {
+ for (int i : deduplicated_node.outputs().index_range()) {
network.relink(node->output(i), deduplicated_node.output(i));
}
}
@@ -481,8 +481,8 @@ static void relink_duplicate_nodes(MFNetwork &network,
*/
void common_subnetwork_elimination(MFNetwork &network)
{
- Array<uint32_t> node_hashes = compute_node_hashes(network);
- Map<uint32_t, Vector<MFNode *, 1>> nodes_by_hash = group_nodes_by_hash(network, node_hashes);
+ Array<uint64_t> node_hashes = compute_node_hashes(network);
+ Map<uint64_t, Vector<MFNode *, 1>> nodes_by_hash = group_nodes_by_hash(network, node_hashes);
relink_duplicate_nodes(network, nodes_by_hash);
}
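
A note on widening the hashes from uint32_t to uint64_t: with random k-bit hashes over n nodes, the birthday bound puts the collision probability near n * (n - 1) / 2^(k + 1), so 64 bits makes the nodes_output_same_values fallback dramatically rarer. An illustrative estimate (not from the patch):

    #include <cmath>

    /* Approximate probability that at least two of n random k-bit
     * hashes collide (birthday bound). */
    double collision_estimate(double n, double bits)
    {
      return n * (n - 1.0) / std::pow(2.0, bits + 1.0);
    }

    /* For n = 10000 nodes: about 1.2e-2 with 32-bit hashes,
     * about 2.7e-12 with 64-bit hashes. */

Collisions are still handled correctly either way; the hash width only decides how often the exact comparison has to run.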
diff --git a/source/blender/nodes/NOD_derived_node_tree.hh b/source/blender/nodes/NOD_derived_node_tree.hh
index 84370dcd399..d79bd9031b8 100644
--- a/source/blender/nodes/NOD_derived_node_tree.hh
+++ b/source/blender/nodes/NOD_derived_node_tree.hh
@@ -46,15 +46,15 @@ class DSocket : NonCopyable, NonMovable {
protected:
DNode *node_;
const SocketRef *socket_ref_;
- uint id_;
+ int id_;
friend DerivedNodeTree;
public:
const DNode &node() const;
- uint id() const;
- uint index() const;
+ int id() const;
+ int index() const;
bool is_input() const;
bool is_output() const;
@@ -105,7 +105,7 @@ class DGroupInput : NonCopyable, NonMovable {
const InputSocketRef *socket_ref_;
DParentNode *parent_;
Vector<DInputSocket *> linked_sockets_;
- uint id_;
+ int id_;
friend DerivedNodeTree;
@@ -114,7 +114,7 @@ class DGroupInput : NonCopyable, NonMovable {
bNodeSocket *bsocket() const;
const DParentNode *parent() const;
Span<const DInputSocket *> linked_sockets() const;
- uint id() const;
+ int id() const;
StringRefNull name() const;
};
@@ -126,7 +126,7 @@ class DNode : NonCopyable, NonMovable {
Span<DInputSocket *> inputs_;
Span<DOutputSocket *> outputs_;
- uint id_;
+ int id_;
friend DerivedNodeTree;
@@ -137,13 +137,13 @@ class DNode : NonCopyable, NonMovable {
Span<const DInputSocket *> inputs() const;
Span<const DOutputSocket *> outputs() const;
- const DInputSocket &input(uint index) const;
- const DOutputSocket &output(uint index) const;
+ const DInputSocket &input(int index) const;
+ const DOutputSocket &output(int index) const;
- const DInputSocket &input(uint index, StringRef expected_name) const;
- const DOutputSocket &output(uint index, StringRef expected_name) const;
+ const DInputSocket &input(int index, StringRef expected_name) const;
+ const DOutputSocket &output(int index, StringRef expected_name) const;
- uint id() const;
+ int id() const;
PointerRNA *rna() const;
StringRefNull idname() const;
@@ -157,14 +157,14 @@ class DParentNode : NonCopyable, NonMovable {
private:
const NodeRef *node_ref_;
DParentNode *parent_;
- uint id_;
+ int id_;
friend DerivedNodeTree;
public:
const DParentNode *parent() const;
const NodeRef &node_ref() const;
- uint id() const;
+ int id() const;
};
using NodeTreeRefMap = Map<bNodeTree *, std::unique_ptr<const NodeTreeRef>>;
@@ -240,12 +240,12 @@ inline const DNode &DSocket::node() const
return *node_;
}
-inline uint DSocket::id() const
+inline int DSocket::id() const
{
return id_;
}
-inline uint DSocket::index() const
+inline int DSocket::index() const
{
return socket_ref_->index();
}
@@ -367,7 +367,7 @@ inline Span<const DInputSocket *> DGroupInput::linked_sockets() const
return linked_sockets_;
}
-inline uint DGroupInput::id() const
+inline int DGroupInput::id() const
{
return id_;
}
@@ -401,17 +401,17 @@ inline Span<const DOutputSocket *> DNode::outputs() const
return outputs_;
}
-inline const DInputSocket &DNode::input(uint index) const
+inline const DInputSocket &DNode::input(int index) const
{
return *inputs_[index];
}
-inline const DOutputSocket &DNode::output(uint index) const
+inline const DOutputSocket &DNode::output(int index) const
{
return *outputs_[index];
}
-inline const DInputSocket &DNode::input(uint index, StringRef expected_name) const
+inline const DInputSocket &DNode::input(int index, StringRef expected_name) const
{
const DInputSocket &socket = *inputs_[index];
BLI_assert(socket.name() == expected_name);
@@ -419,7 +419,7 @@ inline const DInputSocket &DNode::input(uint index, StringRef expected_name) con
return socket;
}
-inline const DOutputSocket &DNode::output(uint index, StringRef expected_name) const
+inline const DOutputSocket &DNode::output(int index, StringRef expected_name) const
{
const DOutputSocket &socket = *outputs_[index];
BLI_assert(socket.name() == expected_name);
@@ -427,7 +427,7 @@ inline const DOutputSocket &DNode::output(uint index, StringRef expected_name) c
return socket;
}
-inline uint DNode::id() const
+inline int DNode::id() const
{
return id_;
}
@@ -461,7 +461,7 @@ inline const NodeRef &DParentNode::node_ref() const
return *node_ref_;
}
-inline uint DParentNode::id() const
+inline int DParentNode::id() const
{
return id_;
}
diff --git a/source/blender/nodes/NOD_node_tree_multi_function.hh b/source/blender/nodes/NOD_node_tree_multi_function.hh
index f7a1fbb114d..79c2b3c7ce8 100644
--- a/source/blender/nodes/NOD_node_tree_multi_function.hh
+++ b/source/blender/nodes/NOD_node_tree_multi_function.hh
@@ -106,7 +106,7 @@ class MFNetworkTreeMap {
void add(Span<const DInputSocket *> dsockets, Span<fn::MFInputSocket *> sockets)
{
assert_same_size(dsockets, sockets);
- for (uint i : dsockets.index_range()) {
+ for (int i : dsockets.index_range()) {
this->add(*dsockets[i], *sockets[i]);
}
}
@@ -114,7 +114,7 @@ class MFNetworkTreeMap {
void add(Span<const DOutputSocket *> dsockets, Span<fn::MFOutputSocket *> sockets)
{
assert_same_size(dsockets, sockets);
- for (uint i : dsockets.index_range()) {
+ for (int i : dsockets.index_range()) {
this->add(*dsockets[i], *sockets[i]);
}
}
@@ -133,7 +133,7 @@ class MFNetworkTreeMap {
void add_try_match(Span<const DSocket *> dsockets, Span<fn::MFSocket *> sockets)
{
- uint used_sockets = 0;
+ int used_sockets = 0;
for (const DSocket *dsocket : dsockets) {
if (!dsocket->is_available()) {
continue;
diff --git a/source/blender/nodes/NOD_node_tree_ref.hh b/source/blender/nodes/NOD_node_tree_ref.hh
index 907184125b8..ebf5709ef50 100644
--- a/source/blender/nodes/NOD_node_tree_ref.hh
+++ b/source/blender/nodes/NOD_node_tree_ref.hh
@@ -71,8 +71,8 @@ class SocketRef : NonCopyable, NonMovable {
NodeRef *node_;
bNodeSocket *bsocket_;
bool is_input_;
- uint id_;
- uint index_;
+ int id_;
+ int index_;
PointerRNA rna_;
Vector<SocketRef *> linked_sockets_;
Vector<SocketRef *> directly_linked_sockets_;
@@ -87,8 +87,8 @@ class SocketRef : NonCopyable, NonMovable {
const NodeRef &node() const;
const NodeTreeRef &tree() const;
- uint id() const;
- uint index() const;
+ int id() const;
+ int index() const;
bool is_input() const;
bool is_output() const;
@@ -124,7 +124,7 @@ class NodeRef : NonCopyable, NonMovable {
NodeTreeRef *tree_;
bNode *bnode_;
PointerRNA rna_;
- uint id_;
+ int id_;
Vector<InputSocketRef *> inputs_;
Vector<OutputSocketRef *> outputs_;
@@ -136,8 +136,8 @@ class NodeRef : NonCopyable, NonMovable {
Span<const InputSocketRef *> inputs() const;
Span<const OutputSocketRef *> outputs() const;
- const InputSocketRef &input(uint index) const;
- const OutputSocketRef &output(uint index) const;
+ const InputSocketRef &input(int index) const;
+ const OutputSocketRef &output(int index) const;
bNode *bnode() const;
bNodeTree *btree() const;
@@ -146,7 +146,7 @@ class NodeRef : NonCopyable, NonMovable {
StringRefNull idname() const;
StringRefNull name() const;
- uint id() const;
+ int id() const;
bool is_reroute_node() const;
bool is_group_node() const;
@@ -220,12 +220,12 @@ inline const NodeTreeRef &SocketRef::tree() const
return node_->tree();
}
-inline uint SocketRef::id() const
+inline int SocketRef::id() const
{
return id_;
}
-inline uint SocketRef::index() const
+inline int SocketRef::index() const
{
return index_;
}
@@ -334,12 +334,12 @@ inline Span<const OutputSocketRef *> NodeRef::outputs() const
return outputs_;
}
-inline const InputSocketRef &NodeRef::input(uint index) const
+inline const InputSocketRef &NodeRef::input(int index) const
{
return *inputs_[index];
}
-inline const OutputSocketRef &NodeRef::output(uint index) const
+inline const OutputSocketRef &NodeRef::output(int index) const
{
return *outputs_[index];
}
@@ -369,7 +369,7 @@ inline StringRefNull NodeRef::name() const
return bnode_->name;
}
-inline uint NodeRef::id() const
+inline int NodeRef::id() const
{
return id_;
}
diff --git a/source/blender/nodes/intern/derived_node_tree.cc b/source/blender/nodes/intern/derived_node_tree.cc
index daec67b53a9..b7c78cb1499 100644
--- a/source/blender/nodes/intern/derived_node_tree.cc
+++ b/source/blender/nodes/intern/derived_node_tree.cc
@@ -83,7 +83,7 @@ DNode &DerivedNodeTree::create_node(const NodeRef &node_ref,
node.outputs_ = allocator_.construct_elements_and_pointer_array<DOutputSocket>(
node_ref.outputs().size());
- for (uint i : node.inputs_.index_range()) {
+ for (int i : node.inputs_.index_range()) {
const InputSocketRef &socket_ref = node_ref.input(i);
DInputSocket &socket = *node.inputs_[i];
@@ -94,7 +94,7 @@ DNode &DerivedNodeTree::create_node(const NodeRef &node_ref,
r_sockets_map[socket_ref.id()] = &socket;
}
- for (uint i : node.outputs_.index_range()) {
+ for (int i : node.outputs_.index_range()) {
const OutputSocketRef &socket_ref = node_ref.output(i);
DOutputSocket &socket = *node.outputs_[i];
@@ -113,7 +113,7 @@ BLI_NOINLINE void DerivedNodeTree::expand_groups(Vector<DNode *> &all_nodes,
Vector<DParentNode *> &all_parent_nodes,
NodeTreeRefMap &node_tree_refs)
{
- for (uint i = 0; i < all_nodes.size(); i++) {
+ for (int i = 0; i < all_nodes.size(); i++) {
DNode &node = *all_nodes[i];
if (node.node_ref_->is_group_node()) {
this->expand_group_node(node, all_nodes, all_group_inputs, all_parent_nodes, node_tree_refs);
@@ -181,10 +181,10 @@ BLI_NOINLINE void DerivedNodeTree::relink_group_inputs(const NodeTreeRef &group_
const NodeRef &input_node_ref = *node_refs[0];
DNode &input_node = *nodes_by_id[input_node_ref.id()];
- uint input_amount = group_node.inputs().size();
+ int input_amount = group_node.inputs().size();
BLI_assert(input_amount == input_node_ref.outputs().size() - 1);
- for (uint input_index : IndexRange(input_amount)) {
+ for (int input_index : IndexRange(input_amount)) {
DInputSocket *outside_group = group_node.inputs_[input_index];
DOutputSocket *inside_group = input_node.outputs_[input_index];
@@ -228,10 +228,10 @@ BLI_NOINLINE void DerivedNodeTree::relink_group_outputs(const NodeTreeRef &group
const NodeRef &output_node_ref = *node_refs[0];
DNode &output_node = *nodes_by_id[output_node_ref.id()];
- uint output_amount = group_node.outputs().size();
+ int output_amount = group_node.outputs().size();
BLI_assert(output_amount == output_node_ref.inputs().size() - 1);
- for (uint output_index : IndexRange(output_amount)) {
+ for (int output_index : IndexRange(output_amount)) {
DOutputSocket *outside_group = group_node.outputs_[output_index];
DInputSocket *inside_group = output_node.inputs_[output_index];
@@ -316,7 +316,7 @@ BLI_NOINLINE void DerivedNodeTree::store_in_this_and_init_ids(
group_inputs_ = std::move(all_group_inputs);
parent_nodes_ = std::move(all_parent_nodes);
- for (uint node_index : nodes_by_id_.index_range()) {
+ for (int node_index : nodes_by_id_.index_range()) {
DNode *node = nodes_by_id_[node_index];
node->id_ = node_index;
@@ -333,7 +333,7 @@ BLI_NOINLINE void DerivedNodeTree::store_in_this_and_init_ids(
}
}
- for (uint i : group_inputs_.index_range()) {
+ for (int i : group_inputs_.index_range()) {
group_inputs_[i]->id_ = i;
}
}
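
The index-based loop in expand_groups above (for (int i = 0; i < all_nodes.size(); i++)) is deliberate: expand_group_node appends newly created nodes to all_nodes while the loop runs, so a range-for would be invalidated by reallocation, whereas an index keeps working and also visits the appended elements. A minimal sketch of the worklist pattern (hypothetical data):

    blender::Vector<int> worklist = {1, 2, 3};
    /* append() may reallocate, so iterate by index; newly appended
     * elements are processed by the same loop. */
    for (int i = 0; i < worklist.size(); i++) {
      if (worklist[i] < 10) {
        worklist.append(worklist[i] * 10);
      }
    }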
diff --git a/source/blender/nodes/intern/node_tree_multi_function.cc b/source/blender/nodes/intern/node_tree_multi_function.cc
index f77b19354a4..82842c4ef32 100644
--- a/source/blender/nodes/intern/node_tree_multi_function.cc
+++ b/source/blender/nodes/intern/node_tree_multi_function.cc
@@ -62,7 +62,7 @@ const fn::MultiFunction &NodeMFNetworkBuilder::get_default_fn(StringRef name)
static void insert_dummy_node(CommonMFNetworkBuilderData &common, const DNode &dnode)
{
- constexpr uint stack_capacity = 10;
+ constexpr int stack_capacity = 10;
Vector<fn::MFDataType, stack_capacity> input_types;
Vector<StringRef, stack_capacity> input_names;
@@ -159,7 +159,7 @@ static fn::MFOutputSocket *try_find_origin(CommonMFNetworkBuilderData &common,
{
Span<const DOutputSocket *> from_dsockets = to_dsocket.linked_sockets();
Span<const DGroupInput *> from_group_inputs = to_dsocket.linked_group_inputs();
- uint total_linked_amount = from_dsockets.size() + from_group_inputs.size();
+ int total_linked_amount = from_dsockets.size() + from_group_inputs.size();
BLI_assert(total_linked_amount <= 1);
if (total_linked_amount == 0) {
diff --git a/source/blender/nodes/shader/nodes/node_shader_map_range.cc b/source/blender/nodes/shader/nodes/node_shader_map_range.cc
index b026499ec4b..4a6c9ec9a4d 100644
--- a/source/blender/nodes/shader/nodes/node_shader_map_range.cc
+++ b/source/blender/nodes/shader/nodes/node_shader_map_range.cc
@@ -115,13 +115,13 @@ class MapRangeFunction : public blender::fn::MultiFunction {
blender::fn::VSpan<float> to_max = params.readonly_single_input<float>(4, "To Max");
blender::MutableSpan<float> results = params.uninitialized_single_output<float>(5, "Result");
- for (uint i : mask) {
+ for (int64_t i : mask) {
float factor = safe_divide(values[i] - from_min[i], from_max[i] - from_min[i]);
results[i] = to_min[i] + factor * (to_max[i] - to_min[i]);
}
if (clamp_) {
- for (uint i : mask) {
+ for (int64_t i : mask) {
CLAMP(results[i], 0.0f, 1.0f);
}
}
diff --git a/source/blender/nodes/shader/nodes/node_shader_sepcombRGB.cc b/source/blender/nodes/shader/nodes/node_shader_sepcombRGB.cc
index 4b4b80ea1ad..c1543f791f1 100644
--- a/source/blender/nodes/shader/nodes/node_shader_sepcombRGB.cc
+++ b/source/blender/nodes/shader/nodes/node_shader_sepcombRGB.cc
@@ -80,7 +80,7 @@ class SeparateRGBFunction : public blender::fn::MultiFunction {
blender::MutableSpan<float> gs = params.uninitialized_single_output<float>(2, "G");
blender::MutableSpan<float> bs = params.uninitialized_single_output<float>(3, "B");
- for (uint i : mask) {
+ for (int64_t i : mask) {
blender::Color4f color = colors[i];
rs[i] = color.r;
gs[i] = color.g;
diff --git a/source/blender/nodes/shader/nodes/node_shader_sepcombXYZ.cc b/source/blender/nodes/shader/nodes/node_shader_sepcombXYZ.cc
index 03e18acc245..8b23327460f 100644
--- a/source/blender/nodes/shader/nodes/node_shader_sepcombXYZ.cc
+++ b/source/blender/nodes/shader/nodes/node_shader_sepcombXYZ.cc
@@ -65,7 +65,7 @@ class MF_SeparateXYZ : public blender::fn::MultiFunction {
blender::MutableSpan<float> ys = params.uninitialized_single_output<float>(2, "Y");
blender::MutableSpan<float> zs = params.uninitialized_single_output<float>(3, "Z");
- for (uint i : mask) {
+ for (int64_t i : mask) {
blender::float3 xyz = vectors[i];
xs[i] = xyz.x;
ys[i] = xyz.y;
diff --git a/source/blender/nodes/shader/nodes/node_shader_valToRgb.cc b/source/blender/nodes/shader/nodes/node_shader_valToRgb.cc
index 21f15bcf6f4..7f712b0db40 100644
--- a/source/blender/nodes/shader/nodes/node_shader_valToRgb.cc
+++ b/source/blender/nodes/shader/nodes/node_shader_valToRgb.cc
@@ -148,7 +148,7 @@ class ColorBandFunction : public blender::fn::MultiFunction {
params.uninitialized_single_output<blender::Color4f>(1, "Color");
blender::MutableSpan<float> alphas = params.uninitialized_single_output<float>(2, "Alpha");
- for (uint i : mask) {
+ for (int64_t i : mask) {
blender::Color4f color;
BKE_colorband_evaluate(&color_band_, values[i], color);
colors[i] = color;
diff --git a/source/blender/simulation/intern/particle_allocator.cc b/source/blender/simulation/intern/particle_allocator.cc
index b65c0197c76..eb1e998e63a 100644
--- a/source/blender/simulation/intern/particle_allocator.cc
+++ b/source/blender/simulation/intern/particle_allocator.cc
@@ -21,7 +21,7 @@ namespace blender::sim {
AttributesAllocator::~AttributesAllocator()
{
for (std::unique_ptr<AttributesBlock> &block : allocated_blocks_) {
- for (uint i : attributes_info_.index_range()) {
+ for (int i : attributes_info_.index_range()) {
const fn::CPPType &type = attributes_info_.type_of(i);
type.destruct_n(block->buffers[i], block->size);
MEM_freeN(block->buffers[i]);
@@ -29,13 +29,13 @@ AttributesAllocator::~AttributesAllocator()
}
}
-fn::MutableAttributesRef AttributesAllocator::allocate_uninitialized(uint size)
+fn::MutableAttributesRef AttributesAllocator::allocate_uninitialized(int size)
{
std::unique_ptr<AttributesBlock> block = std::make_unique<AttributesBlock>();
block->buffers = Array<void *>(attributes_info_.size(), nullptr);
block->size = size;
- for (uint i : attributes_info_.index_range()) {
+ for (int i : attributes_info_.index_range()) {
const fn::CPPType &type = attributes_info_.type_of(i);
void *buffer = MEM_mallocN_aligned(size * type.size(), type.alignment(), AT);
block->buffers[i] = buffer;
@@ -53,17 +53,17 @@ fn::MutableAttributesRef AttributesAllocator::allocate_uninitialized(uint size)
return attributes;
}
-fn::MutableAttributesRef ParticleAllocator::allocate(uint size)
+fn::MutableAttributesRef ParticleAllocator::allocate(int size)
{
const fn::AttributesInfo &info = attributes_allocator_.attributes_info();
fn::MutableAttributesRef attributes = attributes_allocator_.allocate_uninitialized(size);
- for (uint i : info.index_range()) {
+ for (int i : info.index_range()) {
const fn::CPPType &type = info.type_of(i);
StringRef name = info.name_of(i);
if (name == "ID") {
- uint start_id = next_id_.fetch_add(size);
+ int start_id = next_id_.fetch_add(size);
MutableSpan<int> ids = attributes.get<int>("ID");
- for (uint pindex : IndexRange(size)) {
+ for (int pindex : IndexRange(size)) {
ids[pindex] = start_id + pindex;
}
}
diff --git a/source/blender/simulation/intern/particle_allocator.hh b/source/blender/simulation/intern/particle_allocator.hh
index f854413c9aa..b742459b3c2 100644
--- a/source/blender/simulation/intern/particle_allocator.hh
+++ b/source/blender/simulation/intern/particle_allocator.hh
@@ -31,13 +31,13 @@ class AttributesAllocator : NonCopyable, NonMovable {
private:
struct AttributesBlock {
Array<void *> buffers;
- uint size;
+ int size;
};
const fn::AttributesInfo &attributes_info_;
Vector<std::unique_ptr<AttributesBlock>> allocated_blocks_;
Vector<fn::MutableAttributesRef> allocated_attributes_;
- uint total_allocated_ = 0;
+ int total_allocated_ = 0;
std::mutex mutex_;
public:
@@ -53,7 +53,7 @@ class AttributesAllocator : NonCopyable, NonMovable {
return allocated_attributes_;
}
- uint total_allocated() const
+ int total_allocated() const
{
return total_allocated_;
}
@@ -63,16 +63,16 @@ class AttributesAllocator : NonCopyable, NonMovable {
return attributes_info_;
}
- fn::MutableAttributesRef allocate_uninitialized(uint size);
+ fn::MutableAttributesRef allocate_uninitialized(int size);
};
class ParticleAllocator : NonCopyable, NonMovable {
private:
AttributesAllocator attributes_allocator_;
- std::atomic<uint> next_id_;
+ std::atomic<int> next_id_;
public:
- ParticleAllocator(const fn::AttributesInfo &attributes_info, uint next_id)
+ ParticleAllocator(const fn::AttributesInfo &attributes_info, int next_id)
: attributes_allocator_(attributes_info), next_id_(next_id)
{
}
@@ -82,12 +82,12 @@ class ParticleAllocator : NonCopyable, NonMovable {
return attributes_allocator_.get_allocations();
}
- uint total_allocated() const
+ int total_allocated() const
{
return attributes_allocator_.total_allocated();
}
- fn::MutableAttributesRef allocate(uint size);
+ fn::MutableAttributesRef allocate(int size);
};
} // namespace blender::sim
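
ParticleAllocator's std::atomic<int> next_id_ lets multiple threads reserve contiguous ID blocks without a lock: fetch_add returns the old value and advances the counter in one atomic step, so concurrent callers can never receive overlapping ranges. A minimal sketch of the pattern (hypothetical free function):

    #include <atomic>

    /* Atomically reserve `size` consecutive IDs and return the first one. */
    int reserve_ids(std::atomic<int> &next_id, int size)
    {
      return next_id.fetch_add(size);
    }

In the allocator above, this feeds the per-particle "ID" attribute, as seen in particle_allocator.cc.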
diff --git a/source/blender/simulation/intern/particle_function.cc b/source/blender/simulation/intern/particle_function.cc
index 3788fd17e36..e0de68d9b29 100644
--- a/source/blender/simulation/intern/particle_function.cc
+++ b/source/blender/simulation/intern/particle_function.cc
@@ -29,9 +29,9 @@ ParticleFunction::ParticleFunction(const fn::MultiFunction *global_fn,
per_particle_inputs_(per_particle_inputs),
output_is_global_(output_is_global)
{
- for (uint i : output_is_global_.index_range()) {
+ for (int i : output_is_global_.index_range()) {
if (output_is_global_[i]) {
- uint param_index = global_inputs_.size() + global_output_indices_.size();
+ int param_index = global_inputs_.size() + global_output_indices_.size();
fn::MFParamType param_type = global_fn_->param_type(param_index);
BLI_assert(param_type.is_output());
output_types_.append(param_type.data_type());
@@ -39,7 +39,7 @@ ParticleFunction::ParticleFunction(const fn::MultiFunction *global_fn,
global_output_indices_.append(i);
}
else {
- uint param_index = per_particle_inputs_.size() + per_particle_output_indices_.size();
+ int param_index = per_particle_inputs_.size() + per_particle_output_indices_.size();
fn::MFParamType param_type = per_particle_fn_->param_type(param_index);
BLI_assert(param_type.is_output());
output_types_.append(param_type.data_type());
@@ -60,7 +60,7 @@ ParticleFunctionEvaluator::ParticleFunctionEvaluator(
ParticleFunctionEvaluator::~ParticleFunctionEvaluator()
{
- for (uint output_index : outputs_.index_range()) {
+ for (int output_index : outputs_.index_range()) {
void *buffer = outputs_[output_index];
fn::MFDataType data_type = particle_fn_.output_types_[output_index];
BLI_assert(data_type.is_single()); /* For now. */
@@ -83,7 +83,7 @@ void ParticleFunctionEvaluator::compute()
is_computed_ = true;
}
-fn::GVSpan ParticleFunctionEvaluator::get(uint output_index, StringRef expected_name) const
+fn::GVSpan ParticleFunctionEvaluator::get(int output_index, StringRef expected_name) const
{
#ifdef DEBUG
StringRef real_name = particle_fn_.output_names_[output_index];
@@ -115,7 +115,7 @@ void ParticleFunctionEvaluator::compute_globals()
}
/* Add output parameters. */
- for (uint output_index : particle_fn_.global_output_indices_) {
+ for (int output_index : particle_fn_.global_output_indices_) {
fn::MFDataType data_type = particle_fn_.output_types_[output_index];
BLI_assert(data_type.is_single()); /* For now. */
@@ -142,7 +142,7 @@ void ParticleFunctionEvaluator::compute_per_particle()
}
/* Add output parameters. */
- for (uint output_index : particle_fn_.per_particle_output_indices_) {
+ for (int output_index : particle_fn_.per_particle_output_indices_) {
fn::MFDataType data_type = particle_fn_.output_types_[output_index];
BLI_assert(data_type.is_single()); /* For now. */
diff --git a/source/blender/simulation/intern/particle_function.hh b/source/blender/simulation/intern/particle_function.hh
index abed9063bae..bbb40efb388 100644
--- a/source/blender/simulation/intern/particle_function.hh
+++ b/source/blender/simulation/intern/particle_function.hh
@@ -41,8 +41,8 @@ class ParticleFunction {
Array<const ParticleFunctionInput *> global_inputs_;
Array<const ParticleFunctionInput *> per_particle_inputs_;
Array<bool> output_is_global_;
- Vector<uint> global_output_indices_;
- Vector<uint> per_particle_output_indices_;
+ Vector<int> global_output_indices_;
+ Vector<int> per_particle_output_indices_;
Vector<fn::MFDataType> output_types_;
Vector<StringRefNull> output_names_;
@@ -73,9 +73,9 @@ class ParticleFunctionEvaluator {
~ParticleFunctionEvaluator();
void compute();
- fn::GVSpan get(uint output_index, StringRef expected_name) const;
+ fn::GVSpan get(int output_index, StringRef expected_name) const;
- template<typename T> fn::VSpan<T> get(uint output_index, StringRef expected_name) const
+ template<typename T> fn::VSpan<T> get(int output_index, StringRef expected_name) const
{
return this->get(output_index, expected_name).typed<T>();
}
diff --git a/source/blender/simulation/intern/simulation_collect_influences.cc b/source/blender/simulation/intern/simulation_collect_influences.cc
index 98b055802c9..c1b936d9aa3 100644
--- a/source/blender/simulation/intern/simulation_collect_influences.cc
+++ b/source/blender/simulation/intern/simulation_collect_influences.cc
@@ -53,7 +53,7 @@ static Span<const nodes::DNode *> get_particle_simulation_nodes(const nodes::Der
static std::optional<Array<std::string>> compute_global_string_inputs(
nodes::MFNetworkTreeMap &network_map, Span<const fn::MFInputSocket *> sockets)
{
- uint amount = sockets.size();
+ int amount = sockets.size();
if (amount == 0) {
return Array<std::string>();
}
@@ -67,7 +67,7 @@ static std::optional<Array<std::string>> compute_global_string_inputs(
fn::MFParamsBuilder params{network_fn, 1};
Array<std::string> strings(amount, NoInitialization());
- for (uint i : IndexRange(amount)) {
+ for (int i : IndexRange(amount)) {
params.add_uninitialized_single_output(
fn::GMutableSpan(fn::CPPType::get<std::string>(), strings.data() + i, 1));
}
@@ -101,7 +101,7 @@ static void find_and_deduplicate_particle_attribute_nodes(nodes::MFNetworkTreeMa
Map<std::pair<std::string, fn::MFDataType>, Vector<fn::MFNode *>>
attribute_nodes_by_name_and_type;
- for (uint i : attribute_names->index_range()) {
+ for (int i : attribute_names->index_range()) {
attribute_nodes_by_name_and_type
.lookup_or_add_default(
{(*attribute_names)[i], name_sockets[i]->node().output(0).data_type()})
@@ -207,7 +207,7 @@ class ParticleFunctionForce : public ParticleForce {
evaluator.compute();
fn::VSpan<float3> forces = evaluator.get<float3>(0, "Force");
- for (uint i : mask) {
+ for (int64_t i : mask) {
r_combined_force[i] += forces[i];
}
}
@@ -273,13 +273,13 @@ class MyBasicEmitter : public ParticleEmitter {
}
fn::MutableAttributesRef attributes = allocator->allocate(10);
- RandomNumberGenerator rng{(uint)context.simulation_time_interval().start() ^
- DefaultHash<std::string>{}(name_)};
+ RandomNumberGenerator rng{(uint32_t)context.simulation_time_interval().start() ^
+ (uint32_t)DefaultHash<std::string>{}(name_)};
MutableSpan<float3> positions = attributes.get<float3>("Position");
MutableSpan<float3> velocities = attributes.get<float3>("Velocity");
- for (uint i : IndexRange(attributes.size())) {
+ for (int i : IndexRange(attributes.size())) {
positions[i] = rng.get_unit_float3();
velocities[i] = rng.get_unit_float3();
}
diff --git a/source/blender/simulation/intern/simulation_solver.cc b/source/blender/simulation/intern/simulation_solver.cc
index 310261f6c95..8460b9e148e 100644
--- a/source/blender/simulation/intern/simulation_solver.cc
+++ b/source/blender/simulation/intern/simulation_solver.cc
@@ -63,14 +63,14 @@ static const fn::CPPType &custom_to_cpp_data_type(CustomDataType type)
class CustomDataAttributesRef {
private:
Array<void *> buffers_;
- uint size_;
+ int64_t size_;
const fn::AttributesInfo &info_;
public:
- CustomDataAttributesRef(CustomData &custom_data, uint size, const fn::AttributesInfo &info)
+ CustomDataAttributesRef(CustomData &custom_data, int64_t size, const fn::AttributesInfo &info)
: buffers_(info.size(), nullptr), size_(size), info_(info)
{
- for (uint attribute_index : info.index_range()) {
+ for (int attribute_index : info.index_range()) {
StringRefNull name = info.name_of(attribute_index);
const fn::CPPType &cpp_type = info.type_of(attribute_index);
CustomDataType custom_type = cpp_to_custom_data_type(cpp_type);
@@ -108,7 +108,7 @@ static void ensure_attributes_exist(ParticleSimulationState *state, const fn::At
}
} while (found_layer_to_remove);
- for (uint attribute_index : info.index_range()) {
+ for (int attribute_index : info.index_range()) {
StringRefNull attribute_name = info.name_of(attribute_index);
const fn::CPPType &cpp_type = info.type_of(attribute_index);
CustomDataType custom_type = cpp_to_custom_data_type(cpp_type);
@@ -120,8 +120,7 @@ static void ensure_attributes_exist(ParticleSimulationState *state, const fn::At
nullptr,
state->tot_particles,
attribute_name.c_str());
- cpp_type.fill_uninitialized(
- info.default_of(attribute_index), data, (uint)state->tot_particles);
+ cpp_type.fill_uninitialized(info.default_of(attribute_index), data, state->tot_particles);
}
}
}
@@ -156,21 +155,21 @@ void solve_simulation_time_step(Simulation &simulation,
}
LISTBASE_FOREACH (ParticleSimulationState *, state, &simulation.states) {
+
const fn::AttributesInfo &attributes_info = *attribute_infos.lookup_as(state->head.name);
CustomDataAttributesRef custom_data_attributes{
- state->attributes, (uint)state->tot_particles, attributes_info};
+ state->attributes, state->tot_particles, attributes_info};
fn::MutableAttributesRef attributes = custom_data_attributes;
MutableSpan<float3> positions = attributes.get<float3>("Position");
MutableSpan<float3> velocities = attributes.get<float3>("Velocity");
- Array<float3> force_vectors{(uint)state->tot_particles, {0, 0, 0}};
+ Array<float3> force_vectors{state->tot_particles, {0, 0, 0}};
const Vector<const ParticleForce *> *forces = influences.particle_forces.lookup_ptr(
state->head.name);
if (forces != nullptr) {
- ParticleChunkContext particle_chunk_context{IndexMask((uint)state->tot_particles),
- attributes};
+ ParticleChunkContext particle_chunk_context{IndexMask(state->tot_particles), attributes};
ParticleForceContext particle_force_context{
solve_context, particle_chunk_context, force_vectors};
@@ -179,7 +178,7 @@ void solve_simulation_time_step(Simulation &simulation,
}
}
- for (uint i : positions.index_range()) {
+ for (int i : positions.index_range()) {
velocities[i] += force_vectors[i] * time_step;
positions[i] += velocities[i] * time_step;
}
@@ -195,9 +194,9 @@ void solve_simulation_time_step(Simulation &simulation,
const fn::AttributesInfo &attributes_info = *attribute_infos.lookup_as(state->head.name);
ParticleAllocator &allocator = *particle_allocators.lookup_as(state->head.name);
- const uint emitted_particle_amount = allocator.total_allocated();
- const uint old_particle_amount = state->tot_particles;
- const uint new_particle_amount = old_particle_amount + emitted_particle_amount;
+ const int emitted_particle_amount = allocator.total_allocated();
+ const int old_particle_amount = state->tot_particles;
+ const int new_particle_amount = old_particle_amount + emitted_particle_amount;
CustomData_realloc(&state->attributes, new_particle_amount);
@@ -205,11 +204,11 @@ void solve_simulation_time_step(Simulation &simulation,
state->attributes, new_particle_amount, attributes_info};
fn::MutableAttributesRef attributes = custom_data_attributes;
- uint offset = old_particle_amount;
+ int offset = old_particle_amount;
for (fn::MutableAttributesRef emitted_attributes : allocator.get_allocations()) {
fn::MutableAttributesRef dst_attributes = attributes.slice(
IndexRange(offset, emitted_attributes.size()));
- for (uint attribute_index : attributes.info().index_range()) {
+ for (int attribute_index : attributes.info().index_range()) {
fn::GMutableSpan emitted_data = emitted_attributes.get(attribute_index);
fn::GMutableSpan dst = dst_attributes.get(attribute_index);
const fn::CPPType &type = dst.type();