git.blender.org/blender.git
author     Jacques Lucke <jacques@blender.org>  2020-08-07 19:24:59 +0300
committer  Jacques Lucke <jacques@blender.org>  2020-08-07 19:42:21 +0300
commit     c50e5fcc344d00b03eb4a3141b5b45944c3570fd (patch)
tree       f683ae1a1f38551d160a5be2ee86561d51faca26 /source/blender/blenlib
parent     28b10224346a9a2e55267f98357991a841eeda5b (diff)
Cleanup: use C++ style casts in various places
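
Illustrative sketch only, not part of the patch: the commit replaces C-style casts with the explicit C++ casts. The hypothetical hash_pointer helper below mirrors the DefaultHash<T *> change in BLI_hash.hh to show the before/after pattern.

#include <cstdint>

/* Hypothetical example (not from the patch): C-style casts become
 * the more explicit C++ casts. */
static uint64_t hash_pointer(const void *ptr)
{
  /* Before: uintptr_t value = (uintptr_t)ptr; */
  uintptr_t value = reinterpret_cast<uintptr_t>(ptr); /* pointer-to-integer reinterpretation */
  /* Before: return (uint64_t)(value >> 4); */
  return static_cast<uint64_t>(value >> 4); /* plain value conversion */
}

Throughout the patch, static_cast covers value conversions (integer widening, void * results from allocators), while reinterpret_cast is reserved for pointer/integer reinterpretations and type-punning hashes, so the intent of each cast is explicit and easy to grep.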
Diffstat (limited to 'source/blender/blenlib')
 source/blender/blenlib/BLI_allocator.hh                   | 15
 source/blender/blenlib/BLI_array.hh                       |  5
 source/blender/blenlib/BLI_color.hh                       | 12
 source/blender/blenlib/BLI_float3.hh                      |  8
 source/blender/blenlib/BLI_float4x4.hh                    | 10
 source/blender/blenlib/BLI_hash.hh                        | 10
 source/blender/blenlib/BLI_hash_tables.hh                 | 14
 source/blender/blenlib/BLI_linear_allocator.hh            | 15
 source/blender/blenlib/BLI_listbase_wrapper.hh            |  4
 source/blender/blenlib/BLI_map.hh                         |  8
 source/blender/blenlib/BLI_memory_utils.hh                | 36
 source/blender/blenlib/BLI_probing_strategies.hh          |  2
 source/blender/blenlib/BLI_rand.hh                        |  6
 source/blender/blenlib/BLI_set.hh                         |  4
 source/blender/blenlib/BLI_span.hh                        | 12
 source/blender/blenlib/BLI_stack.hh                       |  7
 source/blender/blenlib/BLI_string_ref.hh                  | 10
 source/blender/blenlib/BLI_vector.hh                      | 18
 source/blender/blenlib/BLI_vector_set.hh                  |  7
 source/blender/blenlib/intern/rand.cc                     |  4
 source/blender/blenlib/tests/BLI_linear_allocator_test.cc |  2
 source/blender/blenlib/tests/BLI_span_test.cc             |  2
 22 files changed, 112 insertions(+), 99 deletions(-)
diff --git a/source/blender/blenlib/BLI_allocator.hh b/source/blender/blenlib/BLI_allocator.hh
index 3f753d1d81d..899c8499807 100644
--- a/source/blender/blenlib/BLI_allocator.hh
+++ b/source/blender/blenlib/BLI_allocator.hh
@@ -79,19 +79,20 @@ class RawAllocator {
public:
void *allocate(size_t size, size_t alignment, const char *UNUSED(name))
{
- BLI_assert(is_power_of_2_i((int)alignment));
+ BLI_assert(is_power_of_2_i(static_cast<int>(alignment)));
void *ptr = malloc(size + alignment + sizeof(MemHead));
- void *used_ptr = (void *)((uintptr_t)POINTER_OFFSET(ptr, alignment + sizeof(MemHead)) &
- ~((uintptr_t)alignment - 1));
- int offset = (int)((intptr_t)used_ptr - (intptr_t)ptr);
- BLI_assert(offset >= (int)sizeof(MemHead));
- ((MemHead *)used_ptr - 1)->offset = (int)offset;
+ void *used_ptr = reinterpret_cast<void *>(
+ reinterpret_cast<uintptr_t>(POINTER_OFFSET(ptr, alignment + sizeof(MemHead))) &
+ ~(static_cast<uintptr_t>(alignment) - 1));
+ int offset = static_cast<int>((intptr_t)used_ptr - (intptr_t)ptr);
+ BLI_assert(offset >= static_cast<int>(sizeof(MemHead)));
+ (static_cast<MemHead *>(used_ptr) - 1)->offset = offset;
return used_ptr;
}
void deallocate(void *ptr)
{
- MemHead *head = (MemHead *)ptr - 1;
+ MemHead *head = static_cast<MemHead *>(ptr) - 1;
int offset = -head->offset;
void *actual_pointer = POINTER_OFFSET(ptr, offset);
free(actual_pointer);
diff --git a/source/blender/blenlib/BLI_array.hh b/source/blender/blenlib/BLI_array.hh
index 796123af765..9b307dc6a04 100644
--- a/source/blender/blenlib/BLI_array.hh
+++ b/source/blender/blenlib/BLI_array.hh
@@ -176,7 +176,7 @@ class Array {
{
destruct_n(data_, size_);
if (!this->uses_inline_buffer()) {
- allocator_.deallocate((void *)data_);
+ allocator_.deallocate(static_cast<void *>(data_));
}
}
@@ -351,7 +351,8 @@ class Array {
T *allocate(int64_t size)
{
- return (T *)allocator_.allocate((size_t)size * sizeof(T), alignof(T), AT);
+ return static_cast<T *>(
+ allocator_.allocate(static_cast<size_t>(size) * sizeof(T), alignof(T), AT));
}
bool uses_inline_buffer() const
diff --git a/source/blender/blenlib/BLI_color.hh b/source/blender/blenlib/BLI_color.hh
index c0d2f43645d..e57a5109a66 100644
--- a/source/blender/blenlib/BLI_color.hh
+++ b/source/blender/blenlib/BLI_color.hh
@@ -63,10 +63,10 @@ struct Color4f {
uint64_t hash() const
{
- uint64_t x1 = *(uint32_t *)&r;
- uint64_t x2 = *(uint32_t *)&g;
- uint64_t x3 = *(uint32_t *)&b;
- uint64_t x4 = *(uint32_t *)&a;
+ uint64_t x1 = *reinterpret_cast<const uint32_t *>(&r);
+ uint64_t x2 = *reinterpret_cast<const uint32_t *>(&g);
+ uint64_t x3 = *reinterpret_cast<const uint32_t *>(&b);
+ uint64_t x4 = *reinterpret_cast<const uint32_t *>(&a);
return (x1 * 1283591) ^ (x2 * 850177) ^ (x3 * 735391) ^ (x4 * 442319);
}
};
@@ -120,8 +120,8 @@ struct Color4b {
uint64_t hash() const
{
- return ((uint64_t)r * 1283591) ^ ((uint64_t)g * 850177) ^ ((uint64_t)b * 735391) ^
- ((uint64_t)a * 442319);
+ return static_cast<uint64_t>(r * 1283591) ^ static_cast<uint64_t>(g * 850177) ^
+ static_cast<uint64_t>(b * 735391) ^ static_cast<uint64_t>(a * 442319);
}
};
diff --git a/source/blender/blenlib/BLI_float3.hh b/source/blender/blenlib/BLI_float3.hh
index a976e909738..2d90498fee8 100644
--- a/source/blender/blenlib/BLI_float3.hh
+++ b/source/blender/blenlib/BLI_float3.hh
@@ -31,7 +31,7 @@ struct float3 {
{
}
- float3(const float (*ptr)[3]) : float3((const float *)ptr)
+ float3(const float (*ptr)[3]) : float3(static_cast<const float *>(ptr[0]))
{
}
@@ -204,9 +204,9 @@ struct float3 {
uint64_t hash() const
{
- uint64_t x1 = *(uint32_t *)&x;
- uint64_t x2 = *(uint32_t *)&y;
- uint64_t x3 = *(uint32_t *)&z;
+ uint64_t x1 = *reinterpret_cast<const uint32_t *>(&x);
+ uint64_t x2 = *reinterpret_cast<const uint32_t *>(&y);
+ uint64_t x3 = *reinterpret_cast<const uint32_t *>(&z);
return (x1 * 435109) ^ (x2 * 380867) ^ (x3 * 1059217);
}
diff --git a/source/blender/blenlib/BLI_float4x4.hh b/source/blender/blenlib/BLI_float4x4.hh
index 85d38149bb9..0433197b22a 100644
--- a/source/blender/blenlib/BLI_float4x4.hh
+++ b/source/blender/blenlib/BLI_float4x4.hh
@@ -31,18 +31,18 @@ struct float4x4 {
memcpy(values, matrix, sizeof(float) * 16);
}
- float4x4(const float matrix[4][4]) : float4x4((float *)matrix)
+ float4x4(const float matrix[4][4]) : float4x4(static_cast<const float *>(matrix[0]))
{
}
operator float *()
{
- return (float *)this;
+ return &values[0][0];
}
operator const float *() const
{
- return (const float *)this;
+ return &values[0][0];
}
friend float4x4 operator*(const float4x4 &a, const float4x4 &b)
@@ -124,8 +124,8 @@ struct float4x4 {
{
uint64_t h = 435109;
for (int i = 0; i < 16; i++) {
- float value = ((const float *)this)[i];
- h = h * 33 + (*(uint32_t *)&value);
+ float value = (static_cast<const float *>(values[0]))[i];
+ h = h * 33 + *reinterpret_cast<const uint32_t *>(&value);
}
return h;
}
diff --git a/source/blender/blenlib/BLI_hash.hh b/source/blender/blenlib/BLI_hash.hh
index ad3224e037a..8301dbb8e3a 100644
--- a/source/blender/blenlib/BLI_hash.hh
+++ b/source/blender/blenlib/BLI_hash.hh
@@ -110,7 +110,7 @@ template<typename T> struct DefaultHash<const T> {
template<> struct DefaultHash<TYPE> { \
uint64_t operator()(TYPE value) const \
{ \
- return (uint64_t)value; \
+ return static_cast<uint64_t>(value); \
} \
}
@@ -135,14 +135,14 @@ TRIVIAL_DEFAULT_INT_HASH(uint64_t);
template<> struct DefaultHash<float> {
uint64_t operator()(float value) const
{
- return *(uint32_t *)&value;
+ return *reinterpret_cast<uint32_t *>(&value);
}
};
template<> struct DefaultHash<bool> {
uint64_t operator()(bool value) const
{
- return (uint64_t)(value != false) * 1298191;
+ return static_cast<uint64_t>((value != false) * 1298191);
}
};
@@ -186,8 +186,8 @@ template<> struct DefaultHash<StringRefNull> {
template<typename T> struct DefaultHash<T *> {
uint64_t operator()(const T *value) const
{
- uintptr_t ptr = (uintptr_t)value;
- uint64_t hash = (uint64_t)(ptr >> 4);
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(value);
+ uint64_t hash = static_cast<uint64_t>(ptr >> 4);
return hash;
}
};
diff --git a/source/blender/blenlib/BLI_hash_tables.hh b/source/blender/blenlib/BLI_hash_tables.hh
index e6026d93e2c..9fc8ff30f79 100644
--- a/source/blender/blenlib/BLI_hash_tables.hh
+++ b/source/blender/blenlib/BLI_hash_tables.hh
@@ -56,7 +56,8 @@ inline constexpr int64_t log2_floor_constexpr(const int64_t x)
inline constexpr int64_t log2_ceil_constexpr(const int64_t x)
{
BLI_assert(x >= 0);
- return (is_power_of_2_constexpr((int)x)) ? log2_floor_constexpr(x) : log2_floor_constexpr(x) + 1;
+ return (is_power_of_2_constexpr(static_cast<int>(x))) ? log2_floor_constexpr(x) :
+ log2_floor_constexpr(x) + 1;
}
inline constexpr int64_t power_of_2_max_constexpr(const int64_t x)
@@ -83,14 +84,17 @@ inline constexpr int64_t ceil_division_by_fraction(const int64_t x,
const int64_t numerator,
const int64_t denominator)
{
- return (int64_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator);
+ return static_cast<int64_t>(
+ ceil_division(static_cast<uint64_t>(x) * static_cast<uint64_t>(denominator),
+ static_cast<uint64_t>(numerator)));
}
inline constexpr int64_t floor_multiplication_with_fraction(const int64_t x,
const int64_t numerator,
const int64_t denominator)
{
- return (int64_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator);
+ return static_cast<int64_t>((static_cast<uint64_t>(x) * static_cast<uint64_t>(numerator) /
+ static_cast<uint64_t>(denominator)));
}
inline constexpr int64_t total_slot_amount_for_usable_slots(
@@ -130,7 +134,7 @@ class LoadFactor {
int64_t *r_total_slots,
int64_t *r_usable_slots) const
{
- BLI_assert(is_power_of_2_i((int)min_total_slots));
+ BLI_assert(is_power_of_2_i(static_cast<int>(min_total_slots)));
int64_t total_slots = this->compute_total_slots(min_usable_slots, numerator_, denominator_);
total_slots = std::max(total_slots, min_total_slots);
@@ -297,7 +301,7 @@ class HashTableStats {
removed_amount_ = hash_table.removed_amount();
size_per_element_ = hash_table.size_per_element();
size_in_bytes_ = hash_table.size_in_bytes();
- address_ = (const void *)&hash_table;
+ address_ = static_cast<const void *>(&hash_table);
for (const auto &key : keys) {
int64_t collisions = hash_table.count_collisions(key);
diff --git a/source/blender/blenlib/BLI_linear_allocator.hh b/source/blender/blenlib/BLI_linear_allocator.hh
index fcc20d483c9..a616ec5cf28 100644
--- a/source/blender/blenlib/BLI_linear_allocator.hh
+++ b/source/blender/blenlib/BLI_linear_allocator.hh
@@ -82,7 +82,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
if (potential_allocation_end <= current_end_) {
current_begin_ = potential_allocation_end;
- return (void *)potential_allocation_begin;
+ return reinterpret_cast<void *>(potential_allocation_begin);
}
else {
this->allocate_new_buffer(size + alignment);
@@ -97,7 +97,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
template<typename T> T *allocate()
{
- return (T *)this->allocate(sizeof(T), alignof(T));
+ return static_cast<T *>(this->allocate(sizeof(T), alignof(T)));
}
/**
@@ -107,7 +107,8 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
template<typename T> MutableSpan<T> allocate_array(int64_t size)
{
- return MutableSpan<T>((T *)this->allocate(sizeof(T) * size, alignof(T)), size);
+ T *array = static_cast<T *>(this->allocate(sizeof(T) * size, alignof(T)));
+ return MutableSpan<T>(array, size);
}
/**
@@ -142,9 +143,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
StringRefNull copy_string(StringRef str)
{
const int64_t alloc_size = str.size() + 1;
- char *buffer = (char *)this->allocate(alloc_size, 1);
+ char *buffer = static_cast<char *>(this->allocate(alloc_size, 1));
str.copy(buffer, alloc_size);
- return StringRefNull((const char *)buffer);
+ return StringRefNull(static_cast<const char *>(buffer));
}
MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
@@ -172,7 +173,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
MutableSpan<T *> pointers = void_pointers.cast<T *>();
for (int64_t i : IndexRange(n)) {
- new ((void *)pointers[i]) T(std::forward<Args>(args)...);
+ new (static_cast<void *>(pointers[i])) T(std::forward<Args>(args)...);
}
return pointers;
@@ -184,7 +185,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
void provide_buffer(void *buffer, uint size)
{
- unused_borrowed_buffers_.append(Span<char>((char *)buffer, size));
+ unused_borrowed_buffers_.append(Span<char>(static_cast<char *>(buffer), size));
}
template<size_t Size, size_t Alignment>
diff --git a/source/blender/blenlib/BLI_listbase_wrapper.hh b/source/blender/blenlib/BLI_listbase_wrapper.hh
index 00f757d4bc2..c31694d7d9e 100644
--- a/source/blender/blenlib/BLI_listbase_wrapper.hh
+++ b/source/blender/blenlib/BLI_listbase_wrapper.hh
@@ -80,7 +80,7 @@ template<typename T> class ListBaseWrapper {
Iterator begin() const
{
- return Iterator(listbase_, (T *)listbase_->first);
+ return Iterator(listbase_, static_cast<T *>(listbase_->first));
}
Iterator end() const
@@ -92,7 +92,7 @@ template<typename T> class ListBaseWrapper {
{
void *ptr = BLI_findlink(listbase_, index);
BLI_assert(ptr);
- return (T *)ptr;
+ return static_cast<T *>(ptr);
}
int64_t index_of(const T *value) const
diff --git a/source/blender/blenlib/BLI_map.hh b/source/blender/blenlib/BLI_map.hh
index f90d59f45a5..229bbfad0e4 100644
--- a/source/blender/blenlib/BLI_map.hh
+++ b/source/blender/blenlib/BLI_map.hh
@@ -824,7 +824,7 @@ class Map {
*/
int64_t size_in_bytes() const
{
- return (int64_t)(sizeof(Slot) * slots_.size());
+ return static_cast<int64_t>(sizeof(Slot) * slots_.size());
}
/**
@@ -863,7 +863,7 @@ class Map {
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
BLI_assert(total_slots >= 1);
- const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
+ const uint64_t new_slot_mask = static_cast<uint64_t>(total_slots) - 1;
/**
* Optimize the case when the map was empty beforehand. We can avoid some copies here.
@@ -1107,7 +1107,7 @@ class Map {
bool add_overwrite__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
auto create_func = [&](Value *ptr) {
- new ((void *)ptr) Value(std::forward<ForwardValue>(value));
+ new (static_cast<void *>(ptr)) Value(std::forward<ForwardValue>(value));
return true;
};
auto modify_func = [&](Value *ptr) {
@@ -1185,7 +1185,7 @@ template<typename Key, typename Value> class StdUnorderedMapWrapper {
public:
int64_t size() const
{
- return (int64_t)map_.size();
+ return static_cast<int64_t>(map_.size());
}
bool is_empty() const
diff --git a/source/blender/blenlib/BLI_memory_utils.hh b/source/blender/blenlib/BLI_memory_utils.hh
index d663bf4038d..7216536a884 100644
--- a/source/blender/blenlib/BLI_memory_utils.hh
+++ b/source/blender/blenlib/BLI_memory_utils.hh
@@ -84,7 +84,7 @@ template<typename T> void default_construct_n(T *ptr, int64_t n)
int64_t current = 0;
try {
for (; current < n; current++) {
- new ((void *)(ptr + current)) T;
+ new (static_cast<void *>(ptr + current)) T;
}
}
catch (...) {
@@ -133,7 +133,7 @@ template<typename T> void uninitialized_copy_n(const T *src, int64_t n, T *dst)
int64_t current = 0;
try {
for (; current < n; current++) {
- new ((void *)(dst + current)) T(src[current]);
+ new (static_cast<void *>(dst + current)) T(src[current]);
}
}
catch (...) {
@@ -162,7 +162,7 @@ void uninitialized_convert_n(const From *src, int64_t n, To *dst)
int64_t current = 0;
try {
for (; current < n; current++) {
- new ((void *)(dst + current)) To((To)src[current]);
+ new (static_cast<void *>(dst + current)) To((To)src[current]);
}
}
catch (...) {
@@ -211,7 +211,7 @@ template<typename T> void uninitialized_move_n(T *src, int64_t n, T *dst)
int64_t current = 0;
try {
for (; current < n; current++) {
- new ((void *)(dst + current)) T(std::move(src[current]));
+ new (static_cast<void *>(dst + current)) T(std::move(src[current]));
}
}
catch (...) {
@@ -298,7 +298,7 @@ template<typename T> void uninitialized_fill_n(T *dst, int64_t n, const T &value
int64_t current = 0;
try {
for (; current < n; current++) {
- new ((void *)(dst + current)) T(value);
+ new (static_cast<void *>(dst + current)) T(value);
}
}
catch (...) {
@@ -332,22 +332,22 @@ template<size_t Size, size_t Alignment> class alignas(Alignment) AlignedBuffer {
public:
operator void *()
{
- return (void *)buffer_;
+ return buffer_;
}
operator const void *() const
{
- return (void *)buffer_;
+ return buffer_;
}
void *ptr()
{
- return (void *)buffer_;
+ return buffer_;
}
const void *ptr() const
{
- return (const void *)buffer_;
+ return buffer_;
}
};
@@ -363,42 +363,42 @@ template<typename T, int64_t Size = 1> class TypedBuffer {
public:
operator T *()
{
- return (T *)&buffer_;
+ return static_cast<T *>(buffer_.ptr());
}
operator const T *() const
{
- return (const T *)&buffer_;
+ return static_cast<const T *>(buffer_.ptr());
}
T &operator*()
{
- return *(T *)&buffer_;
+ return *static_cast<T *>(buffer_.ptr());
}
const T &operator*() const
{
- return *(const T *)&buffer_;
+ return *static_cast<const T *>(buffer_.ptr());
}
T *ptr()
{
- return (T *)&buffer_;
+ return static_cast<T *>(buffer_.ptr());
}
const T *ptr() const
{
- return (const T *)&buffer_;
+ return static_cast<const T *>(buffer_.ptr());
}
T &ref()
{
- return *(T *)&buffer_;
+ return *static_cast<T *>(buffer_.ptr());
}
const T &ref() const
{
- return *(const T *)&buffer_;
+ return *static_cast<const T *>(buffer_.ptr());
}
};
@@ -424,7 +424,7 @@ inline constexpr bool is_convertible_pointer_v =
*/
inline constexpr int64_t default_inline_buffer_capacity(size_t element_size)
{
- return ((int64_t)element_size < 100) ? 4 : 0;
+ return (static_cast<int64_t>(element_size) < 100) ? 4 : 0;
}
} // namespace blender
diff --git a/source/blender/blenlib/BLI_probing_strategies.hh b/source/blender/blenlib/BLI_probing_strategies.hh
index a37a979b754..d0b7eaafbf6 100644
--- a/source/blender/blenlib/BLI_probing_strategies.hh
+++ b/source/blender/blenlib/BLI_probing_strategies.hh
@@ -235,7 +235,7 @@ using DefaultProbingStrategy = PythonProbingStrategy<>;
int64_t linear_offset = 0; \
uint64_t current_hash = probing_strategy.get(); \
do { \
- int64_t R_SLOT_INDEX = (int64_t)((current_hash + (uint64_t)linear_offset) & MASK);
+ int64_t R_SLOT_INDEX = static_cast<int64_t>((current_hash + static_cast<uint64_t>(linear_offset)) & MASK);
#define SLOT_PROBING_END() \
} while (++linear_offset < probing_strategy.linear_steps()); \
diff --git a/source/blender/blenlib/BLI_rand.hh b/source/blender/blenlib/BLI_rand.hh
index b3c9c376141..1b321bd7bcd 100644
--- a/source/blender/blenlib/BLI_rand.hh
+++ b/source/blender/blenlib/BLI_rand.hh
@@ -44,7 +44,7 @@ class RandomNumberGenerator {
void seed(uint32_t seed)
{
constexpr uint64_t lowseed = 0x330E;
- x_ = (((uint64_t)seed) << 16) | lowseed;
+ x_ = (static_cast<uint64_t>(seed) << 16) | lowseed;
}
void seed_random(uint32_t seed);
@@ -52,13 +52,13 @@ class RandomNumberGenerator {
uint32_t get_uint32()
{
this->step();
- return (uint32_t)(x_ >> 17);
+ return static_cast<uint32_t>(x_ >> 17);
}
int32_t get_int32()
{
this->step();
- return (int32_t)(x_ >> 17);
+ return static_cast<int32_t>(x_ >> 17);
}
/**
diff --git a/source/blender/blenlib/BLI_set.hh b/source/blender/blenlib/BLI_set.hh
index eb4e258b679..477a03cf623 100644
--- a/source/blender/blenlib/BLI_set.hh
+++ b/source/blender/blenlib/BLI_set.hh
@@ -556,7 +556,7 @@ class Set {
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
BLI_assert(total_slots >= 1);
- const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
+ const uint64_t new_slot_mask = static_cast<uint64_t>(total_slots) - 1;
/**
* Optimize the case when the set was empty beforehand. We can avoid some copies here.
@@ -748,7 +748,7 @@ template<typename Key> class StdUnorderedSetWrapper {
public:
int64_t size() const
{
- return (int64_t)set_.size();
+ return static_cast<int64_t>(set_.size());
}
bool is_empty() const
diff --git a/source/blender/blenlib/BLI_span.hh b/source/blender/blenlib/BLI_span.hh
index 5aee0028846..165814cf23c 100644
--- a/source/blender/blenlib/BLI_span.hh
+++ b/source/blender/blenlib/BLI_span.hh
@@ -101,7 +101,7 @@ template<typename T> class Span {
}
template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr>
- Span(const U *start, int64_t size) : data_((const T *)start), size_(size)
+ Span(const U *start, int64_t size) : data_(static_cast<const T *>(start)), size_(size)
{
BLI_assert(size >= 0);
}
@@ -117,11 +117,12 @@ template<typename T> class Span {
* Span<int> span = {1, 2, 3, 4};
* call_function_with_array(span);
*/
- Span(const std::initializer_list<T> &list) : Span(list.begin(), (int64_t)list.size())
+ Span(const std::initializer_list<T> &list)
+ : Span(list.begin(), static_cast<int64_t>(list.size()))
{
}
- Span(const std::vector<T> &vector) : Span(vector.data(), (int64_t)vector.size())
+ Span(const std::vector<T> &vector) : Span(vector.data(), static_cast<int64_t>(vector.size()))
{
}
@@ -132,10 +133,9 @@ template<typename T> class Span {
/**
* Support implicit conversions like the ones below:
* Span<T *> -> Span<const T *>
- * Span<Derived *> -> Span<Base *>
*/
template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr>
- Span(Span<U> array) : data_((T *)array.data()), size_(array.size())
+ Span(Span<U> array) : data_(static_cast<const T *>(array.data())), size_(array.size())
{
}
@@ -373,7 +373,7 @@ template<typename T> class Span {
{
const int64_t index = this->first_index_try(search_value);
BLI_assert(index >= 0);
- return (int64_t)index;
+ return index;
}
/**
diff --git a/source/blender/blenlib/BLI_stack.hh b/source/blender/blenlib/BLI_stack.hh
index 5fa7867e176..8eca356ec54 100644
--- a/source/blender/blenlib/BLI_stack.hh
+++ b/source/blender/blenlib/BLI_stack.hh
@@ -350,11 +350,12 @@ class Stack {
void *buffer = allocator_.allocate(
sizeof(Chunk) + sizeof(T) * new_capacity + alignof(T), alignof(Chunk), AT);
void *chunk_buffer = buffer;
- void *data_buffer = (void *)(((uintptr_t)buffer + sizeof(Chunk) + alignof(T) - 1) &
- ~(alignof(T) - 1));
+ void *data_buffer = reinterpret_cast<void *>(
+ (reinterpret_cast<uintptr_t>(buffer) + sizeof(Chunk) + alignof(T) - 1) &
+ ~(alignof(T) - 1));
Chunk *new_chunk = new (chunk_buffer) Chunk();
- new_chunk->begin = (T *)data_buffer;
+ new_chunk->begin = static_cast<T *>(data_buffer);
new_chunk->capacity_end = new_chunk->begin + new_capacity;
new_chunk->above = nullptr;
new_chunk->below = top_chunk_;
diff --git a/source/blender/blenlib/BLI_string_ref.hh b/source/blender/blenlib/BLI_string_ref.hh
index 8bf4821f72a..a217dc4c772 100644
--- a/source/blender/blenlib/BLI_string_ref.hh
+++ b/source/blender/blenlib/BLI_string_ref.hh
@@ -167,7 +167,7 @@ class StringRefNull : public StringRefBase {
/**
* Construct a StringRefNull from a null terminated c-string. The pointer must not point to NULL.
*/
- StringRefNull(const char *str) : StringRefBase(str, (int64_t)strlen(str))
+ StringRefNull(const char *str) : StringRefBase(str, static_cast<int64_t>(strlen(str)))
{
BLI_assert(str != NULL);
BLI_assert(data_[size_] == '\0');
@@ -179,7 +179,7 @@ class StringRefNull : public StringRefBase {
*/
StringRefNull(const char *str, const int64_t size) : StringRefBase(str, size)
{
- BLI_assert((int64_t)strlen(str) == size);
+ BLI_assert(static_cast<int64_t>(strlen(str)) == size);
}
/**
@@ -231,7 +231,7 @@ class StringRef : public StringRefBase {
/**
* Create a StringRef from a null-terminated c-string.
*/
- StringRef(const char *str) : StringRefBase(str, str ? (int64_t)strlen(str) : 0)
+ StringRef(const char *str) : StringRefBase(str, str ? static_cast<int64_t>(strlen(str)) : 0)
{
}
@@ -244,7 +244,7 @@ class StringRef : public StringRefBase {
* second point points to a smaller address than the first one.
*/
StringRef(const char *begin, const char *one_after_end)
- : StringRefBase(begin, (int64_t)(one_after_end - begin))
+ : StringRefBase(begin, static_cast<int64_t>(one_after_end - begin))
{
BLI_assert(begin <= one_after_end);
}
@@ -253,7 +253,7 @@ class StringRef : public StringRefBase {
* Reference a std::string. Remember that when the std::string is destructed, the StringRef
* will point to uninitialized memory.
*/
- StringRef(const std::string &str) : StringRefBase(str.data(), (int64_t)str.size())
+ StringRef(const std::string &str) : StringRefBase(str.data(), static_cast<int64_t>(str.size()))
{
}
diff --git a/source/blender/blenlib/BLI_vector.hh b/source/blender/blenlib/BLI_vector.hh
index 52e966267b5..48110ef2814 100644
--- a/source/blender/blenlib/BLI_vector.hh
+++ b/source/blender/blenlib/BLI_vector.hh
@@ -100,7 +100,8 @@ class Vector {
*/
#ifndef NDEBUG
int64_t debug_size_;
-# define UPDATE_VECTOR_SIZE(ptr) (ptr)->debug_size_ = (int64_t)((ptr)->end_ - (ptr)->begin_)
+# define UPDATE_VECTOR_SIZE(ptr) \
+ (ptr)->debug_size_ = static_cast<int64_t>((ptr)->end_ - (ptr)->begin_)
#else
# define UPDATE_VECTOR_SIZE(ptr) ((void)0)
#endif
@@ -243,7 +244,8 @@ class Vector {
else {
/* Copy from inline buffer to newly allocated buffer. */
const int64_t capacity = size;
- begin_ = (T *)allocator_.allocate(sizeof(T) * (size_t)capacity, alignof(T), AT);
+ begin_ = static_cast<T *>(
+ allocator_.allocate(sizeof(T) * static_cast<size_t>(capacity), alignof(T), AT));
end_ = begin_ + size;
capacity_end_ = begin_ + capacity;
uninitialized_relocate_n(other.begin_, size, begin_);
@@ -578,8 +580,9 @@ class Vector {
*/
int64_t size() const
{
- BLI_assert(debug_size_ == (int64_t)(end_ - begin_));
- return (int64_t)(end_ - begin_);
+ const int64_t current_size = static_cast<int64_t>(end_ - begin_);
+ BLI_assert(debug_size_ == current_size);
+ return current_size;
}
/**
@@ -675,7 +678,7 @@ class Vector {
{
for (const T *current = begin_; current != end_; current++) {
if (*current == value) {
- return (int64_t)(current - begin_);
+ return static_cast<int64_t>(current - begin_);
}
}
return -1;
@@ -749,7 +752,7 @@ class Vector {
*/
int64_t capacity() const
{
- return (int64_t)(capacity_end_ - begin_);
+ return static_cast<int64_t>(capacity_end_ - begin_);
}
/**
@@ -808,7 +811,8 @@ class Vector {
const int64_t new_capacity = std::max(min_capacity, min_new_capacity);
const int64_t size = this->size();
- T *new_array = (T *)allocator_.allocate((size_t)new_capacity * sizeof(T), alignof(T), AT);
+ T *new_array = static_cast<T *>(
+ allocator_.allocate(static_cast<size_t>(new_capacity) * sizeof(T), alignof(T), AT));
uninitialized_relocate_n(begin_, size, new_array);
if (!this->is_inline()) {
diff --git a/source/blender/blenlib/BLI_vector_set.hh b/source/blender/blenlib/BLI_vector_set.hh
index c3b15bcf454..9d7c61f9e3b 100644
--- a/source/blender/blenlib/BLI_vector_set.hh
+++ b/source/blender/blenlib/BLI_vector_set.hh
@@ -457,7 +457,7 @@ class VectorSet {
*/
int64_t size_in_bytes() const
{
- return (int64_t)(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
+ return static_cast<int64_t>(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
}
/**
@@ -486,7 +486,7 @@ class VectorSet {
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
BLI_assert(total_slots >= 1);
- const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
+ const uint64_t new_slot_mask = static_cast<uint64_t>(total_slots) - 1;
/* Optimize the case when the set was empty beforehand. We can avoid some copies here. */
if (this->size() == 0) {
@@ -722,7 +722,8 @@ class VectorSet {
Key *allocate_keys_array(const int64_t size)
{
- return (Key *)slots_.allocator().allocate(sizeof(Key) * (size_t)size, alignof(Key), AT);
+ return static_cast<Key *>(
+ slots_.allocator().allocate(sizeof(Key) * static_cast<size_t>(size), alignof(Key), AT));
}
void deallocate_keys_array(Key *keys)
diff --git a/source/blender/blenlib/intern/rand.cc b/source/blender/blenlib/intern/rand.cc
index 9bafc422db5..224db31471d 100644
--- a/source/blender/blenlib/intern/rand.cc
+++ b/source/blender/blenlib/intern/rand.cc
@@ -93,7 +93,7 @@ void BLI_rng_srandom(RNG *rng, unsigned int seed)
void BLI_rng_get_char_n(RNG *rng, char *bytes, size_t bytes_len)
{
- rng->rng.get_bytes(blender::MutableSpan(bytes, (int64_t)bytes_len));
+ rng->rng.get_bytes(blender::MutableSpan(bytes, static_cast<int64_t>(bytes_len)));
}
int BLI_rng_get_int(RNG *rng)
@@ -428,7 +428,7 @@ float2 RandomNumberGenerator::get_triangle_sample(float2 v1, float2 v2, float2 v
void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
{
constexpr int64_t mask_bytes = 2;
- constexpr int64_t rand_stride = (int64_t)sizeof(x_) - mask_bytes;
+ constexpr int64_t rand_stride = static_cast<int64_t>(sizeof(x_)) - mask_bytes;
int64_t last_len = 0;
int64_t trim_len = r_bytes.size();
diff --git a/source/blender/blenlib/tests/BLI_linear_allocator_test.cc b/source/blender/blenlib/tests/BLI_linear_allocator_test.cc
index 44b70d1f55d..a35fbf70711 100644
--- a/source/blender/blenlib/tests/BLI_linear_allocator_test.cc
+++ b/source/blender/blenlib/tests/BLI_linear_allocator_test.cc
@@ -8,7 +8,7 @@ namespace blender::tests {
static bool is_aligned(void *ptr, uint alignment)
{
- BLI_assert(is_power_of_2_i((int)alignment));
+ BLI_assert(is_power_of_2_i(static_cast<int>(alignment)));
return (POINTER_AS_UINT(ptr) & (alignment - 1)) == 0;
}
diff --git a/source/blender/blenlib/tests/BLI_span_test.cc b/source/blender/blenlib/tests/BLI_span_test.cc
index 587497624f4..6ad2a5633ad 100644
--- a/source/blender/blenlib/tests/BLI_span_test.cc
+++ b/source/blender/blenlib/tests/BLI_span_test.cc
@@ -197,7 +197,7 @@ TEST(span, SizeInBytes)
{
std::array<int, 10> a;
Span<int> a_span(a);
- EXPECT_EQ(a_span.size_in_bytes(), (int64_t)sizeof(a));
+ EXPECT_EQ(a_span.size_in_bytes(), static_cast<int64_t>(sizeof(a)));
EXPECT_EQ(a_span.size_in_bytes(), 40);
}