Diffstat (limited to 'source/blender/blenlib/intern')
-rw-r--r--  source/blender/blenlib/intern/BLI_index_range.cc | 30
-rw-r--r--  source/blender/blenlib/intern/BLI_kdopbvh.c | 2
-rw-r--r--  source/blender/blenlib/intern/BLI_memarena.c | 1
-rw-r--r--  source/blender/blenlib/intern/BLI_memblock.c | 1
-rw-r--r--  source/blender/blenlib/intern/array_utils.cc | 20
-rw-r--r--  source/blender/blenlib/intern/bitmap.c | 20
-rw-r--r--  source/blender/blenlib/intern/boxpack_2d.c | 1
-rw-r--r--  source/blender/blenlib/intern/compute_context.cc | 48
-rw-r--r--  source/blender/blenlib/intern/cpp_type.cc | 1
-rw-r--r--  source/blender/blenlib/intern/filereader_zstd.c | 5
-rw-r--r--  source/blender/blenlib/intern/generic_virtual_array.cc | 264
-rw-r--r--  source/blender/blenlib/intern/index_mask.cc | 52
-rw-r--r--  source/blender/blenlib/intern/kdtree_impl.h | 27
-rw-r--r--  source/blender/blenlib/intern/length_parameterize.cc | 150
-rw-r--r--  source/blender/blenlib/intern/math_base_inline.c | 18
-rw-r--r--  source/blender/blenlib/intern/math_geom.c | 6
-rw-r--r--  source/blender/blenlib/intern/math_matrix.c | 174
-rw-r--r--  source/blender/blenlib/intern/math_rotation.c | 150
-rw-r--r--  source/blender/blenlib/intern/math_rotation.cc | 13
-rw-r--r--  source/blender/blenlib/intern/math_vector_inline.c | 29
-rw-r--r--  source/blender/blenlib/intern/mesh_boolean.cc | 34
-rw-r--r--  source/blender/blenlib/intern/mesh_intersect.cc | 2
-rw-r--r--  source/blender/blenlib/intern/noise.c | 8
-rw-r--r--  source/blender/blenlib/intern/noise.cc | 69
-rw-r--r--  source/blender/blenlib/intern/path_util.c | 101
-rw-r--r--  source/blender/blenlib/intern/rct.c | 4
-rw-r--r--  source/blender/blenlib/intern/smallhash.c | 3
-rw-r--r--  source/blender/blenlib/intern/string_search.cc | 4
-rw-r--r--  source/blender/blenlib/intern/string_utf8.c | 56
-rw-r--r--  source/blender/blenlib/intern/system.c | 14
-rw-r--r--  source/blender/blenlib/intern/task_pool.cc | 8
-rw-r--r--  source/blender/blenlib/intern/timeit.cc | 18
-rw-r--r--  source/blender/blenlib/intern/winstuff.c | 33
33 files changed, 810 insertions, 556 deletions
diff --git a/source/blender/blenlib/intern/BLI_index_range.cc b/source/blender/blenlib/intern/BLI_index_range.cc
index 398228ab461..624dcc39fc5 100644
--- a/source/blender/blenlib/intern/BLI_index_range.cc
+++ b/source/blender/blenlib/intern/BLI_index_range.cc
@@ -44,4 +44,34 @@ Span<int64_t> IndexRange::as_span_internal() const
return Span<int64_t>(s_current_array + start_, size_);
}
+AlignedIndexRanges split_index_range_by_alignment(const IndexRange range, const int64_t alignment)
+{
+ BLI_assert(is_power_of_2_i(alignment));
+ const int64_t mask = alignment - 1;
+
+ AlignedIndexRanges aligned_ranges;
+
+ const int64_t start_chunk = range.start() & ~mask;
+ const int64_t end_chunk = range.one_after_last() & ~mask;
+ if (start_chunk == end_chunk) {
+ aligned_ranges.prefix = range;
+ }
+ else {
+ int64_t prefix_size = 0;
+ int64_t suffix_size = 0;
+ if (range.start() != start_chunk) {
+ prefix_size = alignment - (range.start() & mask);
+ }
+ if (range.one_after_last() != end_chunk) {
+ suffix_size = range.one_after_last() - end_chunk;
+ }
+ aligned_ranges.prefix = IndexRange(range.start(), prefix_size);
+ aligned_ranges.suffix = IndexRange(end_chunk, suffix_size);
+ aligned_ranges.aligned = IndexRange(aligned_ranges.prefix.one_after_last(),
+ range.size() - prefix_size - suffix_size);
+ }
+
+ return aligned_ranges;
+}
+
} // namespace blender
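A minimal usage sketch of the new split_index_range_by_alignment() helper (hypothetical caller; it assumes the AlignedIndexRanges declaration added alongside this function in BLI_index_range.hh):

/* Hypothetical caller: split work so the middle part starts at an index that is
 * a multiple of `alignment` and whose size is also a multiple of `alignment`. */
static void process_range_sketch(const blender::IndexRange range)
{
  using namespace blender;
  const AlignedIndexRanges parts = split_index_range_by_alignment(range, 8);
  for (const int64_t i : parts.prefix) {
    /* Scalar path for the few indices before the first aligned index. */
    (void)i;
  }
  for (int64_t i = parts.aligned.start(); i < parts.aligned.one_after_last(); i += 8) {
    /* Aligned path: process 8 elements starting at `i` (e.g. one SIMD/bit block). */
  }
  for (const int64_t i : parts.suffix) {
    /* Scalar path for the remaining indices after the last full block. */
    (void)i;
  }
}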
diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c
index 62bf17bd415..a43b725b6e3 100644
--- a/source/blender/blenlib/intern/BLI_kdopbvh.c
+++ b/source/blender/blenlib/intern/BLI_kdopbvh.c
@@ -1385,7 +1385,7 @@ BVHTreeOverlap *BLI_bvhtree_overlap(
static bool tree_intersect_plane_test(const float *bv, const float plane[4])
{
- /* TODO(germano): Support other KDOP geometries. */
+ /* TODO(@germano): Support other KDOP geometries. */
const float bb_min[3] = {bv[0], bv[2], bv[4]};
const float bb_max[3] = {bv[1], bv[3], bv[5]};
float bb_near[3], bb_far[3];
diff --git a/source/blender/blenlib/intern/BLI_memarena.c b/source/blender/blenlib/intern/BLI_memarena.c
index 3b73a81012d..ada2d27f9b2 100644
--- a/source/blender/blenlib/intern/BLI_memarena.c
+++ b/source/blender/blenlib/intern/BLI_memarena.c
@@ -158,6 +158,7 @@ void *BLI_memarena_calloc(MemArena *ma, size_t size)
BLI_assert(ma->use_calloc == false);
ptr = BLI_memarena_alloc(ma, size);
+ BLI_assert(ptr != NULL);
memset(ptr, 0, size);
return ptr;
diff --git a/source/blender/blenlib/intern/BLI_memblock.c b/source/blender/blenlib/intern/BLI_memblock.c
index f780d520301..b03efd2b8a2 100644
--- a/source/blender/blenlib/intern/BLI_memblock.c
+++ b/source/blender/blenlib/intern/BLI_memblock.c
@@ -5,7 +5,6 @@
* \ingroup bli
*
* Dead simple, fast memory allocator for allocating many elements of the same size.
- *
*/
#include <stdlib.h>
diff --git a/source/blender/blenlib/intern/array_utils.cc b/source/blender/blenlib/intern/array_utils.cc
new file mode 100644
index 00000000000..a0fc8810199
--- /dev/null
+++ b/source/blender/blenlib/intern/array_utils.cc
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "BLI_array_utils.hh"
+#include "BLI_task.hh"
+
+namespace blender::array_utils {
+
+void copy(const GVArray &src,
+ const IndexMask selection,
+ GMutableSpan dst,
+ const int64_t grain_size)
+{
+ BLI_assert(src.type() == dst.type());
+ BLI_assert(src.size() == dst.size());
+ threading::parallel_for(selection.index_range(), grain_size, [&](const IndexRange range) {
+ src.materialize_to_uninitialized(selection.slice(range), dst.data());
+ });
+}
+
+} // namespace blender::array_utils
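A minimal usage sketch of the new array_utils::copy() (hypothetical caller; the grain size is passed explicitly here):

/* Copy only the selected elements of a generic virtual array into `dst`.
 * `dst` must already have the same type and size as `src` (asserted above). */
static void copy_selection_sketch(const blender::GVArray &src,
                                  const blender::IndexMask selection,
                                  blender::GMutableSpan dst)
{
  blender::array_utils::copy(src, selection, dst, 4096);
}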
diff --git a/source/blender/blenlib/intern/bitmap.c b/source/blender/blenlib/intern/bitmap.c
index 7fcbc31c066..2cc2fbc3e2f 100644
--- a/source/blender/blenlib/intern/bitmap.c
+++ b/source/blender/blenlib/intern/bitmap.c
@@ -11,6 +11,7 @@
#include <string.h>
#include "BLI_bitmap.h"
+#include "BLI_math_bits.h"
#include "BLI_utildefines.h"
void BLI_bitmap_set_all(BLI_bitmap *bitmap, bool set, size_t bits)
@@ -46,3 +47,22 @@ void BLI_bitmap_or_all(BLI_bitmap *dst, const BLI_bitmap *src, size_t bits)
dst[i] |= src[i];
}
}
+
+int BLI_bitmap_find_first_unset(const BLI_bitmap *bitmap, const size_t bits)
+{
+ const size_t blocks_num = _BITMAP_NUM_BLOCKS(bits);
+ int result = -1;
+ /* Skip over completely set blocks. */
+ int index = 0;
+ while (index < blocks_num && bitmap[index] == ~0u) {
+ index++;
+ }
+ if (index < blocks_num) {
+ /* Found a partially used block: find the lowest unused bit. */
+ const uint m = ~bitmap[index];
+ BLI_assert(m != 0);
+ const uint bit_index = bitscan_forward_uint(m);
+ result = bit_index + (index << _BITMAP_POWER);
+ }
+ return result;
+}
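A usage sketch for the new BLI_bitmap_find_first_unset() (hypothetical slot allocator; BLI_BITMAP_ENABLE is the existing bit-set macro from BLI_bitmap.h):

/* Hand out the lowest free slot tracked by a bitmap, or -1 when full. */
static int acquire_free_slot_sketch(BLI_bitmap *used_slots, const size_t slots_num)
{
  const int slot = BLI_bitmap_find_first_unset(used_slots, slots_num);
  if (slot != -1) {
    BLI_BITMAP_ENABLE(used_slots, slot); /* Mark the slot as used. */
  }
  return slot;
}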
diff --git a/source/blender/blenlib/intern/boxpack_2d.c b/source/blender/blenlib/intern/boxpack_2d.c
index 78f5088e8b1..d55a4a8c9ff 100644
--- a/source/blender/blenlib/intern/boxpack_2d.c
+++ b/source/blender/blenlib/intern/boxpack_2d.c
@@ -712,7 +712,6 @@ void BLI_box_pack_2d_fixedarea(ListBase *boxes, int width, int height, ListBase
* # Box * Small # # Box * #
* # * # # * #
* ################### ###################
- *
*/
int area_hsplit_large = space->w * (space->h - box->h);
int area_vsplit_large = (space->w - box->w) * space->h;
diff --git a/source/blender/blenlib/intern/compute_context.cc b/source/blender/blenlib/intern/compute_context.cc
new file mode 100644
index 00000000000..50a4a90a4a9
--- /dev/null
+++ b/source/blender/blenlib/intern/compute_context.cc
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "BLI_compute_context.hh"
+#include "BLI_hash_md5.h"
+
+namespace blender {
+
+void ComputeContextHash::mix_in(const void *data, int64_t len)
+{
+ DynamicStackBuffer<> buffer_owner(HashSizeInBytes + len, 8);
+ char *buffer = static_cast<char *>(buffer_owner.buffer());
+ memcpy(buffer, this, HashSizeInBytes);
+ memcpy(buffer + HashSizeInBytes, data, len);
+
+ BLI_hash_md5_buffer(buffer, HashSizeInBytes + len, this);
+}
+
+std::ostream &operator<<(std::ostream &stream, const ComputeContextHash &hash)
+{
+ std::stringstream ss;
+ ss << "0x" << std::hex << hash.v1 << hash.v2;
+ stream << ss.str();
+ return stream;
+}
+
+void ComputeContext::print_stack(std::ostream &stream, StringRef name) const
+{
+ Stack<const ComputeContext *> stack;
+ for (const ComputeContext *current = this; current; current = current->parent_) {
+ stack.push(current);
+ }
+ stream << "Context Stack: " << name << "\n";
+ while (!stack.is_empty()) {
+ const ComputeContext *current = stack.pop();
+ stream << "-> ";
+ current->print_current_in_line(stream);
+ const ComputeContextHash &current_hash = current->hash_;
+ stream << " \t(hash: " << current_hash << ")\n";
+ }
+}
+
+std::ostream &operator<<(std::ostream &stream, const ComputeContext &compute_context)
+{
+ compute_context.print_stack(stream, "");
+ return stream;
+}
+
+} // namespace blender
diff --git a/source/blender/blenlib/intern/cpp_type.cc b/source/blender/blenlib/intern/cpp_type.cc
index d6a087cf175..38de32d3ec8 100644
--- a/source/blender/blenlib/intern/cpp_type.cc
+++ b/source/blender/blenlib/intern/cpp_type.cc
@@ -26,3 +26,4 @@ BLI_CPP_TYPE_MAKE(ColorGeometry4f, blender::ColorGeometry4f, CPPTypeFlags::Basic
BLI_CPP_TYPE_MAKE(ColorGeometry4b, blender::ColorGeometry4b, CPPTypeFlags::BasicType)
BLI_CPP_TYPE_MAKE(string, std::string, CPPTypeFlags::BasicType)
+BLI_CPP_TYPE_MAKE(StringVector, blender::Vector<std::string>, CPPTypeFlags::None)
diff --git a/source/blender/blenlib/intern/filereader_zstd.c b/source/blender/blenlib/intern/filereader_zstd.c
index 5f114f24fb0..aeb000e9754 100644
--- a/source/blender/blenlib/intern/filereader_zstd.c
+++ b/source/blender/blenlib/intern/filereader_zstd.c
@@ -281,7 +281,10 @@ static void zstd_close(FileReader *reader)
if (zstd->reader.seek) {
MEM_freeN(zstd->seek.uncompressed_ofs);
MEM_freeN(zstd->seek.compressed_ofs);
- MEM_freeN(zstd->seek.cached_content);
+ /* When an error has occurred this may be NULL, see: T99744. */
+ if (zstd->seek.cached_content) {
+ MEM_freeN(zstd->seek.cached_content);
+ }
}
else {
MEM_freeN((void *)zstd->in_buf.src);
diff --git a/source/blender/blenlib/intern/generic_virtual_array.cc b/source/blender/blenlib/intern/generic_virtual_array.cc
index a6fbf4bff5b..f66b1e14fc6 100644
--- a/source/blender/blenlib/intern/generic_virtual_array.cc
+++ b/source/blender/blenlib/intern/generic_virtual_array.cc
@@ -46,25 +46,9 @@ void GVArrayImpl::get(const int64_t index, void *r_value) const
this->get_to_uninitialized(index, r_value);
}
-bool GVArrayImpl::is_span() const
+CommonVArrayInfo GVArrayImpl::common_info() const
{
- return false;
-}
-
-GSpan GVArrayImpl::get_internal_span() const
-{
- BLI_assert(false);
- return GSpan(*type_);
-}
-
-bool GVArrayImpl::is_single() const
-{
- return false;
-}
-
-void GVArrayImpl::get_internal_single(void *UNUSED(r_value)) const
-{
- BLI_assert(false);
+ return {};
}
bool GVArrayImpl::try_assign_VArray(void *UNUSED(varray)) const
@@ -72,13 +56,6 @@ bool GVArrayImpl::try_assign_VArray(void *UNUSED(varray)) const
return false;
}
-bool GVArrayImpl::may_have_ownership() const
-{
- /* Use true as default to avoid accidentally creating subclasses that have this set to false but
- * actually own data. Subclasses should set the to false instead. */
- return true;
-}
-
/** \} */
/* -------------------------------------------------------------------- */
@@ -101,9 +78,9 @@ void GVMutableArrayImpl::set_by_relocate(const int64_t index, void *value)
void GVMutableArrayImpl::set_all(const void *src)
{
- if (this->is_span()) {
- const GSpan span = this->get_internal_span();
- type_->copy_assign_n(src, const_cast<void *>(span.data()), size_);
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ type_->copy_assign_n(src, const_cast<void *>(info.data), size_);
}
else {
for (int64_t i : IndexRange(size_)) {
@@ -114,9 +91,9 @@ void GVMutableArrayImpl::set_all(const void *src)
void GVMutableArray::fill(const void *value)
{
- if (this->is_span()) {
- const GSpan span = this->get_internal_span();
- this->type().fill_assign_n(value, const_cast<void *>(span.data()), this->size());
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ this->type().fill_assign_n(value, const_cast<void *>(info.data), this->size());
}
else {
for (int64_t i : IndexRange(this->size())) {
@@ -161,14 +138,9 @@ void GVArrayImpl_For_GSpan::set_by_relocate(const int64_t index, void *value)
type_->relocate_assign(value, POINTER_OFFSET(data_, element_size_ * index));
}
-bool GVArrayImpl_For_GSpan::is_span() const
-{
- return true;
-}
-
-GSpan GVArrayImpl_For_GSpan::get_internal_span() const
+CommonVArrayInfo GVArrayImpl_For_GSpan::common_info() const
{
- return GSpan(*type_, data_, size_);
+ return CommonVArrayInfo{CommonVArrayInfo::Type::Span, true, data_};
}
void GVArrayImpl_For_GSpan::materialize(const IndexMask mask, void *dst) const
@@ -210,22 +182,9 @@ void GVArrayImpl_For_SingleValueRef::get_to_uninitialized(const int64_t UNUSED(i
type_->copy_construct(value_, r_value);
}
-bool GVArrayImpl_For_SingleValueRef::is_span() const
-{
- return size_ == 1;
-}
-GSpan GVArrayImpl_For_SingleValueRef::get_internal_span() const
-{
- return GSpan{*type_, value_, 1};
-}
-
-bool GVArrayImpl_For_SingleValueRef::is_single() const
+CommonVArrayInfo GVArrayImpl_For_SingleValueRef::common_info() const
{
- return true;
-}
-void GVArrayImpl_For_SingleValueRef::get_internal_single(void *r_value) const
-{
- type_->copy_assign(value_, r_value);
+ return CommonVArrayInfo{CommonVArrayInfo::Type::Single, true, value_};
}
void GVArrayImpl_For_SingleValueRef::materialize(const IndexMask mask, void *dst) const
@@ -311,32 +270,36 @@ template<int BufferSize> class GVArrayImpl_For_SmallTrivialSingleValue : public
this->copy_value_to(r_value);
}
- bool is_single() const override
- {
- return true;
- }
- void get_internal_single(void *r_value) const override
+ void copy_value_to(void *dst) const
{
- this->copy_value_to(r_value);
+ memcpy(dst, &buffer_, type_->size());
}
- void copy_value_to(void *dst) const
+ CommonVArrayInfo common_info() const override
{
- memcpy(dst, &buffer_, type_->size());
+ return CommonVArrayInfo{CommonVArrayInfo::Type::Single, true, &buffer_};
}
};
/** \} */
/* -------------------------------------------------------------------- */
-/** \name #GVArray_GSpan
+/** \name #GVArraySpan
* \{ */
-GVArray_GSpan::GVArray_GSpan(GVArray varray) : GSpan(varray.type()), varray_(std::move(varray))
+GVArraySpan::GVArraySpan() = default;
+
+GVArraySpan::GVArraySpan(GVArray varray)
+ : GSpan(varray ? &varray.type() : nullptr), varray_(std::move(varray))
{
+ if (!varray_) {
+ return;
+ }
+
size_ = varray_.size();
- if (varray_.is_span()) {
- data_ = varray_.get_internal_span().data();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = info.data;
}
else {
owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
@@ -345,7 +308,27 @@ GVArray_GSpan::GVArray_GSpan(GVArray varray) : GSpan(varray.type()), varray_(std
}
}
-GVArray_GSpan::~GVArray_GSpan()
+GVArraySpan::GVArraySpan(GVArraySpan &&other)
+ : GSpan(other.type_ptr()), varray_(std::move(other.varray_)), owned_data_(other.owned_data_)
+{
+ if (!varray_) {
+ return;
+ }
+
+ size_ = varray_.size();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = info.data;
+ }
+ else {
+ data_ = owned_data_;
+ }
+ other.owned_data_ = nullptr;
+ other.data_ = nullptr;
+ other.size_ = 0;
+}
+
+GVArraySpan::~GVArraySpan()
{
if (owned_data_ != nullptr) {
type_->destruct_n(owned_data_, size_);
@@ -353,18 +336,34 @@ GVArray_GSpan::~GVArray_GSpan()
}
}
+GVArraySpan &GVArraySpan::operator=(GVArraySpan &&other)
+{
+ if (this == &other) {
+ return *this;
+ }
+ std::destroy_at(this);
+ new (this) GVArraySpan(std::move(other));
+ return *this;
+}
+
/** \} */
/* -------------------------------------------------------------------- */
-/** \name #GVMutableArray_GSpan
+/** \name #GMutableVArraySpan
* \{ */
-GVMutableArray_GSpan::GVMutableArray_GSpan(GVMutableArray varray, const bool copy_values_to_span)
- : GMutableSpan(varray.type()), varray_(std::move(varray))
+GMutableVArraySpan::GMutableVArraySpan() = default;
+
+GMutableVArraySpan::GMutableVArraySpan(GVMutableArray varray, const bool copy_values_to_span)
+ : GMutableSpan(varray ? &varray.type() : nullptr), varray_(std::move(varray))
{
+ if (!varray_) {
+ return;
+ }
size_ = varray_.size();
- if (varray_.is_span()) {
- data_ = varray_.get_internal_span().data();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = const_cast<void *>(info.data);
}
else {
owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
@@ -378,11 +377,35 @@ GVMutableArray_GSpan::GVMutableArray_GSpan(GVMutableArray varray, const bool cop
}
}
-GVMutableArray_GSpan::~GVMutableArray_GSpan()
+GMutableVArraySpan::GMutableVArraySpan(GMutableVArraySpan &&other)
+ : GMutableSpan(other.type_ptr()),
+ varray_(std::move(other.varray_)),
+ owned_data_(other.owned_data_),
+ show_not_saved_warning_(other.show_not_saved_warning_)
{
- if (show_not_saved_warning_) {
- if (!save_has_been_called_) {
- std::cout << "Warning: Call `apply()` to make sure that changes persist in all cases.\n";
+ if (!varray_) {
+ return;
+ }
+ size_ = varray_.size();
+ const CommonVArrayInfo info = varray_.common_info();
+ if (info.type == CommonVArrayInfo::Type::Span) {
+ data_ = const_cast<void *>(info.data);
+ }
+ else {
+ data_ = owned_data_;
+ }
+ other.owned_data_ = nullptr;
+ other.data_ = nullptr;
+ other.size_ = 0;
+}
+
+GMutableVArraySpan::~GMutableVArraySpan()
+{
+ if (varray_) {
+ if (show_not_saved_warning_) {
+ if (!save_has_been_called_) {
+ std::cout << "Warning: Call `save()` to make sure that changes persist in all cases.\n";
+ }
}
}
if (owned_data_ != nullptr) {
@@ -391,7 +414,17 @@ GVMutableArray_GSpan::~GVMutableArray_GSpan()
}
}
-void GVMutableArray_GSpan::save()
+GMutableVArraySpan &GMutableVArraySpan::operator=(GMutableVArraySpan &&other)
+{
+ if (this == &other) {
+ return *this;
+ }
+ std::destroy_at(this);
+ new (this) GMutableVArraySpan(std::move(other));
+ return *this;
+}
+
+void GMutableVArraySpan::save()
{
save_has_been_called_ = true;
if (data_ != owned_data_) {
@@ -400,11 +433,16 @@ void GVMutableArray_GSpan::save()
varray_.set_all(owned_data_);
}
-void GVMutableArray_GSpan::disable_not_applied_warning()
+void GMutableVArraySpan::disable_not_applied_warning()
{
show_not_saved_warning_ = false;
}
+const GVMutableArray &GMutableVArraySpan::varray() const
+{
+ return varray_;
+}
+
/** \} */
/* -------------------------------------------------------------------- */
@@ -437,22 +475,24 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
varray_.get_to_uninitialized(index + offset_, r_value);
}
- bool is_span() const override
- {
- return varray_.is_span();
- }
- GSpan get_internal_span() const override
- {
- return varray_.get_internal_span().slice(slice_);
- }
-
- bool is_single() const override
- {
- return varray_.is_single();
- }
- void get_internal_single(void *r_value) const override
+ CommonVArrayInfo common_info() const override
{
- varray_.get_internal_single(r_value);
+ const CommonVArrayInfo internal_info = varray_.common_info();
+ switch (internal_info.type) {
+ case CommonVArrayInfo::Type::Any: {
+ return {};
+ }
+ case CommonVArrayInfo::Type::Span: {
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Span,
+ internal_info.may_have_ownership,
+ POINTER_OFFSET(internal_info.data, type_->size() * offset_));
+ }
+ case CommonVArrayInfo::Type::Single: {
+ return internal_info;
+ }
+ }
+ BLI_assert_unreachable();
+ return {};
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
@@ -535,11 +575,6 @@ void GVArrayCommon::materialize_compressed_to_uninitialized(IndexMask mask, void
impl_->materialize_compressed_to_uninitialized(mask, dst);
}
-bool GVArrayCommon::may_have_ownership() const
-{
- return impl_->may_have_ownership();
-}
-
void GVArrayCommon::copy_from(const GVArrayCommon &other)
{
if (this == &other) {
@@ -562,24 +597,28 @@ void GVArrayCommon::move_from(GVArrayCommon &&other) noexcept
bool GVArrayCommon::is_span() const
{
- return impl_->is_span();
+ const CommonVArrayInfo info = impl_->common_info();
+ return info.type == CommonVArrayInfo::Type::Span;
}
GSpan GVArrayCommon::get_internal_span() const
{
BLI_assert(this->is_span());
- return impl_->get_internal_span();
+ const CommonVArrayInfo info = impl_->common_info();
+ return GSpan(this->type(), info.data, this->size());
}
bool GVArrayCommon::is_single() const
{
- return impl_->is_single();
+ const CommonVArrayInfo info = impl_->common_info();
+ return info.type == CommonVArrayInfo::Type::Single;
}
void GVArrayCommon::get_internal_single(void *r_value) const
{
BLI_assert(this->is_single());
- impl_->get_internal_single(r_value);
+ const CommonVArrayInfo info = impl_->common_info();
+ this->type().copy_assign(info.data, r_value);
}
void GVArrayCommon::get_internal_single_to_uninitialized(void *r_value) const
@@ -675,6 +714,15 @@ GVArray GVArray::ForEmpty(const CPPType &type)
GVArray GVArray::slice(IndexRange slice) const
{
+ const CommonVArrayInfo info = this->common_info();
+ if (info.type == CommonVArrayInfo::Type::Single) {
+ return GVArray::ForSingle(this->type(), slice.size(), info.data);
+ }
+ /* Need to check for ownership, because otherwise the referenced data can be destructed when
+ * #this is destructed. */
+ if (info.type == CommonVArrayInfo::Type::Span && !info.may_have_ownership) {
+ return GVArray::ForSpan(GSpan(this->type(), info.data, this->size()).slice(slice));
+ }
return GVArray::For<GVArrayImpl_For_SlicedGVArray>(*this, slice);
}
@@ -752,10 +800,20 @@ void GVMutableArray::set_all(const void *src)
GMutableSpan GVMutableArray::get_internal_span() const
{
BLI_assert(this->is_span());
- const GSpan span = impl_->get_internal_span();
- return GMutableSpan(span.type(), const_cast<void *>(span.data()), span.size());
+ const CommonVArrayInfo info = impl_->common_info();
+ return GMutableSpan(this->type(), const_cast<void *>(info.data), this->size());
}
/** \} */
+CommonVArrayInfo GVArrayImpl_For_GSpan_final::common_info() const
+{
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Span, false, data_);
+}
+
+CommonVArrayInfo GVArrayImpl_For_SingleValueRef_final::common_info() const
+{
+ return CommonVArrayInfo(CommonVArrayInfo::Type::Single, false, value_);
+}
+
} // namespace blender
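The pattern that replaces the removed is_span()/is_single() virtual calls, as a sketch (hypothetical helper; it assumes GVArrayCommon::get() and the pointer/size Span constructor from the existing API):

/* Sum a float virtual array, picking the fastest access path from common_info(). */
static float sum_floats_sketch(const blender::GVArray &varray)
{
  using namespace blender;
  BLI_assert(varray.type().is<float>());
  const CommonVArrayInfo info = varray.common_info();
  float sum = 0.0f;
  if (info.type == CommonVArrayInfo::Type::Span) {
    /* Contiguous data: iterate over it directly. */
    const Span<float> span(static_cast<const float *>(info.data), varray.size());
    for (const float value : span) {
      sum += value;
    }
  }
  else if (info.type == CommonVArrayInfo::Type::Single) {
    /* Every element has the same value. */
    sum = *static_cast<const float *>(info.data) * float(varray.size());
  }
  else {
    /* Type::Any: fall back to element-wise virtual access. */
    for (const int64_t i : IndexRange(varray.size())) {
      float value;
      varray.get(i, &value);
      sum += value;
    }
  }
  return sum;
}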
diff --git a/source/blender/blenlib/intern/index_mask.cc b/source/blender/blenlib/intern/index_mask.cc
index 1e301bc5fb9..e9af183d60d 100644
--- a/source/blender/blenlib/intern/index_mask.cc
+++ b/source/blender/blenlib/intern/index_mask.cc
@@ -128,7 +128,9 @@ Vector<IndexRange> IndexMask::extract_ranges_invert(const IndexRange full_range,
} // namespace blender
-namespace blender::index_mask_ops::detail {
+namespace blender::index_mask_ops {
+
+namespace detail {
IndexMask find_indices_based_on_predicate__merge(
IndexMask indices_to_check,
@@ -140,6 +142,7 @@ IndexMask find_indices_based_on_predicate__merge(
int64_t result_mask_size = 0;
for (Vector<Vector<int64_t>> &local_sub_masks : sub_masks) {
for (Vector<int64_t> &sub_mask : local_sub_masks) {
+ BLI_assert(!sub_mask.is_empty());
all_vectors.append(&sub_mask);
result_mask_size += sub_mask.size();
}
@@ -193,4 +196,49 @@ IndexMask find_indices_based_on_predicate__merge(
return r_indices.as_span();
}
-} // namespace blender::index_mask_ops::detail
+} // namespace detail
+
+IndexMask find_indices_from_virtual_array(const IndexMask indices_to_check,
+ const VArray<bool> &virtual_array,
+ const int64_t parallel_grain_size,
+ Vector<int64_t> &r_indices)
+{
+ if (virtual_array.is_single()) {
+ return virtual_array.get_internal_single() ? indices_to_check : IndexMask(0);
+ }
+ if (virtual_array.is_span()) {
+ const Span<bool> span = virtual_array.get_internal_span();
+ return find_indices_based_on_predicate(
+ indices_to_check, 4096, r_indices, [&](const int64_t i) { return span[i]; });
+ }
+
+ threading::EnumerableThreadSpecific<Vector<bool>> materialize_buffers;
+ threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> sub_masks;
+
+ threading::parallel_for(
+ indices_to_check.index_range(), parallel_grain_size, [&](const IndexRange range) {
+ const IndexMask sliced_mask = indices_to_check.slice(range);
+
+ /* To avoid virtual function call overhead from accessing the virtual array,
+ * materialize the necessary indices for this chunk into a reused buffer. */
+ Vector<bool> &buffer = materialize_buffers.local();
+ buffer.reinitialize(sliced_mask.size());
+ virtual_array.materialize_compressed(sliced_mask, buffer);
+
+ Vector<int64_t> masked_indices;
+ sliced_mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ if (buffer[i]) {
+ masked_indices.append(best_mask[i]);
+ }
+ }
+ });
+ if (!masked_indices.is_empty()) {
+ sub_masks.local().append(std::move(masked_indices));
+ }
+ });
+
+ return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices);
+}
+
+} // namespace blender::index_mask_ops
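A usage sketch for the new find_indices_from_virtual_array() (hypothetical caller that turns a boolean selection attribute into an IndexMask; the grain size of 2048 is an arbitrary example value):

/* Build a mask of all indices where the (possibly virtual) selection is true. */
static blender::IndexMask selection_to_mask_sketch(const blender::VArray<bool> &selection,
                                                   blender::Vector<int64_t> &r_indices)
{
  using namespace blender;
  return index_mask_ops::find_indices_from_virtual_array(
      IndexMask(selection.size()), selection, 2048, r_indices);
}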
diff --git a/source/blender/blenlib/intern/kdtree_impl.h b/source/blender/blenlib/intern/kdtree_impl.h
index d9ae826093c..6614f1bf964 100644
--- a/source/blender/blenlib/intern/kdtree_impl.h
+++ b/source/blender/blenlib/intern/kdtree_impl.h
@@ -927,6 +927,14 @@ int BLI_kdtree_nd_(calc_duplicates_fast)(const KDTree *tree,
/** \name BLI_kdtree_3d_deduplicate
* \{ */
+static int kdtree_cmp_bool(const bool a, const bool b)
+{
+ if (a == b) {
+ return 0;
+ }
+ return b ? -1 : 1;
+}
+
static int kdtree_node_cmp_deduplicate(const void *n0_p, const void *n1_p)
{
const KDTreeNode *n0 = n0_p;
@@ -939,17 +947,16 @@ static int kdtree_node_cmp_deduplicate(const void *n0_p, const void *n1_p)
return 1;
}
}
- /* Sort by pointer so the first added will be used.
- * assignment below ignores const correctness,
- * however the values aren't used for sorting and are to be discarded. */
- if (n0 < n1) {
- ((KDTreeNode *)n1)->d = KD_DIMS; /* tag invalid */
- return -1;
- }
- else {
- ((KDTreeNode *)n0)->d = KD_DIMS; /* tag invalid */
- return 1;
+
+ if (n0->d != KD_DIMS && n1->d != KD_DIMS) {
+ /* Two nodes share identical `co`
+ * Both are still valid.
+ * Cast away `const` and tag one of them as invalid. */
+ ((KDTreeNode *)n1)->d = KD_DIMS;
}
+
+ /* Keep sorting until each unique value has one and only one valid node. */
+ return kdtree_cmp_bool(n0->d == KD_DIMS, n1->d == KD_DIMS);
}
/**
diff --git a/source/blender/blenlib/intern/length_parameterize.cc b/source/blender/blenlib/intern/length_parameterize.cc
index 7c0fc860b53..06cca281510 100644
--- a/source/blender/blenlib/intern/length_parameterize.cc
+++ b/source/blender/blenlib/intern/length_parameterize.cc
@@ -1,144 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_length_parameterize.hh"
+#include "BLI_task.hh"
namespace blender::length_parameterize {
-void create_uniform_samples(const Span<float> lengths,
- const bool cyclic,
- MutableSpan<int> indices,
- MutableSpan<float> factors)
+void sample_uniform(const Span<float> lengths,
+ const bool include_last_point,
+ MutableSpan<int> r_segment_indices,
+ MutableSpan<float> r_factors)
{
- const int count = indices.size();
+ const int count = r_segment_indices.size();
BLI_assert(count > 0);
BLI_assert(lengths.size() >= 1);
BLI_assert(std::is_sorted(lengths.begin(), lengths.end()));
- const int segments_num = lengths.size();
- const int points_num = cyclic ? segments_num : segments_num + 1;
- indices.first() = 0;
- factors.first() = 0.0f;
if (count == 1) {
+ r_segment_indices[0] = 0;
+ r_factors[0] = 0.0f;
return;
}
-
const float total_length = lengths.last();
- if (total_length == 0.0f) {
- indices.fill(0);
- factors.fill(0.0f);
- return;
- }
-
- const float step_length = total_length / (count - (cyclic ? 0 : 1));
- const float step_length_inv = 1.0f / step_length;
-
- int i_dst = 1;
- /* Store the length at the previous point in a variable so it can start out at zero
- * (the lengths array doesn't contain 0 for the first point). */
- float prev_length = 0.0f;
- for (const int i_src : IndexRange(points_num - 1)) {
- const float next_length = lengths[i_src];
- const float segment_length = next_length - prev_length;
- if (segment_length == 0.0f) {
- continue;
- }
- /* Add every sample that fits in this segment. */
- const float segment_length_inv = 1.0f / segment_length;
- const int segment_samples_num = std::ceil(next_length * step_length_inv - i_dst);
- indices.slice(i_dst, segment_samples_num).fill(i_src);
-
- for (const int i : factors.index_range().slice(i_dst, segment_samples_num)) {
- const float length_in_segment = step_length * i - prev_length;
- factors[i] = length_in_segment * segment_length_inv;
- }
-
- i_dst += segment_samples_num;
-
- prev_length = next_length;
- }
-
- /* Add the samples on the last cyclic segment if necessary, and also the samples
- * that weren't created in the previous loop due to floating point inaccuracy. */
- if (cyclic && lengths.size() > 1) {
- indices.drop_front(i_dst).fill(points_num - 1);
- const float segment_length = lengths.last() - lengths.last(1);
- if (segment_length == 0.0f) {
- return;
- }
- const float segment_length_inv = 1.0f / segment_length;
- for (const int i : indices.index_range().drop_front(i_dst)) {
- const float length_in_segment = step_length * i - prev_length;
- factors[i] = length_in_segment * segment_length_inv;
+ const float step_length = total_length / (count - include_last_point);
+ threading::parallel_for(IndexRange(count), 512, [&](const IndexRange range) {
+ SampleSegmentHint hint;
+ for (const int i : range) {
+ /* Use minimum to avoid issues with floating point accuracy. */
+ const float sample_length = std::min(total_length, i * step_length);
+ sample_at_length(lengths, sample_length, r_segment_indices[i], r_factors[i], &hint);
}
- }
- else {
- indices.drop_front(i_dst).fill(points_num - 2);
- factors.drop_front(i_dst).fill(1.0f);
- }
+ });
}
-void create_samples_from_sorted_lengths(const Span<float> lengths,
- const Span<float> sample_lengths,
- const bool cyclic,
- MutableSpan<int> indices,
- MutableSpan<float> factors)
+void sample_at_lengths(const Span<float> accumulated_segment_lengths,
+ const Span<float> sample_lengths,
+ MutableSpan<int> r_segment_indices,
+ MutableSpan<float> r_factors)
{
- BLI_assert(std::is_sorted(lengths.begin(), lengths.end()));
+ BLI_assert(
+ std::is_sorted(accumulated_segment_lengths.begin(), accumulated_segment_lengths.end()));
BLI_assert(std::is_sorted(sample_lengths.begin(), sample_lengths.end()));
- BLI_assert(indices.size() == sample_lengths.size());
- BLI_assert(indices.size() == factors.size());
- const int segments_num = lengths.size();
- const int points_num = cyclic ? segments_num : segments_num + 1;
- const float total_length = lengths.last();
- if (total_length == 0.0f) {
- indices.fill(0);
- factors.fill(0.0f);
- return;
- }
+ const int count = sample_lengths.size();
+ BLI_assert(count == r_segment_indices.size());
+ BLI_assert(count == r_factors.size());
- int i_dst = 0;
- /* Store the length at the previous point in a variable so it can start out at zero
- * (the lengths array doesn't contain 0 for the first point). */
- float prev_length = 0.0f;
- for (const int i_src : IndexRange(points_num - 1)) {
- const float next_length = lengths[i_src];
- const float segment_length = next_length - prev_length;
- if (segment_length == 0.0f) {
- continue;
- }
- /* Add every sample that fits in this segment. It's also necessary to check if the last sample
- * has been reached, since there is no upper bound on the number of samples in each segment. */
- const float segment_length_inv = 1.0f / segment_length;
- while (i_dst < sample_lengths.size() && sample_lengths[i_dst] < next_length) {
- const float length_in_segment = sample_lengths[i_dst] - prev_length;
- const float factor = length_in_segment * segment_length_inv;
- indices[i_dst] = i_src;
- factors[i_dst] = factor;
- i_dst++;
+ threading::parallel_for(IndexRange(count), 512, [&](const IndexRange range) {
+ SampleSegmentHint hint;
+ for (const int i : range) {
+ const float sample_length = sample_lengths[i];
+ sample_at_length(
+ accumulated_segment_lengths, sample_length, r_segment_indices[i], r_factors[i], &hint);
}
-
- prev_length = next_length;
- }
-
- /* Add the samples on the last cyclic segment if necessary, and also the samples
- * that weren't created in the previous loop due to floating point inaccuracy. */
- if (cyclic && lengths.size() > 1) {
- const float segment_length = lengths.last() - lengths.last(1);
- while (sample_lengths[i_dst] < total_length) {
- const float length_in_segment = sample_lengths[i_dst] - prev_length;
- const float factor = length_in_segment / segment_length;
- indices[i_dst] = points_num - 1;
- factors[i_dst] = factor;
- i_dst++;
- }
- indices.drop_front(i_dst).fill(points_num - 1);
- factors.drop_front(i_dst).fill(1.0f);
- }
- else {
- indices.drop_front(i_dst).fill(points_num - 2);
- factors.drop_front(i_dst).fill(1.0f);
- }
+ });
}
} // namespace blender::length_parameterize
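A usage sketch of the reworked API (hypothetical resample helper; math::distance and math::interpolate are assumed to come from the existing BLI math headers):

/* Resample a poly-line to `r_samples.size()` uniformly spaced points. */
static void resample_polyline_sketch(const blender::Span<blender::float3> positions,
                                     blender::MutableSpan<blender::float3> r_samples)
{
  using namespace blender;
  /* Accumulated length at the end of each segment (monotonically increasing). */
  Array<float> lengths(positions.size() - 1);
  float total = 0.0f;
  for (const int64_t i : lengths.index_range()) {
    total += math::distance(positions[i], positions[i + 1]);
    lengths[i] = total;
  }

  const int64_t count = r_samples.size();
  Array<int> segment_indices(count);
  Array<float> factors(count);
  /* `include_last_point = true` places the final sample exactly at the end point. */
  length_parameterize::sample_uniform(lengths, true, segment_indices, factors);

  for (const int64_t i : IndexRange(count)) {
    const int segment = segment_indices[i];
    r_samples[i] = math::interpolate(positions[segment], positions[segment + 1], factors[i]);
  }
}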
diff --git a/source/blender/blenlib/intern/math_base_inline.c b/source/blender/blenlib/intern/math_base_inline.c
index 4a213f5fe74..fb71e84c23e 100644
--- a/source/blender/blenlib/intern/math_base_inline.c
+++ b/source/blender/blenlib/intern/math_base_inline.c
@@ -370,6 +370,24 @@ MINLINE uint divide_ceil_u(uint a, uint b)
return (a + b - 1) / b;
}
+MINLINE uint64_t divide_ceil_ul(uint64_t a, uint64_t b)
+{
+ return (a + b - 1) / b;
+}
+
+/**
+ * Returns \a a if it is a multiple of \a b, or the next multiple of \a b after \a a.
+ */
+MINLINE uint ceil_to_multiple_u(uint a, uint b)
+{
+ return divide_ceil_u(a, b) * b;
+}
+
+MINLINE uint64_t ceil_to_multiple_ul(uint64_t a, uint64_t b)
+{
+ return divide_ceil_ul(a, b) * b;
+}
+
MINLINE int mod_i(int i, int n)
{
return (i % n + n) % n;
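A few worked values for the new rounding helpers (hypothetical sanity-check snippet):

static void ceil_helpers_examples(void)
{
  BLI_assert(divide_ceil_u(10, 4) == 3);               /* 10 / 4 rounded up. */
  BLI_assert(ceil_to_multiple_u(10, 4) == 12);         /* Next multiple of 4 at or above 10. */
  BLI_assert(ceil_to_multiple_ul(2048, 1024) == 2048); /* Already a multiple: unchanged. */
  BLI_assert(divide_ceil_ul(1, 1024) == 1);
}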
diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c
index e7ccdeab80a..773aac95193 100644
--- a/source/blender/blenlib/intern/math_geom.c
+++ b/source/blender/blenlib/intern/math_geom.c
@@ -1667,8 +1667,8 @@ bool isect_ray_tri_v3(const float ray_origin[3],
float *r_lambda,
float r_uv[2])
{
- /* NOTE(campbell): these values were 0.000001 in 2.4x but for projection snapping on
- * a human head (1BU == 1m), subsurf level 2, this gave many errors. */
+ /* NOTE(@campbellbarton): these values were 0.000001 in 2.4x but for projection snapping on
+ * a human head `(1BU == 1m)`, subdivision-surface level 2, this gave many errors. */
const float epsilon = 0.00000001f;
float p[3], s[3], e1[3], e2[3], q[3];
float a, f, u, v;
@@ -3773,7 +3773,7 @@ void barycentric_weights_v2_quad(const float v1[2],
const float co[2],
float w[4])
{
- /* NOTE(campbell): fabsf() here is not needed for convex quads
+ /* NOTE(@campbellbarton): fabsf() here is not needed for convex quads
* (and not used in #interp_weights_poly_v2).
* But in the case of concave/bow-tie quads for the mask rasterizer it
* gives unreliable results without adding `absf()`. If this becomes an issue for more general
diff --git a/source/blender/blenlib/intern/math_matrix.c b/source/blender/blenlib/intern/math_matrix.c
index ce9abc36cad..221ae84e74d 100644
--- a/source/blender/blenlib/intern/math_matrix.c
+++ b/source/blender/blenlib/intern/math_matrix.c
@@ -113,7 +113,6 @@ void copy_m4_m3(float m1[4][4], const float m2[3][3]) /* no clear */
m1[2][1] = m2[2][1];
m1[2][2] = m2[2][2];
- /* Reevan's Bugfix */
m1[0][3] = 0.0f;
m1[1][3] = 0.0f;
m1[2][3] = 0.0f;
@@ -787,14 +786,14 @@ void mul_m2_v2(const float mat[2][2], float vec[2])
mul_v2_m2v2(vec, mat, vec);
}
-void mul_mat3_m4_v3(const float M[4][4], float r[3])
+void mul_mat3_m4_v3(const float mat[4][4], float r[3])
{
const float x = r[0];
const float y = r[1];
- r[0] = x * M[0][0] + y * M[1][0] + M[2][0] * r[2];
- r[1] = x * M[0][1] + y * M[1][1] + M[2][1] * r[2];
- r[2] = x * M[0][2] + y * M[1][2] + M[2][2] * r[2];
+ r[0] = x * mat[0][0] + y * mat[1][0] + mat[2][0] * r[2];
+ r[1] = x * mat[0][1] + y * mat[1][1] + mat[2][1] * r[2];
+ r[2] = x * mat[0][2] + y * mat[1][2] + mat[2][2] * r[2];
}
void mul_v3_mat3_m4v3(float r[3], const float mat[4][4], const float vec[3])
@@ -1116,16 +1115,32 @@ double determinant_m3_array_db(const double m[3][3])
m[2][0] * (m[0][1] * m[1][2] - m[0][2] * m[1][1]));
}
-bool invert_m3_ex(float m[3][3], const float epsilon)
+bool invert_m2_m2(float inverse[2][2], const float mat[2][2])
{
- float tmp[3][3];
- const bool success = invert_m3_m3_ex(tmp, m, epsilon);
+ adjoint_m2_m2(inverse, mat);
+ float det = determinant_m2(mat[0][0], mat[1][0], mat[0][1], mat[1][1]);
- copy_m3_m3(m, tmp);
+ bool success = (det != 0.0f);
+ if (success) {
+ inverse[0][0] /= det;
+ inverse[1][0] /= det;
+ inverse[0][1] /= det;
+ inverse[1][1] /= det;
+ }
+
+ return success;
+}
+
+bool invert_m3_ex(float mat[3][3], const float epsilon)
+{
+ float mat_tmp[3][3];
+ const bool success = invert_m3_m3_ex(mat_tmp, mat, epsilon);
+
+ copy_m3_m3(mat, mat_tmp);
return success;
}
-bool invert_m3_m3_ex(float m1[3][3], const float m2[3][3], const float epsilon)
+bool invert_m3_m3_ex(float inverse[3][3], const float mat[3][3], const float epsilon)
{
float det;
int a, b;
@@ -1134,10 +1149,10 @@ bool invert_m3_m3_ex(float m1[3][3], const float m2[3][3], const float epsilon)
BLI_assert(epsilon >= 0.0f);
/* calc adjoint */
- adjoint_m3_m3(m1, m2);
+ adjoint_m3_m3(inverse, mat);
/* then determinant old matrix! */
- det = determinant_m3_array(m2);
+ det = determinant_m3_array(mat);
success = (fabsf(det) > epsilon);
@@ -1145,33 +1160,33 @@ bool invert_m3_m3_ex(float m1[3][3], const float m2[3][3], const float epsilon)
det = 1.0f / det;
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
- m1[a][b] *= det;
+ inverse[a][b] *= det;
}
}
}
return success;
}
-bool invert_m3(float m[3][3])
+bool invert_m3(float mat[3][3])
{
- float tmp[3][3];
- const bool success = invert_m3_m3(tmp, m);
+ float mat_tmp[3][3];
+ const bool success = invert_m3_m3(mat_tmp, mat);
- copy_m3_m3(m, tmp);
+ copy_m3_m3(mat, mat_tmp);
return success;
}
-bool invert_m3_m3(float m1[3][3], const float m2[3][3])
+bool invert_m3_m3(float inverse[3][3], const float mat[3][3])
{
float det;
int a, b;
bool success;
/* calc adjoint */
- adjoint_m3_m3(m1, m2);
+ adjoint_m3_m3(inverse, mat);
/* then determinant old matrix! */
- det = determinant_m3_array(m2);
+ det = determinant_m3_array(mat);
success = (det != 0.0f);
@@ -1179,7 +1194,7 @@ bool invert_m3_m3(float m1[3][3], const float m2[3][3])
det = 1.0f / det;
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
- m1[a][b] *= det;
+ inverse[a][b] *= det;
}
}
}
@@ -1187,12 +1202,12 @@ bool invert_m3_m3(float m1[3][3], const float m2[3][3])
return success;
}
-bool invert_m4(float m[4][4])
+bool invert_m4(float mat[4][4])
{
- float tmp[4][4];
- const bool success = invert_m4_m4(tmp, m);
+ float mat_tmp[4][4];
+ const bool success = invert_m4_m4(mat_tmp, mat);
- copy_m4_m4(m, tmp);
+ copy_m4_m4(mat, mat_tmp);
return success;
}
@@ -2112,6 +2127,12 @@ void size_to_mat4(float R[4][4], const float size[3])
R[3][3] = 1.0f;
}
+void mat3_to_size_2d(float size[2], const float M[3][3])
+{
+ size[0] = len_v2(M[0]);
+ size[1] = len_v2(M[1]);
+}
+
void mat3_to_size(float size[3], const float M[3][3])
{
size[0] = len_v3(M[0]);
@@ -2175,11 +2196,11 @@ float mat4_to_scale(const float mat[4][4])
return len_v3(unit_vec);
}
-float mat4_to_xy_scale(const float M[4][4])
+float mat4_to_xy_scale(const float mat[4][4])
{
/* unit length vector in xy plane */
float unit_vec[3] = {(float)M_SQRT1_2, (float)M_SQRT1_2, 0.0f};
- mul_mat3_m4_v3(M, unit_vec);
+ mul_mat3_m4_v3(mat, unit_vec);
return len_v3(unit_vec);
}
@@ -2224,12 +2245,6 @@ void mat4_to_loc_quat(float loc[3], float quat[4], const float wmat[4][4])
copy_m3_m4(mat3, wmat);
normalize_m3_m3(mat3_n, mat3);
- /* So scale doesn't interfere with rotation T24291. */
- /* FIXME: this is a workaround for negative matrix not working for rotation conversion. */
- if (is_negative_m3(mat3)) {
- negate_m3(mat3_n);
- }
-
mat3_normalized_to_quat(quat, mat3_n);
copy_v3_v3(loc, wmat[3]);
}
@@ -2238,7 +2253,7 @@ void mat4_decompose(float loc[3], float quat[4], float size[3], const float wmat
{
float rot[3][3];
mat4_to_loc_rot_size(loc, rot, size, wmat);
- mat3_normalized_to_quat(quat, rot);
+ mat3_normalized_to_quat_fast(quat, rot);
}
/**
@@ -2377,8 +2392,8 @@ void blend_m3_m3m3(float out[3][3],
mat3_to_rot_size(drot, dscale, dst);
mat3_to_rot_size(srot, sscale, src);
- mat3_normalized_to_quat(dquat, drot);
- mat3_normalized_to_quat(squat, srot);
+ mat3_normalized_to_quat_fast(dquat, drot);
+ mat3_normalized_to_quat_fast(squat, srot);
/* do blending */
interp_qt_qtqt(fquat, dquat, squat, srcweight);
@@ -2403,8 +2418,8 @@ void blend_m4_m4m4(float out[4][4],
mat4_to_loc_rot_size(dloc, drot, dscale, dst);
mat4_to_loc_rot_size(sloc, srot, sscale, src);
- mat3_normalized_to_quat(dquat, drot);
- mat3_normalized_to_quat(squat, srot);
+ mat3_normalized_to_quat_fast(dquat, drot);
+ mat3_normalized_to_quat_fast(squat, srot);
/* do blending */
interp_v3_v3v3(floc, dloc, sloc, srcweight);
@@ -2440,11 +2455,11 @@ void interp_m3_m3m3(float R[3][3], const float A[3][3], const float B[3][3], con
* Note that a flip of two axes is just a rotation of 180 degrees around the third axis, and
* three flipped axes are just an 180 degree rotation + a single axis flip. It is thus sufficient
* to solve this problem for single axis flips. */
- if (determinant_m3_array(U_A) < 0) {
+ if (is_negative_m3(U_A)) {
mul_m3_fl(U_A, -1.0f);
mul_m3_fl(P_A, -1.0f);
}
- if (determinant_m3_array(U_B) < 0) {
+ if (is_negative_m3(U_B)) {
mul_m3_fl(U_B, -1.0f);
mul_m3_fl(P_B, -1.0f);
}
@@ -2485,16 +2500,14 @@ void interp_m4_m4m4(float R[4][4], const float A[4][4], const float B[4][4], con
bool is_negative_m3(const float mat[3][3])
{
- float vec[3];
- cross_v3_v3v3(vec, mat[0], mat[1]);
- return (dot_v3v3(vec, mat[2]) < 0.0f);
+ return determinant_m3_array(mat) < 0.0f;
}
bool is_negative_m4(const float mat[4][4])
{
- float vec[3];
- cross_v3_v3v3(vec, mat[0], mat[1]);
- return (dot_v3v3(vec, mat[2]) < 0.0f);
+ /* Don't use #determinant_m4 as only the 3x3 components are needed
+ * when the matrix is used as a transformation to represent location/scale/rotation. */
+ return determinant_m4_mat3_array(mat) < 0.0f;
}
bool is_zero_m3(const float mat[3][3])
@@ -2552,11 +2565,8 @@ void loc_eul_size_to_mat4(float R[4][4],
R[3][2] = loc[2];
}
-void loc_eulO_size_to_mat4(float R[4][4],
- const float loc[3],
- const float eul[3],
- const float size[3],
- const short rotOrder)
+void loc_eulO_size_to_mat4(
+ float R[4][4], const float loc[3], const float eul[3], const float size[3], const short order)
{
float rmat[3][3], smat[3][3], tmat[3][3];
@@ -2564,7 +2574,7 @@ void loc_eulO_size_to_mat4(float R[4][4],
unit_m4(R);
/* Make rotation + scaling part. */
- eulO_to_mat3(rmat, eul, rotOrder);
+ eulO_to_mat3(rmat, eul, order);
size_to_mat3(smat, size);
mul_m3_m3m3(tmat, rmat, smat);
@@ -3066,14 +3076,14 @@ void svd_m4(float U[4][4], float s[4], float V[4][4], float A_[4][4])
}
}
-void pseudoinverse_m4_m4(float Ainv[4][4], const float A_[4][4], float epsilon)
+void pseudoinverse_m4_m4(float inverse[4][4], const float mat[4][4], float epsilon)
{
/* compute Moore-Penrose pseudo inverse of matrix, singular values
* below epsilon are ignored for stability (truncated SVD) */
float A[4][4], V[4][4], W[4], Wm[4][4], U[4][4];
int i;
- transpose_m4_m4(A, A_);
+ transpose_m4_m4(A, mat);
svd_m4(V, W, U, A);
transpose_m4(U);
transpose_m4(V);
@@ -3085,18 +3095,18 @@ void pseudoinverse_m4_m4(float Ainv[4][4], const float A_[4][4], float epsilon)
transpose_m4(V);
- mul_m4_series(Ainv, U, Wm, V);
+ mul_m4_series(inverse, U, Wm, V);
}
-void pseudoinverse_m3_m3(float Ainv[3][3], const float A[3][3], float epsilon)
+void pseudoinverse_m3_m3(float inverse[3][3], const float mat[3][3], float epsilon)
{
/* try regular inverse when possible, otherwise fall back to slow svd */
- if (!invert_m3_m3(Ainv, A)) {
- float tmp[4][4], tmpinv[4][4];
+ if (!invert_m3_m3(inverse, mat)) {
+ float mat_tmp[4][4], tmpinv[4][4];
- copy_m4_m3(tmp, A);
- pseudoinverse_m4_m4(tmpinv, tmp, epsilon);
- copy_m3_m4(Ainv, tmpinv);
+ copy_m4_m3(mat_tmp, mat);
+ pseudoinverse_m4_m4(tmpinv, mat_tmp, epsilon);
+ copy_m3_m4(inverse, tmpinv);
}
}
@@ -3106,22 +3116,22 @@ bool has_zero_axis_m4(const float matrix[4][4])
len_squared_v3(matrix[2]) < FLT_EPSILON;
}
-void invert_m4_m4_safe(float Ainv[4][4], const float A[4][4])
+void invert_m4_m4_safe(float inverse[4][4], const float mat[4][4])
{
- if (!invert_m4_m4(Ainv, A)) {
- float Atemp[4][4];
+ if (!invert_m4_m4(inverse, mat)) {
+ float mat_tmp[4][4];
- copy_m4_m4(Atemp, A);
+ copy_m4_m4(mat_tmp, mat);
/* Matrix is degenerate (e.g. 0 scale on some axis), ideally we should
* never be in this situation, but try to invert it anyway with tweak.
*/
- Atemp[0][0] += 1e-8f;
- Atemp[1][1] += 1e-8f;
- Atemp[2][2] += 1e-8f;
+ mat_tmp[0][0] += 1e-8f;
+ mat_tmp[1][1] += 1e-8f;
+ mat_tmp[2][2] += 1e-8f;
- if (!invert_m4_m4(Ainv, Atemp)) {
- unit_m4(Ainv);
+ if (!invert_m4_m4(inverse, mat_tmp)) {
+ unit_m4(inverse);
}
}
}
@@ -3141,24 +3151,24 @@ void invert_m4_m4_safe(float Ainv[4][4], const float A[4][4])
* where we want to specify the length of the degenerate axes.
* \{ */
-void invert_m4_m4_safe_ortho(float Ainv[4][4], const float A[4][4])
+void invert_m4_m4_safe_ortho(float inverse[4][4], const float mat[4][4])
{
- if (UNLIKELY(!invert_m4_m4(Ainv, A))) {
- float Atemp[4][4];
- copy_m4_m4(Atemp, A);
- if (UNLIKELY(!(orthogonalize_m4_zero_axes(Atemp, 1.0f) && invert_m4_m4(Ainv, Atemp)))) {
- unit_m4(Ainv);
+ if (UNLIKELY(!invert_m4_m4(inverse, mat))) {
+ float mat_tmp[4][4];
+ copy_m4_m4(mat_tmp, mat);
+ if (UNLIKELY(!(orthogonalize_m4_zero_axes(mat_tmp, 1.0f) && invert_m4_m4(inverse, mat_tmp)))) {
+ unit_m4(inverse);
}
}
}
-void invert_m3_m3_safe_ortho(float Ainv[3][3], const float A[3][3])
+void invert_m3_m3_safe_ortho(float inverse[3][3], const float mat[3][3])
{
- if (UNLIKELY(!invert_m3_m3(Ainv, A))) {
- float Atemp[3][3];
- copy_m3_m3(Atemp, A);
- if (UNLIKELY(!(orthogonalize_m3_zero_axes(Atemp, 1.0f) && invert_m3_m3(Ainv, Atemp)))) {
- unit_m3(Ainv);
+ if (UNLIKELY(!invert_m3_m3(inverse, mat))) {
+ float mat_tmp[3][3];
+ copy_m3_m3(mat_tmp, mat);
+ if (UNLIKELY(!(orthogonalize_m3_zero_axes(mat_tmp, 1.0f) && invert_m3_m3(inverse, mat_tmp)))) {
+ unit_m3(inverse);
}
}
}
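A small sketch combining the new 2D helpers added in this file (hypothetical; how the 2x2 sub-matrix is extracted from the transform is up to the caller):

/* Report the X/Y scale of a 3x3 transform and invert its 2x2 part, if possible. */
static bool analyze_2d_transform_sketch(float r_size[2],
                                        float r_inverse[2][2],
                                        const float mat[3][3])
{
  mat3_to_size_2d(r_size, mat); /* Lengths of the first two basis vectors. */

  const float m2[2][2] = {{mat[0][0], mat[0][1]}, {mat[1][0], mat[1][1]}};
  /* Fails (returns false) when the determinant is exactly zero. */
  return invert_m2_m2(r_inverse, m2);
}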
diff --git a/source/blender/blenlib/intern/math_rotation.c b/source/blender/blenlib/intern/math_rotation.c
index 92223bdf1d5..ae068e3fb19 100644
--- a/source/blender/blenlib/intern/math_rotation.c
+++ b/source/blender/blenlib/intern/math_rotation.c
@@ -176,7 +176,7 @@ void quat_to_compatible_quat(float q[4], const float a[4], const float old[4])
}
}
-/* skip error check, currently only needed by mat3_to_quat_is_ok */
+/* Skip error check, currently only needed by #mat3_to_quat_legacy. */
static void quat_to_mat3_no_error(float m[3][3], const float q[4])
{
double q0, q1, q2, q3, qda, qdb, qdc, qaa, qab, qac, qbb, qbc, qcc;
@@ -269,9 +269,11 @@ void quat_to_mat4(float m[4][4], const float q[4])
m[3][3] = 1.0f;
}
-void mat3_normalized_to_quat(float q[4], const float mat[3][3])
+void mat3_normalized_to_quat_fast(float q[4], const float mat[3][3])
{
BLI_ASSERT_UNIT_M3(mat);
+ /* Caller must ensure matrices aren't negative for valid results, see: T24291, T94231. */
+ BLI_assert(!is_negative_m3(mat));
/* Check the trace of the matrix - bad precision if close to -1. */
const float trace = mat[0][0] + mat[1][1] + mat[2][2];
@@ -332,34 +334,54 @@ void mat3_normalized_to_quat(float q[4], const float mat[3][3])
normalize_qt(q);
}
-void mat3_to_quat(float q[4], const float m[3][3])
-{
- float unit_mat[3][3];
- /* work on a copy */
- /* this is needed AND a 'normalize_qt' in the end */
- normalize_m3_m3(unit_mat, m);
- mat3_normalized_to_quat(q, unit_mat);
+static void mat3_normalized_to_quat_with_checks(float q[4], float mat[3][3])
+{
+ const float det = determinant_m3_array(mat);
+ if (UNLIKELY(!isfinite(det))) {
+ unit_m3(mat);
+ }
+ else if (UNLIKELY(det < 0.0f)) {
+ negate_m3(mat);
+ }
+ mat3_normalized_to_quat_fast(q, mat);
}
-void mat4_normalized_to_quat(float q[4], const float m[4][4])
+void mat3_normalized_to_quat(float q[4], const float mat[3][3])
{
- float mat3[3][3];
+ float unit_mat_abs[3][3];
+ copy_m3_m3(unit_mat_abs, mat);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
+}
- copy_m3_m4(mat3, m);
- mat3_normalized_to_quat(q, mat3);
+void mat3_to_quat(float q[4], const float mat[3][3])
+{
+ float unit_mat_abs[3][3];
+ normalize_m3_m3(unit_mat_abs, mat);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
}
-void mat4_to_quat(float q[4], const float m[4][4])
+void mat4_normalized_to_quat(float q[4], const float mat[4][4])
{
- float mat3[3][3];
+ float unit_mat_abs[3][3];
+ copy_m3_m4(unit_mat_abs, mat);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
+}
- copy_m3_m4(mat3, m);
- mat3_to_quat(q, mat3);
+void mat4_to_quat(float q[4], const float mat[4][4])
+{
+ float unit_mat_abs[3][3];
+ copy_m3_m4(unit_mat_abs, mat);
+ normalize_m3(unit_mat_abs);
+ mat3_normalized_to_quat_with_checks(q, unit_mat_abs);
}
-void mat3_to_quat_is_ok(float q[4], const float wmat[3][3])
+void mat3_to_quat_legacy(float q[4], const float wmat[3][3])
{
+ /* Legacy version of #mat3_to_quat which has slightly different behavior.
+ * Keep for particle-system & boids since replacing this will make subtle changes
+ * that impact hair in existing files. See: D15772. */
+
float mat[3][3], matr[3][3], matn[3][3], q1[4], q2[4], angle, si, co, nor[3];
/* work on a copy */
@@ -498,7 +520,10 @@ void rotation_between_quats_to_quat(float q[4], const float q1[4], const float q
mul_qt_qtqt(q, tquat, q2);
}
-float quat_split_swing_and_twist(const float q_in[4], int axis, float r_swing[4], float r_twist[4])
+float quat_split_swing_and_twist(const float q_in[4],
+ const int axis,
+ float r_swing[4],
+ float r_twist[4])
{
BLI_assert(axis >= 0 && axis <= 2);
@@ -915,6 +940,65 @@ float tri_to_quat(float q[4], const float a[3], const float b[3], const float c[
return len;
}
+void sin_cos_from_fraction(int numerator, int denominator, float *r_sin, float *r_cos)
+{
+ /* By default, creating a circle from an integer: calling #sinf & #cosf on the fraction doesn't
+ * create symmetrical values (because floats can't represent Pi exactly).
+ * Resolve this when the rotation is calculated from a fraction by mapping the `numerator`
+ * to lower values so X/Y values for points around a circle are exactly symmetrical, see T87779.
+ *
+ * Multiply both the `numerator` and `denominator` by eight to ensure we can divide the circle
+ * into 8 octants. For each octant, we then use symmetry and negation to bring the `numerator`
+ * closer to the origin where precision is highest.
+ *
+ * Cases 2, 4, 5 and 7, use the trigonometric identity sin(-x) == -sin(x).
+ * Cases 1, 2, 5 and 6, swap the pointers `r_sin` and `r_cos`.
+ */
+ BLI_assert(0 <= numerator);
+ BLI_assert(numerator <= denominator);
+ BLI_assert(denominator > 0);
+
+ numerator *= 8; /* Multiply numerator the same as denominator. */
+ const int octant = numerator / denominator; /* Determine the octant. */
+ denominator *= 8; /* Ensure denominator is a multiple of eight. */
+ float cos_sign = 1.0f; /* Either 1.0f or -1.0f. */
+
+ switch (octant) {
+ case 0:
+ /* Primary octant, nothing to do. */
+ break;
+ case 1:
+ case 2:
+ numerator = (denominator / 4) - numerator;
+ SWAP(float *, r_sin, r_cos);
+ break;
+ case 3:
+ case 4:
+ numerator = (denominator / 2) - numerator;
+ cos_sign = -1.0f;
+ break;
+ case 5:
+ case 6:
+ numerator = numerator - (denominator * 3 / 4);
+ SWAP(float *, r_sin, r_cos);
+ cos_sign = -1.0f;
+ break;
+ case 7:
+ numerator = numerator - denominator;
+ break;
+ default:
+ BLI_assert_unreachable();
+ }
+
+ BLI_assert(-denominator / 4 <= numerator); /* Numerator may be negative. */
+ BLI_assert(numerator <= denominator / 4);
+ BLI_assert(cos_sign == -1.0f || cos_sign == 1.0f);
+
+ const float angle = (float)(2.0 * M_PI) * ((float)numerator / (float)denominator);
+ *r_sin = sinf(angle);
+ *r_cos = cosf(angle) * cos_sign;
+}
+
void print_qt(const char *str, const float q[4])
{
printf("%s: %.3f %.3f %.3f %.3f\n", str, q[0], q[1], q[2], q[3]);
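A usage sketch for sin_cos_from_fraction() (hypothetical circle-point generator, the kind of caller T87779 is about):

/* Generate `n` points on the unit circle with exactly symmetrical X/Y values. */
static void unit_circle_points_sketch(float (*r_points)[2], const int n)
{
  for (int i = 0; i < n; i++) {
    float s, c;
    sin_cos_from_fraction(i, n, &s, &c); /* Requires 0 <= i <= n and n > 0. */
    r_points[i][0] = c;
    r_points[i][1] = s;
  }
}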
@@ -1322,10 +1406,10 @@ void mat4_normalized_to_eul(float eul[3], const float m[4][4])
copy_m3_m4(mat3, m);
mat3_normalized_to_eul(eul, mat3);
}
-void mat4_to_eul(float eul[3], const float m[4][4])
+void mat4_to_eul(float eul[3], const float mat[4][4])
{
float mat3[3][3];
- copy_m3_m4(mat3, m);
+ copy_m3_m4(mat3, mat);
mat3_to_eul(eul, mat3);
}
@@ -1360,7 +1444,7 @@ void eul_to_quat(float quat[4], const float eul[3])
quat[3] = cj * cs - sj * sc;
}
-void rotate_eul(float beul[3], const char axis, const float ang)
+void rotate_eul(float beul[3], const char axis, const float angle)
{
float eul[3], mat1[3][3], mat2[3][3], totmat[3][3];
@@ -1368,13 +1452,13 @@ void rotate_eul(float beul[3], const char axis, const float ang)
eul[0] = eul[1] = eul[2] = 0.0f;
if (axis == 'X') {
- eul[0] = ang;
+ eul[0] = angle;
}
else if (axis == 'Y') {
- eul[1] = ang;
+ eul[1] = angle;
}
else {
- eul[2] = ang;
+ eul[2] = angle;
}
eul_to_mat3(mat1, eul);
@@ -1730,23 +1814,23 @@ void mat3_to_compatible_eulO(float eul[3],
void mat4_normalized_to_compatible_eulO(float eul[3],
const float oldrot[3],
const short order,
- const float m[4][4])
+ const float mat[4][4])
{
float mat3[3][3];
/* for now, we'll just do this the slow way (i.e. copying matrices) */
- copy_m3_m4(mat3, m);
+ copy_m3_m4(mat3, mat);
mat3_normalized_to_compatible_eulO(eul, oldrot, order, mat3);
}
void mat4_to_compatible_eulO(float eul[3],
const float oldrot[3],
const short order,
- const float m[4][4])
+ const float mat[4][4])
{
float mat3[3][3];
/* for now, we'll just do this the slow way (i.e. copying matrices) */
- copy_m3_m4(mat3, m);
+ copy_m3_m4(mat3, mat);
normalize_m3(mat3);
mat3_normalized_to_compatible_eulO(eul, oldrot, order, mat3);
}
@@ -1765,7 +1849,7 @@ void quat_to_compatible_eulO(float eul[3],
/* rotate the given euler by the given angle on the specified axis */
/* NOTE: is this safe to do with different axis orders? */
-void rotate_eulO(float beul[3], const short order, char axis, float ang)
+void rotate_eulO(float beul[3], const short order, const char axis, const float angle)
{
float eul[3], mat1[3][3], mat2[3][3], totmat[3][3];
@@ -1774,13 +1858,13 @@ void rotate_eulO(float beul[3], const short order, char axis, float ang)
zero_v3(eul);
if (axis == 'X') {
- eul[0] = ang;
+ eul[0] = angle;
}
else if (axis == 'Y') {
- eul[1] = ang;
+ eul[1] = angle;
}
else {
- eul[2] = ang;
+ eul[2] = angle;
}
eulO_to_mat3(mat1, eul, order);
diff --git a/source/blender/blenlib/intern/math_rotation.cc b/source/blender/blenlib/intern/math_rotation.cc
index 74300d55954..091e8af85d9 100644
--- a/source/blender/blenlib/intern/math_rotation.cc
+++ b/source/blender/blenlib/intern/math_rotation.cc
@@ -23,4 +23,17 @@ float3 rotate_direction_around_axis(const float3 &direction, const float3 &axis,
return axis_scaled + diff * std::cos(angle) + cross * std::sin(angle);
}
+float3 rotate_around_axis(const float3 &vector,
+ const float3 &center,
+ const float3 &axis,
+ const float angle)
+
+{
+ float3 result = vector - center;
+ float mat[3][3];
+ axis_angle_normalized_to_mat3(mat, axis, angle);
+ mul_m3_v3(mat, result);
+ return result + center;
+}
+
} // namespace blender::math
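A usage sketch of the new rotate_around_axis() (hypothetical caller; the axis must be normalized, as required by axis_angle_normalized_to_mat3):

/* Rotate `point` by `angle` radians around the Z axis passing through `pivot`. */
static blender::float3 rotate_about_pivot_z_sketch(const blender::float3 &point,
                                                   const blender::float3 &pivot,
                                                   const float angle)
{
  using namespace blender;
  return math::rotate_around_axis(point, pivot, float3(0.0f, 0.0f, 1.0f), angle);
}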
diff --git a/source/blender/blenlib/intern/math_vector_inline.c b/source/blender/blenlib/intern/math_vector_inline.c
index 339bfb8f95e..27c17a90f5f 100644
--- a/source/blender/blenlib/intern/math_vector_inline.c
+++ b/source/blender/blenlib/intern/math_vector_inline.c
@@ -316,6 +316,27 @@ MINLINE void swap_v4_v4(float a[4], float b[4])
SWAP(float, a[3], b[3]);
}
+MINLINE void swap_v2_v2_db(double a[2], double b[2])
+{
+ SWAP(double, a[0], b[0]);
+ SWAP(double, a[1], b[1]);
+}
+
+MINLINE void swap_v3_v3_db(double a[3], double b[3])
+{
+ SWAP(double, a[0], b[0]);
+ SWAP(double, a[1], b[1]);
+ SWAP(double, a[2], b[2]);
+}
+
+MINLINE void swap_v4_v4_db(double a[4], double b[4])
+{
+ SWAP(double, a[0], b[0]);
+ SWAP(double, a[1], b[1]);
+ SWAP(double, a[2], b[2]);
+ SWAP(double, a[3], b[3]);
+}
+
/* float args -> vec */
MINLINE void copy_v2_fl2(float v[2], float x, float y)
@@ -613,10 +634,10 @@ MINLINE void mul_v2_v2_cw(float r[2], const float mat[2], const float vec[2])
MINLINE void mul_v2_v2_ccw(float r[2], const float mat[2], const float vec[2])
{
- BLI_assert(r != vec);
-
- r[0] = mat[0] * vec[0] + (-mat[1]) * vec[1];
- r[1] = mat[1] * vec[0] + (+mat[0]) * vec[1];
+ float r0 = mat[0] * vec[0] + (-mat[1]) * vec[1];
+ float r1 = mat[1] * vec[0] + (+mat[0]) * vec[1];
+ r[0] = r0;
+ r[1] = r1;
}
MINLINE float mul_project_m4_v3_zfac(const float mat[4][4], const float co[3])
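
The mul_v2_v2_ccw() change above drops the `r != vec` assert and buffers both products in temporaries, so the output may now alias the input. A small standalone sketch of why that matters; the names are illustrative, and the "unsafe" variant mimics the old write order:

/* When r and vec alias, writing r[0] first corrupts the vec[0] read needed for r[1]. */
#include <cstdio>

static void rotate_ccw_unsafe(float r[2], const float m[2], const float v[2])
{
  r[0] = m[0] * v[0] - m[1] * v[1]; /* If r == v, v[0] is clobbered here... */
  r[1] = m[1] * v[0] + m[0] * v[1]; /* ...so this line reads the wrong value. */
}

static void rotate_ccw_safe(float r[2], const float m[2], const float v[2])
{
  const float r0 = m[0] * v[0] - m[1] * v[1];
  const float r1 = m[1] * v[0] + m[0] * v[1];
  r[0] = r0; /* All reads happen before any write, so r may alias v. */
  r[1] = r1;
}

int main()
{
  const float m[2] = {0.0f, 1.0f}; /* 90 degree rotation: (cos, sin). */
  float a[2] = {1.0f, 0.0f};
  float b[2] = {1.0f, 0.0f};
  rotate_ccw_unsafe(a, m, a);
  rotate_ccw_safe(b, m, b);
  /* Prints "unsafe: (0.0, 0.0)  safe: (0.0, 1.0)". */
  std::printf("unsafe: (%.1f, %.1f)  safe: (%.1f, %.1f)\n", a[0], a[1], b[0], b[1]);
  return 0;
}
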
diff --git a/source/blender/blenlib/intern/mesh_boolean.cc b/source/blender/blenlib/intern/mesh_boolean.cc
index 700c126ca4c..0d8ad1da582 100644
--- a/source/blender/blenlib/intern/mesh_boolean.cc
+++ b/source/blender/blenlib/intern/mesh_boolean.cc
@@ -1675,7 +1675,7 @@ static Edge find_good_sorting_edge(const Vert *testp,
* The algorithm is similar to the one for find_ambient_cell, except that
* instead of an arbitrary point known to be outside the whole mesh, we
* have a particular point (v) and we just want to determine the patches
- * that that point is between in sorting-around-an-edge order.
+ * that point is between in sorting-around-an-edge order.
*/
static int find_containing_cell(const Vert *v,
int t,
@@ -2966,6 +2966,11 @@ static std::ostream &operator<<(std::ostream &os, const FaceMergeState &fms)
* \a tris all have the same original face.
* Find the 2d edge/triangle topology for these triangles, but only the ones facing in the
* norm direction, and whether each edge is dissolvable or not.
+ * If we did the initial triangulation properly, and any Delaunay triangulations of intersections
+ * properly, then each triangle edge should have at most one neighbor.
+ * However, there can be anomalies. For example, if an input face is self-intersecting, we fall
+ * back on the floating-point poly-fill triangulation, after which all bets are off.
+ * Hence, try to be tolerant of such unexpected topology.
*/
static void init_face_merge_state(FaceMergeState *fms,
const Vector<int> &tris,
@@ -3053,16 +3058,35 @@ static void init_face_merge_state(FaceMergeState *fms,
std::cout << "me.v1 == mf.vert[i] so set edge[" << me_index << "].left_face = " << f
<< "\n";
}
- BLI_assert(me.left_face == -1);
- fms->edge[me_index].left_face = f;
+ if (me.left_face != -1) {
+ /* Unexpected in the normal case: this means more than one triangle shares this
+ * edge in the same orientation. But be tolerant of this case. By making this
+ * edge not dissolvable, we'll avoid future problems due to this non-manifold topology.
+ */
+ if (dbg_level > 1) {
+ std::cout << "me.left_face was already occupied, so triangulation wasn't good\n";
+ }
+ me.dissolvable = false;
+ }
+ else {
+ fms->edge[me_index].left_face = f;
+ }
}
else {
if (dbg_level > 1) {
std::cout << "me.v1 != mf.vert[i] so set edge[" << me_index << "].right_face = " << f
<< "\n";
}
- BLI_assert(me.right_face == -1);
- fms->edge[me_index].right_face = f;
+ if (me.right_face != -1) {
+ /* Unexpected, analogous to the me.left_face != -1 case above. */
+ if (dbg_level > 1) {
+ std::cout << "me.right_face was already occupied, so triangulation wasn't good\n";
+ }
+ me.dissolvable = false;
+ }
+ else {
+ fms->edge[me_index].right_face = f;
+ }
}
fms->face[f].edge.append(me_index);
}
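
The two hunks above replace hard asserts with a tolerant fallback: when a second triangle claims the same side of an edge, the edge is kept but marked non-dissolvable. A hypothetical, stripped-down sketch of that pattern; the types are stand-ins, not the real FaceMergeState structures:

struct MergeEdge {
  int left_face = -1;
  int right_face = -1;
  bool dissolvable = true;
};

static void assign_face_to_edge(MergeEdge &edge, const int face, const bool same_orientation)
{
  int &slot = same_orientation ? edge.left_face : edge.right_face;
  if (slot != -1) {
    /* Non-manifold input: two faces claim the same side of the edge.
     * Keep the first one and make the edge ineligible for dissolving. */
    edge.dissolvable = false;
    return;
  }
  slot = face;
}

int main()
{
  MergeEdge e;
  assign_face_to_edge(e, 0, true);
  assign_face_to_edge(e, 1, true); /* Second claim on the same side: flags non-dissolvable. */
  return e.dissolvable ? 1 : 0;    /* Returns 0 here. */
}
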
diff --git a/source/blender/blenlib/intern/mesh_intersect.cc b/source/blender/blenlib/intern/mesh_intersect.cc
index d5585f953ec..e8aa359fbe4 100644
--- a/source/blender/blenlib/intern/mesh_intersect.cc
+++ b/source/blender/blenlib/intern/mesh_intersect.cc
@@ -710,7 +710,7 @@ bool IMesh::erase_face_positions(int f_index, Span<bool> face_pos_erase, IMeshAr
* mark with null pointer and caller should call remove_null_faces().
* the loop is done.
*/
- this->face_[f_index] = NULL;
+ this->face_[f_index] = nullptr;
return true;
}
Array<const Vert *> new_vert(new_len);
diff --git a/source/blender/blenlib/intern/noise.c b/source/blender/blenlib/intern/noise.c
index c39a2b5a27e..3ec7c3f9804 100644
--- a/source/blender/blenlib/intern/noise.c
+++ b/source/blender/blenlib/intern/noise.c
@@ -1125,7 +1125,7 @@ float BLI_noise_cell(float x, float y, float z)
return (2.0f * BLI_cellNoiseU(x, y, z) - 1.0f);
}
-void BLI_noise_cell_v3(float x, float y, float z, float ca[3])
+void BLI_noise_cell_v3(float x, float y, float z, float r_ca[3])
{
/* avoid precision issues on unit coordinates */
x = (x + 0.000001f) * 1.00001f;
@@ -1136,9 +1136,9 @@ void BLI_noise_cell_v3(float x, float y, float z, float ca[3])
int yi = (int)(floor(y));
int zi = (int)(floor(z));
const float *p = HASHPNT(xi, yi, zi);
- ca[0] = p[0];
- ca[1] = p[1];
- ca[2] = p[2];
+ r_ca[0] = p[0];
+ r_ca[1] = p[1];
+ r_ca[2] = p[2];
}
/** \} */
diff --git a/source/blender/blenlib/intern/noise.cc b/source/blender/blenlib/intern/noise.cc
index a514c9e5183..8a073239b31 100644
--- a/source/blender/blenlib/intern/noise.cc
+++ b/source/blender/blenlib/intern/noise.cc
@@ -263,7 +263,6 @@ BLI_INLINE float mix(float v0, float v1, float x)
* + + |
* @ + + + + @ @------> x
* v0 v1
- *
*/
BLI_INLINE float mix(float v0, float v1, float v2, float v3, float x, float y)
{
@@ -809,15 +808,14 @@ float musgrave_hybrid_multi_fractal(const float co,
{
float p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -830,8 +828,12 @@ float musgrave_hybrid_multi_fractal(const float co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
@@ -961,15 +963,14 @@ float musgrave_hybrid_multi_fractal(const float2 co,
{
float2 p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -982,8 +983,12 @@ float musgrave_hybrid_multi_fractal(const float2 co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
@@ -1115,15 +1120,14 @@ float musgrave_hybrid_multi_fractal(const float3 co,
{
float3 p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -1136,8 +1140,12 @@ float musgrave_hybrid_multi_fractal(const float3 co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
@@ -1269,15 +1277,14 @@ float musgrave_hybrid_multi_fractal(const float4 co,
{
float4 p = co;
const float pwHL = std::pow(lacunarity, -H);
- float pwr = pwHL;
- float value = perlin_signed(p) + offset;
- float weight = gain * value;
- p *= lacunarity;
+ float pwr = 1.0f;
+ float value = 0.0f;
+ float weight = 1.0f;
const float octaves = CLAMPIS(octaves_unclamped, 0.0f, 15.0f);
- for (int i = 1; (weight > 0.001f) && (i < (int)octaves); i++) {
+ for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
if (weight > 1.0f) {
weight = 1.0f;
}
@@ -1290,8 +1297,12 @@ float musgrave_hybrid_multi_fractal(const float4 co,
}
const float rmd = octaves - floorf(octaves);
- if (rmd != 0.0f) {
- value += rmd * ((perlin_signed(p) + offset) * pwr);
+ if ((rmd != 0.0f) && (weight > 0.001f)) {
+ if (weight > 1.0f) {
+ weight = 1.0f;
+ }
+ float signal = (perlin_signed(p) + offset) * pwr;
+ value += rmd * weight * signal;
}
return value;
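
All four musgrave_hybrid_multi_fractal() overloads get the same fix: the first octave is now handled inside the loop (start at i = 0 with value = 0, weight = 1, pwr = 1), and the fractional remainder octave is weighted as well. A 1-D standalone sketch of the corrected accumulation follows; the loop body not visible in the hunks is reconstructed from the standard hybrid-multifractal formulation and perlin_signed() is stubbed, so treat this as an assumption-laden illustration rather than the blenlib code:

#include <algorithm>
#include <cmath>
#include <cstdio>

static float perlin_signed(float x)
{
  return std::sin(x * 12.9898f) * 0.5f; /* Stand-in for the real signed noise. */
}

static float hybrid_multi_fractal(
    float co, float H, float lacunarity, float octaves_unclamped, float offset, float gain)
{
  float p = co;
  const float pwHL = std::pow(lacunarity, -H);
  float pwr = 1.0f;
  float value = 0.0f;
  float weight = 1.0f;

  const float octaves = std::clamp(octaves_unclamped, 0.0f, 15.0f);
  for (int i = 0; (weight > 0.001f) && (i < (int)octaves); i++) {
    weight = std::min(weight, 1.0f);
    const float signal = (perlin_signed(p) + offset) * pwr;
    value += weight * signal;
    weight *= gain * signal;
    pwr *= pwHL;
    p *= lacunarity;
  }

  /* The fractional remainder octave now also respects the accumulated weight. */
  const float rmd = octaves - std::floor(octaves);
  if ((rmd != 0.0f) && (weight > 0.001f)) {
    weight = std::min(weight, 1.0f);
    const float signal = (perlin_signed(p) + offset) * pwr;
    value += rmd * weight * signal;
  }
  return value;
}

int main()
{
  std::printf("%f\n", hybrid_multi_fractal(0.5f, 1.0f, 2.0f, 4.0f, 0.9f, 1.0f));
  return 0;
}
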
diff --git a/source/blender/blenlib/intern/path_util.c b/source/blender/blenlib/intern/path_util.c
index 5a96221c8d1..c053c3907db 100644
--- a/source/blender/blenlib/intern/path_util.c
+++ b/source/blender/blenlib/intern/path_util.c
@@ -1105,29 +1105,29 @@ bool BLI_path_program_search(char *fullname, const size_t maxlen, const char *na
path = BLI_getenv("PATH");
if (path) {
- char filename[FILE_MAX];
+ char filepath_test[FILE_MAX];
const char *temp;
do {
temp = strchr(path, separator);
if (temp) {
- memcpy(filename, path, temp - path);
- filename[temp - path] = 0;
+ memcpy(filepath_test, path, temp - path);
+ filepath_test[temp - path] = 0;
path = temp + 1;
}
else {
- BLI_strncpy(filename, path, sizeof(filename));
+ BLI_strncpy(filepath_test, path, sizeof(filepath_test));
}
- BLI_path_append(filename, maxlen, name);
+ BLI_path_append(filepath_test, maxlen, name);
if (
#ifdef _WIN32
- BLI_path_program_extensions_add_win32(filename, maxlen)
+ BLI_path_program_extensions_add_win32(filepath_test, maxlen)
#else
- BLI_exists(filename)
+ BLI_exists(filepath_test)
#endif
) {
- BLI_strncpy(fullname, filename, maxlen);
+ BLI_strncpy(fullname, filepath_test, maxlen);
retval = true;
break;
}
@@ -1204,87 +1204,6 @@ bool BLI_make_existing_file(const char *name)
return BLI_dir_create_recursive(di);
}
-void BLI_make_file_string(const char *relabase, char *string, const char *dir, const char *file)
-{
- int sl;
-
- if (string) {
- /* ensure this is always set even if dir/file are NULL */
- string[0] = '\0';
-
- if (ELEM(NULL, dir, file)) {
- return; /* We don't want any NULLs */
- }
- }
- else {
- return; /* string is NULL, probably shouldn't happen but return anyway */
- }
-
- /* Resolve relative references */
- if (relabase && dir[0] == '/' && dir[1] == '/') {
- char *lslash;
-
- /* Get the file name, chop everything past the last slash (ie. the filename) */
- strcpy(string, relabase);
-
- lslash = (char *)BLI_path_slash_rfind(string);
- if (lslash) {
- *(lslash + 1) = 0;
- }
-
- dir += 2; /* Skip over the relative reference */
- }
-#ifdef WIN32
- else {
- if (BLI_strnlen(dir, 3) >= 2 && dir[1] == ':') {
- BLI_strncpy(string, dir, 3);
- dir += 2;
- }
- else if (BLI_strnlen(dir, 3) >= 2 && BLI_path_is_unc(dir)) {
- string[0] = 0;
- }
- else { /* no drive specified */
- /* first option: get the drive from the relabase if it has one */
- if (relabase && BLI_strnlen(relabase, 3) >= 2 && relabase[1] == ':') {
- BLI_strncpy(string, relabase, 3);
- string[2] = '\\';
- string[3] = '\0';
- }
- else { /* we're out of luck here, guessing the first valid drive, usually c:\ */
- BLI_windows_get_default_root_dir(string);
- }
-
- /* ignore leading slashes */
- while (ELEM(*dir, '/', '\\')) {
- dir++;
- }
- }
- }
-#endif
-
- strcat(string, dir);
-
- /* Make sure string ends in one (and only one) slash */
- /* first trim all slashes from the end of the string */
- sl = strlen(string);
- while ((sl > 0) && ELEM(string[sl - 1], '/', '\\')) {
- string[sl - 1] = '\0';
- sl--;
- }
- /* since we've now removed all slashes, put back one slash at the end. */
- strcat(string, "/");
-
- while (ELEM(*file, '/', '\\')) {
- /* Trim slashes from the front of file */
- file++;
- }
-
- strcat(string, file);
-
- /* Push all slashes to the system preferred direction */
- BLI_path_slash_native(string);
-}
-
static bool path_extension_check_ex(const char *str,
const size_t str_len,
const char *ext,
@@ -1586,8 +1505,8 @@ size_t BLI_path_join(char *__restrict dst, const size_t dst_len, const char *pat
return ofs;
}
- /* remove trailing slashes, unless there are _only_ trailing slashes
- * (allow "//" as the first argument). */
+ /* Remove trailing slashes, unless there are *only* trailing slashes
+ * (allow `//` or `//some_path` as the first argument). */
bool has_trailing_slash = false;
if (ofs != 0) {
size_t len = ofs;
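
For context on the BLI_path_program_search() hunk above: it walks the PATH environment variable, appends the program name to each directory, and returns the first hit (on Windows it also probes executable extensions, which the sketch below skips). A rough standalone equivalent using the C++17 filesystem library instead of the BLI_* helpers, for illustration only:

#include <cstdlib>
#include <filesystem>
#include <iostream>
#include <optional>
#include <sstream>
#include <string>
#include <system_error>

static std::optional<std::filesystem::path> program_search(const std::string &name)
{
#ifdef _WIN32
  const char separator = ';';
#else
  const char separator = ':';
#endif
  const char *path_env = std::getenv("PATH");
  if (path_env == nullptr) {
    return std::nullopt;
  }
  std::stringstream stream(path_env);
  std::string dir;
  while (std::getline(stream, dir, separator)) {
    const std::filesystem::path candidate = std::filesystem::path(dir) / name;
    std::error_code ec;
    if (std::filesystem::exists(candidate, ec)) {
      return candidate; /* First existing candidate wins, like the BLI loop. */
    }
  }
  return std::nullopt;
}

int main()
{
  if (const auto found = program_search("python3")) {
    std::cout << *found << "\n";
  }
  return 0;
}
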
diff --git a/source/blender/blenlib/intern/rct.c b/source/blender/blenlib/intern/rct.c
index 0bb606c288e..7248db5b718 100644
--- a/source/blender/blenlib/intern/rct.c
+++ b/source/blender/blenlib/intern/rct.c
@@ -265,7 +265,7 @@ bool BLI_rcti_isect_segment(const rcti *rect, const int s1[2], const int s2[2])
/* diagonal: [/] */
tvec1[0] = rect->xmin;
tvec1[1] = rect->ymin;
- tvec2[0] = rect->xmin;
+ tvec2[0] = rect->xmax;
tvec2[1] = rect->ymax;
if (isect_segments_i(s1, s2, tvec1, tvec2)) {
return true;
@@ -311,7 +311,7 @@ bool BLI_rctf_isect_segment(const rctf *rect, const float s1[2], const float s2[
/* diagonal: [/] */
tvec1[0] = rect->xmin;
tvec1[1] = rect->ymin;
- tvec2[0] = rect->xmin;
+ tvec2[0] = rect->xmax;
tvec2[1] = rect->ymax;
if (isect_segments_fl(s1, s2, tvec1, tvec2)) {
return true;
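
The two rct.c fixes correct the second diagonal of the segment/rectangle test: it previously ran from (xmin, ymin) to (xmin, ymax), i.e. it was just the left edge, so some crossing segments were missed. A standalone sketch of the diagonal part of the test; the real functions also early-out on endpoint containment and same-side checks, and the helper types here are illustrative:

#include <cstdio>

struct Rect {
  int xmin, xmax, ymin, ymax;
};

static int orient(const int a[2], const int b[2], const int c[2])
{
  const long long v = (long long)(b[0] - a[0]) * (c[1] - a[1]) -
                      (long long)(b[1] - a[1]) * (c[0] - a[0]);
  return (v > 0) - (v < 0);
}

/* Proper intersection via orientation signs (collinear/touching cases ignored). */
static bool segments_intersect(const int a1[2], const int a2[2], const int b1[2], const int b2[2])
{
  return orient(a1, a2, b1) != orient(a1, a2, b2) && orient(b1, b2, a1) != orient(b1, b2, a2);
}

static bool rect_isect_segment(const Rect &rect, const int s1[2], const int s2[2])
{
  /* Diagonal '\': (xmin, ymax) -> (xmax, ymin). */
  const int d1a[2] = {rect.xmin, rect.ymax};
  const int d1b[2] = {rect.xmax, rect.ymin};
  if (segments_intersect(s1, s2, d1a, d1b)) {
    return true;
  }
  /* Diagonal '/': (xmin, ymin) -> (xmax, ymax). Using xmax here is the fix. */
  const int d2a[2] = {rect.xmin, rect.ymin};
  const int d2b[2] = {rect.xmax, rect.ymax};
  return segments_intersect(s1, s2, d2a, d2b);
}

int main()
{
  const Rect r = {0, 10, 0, 10};
  const int s1[2] = {-5, 5}, s2[2] = {15, 6}; /* Crosses the rectangle, endpoints outside. */
  std::printf("%d\n", rect_isect_segment(r, s1, s2)); /* Expect 1. */
  return 0;
}
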
diff --git a/source/blender/blenlib/intern/smallhash.c b/source/blender/blenlib/intern/smallhash.c
index 2d76f662611..8263f8ff34e 100644
--- a/source/blender/blenlib/intern/smallhash.c
+++ b/source/blender/blenlib/intern/smallhash.c
@@ -329,8 +329,7 @@ void **BLI_smallhash_iternew_p(const SmallHash *sh, SmallHashIter *iter, uintptr
/** \name Debugging & Introspection
* \{ */
-/* NOTE(campbell): this was called _print_smhash in knifetool.c
- * it may not be intended for general use. */
+/* NOTE(@campbellbarton): useful for debugging but may not be intended for general use. */
#if 0
void BLI_smallhash_print(SmallHash *sh)
{
diff --git a/source/blender/blenlib/intern/string_search.cc b/source/blender/blenlib/intern/string_search.cc
index 14d85b99739..31ea24eb494 100644
--- a/source/blender/blenlib/intern/string_search.cc
+++ b/source/blender/blenlib/intern/string_search.cc
@@ -11,8 +11,8 @@
#include "BLI_timeit.hh"
/* Right arrow, keep in sync with #UI_MENU_ARROW_SEP in `UI_interface.h`. */
-#define UI_MENU_ARROW_SEP "\xe2\x96\xb6"
-#define UI_MENU_ARROW_SEP_UNICODE 0x25b6
+#define UI_MENU_ARROW_SEP "\xe2\x96\xb8"
+#define UI_MENU_ARROW_SEP_UNICODE 0x25b8
namespace blender::string_search {
diff --git a/source/blender/blenlib/intern/string_utf8.c b/source/blender/blenlib/intern/string_utf8.c
index 0cbf62cce03..17fb451e422 100644
--- a/source/blender/blenlib/intern/string_utf8.c
+++ b/source/blender/blenlib/intern/string_utf8.c
@@ -403,7 +403,7 @@ int BLI_str_utf8_char_width_safe(const char *p)
/* copied from glib's gutf8.c, added 'Err' arg */
-/* NOTE(campbell): glib uses uint for unicode, best we do the same,
+/* NOTE(@campbellbarton): glib uses uint for unicode, best we do the same,
* though we don't typedef it. */
#define UTF8_COMPUTE(Char, Mask, Len, Err) \
@@ -692,25 +692,25 @@ const char *BLI_str_find_next_char_utf8(const char *p, const char *str_end)
size_t BLI_str_partition_utf8(const char *str,
const uint delim[],
- const char **sep,
- const char **suf)
+ const char **r_sep,
+ const char **r_suf)
{
- return BLI_str_partition_ex_utf8(str, NULL, delim, sep, suf, false);
+ return BLI_str_partition_ex_utf8(str, NULL, delim, r_sep, r_suf, false);
}
size_t BLI_str_rpartition_utf8(const char *str,
const uint delim[],
- const char **sep,
- const char **suf)
+ const char **r_sep,
+ const char **r_suf)
{
- return BLI_str_partition_ex_utf8(str, NULL, delim, sep, suf, true);
+ return BLI_str_partition_ex_utf8(str, NULL, delim, r_sep, r_suf, true);
}
size_t BLI_str_partition_ex_utf8(const char *str,
const char *end,
const uint delim[],
- const char **sep,
- const char **suf,
+ const char **r_sep,
+ const char **r_suf,
const bool from_right)
{
const size_t str_len = end ? (size_t)(end - str) : strlen(str);
@@ -721,36 +721,32 @@ size_t BLI_str_partition_ex_utf8(const char *str,
/* Note that here, we assume end points to a valid utf8 char! */
BLI_assert((end >= str) && (BLI_str_utf8_as_unicode(end) != BLI_UTF8_ERR));
- *suf = (char *)(str + str_len);
-
- size_t index;
- for (*sep = (char *)(from_right ? BLI_str_find_prev_char_utf8(end, str) : str), index = 0;
- from_right ? (*sep > str) : ((*sep < end) && (**sep != '\0'));
- *sep = (char *)(from_right ? (str != *sep ? BLI_str_find_prev_char_utf8(*sep, str) : NULL) :
- str + index)) {
+ char *suf = (char *)(str + str_len);
+ size_t index = 0;
+ for (char *sep = (char *)(from_right ? BLI_str_find_prev_char_utf8(end, str) : str);
+ from_right ? (sep > str) : ((sep < end) && (*sep != '\0'));
+ sep = (char *)(from_right ? (str != sep ? BLI_str_find_prev_char_utf8(sep, str) : NULL) :
+ str + index)) {
size_t index_ofs = 0;
- const uint c = BLI_str_utf8_as_unicode_step_or_error(*sep, (size_t)(end - *sep), &index_ofs);
- index += index_ofs;
-
- if (c == BLI_UTF8_ERR) {
- *suf = *sep = NULL;
+ const uint c = BLI_str_utf8_as_unicode_step_or_error(sep, (size_t)(end - sep), &index_ofs);
+ if (UNLIKELY(c == BLI_UTF8_ERR)) {
break;
}
+ index += index_ofs;
for (const uint *d = delim; *d != '\0'; d++) {
if (*d == c) {
- /* *suf is already correct in case from_right is true. */
- if (!from_right) {
- *suf = (char *)(str + index);
- }
- return (size_t)(*sep - str);
+ /* `suf` is already correct in case from_right is true. */
+ *r_sep = sep;
+ *r_suf = from_right ? suf : (char *)(str + index);
+ return (size_t)(sep - str);
}
}
- *suf = *sep; /* Useful in 'from_right' case! */
+ suf = sep; /* Useful in 'from_right' case! */
}
- *suf = *sep = NULL;
+ *r_suf = *r_sep = NULL;
return str_len;
}
@@ -790,9 +786,9 @@ int BLI_str_utf8_offset_to_column(const char *str, int offset)
int BLI_str_utf8_offset_from_column(const char *str, int column)
{
- int offset = 0, pos = 0, col;
+ int offset = 0, pos = 0;
while (*(str + offset) && pos < column) {
- col = BLI_str_utf8_char_width_safe(str + offset);
+ const int col = BLI_str_utf8_char_width_safe(str + offset);
if (pos + col > column) {
break;
}
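
With the refactor above, BLI_str_partition_ex_utf8() only writes its outputs once, and a malformed UTF-8 byte stops the scan instead of leaving partially written results; on failure both output pointers are NULL and the string length is returned. A hedged usage fragment based on the signatures shown above, assuming BLI_string_utf8.h is available (not compilable outside the Blender tree):

void partition_example(void)
{
  const unsigned int delim[] = {0x25b8, 0}; /* Delimiter set, zero-terminated. */
  const char *sep = NULL, *suf = NULL;
  const size_t pre_len = BLI_str_partition_utf8("File\xe2\x96\xb8Open", delim, &sep, &suf);
  /* pre_len == 4 (the bytes before the arrow), sep points at the arrow, suf at "Open".
   * When no delimiter (or invalid UTF-8) is found, sep and suf are both NULL and the
   * full string length is returned. */
  (void)pre_len;
}
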
diff --git a/source/blender/blenlib/intern/system.c b/source/blender/blenlib/intern/system.c
index 35e26e0cb33..f7249e491d7 100644
--- a/source/blender/blenlib/intern/system.c
+++ b/source/blender/blenlib/intern/system.c
@@ -21,7 +21,9 @@
# include "BLI_winstuff.h"
#else
-# include <execinfo.h>
+# if defined(HAVE_EXECINFO_H)
+# include <execinfo.h>
+# endif
# include <unistd.h>
#endif
@@ -61,9 +63,9 @@ int BLI_cpu_support_sse2(void)
#if !defined(_MSC_VER)
void BLI_system_backtrace(FILE *fp)
{
- /* ------------- */
- /* Linux / Apple */
-# if defined(__linux__) || defined(__APPLE__)
+ /* ----------------------- */
+ /* If system has execinfo.h */
+# if defined(HAVE_EXECINFO_H)
# define SIZE 100
void *buffer[SIZE];
@@ -152,12 +154,12 @@ void BLI_hostname_get(char *buffer, size_t bufsize)
if (gethostname(buffer, bufsize - 1) < 0) {
BLI_strncpy(buffer, "-unknown-", bufsize);
}
- /* When gethostname() truncates, it doesn't guarantee the trailing \0. */
+ /* When `gethostname()` truncates, it doesn't guarantee the trailing `\0`. */
buffer[bufsize - 1] = '\0';
#else
DWORD bufsize_inout = bufsize;
if (!GetComputerName(buffer, &bufsize_inout)) {
- strncpy(buffer, "-unknown-", bufsize);
+ BLI_strncpy(buffer, "-unknown-", bufsize);
}
#endif
}
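
The system.c change keys the backtrace code on HAVE_EXECINFO_H (assumed to come from the build system) rather than on Linux/Apple, since not every libc ships <execinfo.h> (musl, for example). A minimal sketch of that guard pattern; it uses backtrace_symbols_fd() for brevity rather than the backtrace_symbols() + fprintf() loop the real function uses:

#include <cstdio>

#if defined(HAVE_EXECINFO_H)
#  include <execinfo.h>
#endif

static void print_backtrace(FILE *fp)
{
#if defined(HAVE_EXECINFO_H)
  void *buffer[100];
  const int size = backtrace(buffer, 100);
  backtrace_symbols_fd(buffer, size, fileno(fp));
#else
  fprintf(fp, "backtrace not supported on this platform\n");
#endif
}

int main()
{
  print_backtrace(stderr);
  return 0;
}
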
diff --git a/source/blender/blenlib/intern/task_pool.cc b/source/blender/blenlib/intern/task_pool.cc
index a29dbe95ba9..c335d04413c 100644
--- a/source/blender/blenlib/intern/task_pool.cc
+++ b/source/blender/blenlib/intern/task_pool.cc
@@ -84,11 +84,11 @@ class Task {
free_taskdata(other.free_taskdata),
freedata(other.freedata)
{
- ((Task &)other).pool = NULL;
- ((Task &)other).run = NULL;
- ((Task &)other).taskdata = NULL;
+ ((Task &)other).pool = nullptr;
+ ((Task &)other).run = nullptr;
+ ((Task &)other).taskdata = nullptr;
((Task &)other).free_taskdata = false;
- ((Task &)other).freedata = NULL;
+ ((Task &)other).freedata = nullptr;
}
#else
Task(const Task &other) = delete;
diff --git a/source/blender/blenlib/intern/timeit.cc b/source/blender/blenlib/intern/timeit.cc
index f11f9c4ad94..7a8cf8da038 100644
--- a/source/blender/blenlib/intern/timeit.cc
+++ b/source/blender/blenlib/intern/timeit.cc
@@ -3,19 +3,29 @@
#include "BLI_timeit.hh"
#include <algorithm>
+#include <iomanip>
namespace blender::timeit {
void print_duration(Nanoseconds duration)
{
- if (duration < std::chrono::microseconds(100)) {
+ using namespace std::chrono;
+ if (duration < microseconds(100)) {
std::cout << duration.count() << " ns";
}
- else if (duration < std::chrono::seconds(5)) {
- std::cout << duration.count() / 1.0e6 << " ms";
+ else if (duration < seconds(5)) {
+ std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e6 << " ms";
+ }
+ else if (duration > seconds(90)) {
+ /* Long durations: print seconds, and also H:m:s */
+ const auto dur_hours = duration_cast<hours>(duration);
+ const auto dur_mins = duration_cast<minutes>(duration - dur_hours);
+ const auto dur_sec = duration_cast<seconds>(duration - dur_hours - dur_mins);
+ std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e9 << " s ("
+ << dur_hours.count() << "H:" << dur_mins.count() << "m:" << dur_sec.count() << "s)";
}
else {
- std::cout << duration.count() / 1.0e9 << " s";
+ std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e9 << " s";
}
}
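
The new print_duration() behaviour, wrapped in a free-standing program to show the output format for a long run; this mirrors the change above, with only the main() added for illustration:

#include <chrono>
#include <iomanip>
#include <iostream>

static void print_duration(std::chrono::nanoseconds duration)
{
  using namespace std::chrono;
  if (duration < microseconds(100)) {
    std::cout << duration.count() << " ns";
  }
  else if (duration < seconds(5)) {
    std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e6 << " ms";
  }
  else if (duration > seconds(90)) {
    /* Long durations: print seconds, and also H:m:s. */
    const auto dur_hours = duration_cast<hours>(duration);
    const auto dur_mins = duration_cast<minutes>(duration - dur_hours);
    const auto dur_sec = duration_cast<seconds>(duration - dur_hours - dur_mins);
    std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e9 << " s ("
              << dur_hours.count() << "H:" << dur_mins.count() << "m:" << dur_sec.count() << "s)";
  }
  else {
    std::cout << std::fixed << std::setprecision(1) << duration.count() / 1.0e9 << " s";
  }
  std::cout << "\n";
}

int main()
{
  print_duration(std::chrono::seconds(5025)); /* Prints "5025.0 s (1H:23m:45s)". */
  return 0;
}
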
diff --git a/source/blender/blenlib/intern/winstuff.c b/source/blender/blenlib/intern/winstuff.c
index e90a0ee02db..7e2c5e8f1dd 100644
--- a/source/blender/blenlib/intern/winstuff.c
+++ b/source/blender/blenlib/intern/winstuff.c
@@ -63,23 +63,17 @@ bool BLI_windows_register_blend_extension(const bool background)
char buffer[256];
char BlPath[MAX_PATH];
- char InstallDir[FILE_MAXDIR];
- char SysDir[FILE_MAXDIR];
- const char *ThumbHandlerDLL;
- char RegCmd[MAX_PATH * 2];
char MBox[256];
- char *blender_app;
-# ifndef _WIN64
- BOOL IsWOW64;
-# endif
printf("Registering file extension...");
GetModuleFileName(0, BlPath, MAX_PATH);
/* Replace the actual app name with the wrapper. */
- blender_app = strstr(BlPath, "blender.exe");
- if (blender_app != NULL) {
- strcpy(blender_app, "blender-launcher.exe");
+ {
+ char *blender_app = strstr(BlPath, "blender.exe");
+ if (blender_app != NULL) {
+ strcpy(blender_app, "blender-launcher.exe");
+ }
}
/* root is HKLM by default */
@@ -157,12 +151,17 @@ bool BLI_windows_register_blend_extension(const bool background)
}
# ifdef WITH_BLENDER_THUMBNAILER
- BLI_windows_get_executable_dir(InstallDir);
- GetSystemDirectory(SysDir, FILE_MAXDIR);
- ThumbHandlerDLL = "BlendThumb.dll";
- snprintf(
- RegCmd, MAX_PATH * 2, "%s\\regsvr32 /s \"%s\\%s\"", SysDir, InstallDir, ThumbHandlerDLL);
- system(RegCmd);
+ {
+ char RegCmd[MAX_PATH * 2];
+ char InstallDir[FILE_MAXDIR];
+ char SysDir[FILE_MAXDIR];
+ BLI_windows_get_executable_dir(InstallDir);
+ GetSystemDirectory(SysDir, FILE_MAXDIR);
+ const char *ThumbHandlerDLL = "BlendThumb.dll";
+ snprintf(
+ RegCmd, MAX_PATH * 2, "%s\\regsvr32 /s \"%s\\%s\"", SysDir, InstallDir, ThumbHandlerDLL);
+ system(RegCmd);
+ }
# endif
RegCloseKey(root);