git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenlib/intern')
 source/blender/blenlib/intern/delaunay_2d.cc            |   1 +
 source/blender/blenlib/intern/generic_virtual_array.cc  | 162 ++++++----------
 source/blender/blenlib/intern/hash_md5.c                |   6 +-
 source/blender/blenlib/intern/math_base_inline.c        |  20 ++++-
 source/blender/blenlib/intern/math_solvers.c            |   4 +-
 5 files changed, 81 insertions(+), 112 deletions(-)
diff --git a/source/blender/blenlib/intern/delaunay_2d.cc b/source/blender/blenlib/intern/delaunay_2d.cc
index ece22bcf82e..db6cb0824dc 100644
--- a/source/blender/blenlib/intern/delaunay_2d.cc
+++ b/source/blender/blenlib/intern/delaunay_2d.cc
@@ -2637,6 +2637,7 @@ void prepare_cdt_for_output(CDT_state<T> *cdt_state, const CDT_output_type outpu
remove_faces_in_holes(cdt_state);
}
else if (output_type == CDT_CONSTRAINTS_VALID_BMESH_WITH_HOLES) {
+ remove_outer_edges_until_constraints(cdt_state);
remove_non_constraint_edges_leave_valid_bmesh(cdt_state);
remove_faces_in_holes(cdt_state);
}
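Note on the delaunay_2d.cc hunk: the one-line fix makes the CDT_CONSTRAINTS_VALID_BMESH_WITH_HOLES output run the same outer-edge peeling pass as the other constrained outputs before non-constraint edges are removed, so remove_faces_in_holes() operates on a triangulation already trimmed back to the constraint boundary. A minimal sketch of the resulting order; cleanup_valid_bmesh_with_holes and the stub declarations are illustrative stand-ins for the static helpers in this file, only the three calls are verbatim:

/* Sketch, not part of the patch: cleanup order for the WITH_HOLES output
 * type after this fix. The declarations stand in for the real helpers. */
template<typename T> struct CDT_state; /* Stand-in for the real state type. */
template<typename T> void remove_outer_edges_until_constraints(CDT_state<T> *cdt_state);
template<typename T> void remove_non_constraint_edges_leave_valid_bmesh(CDT_state<T> *cdt_state);
template<typename T> void remove_faces_in_holes(CDT_state<T> *cdt_state);

template<typename T> void cleanup_valid_bmesh_with_holes(CDT_state<T> *cdt_state)
{
  /* 1. Peel outer faces until every boundary edge is a constraint. */
  remove_outer_edges_until_constraints(cdt_state);
  /* 2. Drop interior non-constraint edges, keeping the BMesh valid. */
  remove_non_constraint_edges_leave_valid_bmesh(cdt_state);
  /* 3. Delete the faces that lie inside constraint-bounded holes. */
  remove_faces_in_holes(cdt_state);
}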
diff --git a/source/blender/blenlib/intern/generic_virtual_array.cc b/source/blender/blenlib/intern/generic_virtual_array.cc
index a3a17952a97..a6fbf4bff5b 100644
--- a/source/blender/blenlib/intern/generic_virtual_array.cc
+++ b/source/blender/blenlib/intern/generic_virtual_array.cc
@@ -85,11 +85,6 @@ bool GVArrayImpl::may_have_ownership() const
/** \name #GVMutableArrayImpl
* \{ */
-GVMutableArrayImpl::GVMutableArrayImpl(const CPPType &type, const int64_t size)
- : GVArrayImpl(type, size)
-{
-}
-
void GVMutableArrayImpl::set_by_copy(const int64_t index, const void *value)
{
BUFFER_FOR_CPP_TYPE_VALUE(*type_, buffer);
@@ -141,18 +136,6 @@ bool GVMutableArrayImpl::try_assign_VMutableArray(void *UNUSED(varray)) const
/** \name #GVArrayImpl_For_GSpan
* \{ */
-GVArrayImpl_For_GSpan::GVArrayImpl_For_GSpan(const GMutableSpan span)
- : GVMutableArrayImpl(span.type(), span.size()),
- data_(span.data()),
- element_size_(span.type().size())
-{
-}
-
-GVArrayImpl_For_GSpan::GVArrayImpl_For_GSpan(const CPPType &type, const int64_t size)
- : GVMutableArrayImpl(type, size), element_size_(type.size())
-{
-}
-
void GVArrayImpl_For_GSpan::get(const int64_t index, void *r_value) const
{
type_->copy_assign(POINTER_OFFSET(data_, element_size_ * index), r_value);
@@ -209,17 +192,6 @@ void GVArrayImpl_For_GSpan::materialize_compressed_to_uninitialized(const IndexM
type_->copy_construct_compressed(data_, dst, mask);
}
-class GVArrayImpl_For_GSpan_final final : public GVArrayImpl_For_GSpan {
- public:
- using GVArrayImpl_For_GSpan::GVArrayImpl_For_GSpan;
-
- private:
- bool may_have_ownership() const override
- {
- return false;
- }
-};
-
/** \} */
/* -------------------------------------------------------------------- */
@@ -227,79 +199,56 @@ class GVArrayImpl_For_GSpan_final final : public GVArrayImpl_For_GSpan {
/** \name #GVArrayImpl_For_SingleValueRef
 * \{ */
/* Generic virtual array where each element has the same value. The value is not owned. */
-class GVArrayImpl_For_SingleValueRef : public GVArrayImpl {
- protected:
- const void *value_ = nullptr;
- public:
- GVArrayImpl_For_SingleValueRef(const CPPType &type, const int64_t size, const void *value)
- : GVArrayImpl(type, size), value_(value)
- {
- }
-
- protected:
- GVArrayImpl_For_SingleValueRef(const CPPType &type, const int64_t size) : GVArrayImpl(type, size)
- {
- }
-
- void get(const int64_t UNUSED(index), void *r_value) const override
- {
- type_->copy_assign(value_, r_value);
- }
- void get_to_uninitialized(const int64_t UNUSED(index), void *r_value) const override
- {
- type_->copy_construct(value_, r_value);
- }
-
- bool is_span() const override
- {
- return size_ == 1;
- }
- GSpan get_internal_span() const override
- {
- return GSpan{*type_, value_, 1};
- }
-
- bool is_single() const override
- {
- return true;
- }
- void get_internal_single(void *r_value) const override
- {
- type_->copy_assign(value_, r_value);
- }
+void GVArrayImpl_For_SingleValueRef::get(const int64_t UNUSED(index), void *r_value) const
+{
+ type_->copy_assign(value_, r_value);
+}
+void GVArrayImpl_For_SingleValueRef::get_to_uninitialized(const int64_t UNUSED(index),
+ void *r_value) const
+{
+ type_->copy_construct(value_, r_value);
+}
- void materialize(const IndexMask mask, void *dst) const override
- {
- type_->fill_assign_indices(value_, dst, mask);
- }
+bool GVArrayImpl_For_SingleValueRef::is_span() const
+{
+ return size_ == 1;
+}
+GSpan GVArrayImpl_For_SingleValueRef::get_internal_span() const
+{
+ return GSpan{*type_, value_, 1};
+}
- void materialize_to_uninitialized(const IndexMask mask, void *dst) const override
- {
- type_->fill_construct_indices(value_, dst, mask);
- }
+bool GVArrayImpl_For_SingleValueRef::is_single() const
+{
+ return true;
+}
+void GVArrayImpl_For_SingleValueRef::get_internal_single(void *r_value) const
+{
+ type_->copy_assign(value_, r_value);
+}
- void materialize_compressed(const IndexMask mask, void *dst) const override
- {
- type_->fill_assign_n(value_, dst, mask.size());
- }
+void GVArrayImpl_For_SingleValueRef::materialize(const IndexMask mask, void *dst) const
+{
+ type_->fill_assign_indices(value_, dst, mask);
+}
- void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
- {
- type_->fill_construct_n(value_, dst, mask.size());
- }
-};
+void GVArrayImpl_For_SingleValueRef::materialize_to_uninitialized(const IndexMask mask,
+ void *dst) const
+{
+ type_->fill_construct_indices(value_, dst, mask);
+}
-class GVArrayImpl_For_SingleValueRef_final final : public GVArrayImpl_For_SingleValueRef {
- public:
- using GVArrayImpl_For_SingleValueRef::GVArrayImpl_For_SingleValueRef;
+void GVArrayImpl_For_SingleValueRef::materialize_compressed(const IndexMask mask, void *dst) const
+{
+ type_->fill_assign_n(value_, dst, mask.size());
+}
- private:
- bool may_have_ownership() const override
- {
- return false;
- }
-};
+void GVArrayImpl_For_SingleValueRef::materialize_compressed_to_uninitialized(const IndexMask mask,
+ void *dst) const
+{
+ type_->fill_construct_n(value_, dst, mask.size());
+}
/** \} */
@@ -529,8 +478,6 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
/** \name #GVArrayCommon
* \{ */
-GVArrayCommon::GVArrayCommon() = default;
-
GVArrayCommon::GVArrayCommon(const GVArrayCommon &other) : storage_(other.storage_)
{
impl_ = this->impl_from_storage();
@@ -672,17 +619,27 @@ GVArray::GVArray(std::shared_ptr<const GVArrayImpl> impl) : GVArrayCommon(std::m
{
}
-GVArray GVArray::ForSingle(const CPPType &type, const int64_t size, const void *value)
+GVArray::GVArray(varray_tag::single /* tag */,
+ const CPPType &type,
+ int64_t size,
+ const void *value)
{
if (type.is_trivial() && type.size() <= 16 && type.alignment() <= 8) {
- return GVArray::For<GVArrayImpl_For_SmallTrivialSingleValue<16>>(type, size, value);
+ this->emplace<GVArrayImpl_For_SmallTrivialSingleValue<16>>(type, size, value);
}
- return GVArray::For<GVArrayImpl_For_SingleValue>(type, size, value);
+ else {
+ this->emplace<GVArrayImpl_For_SingleValue>(type, size, value);
+ }
+}
+
+GVArray GVArray::ForSingle(const CPPType &type, const int64_t size, const void *value)
+{
+ return GVArray(varray_tag::single{}, type, size, value);
}
GVArray GVArray::ForSingleRef(const CPPType &type, const int64_t size, const void *value)
{
- return GVArray::For<GVArrayImpl_For_SingleValueRef_final>(type, size, value);
+ return GVArray(varray_tag::single_ref{}, type, size, value);
}
GVArray GVArray::ForSingleDefault(const CPPType &type, const int64_t size)
@@ -692,10 +649,7 @@ GVArray GVArray::ForSingleDefault(const CPPType &type, const int64_t size)
GVArray GVArray::ForSpan(GSpan span)
{
- /* Use const-cast because the underlying virtual array implementation is shared between const
- * and non const data. */
- GMutableSpan mutable_span{span.type(), const_cast<void *>(span.data()), span.size()};
- return GVArray::For<GVArrayImpl_For_GSpan_final>(mutable_span);
+ return GVArray(varray_tag::span{}, span);
}
class GVArrayImpl_For_GArray : public GVArrayImpl_For_GSpan {
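Note on the generic_virtual_array.cc hunks: the GVArrayImpl_For_SingleValueRef method bodies move out of line, the *_final wrapper classes disappear, and the GVArray::For<Impl>(...) factory calls are replaced by tag-dispatched constructors that pick an implementation (e.g. GVArrayImpl_For_SmallTrivialSingleValue for small trivial types) and emplace it directly. A simplified model of the tag pattern, assuming an inline storage buffer; VArraySketch, its buffer size, and the int payload are illustrative, only the varray_tag names mirror the diff:

#include <new>
#include <utility>

namespace varray_tag {
struct single {};
struct single_ref {};
struct span {};
}  // namespace varray_tag

class VArraySketch {
 public:
  /* Each tag selects a concrete implementation at compile time; the real
   * constructor additionally branches on type traits, as the hunk shows. */
  VArraySketch(varray_tag::single /* tag */, const int value)
  {
    this->emplace<int>(value);
  }

 private:
  template<typename T, typename... Args> void emplace(Args &&...args)
  {
    static_assert(sizeof(T) <= sizeof(buffer_), "implementation must fit inline");
    new (buffer_) T(std::forward<Args>(args)...);
  }

  alignas(8) char buffer_[16]; /* Inline storage; size is illustrative. */
};

ForSingle() then shrinks to the one-line wrapper shown in the hunk, returning GVArray(varray_tag::single{}, type, size, value).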
diff --git a/source/blender/blenlib/intern/hash_md5.c b/source/blender/blenlib/intern/hash_md5.c
index 9da8c0a0941..d57f859eb1b 100644
--- a/source/blender/blenlib/intern/hash_md5.c
+++ b/source/blender/blenlib/intern/hash_md5.c
@@ -143,7 +143,7 @@ static void md5_process_block(const void *buffer, size_t len, struct md5_ctx *ct
(void)0
/* Before we start, one word to the strange constants. They are defined in RFC 1321 as:
- * T[i] = (int) (4294967296.0 * fabs (sin (i))), i=1..64
+ * `T[i] = (int) (4294967296.0 * fabs (sin (i))), i=1..64`
*/
/* Round 1. */
@@ -315,7 +315,7 @@ int BLI_hash_md5_stream(FILE *stream, void *resblock)
break;
}
- /* Process buffer with BLOCKSIZE bytes. Note that BLOCKSIZE % 64 == 0. */
+ /* Process buffer with BLOCKSIZE bytes. Note that `BLOCKSIZE % 64 == 0`. */
md5_process_block(buffer, BLOCKSIZE, &ctx);
}
@@ -323,7 +323,7 @@ int BLI_hash_md5_stream(FILE *stream, void *resblock)
* 'fillbuf' contains the needed bits. */
memcpy(&buffer[sum], fillbuf, 64);
- /* Compute amount of padding bytes needed. Alignment is done to (N + PAD) % 64 == 56.
+ /* Compute amount of padding bytes needed. Alignment is done to `(N + PAD) % 64 == 56`.
* There is always at least one byte padded, i.e. if the alignment is correctly aligned,
* 64 padding bytes are added.
*/
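Note on the hash_md5.c hunks: these are comment-only changes that wrap code fragments in backticks; behavior is untouched. For reference, the quoted RFC 1321 formula generates the 64 sine-derived round constants, and the padding rule pads the data to (N + PAD) % 64 == 56 so that appending the 8-byte length field completes a 64-byte block. A standalone sketch of the constant generation (not code from this file):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
  /* RFC 1321: T[i] = (uint32)(4294967296.0 * fabs(sin(i))), i = 1..64.
   * 4294967296.0 is 2^32, so the sine magnitude maps onto the full 32-bit
   * range; T[1] comes out as 0xd76aa478, the first MD5 round constant. */
  for (int i = 1; i <= 64; i++) {
    const uint32_t t = uint32_t(4294967296.0 * std::fabs(std::sin(double(i))));
    std::printf("T[%2d] = 0x%08x\n", i, unsigned(t));
  }
  return 0;
}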
diff --git a/source/blender/blenlib/intern/math_base_inline.c b/source/blender/blenlib/intern/math_base_inline.c
index a983821f15e..4a213f5fe74 100644
--- a/source/blender/blenlib/intern/math_base_inline.c
+++ b/source/blender/blenlib/intern/math_base_inline.c
@@ -767,6 +767,20 @@ MALWAYS_INLINE __m128 _bli_math_fastpow24(const __m128 arg)
return _mm_mul_ps(x, _mm_mul_ps(x, x));
}
+MALWAYS_INLINE __m128 _bli_math_rsqrt(__m128 in)
+{
+ __m128 r = _mm_rsqrt_ps(in);
+ /* Only do additional Newton-Raphson iterations when using actual SSE
+ * code path. When we are emulating SSE on NEON via sse2neon, the
+ * additional NR iterations are already done inside _mm_rsqrt_ps
+ * emulation. */
+# if defined(__SSE2__)
+ r = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r),
+ _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(in, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
+# endif
+ return r;
+}
+
/* Calculate powf(x, 1.0f / 2.4) */
MALWAYS_INLINE __m128 _bli_math_fastpow512(const __m128 arg)
{
@@ -776,14 +790,14 @@ MALWAYS_INLINE __m128 _bli_math_fastpow512(const __m128 arg)
*/
__m128 xf = _bli_math_fastpow(0x3f2aaaab, 0x5eb504f3, arg);
__m128 xover = _mm_mul_ps(arg, xf);
- __m128 xfm1 = _mm_rsqrt_ps(xf);
+ __m128 xfm1 = _bli_math_rsqrt(xf);
__m128 x2 = _mm_mul_ps(arg, arg);
__m128 xunder = _mm_mul_ps(x2, xfm1);
/* sqrt2 * over + 2 * sqrt2 * under */
__m128 xavg = _mm_mul_ps(_mm_set1_ps(1.0f / (3.0f * 0.629960524947437f) * 0.999852f),
_mm_add_ps(xover, xunder));
- xavg = _mm_mul_ps(xavg, _mm_rsqrt_ps(xavg));
- xavg = _mm_mul_ps(xavg, _mm_rsqrt_ps(xavg));
+ xavg = _mm_mul_ps(xavg, _bli_math_rsqrt(xavg));
+ xavg = _mm_mul_ps(xavg, _bli_math_rsqrt(xavg));
return xavg;
}
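Note on the math_base_inline.c hunks: _mm_rsqrt_ps only returns a ~12-bit reciprocal-square-root approximation, so the new _bli_math_rsqrt() helper refines it with one Newton-Raphson step, r' = 1.5*r - 0.5*x*r^3, which roughly doubles the number of correct bits; the step is skipped when SSE is emulated via sse2neon because that emulation already performs it. A scalar model of the iteration (illustrative; rsqrt_refine is not a function from the patch):

#include <cmath>
#include <cstdio>

/* One Newton-Raphson step for f(r) = 1/r^2 - x, whose positive root is
 * 1/sqrt(x):  r' = r * (1.5 - 0.5*x*r*r) = 1.5*r - 0.5*x*r^3. */
static float rsqrt_refine(const float x, const float r)
{
  return 1.5f * r - 0.5f * x * r * r * r;
}

int main()
{
  const float x = 2.0f;
  float r = 0.70f; /* Crude estimate of 1/sqrt(2), like _mm_rsqrt_ps gives. */
  r = rsqrt_refine(x, r);
  std::printf("refined %.7f vs exact %.7f\n", r, 1.0f / std::sqrt(x));
  return 0;
}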
diff --git a/source/blender/blenlib/intern/math_solvers.c b/source/blender/blenlib/intern/math_solvers.c
index 2352d687061..b5650410a70 100644
--- a/source/blender/blenlib/intern/math_solvers.c
+++ b/source/blender/blenlib/intern/math_solvers.c
@@ -99,8 +99,8 @@ bool BLI_tridiagonal_solve_cyclic(
/* Degenerate case that works but can be simplified. */
if (count == 2) {
- float a2[2] = {0, a[1] + c[1]};
- float c2[2] = {a[0] + c[0], 0};
+ const float a2[2] = {0, a[1] + c[1]};
+ const float c2[2] = {a[0] + c[0], 0};
return BLI_tridiagonal_solve(a2, b, c2, d, r_x, count);
}
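Note on the math_solvers.c hunk: only const is added; the values are unchanged. For context, the count == 2 shortcut works because each unknown's cyclic "previous" and "next" neighbor are the same element. With rows a[i]*x[i-1] + b[i]*x[i] + c[i]*x[i+1] = d[i] and wrapping indices, the corner terms fold into the off-diagonals:

  row 0:  b[0]*x[0] + (c[0] + a[0])*x[1] = d[0]   ->  c2[0] = a[0] + c[0]
  row 1:  (a[1] + c[1])*x[0] + b[1]*x[1] = d[1]   ->  a2[1] = a[1] + c[1]

which is exactly the ordinary tridiagonal system handed to BLI_tridiagonal_solve().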