diff options
Diffstat (limited to 'source/blender/blenlib')
95 files changed, 7721 insertions, 2417 deletions
diff --git a/source/blender/blenlib/BLI_allocator.hh b/source/blender/blenlib/BLI_allocator.hh index d57703f71bc..47d8156476f 100644 --- a/source/blender/blenlib/BLI_allocator.hh +++ b/source/blender/blenlib/BLI_allocator.hh @@ -84,8 +84,8 @@ class RawAllocator { void *ptr = malloc(size + alignment + sizeof(MemHead)); void *used_ptr = (void *)((uintptr_t)POINTER_OFFSET(ptr, alignment + sizeof(MemHead)) & ~((uintptr_t)alignment - 1)); - uint offset = (uint)((uintptr_t)used_ptr - (uintptr_t)ptr); - BLI_assert(offset >= sizeof(MemHead)); + int offset = (int)((intptr_t)used_ptr - (intptr_t)ptr); + BLI_assert(offset >= (int)sizeof(MemHead)); ((MemHead *)used_ptr - 1)->offset = (int)offset; return used_ptr; } diff --git a/source/blender/blenlib/BLI_array.hh b/source/blender/blenlib/BLI_array.hh index b929d1220da..c7b4bdc977f 100644 --- a/source/blender/blenlib/BLI_array.hh +++ b/source/blender/blenlib/BLI_array.hh @@ -13,6 +13,7 @@ * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + #ifndef __BLI_ARRAY_HH__ #define __BLI_ARRAY_HH__ @@ -27,8 +28,7 @@ * blender::Array should usually be used instead of blender::Vector whenever the number of elements * is known at construction time. Note however, that blender::Array will default construct all * elements when initialized with the size-constructor. For trivial types, this does nothing. In - * all other cases, this adds overhead. If this becomes a problem, a different constructor which - * does not do default construction can be added. + * all other cases, this adds overhead. * * A main benefit of using Array over Vector is that it expresses the intent of the developer * better. It indicates that the size of the data structure is not expected to change. Furthermore, @@ -53,11 +53,8 @@ template< typename T, /** * The number of values that can be stored in the array, without doing a heap allocation. 
- * - * When T is large, the small buffer optimization is disabled by default to avoid large - * unexpected allocations on the stack. It can still be enabled explicitely though. */ - uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0, + int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(T)), /** * The allocator used by this array. Should rarely be changed, except when you don't want that * MEM_* functions are used internally. @@ -66,16 +63,16 @@ template< class Array { private: /** The beginning of the array. It might point into the inline buffer. */ - T *m_data; + T *data_; /** Number of elements in the array. */ - uint m_size; + int64_t size_; /** Used for allocations when the inline buffer is too small. */ - Allocator m_allocator; + Allocator allocator_; /** A placeholder buffer that will remain uninitialized until it is used. */ - AlignedBuffer<sizeof(T) * InlineBufferCapacity, alignof(T)> m_inline_buffer; + TypedBuffer<T, InlineBufferCapacity> inline_buffer_; public: /** @@ -83,23 +80,29 @@ class Array { */ Array() { - m_data = this->inline_buffer(); - m_size = 0; + data_ = inline_buffer_; + size_ = 0; } /** * Create a new array that contains copies of all values. */ - Array(Span<T> values) + template<typename U, typename std::enable_if_t<std::is_convertible_v<U, T>> * = nullptr> + Array(Span<U> values, Allocator allocator = {}) : allocator_(allocator) { - m_size = values.size(); - m_data = this->get_buffer_for_size(values.size()); - uninitialized_copy_n(values.data(), m_size, m_data); + size_ = values.size(); + data_ = this->get_buffer_for_size(values.size()); + uninitialized_convert_n<U, T>(values.data(), size_, data_); } /** * Create a new array that contains copies of all values. 
*/ + template<typename U, typename std::enable_if_t<std::is_convertible_v<U, T>> * = nullptr> + Array(const std::initializer_list<U> &values) : Array(Span<U>(values)) + { + } + Array(const std::initializer_list<T> &values) : Array(Span<T>(values)) { } @@ -112,53 +115,69 @@ class Array { * even for non-trivial types. This should not be the default though, because one can easily mess * up when dealing with uninitialized memory. */ - explicit Array(uint size) + explicit Array(int64_t size) { - m_size = size; - m_data = this->get_buffer_for_size(size); - default_construct_n(m_data, size); + size_ = size; + data_ = this->get_buffer_for_size(size); + default_construct_n(data_, size); } /** * Create a new array with the given size. All values will be initialized by copying the given * default. */ - Array(uint size, const T &value) + Array(int64_t size, const T &value) { - m_size = size; - m_data = this->get_buffer_for_size(size); - uninitialized_fill_n(m_data, m_size, value); + BLI_assert(size >= 0); + size_ = size; + data_ = this->get_buffer_for_size(size); + uninitialized_fill_n(data_, size_, value); } - Array(const Array &other) : m_allocator(other.m_allocator) + /** + * Create a new array with uninitialized elements. The caller is responsible for constructing the + * elements. Moving, copying or destructing an Array with uninitialized elements invokes + * undefined behavior. + * + * This should be used very rarely. Note, that the normal size-constructor also does not + * initialize the elements when T is trivially constructible. Therefore, it only makes sense to + * use this with non trivially constructible types. 
+ * + * Usage: + * Array<std::string> my_strings(10, NoInitialization()); + */ + Array(int64_t size, NoInitialization) { - m_size = other.size(); + BLI_assert(size >= 0); + size_ = size; + data_ = this->get_buffer_for_size(size); + } - m_data = this->get_buffer_for_size(other.size()); - uninitialized_copy_n(other.data(), m_size, m_data); + Array(const Array &other) : Array(other.as_span(), other.allocator_) + { } - Array(Array &&other) noexcept : m_allocator(other.m_allocator) + Array(Array &&other) noexcept : allocator_(other.allocator_) { - m_size = other.m_size; + size_ = other.size_; if (!other.uses_inline_buffer()) { - m_data = other.m_data; + data_ = other.data_; } else { - m_data = this->get_buffer_for_size(m_size); - uninitialized_relocate_n(other.m_data, m_size, m_data); + data_ = this->get_buffer_for_size(size_); + uninitialized_relocate_n(other.data_, size_, data_); } - other.m_data = other.inline_buffer(); - other.m_size = 0; + other.data_ = other.inline_buffer_; + other.size_ = 0; } ~Array() { - destruct_n(m_data, m_size); + destruct_n(data_, size_); if (!this->uses_inline_buffer()) { - m_allocator.deallocate((void *)m_data); + allocator_.deallocate((void *)data_); } } @@ -184,44 +203,58 @@ class Array { return *this; } + T &operator[](int64_t index) + { + BLI_assert(index >= 0); + BLI_assert(index < size_); + return data_[index]; + } + + const T &operator[](int64_t index) const + { + BLI_assert(index >= 0); + BLI_assert(index < size_); + return data_[index]; + } + operator Span<T>() const { - return Span<T>(m_data, m_size); + return Span<T>(data_, size_); } operator MutableSpan<T>() { - return MutableSpan<T>(m_data, m_size); + return MutableSpan<T>(data_, size_); } - Span<T> as_span() const + template<typename U, typename std::enable_if_t<is_convertible_pointer_v<T, U>> * = nullptr> + operator Span<U>() const { - return *this; + return Span<U>(data_, size_); } - MutableSpan<T> as_mutable_span() + template<typename U, typename 
std::enable_if_t<is_convertible_pointer_v<T, U>> * = nullptr> + operator MutableSpan<U>() { - return *this; + return MutableSpan<U>(data_, size_); } - T &operator[](uint index) + Span<T> as_span() const { - BLI_assert(index < m_size); - return m_data[index]; + return *this; } - const T &operator[](uint index) const + MutableSpan<T> as_mutable_span() { - BLI_assert(index < m_size); - return m_data[index]; + return *this; } /** * Returns the number of elements in the array. */ - uint size() const + int64_t size() const { - return m_size; + return size_; } /** @@ -229,23 +262,15 @@ class Array { */ bool is_empty() const { - return m_size == 0; + return size_ == 0; } /** - * Copies the value to all indices in the array. + * Copies the given value to every element in the array. */ - void fill(const T &value) + void fill(const T &value) const { - initialized_fill_n(m_data, m_size, value); - } - - /** - * Copies the value to the given indices in the array. - */ - void fill_indices(Span<uint> indices, const T &value) - { - MutableSpan<T>(*this).fill_indices(indices, value); + initialized_fill_n(data_, size_, value); } /** @@ -253,31 +278,31 @@ class Array { */ const T *data() const { - return m_data; + return data_; } T *data() { - return m_data; + return data_; } const T *begin() const { - return m_data; + return data_; } const T *end() const { - return m_data + m_size; + return data_ + size_; } T *begin() { - return m_data; + return data_; } T *end() { - return m_data + m_size; + return data_ + size_; } /** @@ -285,7 +310,7 @@ class Array { */ IndexRange index_range() const { - return IndexRange(m_size); + return IndexRange(size_); } /** @@ -294,7 +319,7 @@ class Array { */ void clear_without_destruct() { - m_size = 0; + size_ = 0; } /** @@ -302,45 +327,47 @@ class Array { */ Allocator &allocator() { - return m_allocator; + return allocator_; } /** * Get the value of the InlineBufferCapacity template argument. 
This is the number of elements * that can be stored without doing an allocation. */ - static uint inline_buffer_capacity() + static int64_t inline_buffer_capacity() { return InlineBufferCapacity; } private: - T *get_buffer_for_size(uint size) + T *get_buffer_for_size(int64_t size) { if (size <= InlineBufferCapacity) { - return this->inline_buffer(); + return inline_buffer_; } else { return this->allocate(size); } } - T *inline_buffer() const - { - return (T *)m_inline_buffer.ptr(); - } - - T *allocate(uint size) + T *allocate(int64_t size) { - return (T *)m_allocator.allocate(size * sizeof(T), alignof(T), AT); + return (T *)allocator_.allocate((size_t)size * sizeof(T), alignof(T), AT); } bool uses_inline_buffer() const { - return m_data == this->inline_buffer(); + return data_ == inline_buffer_; } }; +/** + * Same as a normal Array, but does not use Blender's guarded allocator. This is useful when + * allocating memory with static storage duration. + */ +template<typename T, int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(T))> +using RawArray = Array<T, InlineBufferCapacity, RawAllocator>; + } // namespace blender #endif /* __BLI_ARRAY_HH__ */ diff --git a/source/blender/blenlib/BLI_blenlib.h b/source/blender/blenlib/BLI_blenlib.h index 6dd1abacf78..4ebef814337 100644 --- a/source/blender/blenlib/BLI_blenlib.h +++ b/source/blender/blenlib/BLI_blenlib.h @@ -28,7 +28,7 @@ * a call to a BLI function that is not prototyped here, please add a * prototype here. The library offers mathematical operations (mainly * vector and matrix calculus), an abstraction layer for file i/o, - * functions for calculating Perlin noise, scanfilling services for + * functions for calculating Perlin noise, scan-filling services for * triangles, and a system for guarded memory * allocation/deallocation. There is also a patch to make MS Windows * behave more or less Posix-compliant. 
diff --git a/source/blender/blenlib/BLI_color.hh b/source/blender/blenlib/BLI_color.hh index 432459c9998..72caa5b1118 100644 --- a/source/blender/blenlib/BLI_color.hh +++ b/source/blender/blenlib/BLI_color.hh @@ -28,6 +28,10 @@ struct Color4f { Color4f() = default; + Color4f(const float *rgba) : r(rgba[0]), g(rgba[1]), b(rgba[2]), a(rgba[3]) + { + } + Color4f(float r, float g, float b, float a) : r(r), g(g), b(b), a(a) { } @@ -47,6 +51,25 @@ struct Color4f { stream << "(" << c.r << ", " << c.g << ", " << c.b << ", " << c.a << ")"; return stream; } + + friend bool operator==(const Color4f &a, const Color4f &b) + { + return a.r == b.r && a.g == b.g && a.b == b.b && a.a == b.a; + } + + friend bool operator!=(const Color4f &a, const Color4f &b) + { + return !(a == b); + } + + uint64_t hash() const + { + uint64_t x1 = *(uint32_t *)&r; + uint64_t x2 = *(uint32_t *)&g; + uint64_t x3 = *(uint32_t *)&b; + uint64_t x4 = *(uint32_t *)&a; + return (x1 * 1283591) ^ (x2 * 850177) ^ (x3 * 735391) ^ (x4 * 442319); + } }; struct Color4b { @@ -85,6 +108,22 @@ struct Color4b { stream << "(" << c.r << ", " << c.g << ", " << c.b << ", " << c.a << ")"; return stream; } + + friend bool operator==(const Color4b &a, const Color4b &b) + { + return a.r == b.r && a.g == b.g && a.b == b.b && a.a == b.a; + } + + friend bool operator!=(const Color4b &a, const Color4b &b) + { + return !(a == b); + } + + uint64_t hash() const + { + return ((uint64_t)r * 1283591) ^ ((uint64_t)g * 850177) ^ ((uint64_t)b * 735391) ^ + ((uint64_t)a * 442319); + } }; } // namespace blender diff --git a/source/blender/blenlib/BLI_disjoint_set.hh b/source/blender/blenlib/BLI_disjoint_set.hh new file mode 100644 index 00000000000..e0580709a44 --- /dev/null +++ b/source/blender/blenlib/BLI_disjoint_set.hh @@ -0,0 +1,106 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + 
* of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __BLI_DISJOINT_SET_HH__ +#define __BLI_DISJOINT_SET_HH__ + +/** \file + * \ingroup bli + * + * This implements the disjoint set data structure with path compression and union by rank. + */ + +#include "BLI_array.hh" + +namespace blender { + +class DisjointSet { + private: + Array<int64_t> parents_; + Array<int64_t> ranks_; + + public: + /** + * Create a new disjoint set with the given size. Initially, every element is in a separate set. + */ + DisjointSet(int64_t size) : parents_(size), ranks_(size, 0) + { + BLI_assert(size >= 0); + for (int64_t i = 0; i < size; i++) { + parents_[i] = i; + } + } + + /** + * Join the sets containing elements x and y. Nothing happens when they have been in the same set + * before. + */ + void join(int64_t x, int64_t y) + { + int64_t root1 = this->find_root(x); + int64_t root2 = this->find_root(y); + + /* x and y are in the same set already. */ + if (root1 == root2) { + return; + } + + /* Implement union by rank heuristic. */ + if (ranks_[root1] < ranks_[root2]) { + std::swap(root1, root2); + } + parents_[root2] = root1; + + if (ranks_[root1] == ranks_[root2]) { + ranks_[root1]++; + } + } + + /** + * Return true when x and y are in the same set. + */ + bool in_same_set(int64_t x, int64_t y) + { + int64_t root1 = this->find_root(x); + int64_t root2 = this->find_root(y); + return root1 == root2; + } + + /** + * Find the element that represents the set containing x currently. 
+ */ + int64_t find_root(int64_t x) + { + /* Find root by following parents. */ + int64_t root = x; + while (parents_[root] != root) { + root = parents_[root]; + } + + /* Compress path. */ + while (parents_[x] != root) { + int64_t parent = parents_[x]; + parents_[x] = root; + x = parent; + } + + return root; + } +}; + +} // namespace blender + +#endif /* __BLI_DISJOINT_SET_HH__ */ diff --git a/source/blender/blenlib/BLI_dot_export.hh b/source/blender/blenlib/BLI_dot_export.hh index 67af4391a55..0870d8c4c30 100644 --- a/source/blender/blenlib/BLI_dot_export.hh +++ b/source/blender/blenlib/BLI_dot_export.hh @@ -25,17 +25,16 @@ */ #include "BLI_map.hh" -#include "BLI_optional.hh" #include "BLI_set.hh" #include "BLI_utility_mixins.hh" #include "BLI_vector.hh" #include "BLI_dot_export_attribute_enums.hh" +#include <optional> #include <sstream> -namespace blender { -namespace DotExport { +namespace blender::dot { class Graph; class DirectedGraph; @@ -49,25 +48,25 @@ class AttributeList; class AttributeList { private: - Map<std::string, std::string> m_attributes; + Map<std::string, std::string> attributes_; public: void export__as_bracket_list(std::stringstream &ss) const; void set(StringRef key, StringRef value) { - m_attributes.add_overwrite(key, value); + attributes_.add_overwrite(key, value); } }; class Graph { private: - AttributeList m_attributes; - Vector<std::unique_ptr<Node>> m_nodes; - Vector<std::unique_ptr<Cluster>> m_clusters; + AttributeList attributes_; + Vector<std::unique_ptr<Node>> nodes_; + Vector<std::unique_ptr<Cluster>> clusters_; - Set<Node *> m_top_level_nodes; - Set<Cluster *> m_top_level_clusters; + Set<Node *> top_level_nodes_; + Set<Cluster *> top_level_clusters_; friend Cluster; friend Node; @@ -80,7 +79,7 @@ class Graph { void set_attribute(StringRef key, StringRef value) { - m_attributes.set(key, value); + attributes_.set(key, value); } void set_rankdir(Attr_rankdir rankdir) @@ -93,16 +92,16 @@ class Graph { class Cluster { private: - 
AttributeList m_attributes; - Graph &m_graph; - Cluster *m_parent = nullptr; - Set<Cluster *> m_children; - Set<Node *> m_nodes; + AttributeList attributes_; + Graph &graph_; + Cluster *parent_ = nullptr; + Set<Cluster *> children_; + Set<Node *> nodes_; friend Graph; friend Node; - Cluster(Graph &graph) : m_graph(graph) + Cluster(Graph &graph) : graph_(graph) { } @@ -111,7 +110,7 @@ class Cluster { void set_attribute(StringRef key, StringRef value) { - m_attributes.set(key, value); + attributes_.set(key, value); } void set_parent_cluster(Cluster *cluster); @@ -125,25 +124,25 @@ class Cluster { class Node { private: - AttributeList m_attributes; - Graph &m_graph; - Cluster *m_cluster = nullptr; + AttributeList attributes_; + Graph &graph_; + Cluster *cluster_ = nullptr; friend Graph; - Node(Graph &graph) : m_graph(graph) + Node(Graph &graph) : graph_(graph) { } public: const AttributeList &attributes() const { - return m_attributes; + return attributes_; } AttributeList &attributes() { - return m_attributes; + return attributes_; } void set_parent_cluster(Cluster *cluster); @@ -154,7 +153,7 @@ class Node { void set_attribute(StringRef key, StringRef value) { - m_attributes.set(key, value); + attributes_.set(key, value); } void set_shape(Attr_shape shape) @@ -176,7 +175,7 @@ class Node { class UndirectedGraph final : public Graph { private: - Vector<std::unique_ptr<UndirectedEdge>> m_edges; + Vector<std::unique_ptr<UndirectedEdge>> edges_; public: std::string to_dot_string() const; @@ -186,7 +185,7 @@ class UndirectedGraph final : public Graph { class DirectedGraph final : public Graph { private: - Vector<std::unique_ptr<DirectedEdge>> m_edges; + Vector<std::unique_ptr<DirectedEdge>> edges_; public: std::string to_dot_string() const; @@ -196,12 +195,12 @@ class DirectedGraph final : public Graph { class NodePort { private: - Node *m_node; - Optional<std::string> m_port_name; + Node *node_; + std::optional<std::string> port_name_; public: - NodePort(Node &node, 
Optional<std::string> port_name = {}) - : m_node(&node), m_port_name(std::move(port_name)) + NodePort(Node &node, std::optional<std::string> port_name = {}) + : node_(&node), port_name_(std::move(port_name)) { } @@ -210,18 +209,18 @@ class NodePort { class Edge : blender::NonCopyable, blender::NonMovable { protected: - AttributeList m_attributes; - NodePort m_a; - NodePort m_b; + AttributeList attributes_; + NodePort a_; + NodePort b_; public: - Edge(NodePort a, NodePort b) : m_a(std::move(a)), m_b(std::move(b)) + Edge(NodePort a, NodePort b) : a_(std::move(a)), b_(std::move(b)) { } void set_attribute(StringRef key, StringRef value) { - m_attributes.set(key, value); + attributes_.set(key, value); } void set_arrowhead(Attr_arrowType type) @@ -262,7 +261,7 @@ std::string color_attr_from_hsv(float h, float s, float v); class NodeWithSocketsRef { private: - Node *m_node; + Node *node_; public: NodeWithSocketsRef(Node &node, @@ -270,20 +269,24 @@ class NodeWithSocketsRef { Span<std::string> input_names, Span<std::string> output_names); - NodePort input(uint index) const + Node &node() + { + return *node_; + } + + NodePort input(int index) const { std::string port = "\"in" + std::to_string(index) + "\""; - return NodePort(*m_node, port); + return NodePort(*node_, port); } - NodePort output(uint index) const + NodePort output(int index) const { std::string port = "\"out" + std::to_string(index) + "\""; - return NodePort(*m_node, port); + return NodePort(*node_, port); } }; -} // namespace DotExport -} // namespace blender +} // namespace blender::dot #endif /* __BLI_DOT_EXPORT_HH__ */ diff --git a/source/blender/blenlib/BLI_dot_export_attribute_enums.hh b/source/blender/blenlib/BLI_dot_export_attribute_enums.hh index 8fe1cda05f3..94c7025b2a6 100644 --- a/source/blender/blenlib/BLI_dot_export_attribute_enums.hh +++ b/source/blender/blenlib/BLI_dot_export_attribute_enums.hh @@ -19,8 +19,7 @@ #include "BLI_string_ref.hh" -namespace blender { -namespace DotExport { +namespace 
blender ::dot { enum class Attr_rankdir { LeftToRight, @@ -119,7 +118,6 @@ inline StringRef dirType_to_string(Attr_dirType value) return ""; } -} // namespace DotExport -} // namespace blender +} // namespace blender::dot #endif /* __BLI_DOT_EXPORT_ATTRIBUTE_ENUMS_HH__ */ diff --git a/source/blender/blenlib/BLI_float2.hh b/source/blender/blenlib/BLI_float2.hh index 94da5d18ad2..5fe9d1b8ca9 100644 --- a/source/blender/blenlib/BLI_float2.hh +++ b/source/blender/blenlib/BLI_float2.hh @@ -48,6 +48,34 @@ struct float2 { return &x; } + float2 &operator+=(const float2 &other) + { + x += other.x; + y += other.y; + return *this; + } + + float2 &operator-=(const float2 &other) + { + x -= other.x; + y -= other.y; + return *this; + } + + float2 &operator*=(float factor) + { + x *= factor; + y *= factor; + return *this; + } + + float2 &operator/=(float divisor) + { + x /= divisor; + y /= divisor; + return *this; + } + friend float2 operator+(const float2 &a, const float2 &b) { return {a.x + b.x, a.y + b.y}; @@ -79,6 +107,16 @@ struct float2 { stream << "(" << v.x << ", " << v.y << ")"; return stream; } + + friend bool operator==(const float2 &a, const float2 &b) + { + return a.x == b.x && a.y == b.y; + } + + friend bool operator!=(const float2 &a, const float2 &b) + { + return !(a == b); + } }; } // namespace blender diff --git a/source/blender/blenlib/BLI_float3.hh b/source/blender/blenlib/BLI_float3.hh index 7ef4f1b4973..a0979bc75bd 100644 --- a/source/blender/blenlib/BLI_float3.hh +++ b/source/blender/blenlib/BLI_float3.hh @@ -58,56 +58,6 @@ struct float3 { return &x; } - float normalize_and_get_length() - { - return normalize_v3(*this); - } - - float3 normalized() const - { - float3 result; - normalize_v3_v3(result, *this); - return result; - } - - float length() const - { - return len_v3(*this); - } - - float length_squared() const - { - return len_squared_v3(*this); - } - - void reflect(const float3 &normal) - { - *this = this->reflected(normal); - } - - float3 
reflected(const float3 &normal) const - { - float3 result; - reflect_v3_v3v3(result, *this, normal); - return result; - } - - static float3 safe_divide(const float3 &a, const float3 &b) - { - float3 result; - result.x = (b.x == 0.0f) ? 0.0f : a.x / b.x; - result.y = (b.y == 0.0f) ? 0.0f : a.y / b.y; - result.z = (b.z == 0.0f) ? 0.0f : a.z / b.z; - return result; - } - - void invert() - { - x = -x; - y = -y; - z = -z; - } - friend float3 operator+(const float3 &a, const float3 &b) { return {a.x + b.x, a.y + b.y, a.z + b.z}; @@ -178,6 +128,85 @@ struct float3 { return stream; } + friend bool operator==(const float3 &a, const float3 &b) + { + return a.x == b.x && a.y == b.y && a.z == b.z; + } + + friend bool operator!=(const float3 &a, const float3 &b) + { + return !(a == b); + } + + float normalize_and_get_length() + { + return normalize_v3(*this); + } + + /** + * Normalizes the vector in place. + */ + void normalize() + { + normalize_v3(*this); + } + + /** + * Returns a normalized vector. The original vector is not changed. + */ + float3 normalized() const + { + float3 result; + normalize_v3_v3(result, *this); + return result; + } + + float length() const + { + return len_v3(*this); + } + + float length_squared() const + { + return len_squared_v3(*this); + } + + void reflect(const float3 &normal) + { + *this = this->reflected(normal); + } + + float3 reflected(const float3 &normal) const + { + float3 result; + reflect_v3_v3v3(result, *this, normal); + return result; + } + + static float3 safe_divide(const float3 &a, const float3 &b) + { + float3 result; + result.x = (b.x == 0.0f) ? 0.0f : a.x / b.x; + result.y = (b.y == 0.0f) ? 0.0f : a.y / b.y; + result.z = (b.z == 0.0f) ? 
0.0f : a.z / b.z; + return result; + } + + void invert() + { + x = -x; + y = -y; + z = -z; + } + + uint64_t hash() const + { + uint64_t x1 = *(uint32_t *)&x; + uint64_t x2 = *(uint32_t *)&y; + uint64_t x3 = *(uint32_t *)&z; + return (x1 * 435109) ^ (x2 * 380867) ^ (x3 * 1059217); + } + static float dot(const float3 &a, const float3 &b) { return a.x * b.x + a.y * b.y + a.z * b.z; diff --git a/source/blender/blenlib/BLI_float4x4.hh b/source/blender/blenlib/BLI_float4x4.hh index 0abfb751ebf..b4f12f17cc2 100644 --- a/source/blender/blenlib/BLI_float4x4.hh +++ b/source/blender/blenlib/BLI_float4x4.hh @@ -46,23 +46,6 @@ struct float4x4 { return (const float *)this; } - float4x4 inverted() const - { - float result[4][4]; - invert_m4_m4(result, values); - return result; - } - - /** - * Matrix inversion can be implemented more efficiently for affine matrices. - */ - float4x4 inverted_affine() const - { - BLI_assert(values[0][3] == 0.0f && values[1][3] == 0.0f && values[2][3] == 0.0f && - values[3][3] == 1.0f); - return this->inverted(); - } - friend float4x4 operator*(const float4x4 &a, const float4x4 &b) { float4x4 result; @@ -86,6 +69,35 @@ struct float4x4 { return m * float3(v); } + float4x4 inverted() const + { + float4x4 result; + invert_m4_m4(result.values, values); + return result; + } + + /** + * Matrix inversion can be implemented more efficiently for affine matrices. 
+ */ + float4x4 inverted_affine() const + { + BLI_assert(values[0][3] == 0.0f && values[1][3] == 0.0f && values[2][3] == 0.0f && + values[3][3] == 1.0f); + return this->inverted(); + } + + float4x4 transposed() const + { + float4x4 result; + transpose_m4_m4(result.values, values); + return result; + } + + float4x4 inverted_transposed_affine() const + { + return this->inverted_affine().transposed(); + } + struct float3x3_ref { const float4x4 &data; @@ -108,6 +120,16 @@ struct float4x4 { interp_m4_m4m4(result, a.values, b.values, t); return result; } + + uint64_t hash() const + { + uint64_t h = 435109; + for (int i = 0; i < 16; i++) { + float value = ((const float *)this)[i]; + h = h * 33 + (*(uint32_t *)&value); + } + return h; + } }; } // namespace blender diff --git a/source/blender/blenlib/BLI_ghash.h b/source/blender/blenlib/BLI_ghash.h index 141c631381b..31a9658bd7e 100644 --- a/source/blender/blenlib/BLI_ghash.h +++ b/source/blender/blenlib/BLI_ghash.h @@ -371,20 +371,6 @@ unsigned int BLI_ghashutil_uinthash_v4_murmur(const unsigned int key[4]); bool BLI_ghashutil_uinthash_v4_cmp(const void *a, const void *b); #define BLI_ghashutil_inthash_v4_cmp BLI_ghashutil_uinthash_v4_cmp -unsigned int BLI_ghashutil_uinthash_v2(const unsigned int key[2]); -#define BLI_ghashutil_inthash_v2(key) \ - (CHECK_TYPE_ANY(key, int *, const int *), BLI_ghashutil_uinthash_v2((const unsigned int *)key)) -#define BLI_ghashutil_inthash_v2_p ((GSetHashFP)BLI_ghashutil_uinthash_v2) -#define BLI_ghashutil_uinthash_v2_p ((GSetHashFP)BLI_ghashutil_uinthash_v2) -unsigned int BLI_ghashutil_uinthash_v2_murmur(const unsigned int key[2]); -#define BLI_ghashutil_inthash_v2_murmur(key) \ - (CHECK_TYPE_ANY(key, int *, const int *), \ - BLI_ghashutil_uinthash_v2_murmur((const unsigned int *)key)) -#define BLI_ghashutil_inthash_v2_p_murmur ((GSetHashFP)BLI_ghashutil_uinthash_v2_murmur) -#define BLI_ghashutil_uinthash_v2_p_murmur ((GSetHashFP)BLI_ghashutil_uinthash_v2_murmur) -bool 
BLI_ghashutil_uinthash_v2_cmp(const void *a, const void *b); -#define BLI_ghashutil_inthash_v2_cmp BLI_ghashutil_uinthash_v2_cmp - typedef struct GHashPair { const void *first; const void *second; diff --git a/source/blender/blenlib/BLI_hash.hh b/source/blender/blenlib/BLI_hash.hh index 57d5f7f9d8a..b14a4ca933c 100644 --- a/source/blender/blenlib/BLI_hash.hh +++ b/source/blender/blenlib/BLI_hash.hh @@ -23,7 +23,7 @@ * A specialization of `blender::DefaultHash<T>` provides a hash function for values of type T. * This hash function is used by default in hash table implementations in blenlib. * - * The actual hash function is in the `operator()` method of DefaultHash<T>. The following code + * The actual hash function is in the `operator()` method of `DefaultHash<T>`. The following code * computes the hash of some value using DefaultHash. * * T value = ...; @@ -32,43 +32,43 @@ * * Hash table implementations like blender::Set support heterogeneous key lookups. That means that * one can do a lookup with a key of type A in a hash table that stores keys of type B. This is - * commonly done when B is std::string, because the conversion from e.g. a StringRef to std::string - * can be costly and is unnecessary. To make this work, values of type A and B that compare equal - * have to have the same hash value. This is achieved by defining potentially multiple `operator()` - * in a specialization of DefaultHash. All those methods have to compute the same hash for values - * that compare equal. + * commonly done when B is std::string, because the conversion from e.g. a #StringRef to + * std::string can be costly and is unnecessary. To make this work, values of type A and B that + * compare equal have to have the same hash value. This is achieved by defining potentially + * multiple `operator()` in a specialization of #DefaultHash. All those methods have to compute the + * same hash for values that compare equal. * - * The computed hash is an unsigned 32 bit integer. 
Ideally, the hash function would generate + * The computed hash is an unsigned 64 bit integer. Ideally, the hash function would generate * uniformly random hash values for a set of keys. However, in many cases trivial hash functions * are faster and produce a good enough distribution. In general it is better when more information * is in the lower bits of the hash. By choosing a good probing strategy, the effects of a bad hash - * function are less noticable though. In this context a good probing strategy is one that takes + * function are less noticeable though. In this context a good probing strategy is one that takes * all bits of the hash into account eventually. One has to check on a case by case basis to see if * a better but more expensive or trivial hash function works better. * * There are three main ways to provide a hash table implementation with a custom hash function. * * - When you want to provide a default hash function for your own custom type: Add a `hash` - * member function to it. The function should return `uint32_t` and take no arguments. This - * method will be called by the default implementation of DefaultHash. It will automatically be + * member function to it. The function should return `uint64_t` and take no arguments. This + * method will be called by the default implementation of #DefaultHash. It will automatically be * used by hash table implementations. * * - When you want to provide a default hash function for a type that you cannot modify: Add a new - * specialization to the DefaultHash struct. This can be done by writing code like below in + * specialization to the #DefaultHash struct. This can be done by writing code like below in * either global or BLI namespace. 
* * template<> struct blender::DefaultHash<TheType> { - * uint32_t operator()(const TheType &value) const { + * uint64_t operator()(const TheType &value) const { * return ...; * } * }; * * - When you want to provide a different hash function for a type that already has a default hash * function: Implement a struct like the one below and pass it as template parameter to the hash - * table explicitely. + * table explicitly. * * struct MyCustomHash { - * uint32_t operator()(const TheType &value) const { + * uint64_t operator()(const TheType &value) const { * return ...; * } * }; @@ -86,22 +86,32 @@ namespace blender { /** - * If there is no other specialization of DefaultHash for a given type, try to call `hash()` on the - * value. If there is no such method, this will result in a compiler error. Usually that means that - * you have to implement a hash function using one of three strategies listed above. + * If there is no other specialization of #DefaultHash for a given type, try to call `hash()` on + * the value. If there is no such method, this will result in a compiler error. Usually that means + * that you have to implement a hash function using one of three strategies listed above. */ template<typename T> struct DefaultHash { - uint32_t operator()(const T &value) const + uint64_t operator()(const T &value) const { return value.hash(); } }; +/** + * Use the same hash function for const and non const variants of a type. 
+ */ +template<typename T> struct DefaultHash<const T> { + uint64_t operator()(const T &value) const + { + return DefaultHash<T>{}(value); + } +}; + #define TRIVIAL_DEFAULT_INT_HASH(TYPE) \ template<> struct DefaultHash<TYPE> { \ - uint32_t operator()(TYPE value) const \ + uint64_t operator()(TYPE value) const \ { \ - return (uint32_t)value; \ + return (uint64_t)value; \ } \ } @@ -117,36 +127,29 @@ TRIVIAL_DEFAULT_INT_HASH(int16_t); TRIVIAL_DEFAULT_INT_HASH(uint16_t); TRIVIAL_DEFAULT_INT_HASH(int32_t); TRIVIAL_DEFAULT_INT_HASH(uint32_t); - -template<> struct DefaultHash<uint64_t> { - uint32_t operator()(uint64_t value) const - { - uint32_t low = (uint32_t)value; - uint32_t high = (uint32_t)(value >> 32); - return low ^ (high * 0x45d9f3b); - } -}; - -template<> struct DefaultHash<int64_t> { - uint32_t operator()(uint64_t value) const - { - return DefaultHash<uint64_t>{}((uint64_t)value); - } -}; +TRIVIAL_DEFAULT_INT_HASH(int64_t); +TRIVIAL_DEFAULT_INT_HASH(uint64_t); /** * One should try to avoid using floats as keys in hash tables, but sometimes it is convenient. */ template<> struct DefaultHash<float> { - uint32_t operator()(float value) const + uint64_t operator()(float value) const { return *(uint32_t *)&value; } }; -inline uint32_t hash_string(StringRef str) +template<> struct DefaultHash<bool> { + uint64_t operator()(bool value) const + { + return (uint64_t)(value != false) * 1298191; + } +}; + +inline uint64_t hash_string(StringRef str) { - uint32_t hash = 5381; + uint64_t hash = 5381; for (char c : str) { hash = hash * 33 + c; } @@ -155,24 +158,24 @@ inline uint32_t hash_string(StringRef str) template<> struct DefaultHash<std::string> { /** - * Take a StringRef as parameter to support heterogeneous lookups in hash table implementations + * Take a #StringRef as parameter to support heterogeneous lookups in hash table implementations * when std::string is used as key. 
*/ - uint32_t operator()(StringRef value) const + uint64_t operator()(StringRef value) const { return hash_string(value); } }; template<> struct DefaultHash<StringRef> { - uint32_t operator()(StringRef value) const + uint64_t operator()(StringRef value) const { return hash_string(value); } }; template<> struct DefaultHash<StringRefNull> { - uint32_t operator()(StringRef value) const + uint64_t operator()(StringRef value) const { return hash_string(value); } @@ -182,26 +185,26 @@ template<> struct DefaultHash<StringRefNull> { * While we cannot guarantee that the lower 4 bits of a pointer are zero, it is often the case. */ template<typename T> struct DefaultHash<T *> { - uint32_t operator()(const T *value) const + uint64_t operator()(const T *value) const { uintptr_t ptr = (uintptr_t)value; - uint32_t hash = (uint32_t)(ptr >> 4); + uint64_t hash = (uint64_t)(ptr >> 4); return hash; } }; template<typename T> struct DefaultHash<std::unique_ptr<T>> { - uint32_t operator()(const std::unique_ptr<T> &value) const + uint64_t operator()(const std::unique_ptr<T> &value) const { return DefaultHash<T *>{}(value.get()); } }; template<typename T1, typename T2> struct DefaultHash<std::pair<T1, T2>> { - uint32_t operator()(const std::pair<T1, T2> &value) const + uint64_t operator()(const std::pair<T1, T2> &value) const { - uint32_t hash1 = DefaultHash<T1>{}(value.first); - uint32_t hash2 = DefaultHash<T2>{}(value.second); + uint64_t hash1 = DefaultHash<T1>{}(value.first); + uint64_t hash2 = DefaultHash<T2>{}(value.second); return hash1 ^ (hash2 * 33); } }; diff --git a/source/blender/blenlib/BLI_hash_tables.hh b/source/blender/blenlib/BLI_hash_tables.hh index b565b396a7a..5d8f8862a09 100644 --- a/source/blender/blenlib/BLI_hash_tables.hh +++ b/source/blender/blenlib/BLI_hash_tables.hh @@ -30,6 +30,7 @@ #include "BLI_math_base.h" #include "BLI_memory_utils.hh" #include "BLI_string.h" +#include "BLI_string_ref.hh" #include "BLI_utildefines.h" #include "BLI_vector.hh" @@ -38,61 +39,67 
@@ namespace blender { /* -------------------------------------------------------------------- */ /** \name Constexpr Utility Functions * - * Those should eventually be deduplicated with functions in BLI_math_base.h. + * Those should eventually be de-duplicated with functions in BLI_math_base.h. * \{ */ -inline constexpr int is_power_of_2_i_constexpr(int n) +inline constexpr int64_t is_power_of_2_constexpr(const int64_t x) { - return (n & (n - 1)) == 0; + BLI_assert(x >= 0); + return (x & (x - 1)) == 0; } -inline constexpr uint32_t log2_floor_u_constexpr(uint32_t x) +inline constexpr int64_t log2_floor_constexpr(const int64_t x) { - return x <= 1 ? 0 : 1 + log2_floor_u_constexpr(x >> 1); + BLI_assert(x >= 0); + return x <= 1 ? 0 : 1 + log2_floor_constexpr(x >> 1); } -inline constexpr uint32_t log2_ceil_u_constexpr(uint32_t x) +inline constexpr int64_t log2_ceil_constexpr(const int64_t x) { - return (is_power_of_2_i_constexpr((int)x)) ? log2_floor_u_constexpr(x) : - log2_floor_u_constexpr(x) + 1; + BLI_assert(x >= 0); + return (is_power_of_2_constexpr((int)x)) ? 
log2_floor_constexpr(x) : log2_floor_constexpr(x) + 1; } -inline constexpr uint32_t power_of_2_max_u_constexpr(uint32_t x) +inline constexpr int64_t power_of_2_max_constexpr(const int64_t x) { - return 1u << log2_ceil_u_constexpr(x); + BLI_assert(x >= 0); + return 1ll << log2_ceil_constexpr(x); } -template<typename IntT> inline constexpr IntT ceil_division(IntT x, IntT y) +template<typename IntT> inline constexpr IntT ceil_division(const IntT x, const IntT y) { - BLI_STATIC_ASSERT(!std::is_signed<IntT>::value, ""); + BLI_assert(x >= 0); + BLI_assert(y >= 0); return x / y + ((x % y) != 0); } -template<typename IntT> inline constexpr IntT floor_division(IntT x, IntT y) +template<typename IntT> inline constexpr IntT floor_division(const IntT x, const IntT y) { - BLI_STATIC_ASSERT(!std::is_signed<IntT>::value, ""); + BLI_assert(x >= 0); + BLI_assert(y >= 0); return x / y; } -inline constexpr uint32_t ceil_division_by_fraction(uint32_t x, - uint32_t numerator, - uint32_t denominator) +inline constexpr int64_t ceil_division_by_fraction(const int64_t x, + const int64_t numerator, + const int64_t denominator) { - return (uint32_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator); + return (int64_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator); } -inline constexpr uint32_t floor_multiplication_with_fraction(uint32_t x, - uint32_t numerator, - uint32_t denominator) +inline constexpr int64_t floor_multiplication_with_fraction(const int64_t x, + const int64_t numerator, + const int64_t denominator) { - return (uint32_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator); + return (int64_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator); } -inline constexpr uint32_t total_slot_amount_for_usable_slots(uint32_t min_usable_slots, - uint32_t max_load_factor_numerator, - uint32_t max_load_factor_denominator) +inline constexpr int64_t total_slot_amount_for_usable_slots( + const int64_t min_usable_slots, + const 
int64_t max_load_factor_numerator, + const int64_t max_load_factor_denominator) { - return power_of_2_max_u_constexpr(ceil_division_by_fraction( + return power_of_2_max_constexpr(ceil_division_by_fraction( min_usable_slots, max_load_factor_numerator, max_load_factor_denominator)); } @@ -108,37 +115,37 @@ inline constexpr uint32_t total_slot_amount_for_usable_slots(uint32_t min_usable class LoadFactor { private: - uint8_t m_numerator; - uint8_t m_denominator; + uint8_t numerator_; + uint8_t denominator_; public: LoadFactor(uint8_t numerator, uint8_t denominator) - : m_numerator(numerator), m_denominator(denominator) + : numerator_(numerator), denominator_(denominator) { BLI_assert(numerator > 0); BLI_assert(numerator < denominator); } - void compute_total_and_usable_slots(uint32_t min_total_slots, - uint32_t min_usable_slots, - uint32_t *r_total_slots, - uint32_t *r_usable_slots) const + void compute_total_and_usable_slots(int64_t min_total_slots, + int64_t min_usable_slots, + int64_t *r_total_slots, + int64_t *r_usable_slots) const { BLI_assert(is_power_of_2_i((int)min_total_slots)); - uint32_t total_slots = this->compute_total_slots(min_usable_slots, m_numerator, m_denominator); + int64_t total_slots = this->compute_total_slots(min_usable_slots, numerator_, denominator_); total_slots = std::max(total_slots, min_total_slots); - uint32_t usable_slots = floor_multiplication_with_fraction( - total_slots, m_numerator, m_denominator); + const int64_t usable_slots = floor_multiplication_with_fraction( + total_slots, numerator_, denominator_); BLI_assert(min_usable_slots <= usable_slots); *r_total_slots = total_slots; *r_usable_slots = usable_slots; } - static constexpr uint32_t compute_total_slots(uint32_t min_usable_slots, - uint8_t numerator, - uint8_t denominator) + static constexpr int64_t compute_total_slots(int64_t min_usable_slots, + uint8_t numerator, + uint8_t denominator) { return total_slot_amount_for_usable_slots(min_usable_slots, numerator, denominator); } 
@@ -157,10 +164,10 @@ class LoadFactor { * two values of the key type are selected to indicate whether the slot is empty or removed. * * The classes below tell a slot implementation which special key values it can use. They can be - * used as KeyInfo in slot types like IntrusiveSetSlot and IntrusiveMapSlot. + * used as #KeyInfo in slot types like #IntrusiveSetSlot and #IntrusiveMapSlot. * - * A KeyInfo type has to implement a couple of static methods that are descriped in - * TemplatedKeyInfo. + * A #KeyInfo type has to implement a couple of static methods that are descried in + * #TemplatedKeyInfo. * * \{ */ @@ -260,72 +267,72 @@ template<typename Pointer> struct PointerKeyInfo { class HashTableStats { private: - Vector<uint32_t> m_keys_by_collision_count; - uint32_t m_total_collisions; - float m_average_collisions; - uint32_t m_size; - uint32_t m_capacity; - uint32_t m_removed_amount; - float m_load_factor; - float m_removed_load_factor; - uint32_t m_size_per_element; - uint32_t m_size_in_bytes; - const void *m_address; + Vector<int64_t> keys_by_collision_count_; + int64_t total_collisions_; + float average_collisions_; + int64_t size_; + int64_t capacity_; + int64_t removed_amount_; + float load_factor_; + float removed_load_factor_; + int64_t size_per_element_; + int64_t size_in_bytes_; + const void *address_; public: /** * Requires that the hash table has the following methods: - * - count_collisions(key) -> uint32_t - * - size() -> uint32_t - * - capacity() -> uint32_t - * - removed_amount() -> uint32_t - * - size_per_element() -> uint32_t - * - size_in_bytes() -> uint32_t + * - count_collisions(key) -> int64_t + * - size() -> int64_t + * - capacity() -> int64_t + * - removed_amount() -> int64_t + * - size_per_element() -> int64_t + * - size_in_bytes() -> int64_t */ template<typename HashTable, typename Keys> HashTableStats(const HashTable &hash_table, const Keys &keys) { - m_total_collisions = 0; - m_size = hash_table.size(); - m_capacity = 
hash_table.capacity(); - m_removed_amount = hash_table.removed_amount(); - m_size_per_element = hash_table.size_per_element(); - m_size_in_bytes = hash_table.size_in_bytes(); - m_address = (const void *)&hash_table; + total_collisions_ = 0; + size_ = hash_table.size(); + capacity_ = hash_table.capacity(); + removed_amount_ = hash_table.removed_amount(); + size_per_element_ = hash_table.size_per_element(); + size_in_bytes_ = hash_table.size_in_bytes(); + address_ = (const void *)&hash_table; for (const auto &key : keys) { - uint32_t collisions = hash_table.count_collisions(key); - if (m_keys_by_collision_count.size() <= collisions) { - m_keys_by_collision_count.append_n_times( - 0, collisions - m_keys_by_collision_count.size() + 1); + int64_t collisions = hash_table.count_collisions(key); + if (keys_by_collision_count_.size() <= collisions) { + keys_by_collision_count_.append_n_times(0, + collisions - keys_by_collision_count_.size() + 1); } - m_keys_by_collision_count[collisions]++; - m_total_collisions += collisions; + keys_by_collision_count_[collisions]++; + total_collisions_ += collisions; } - m_average_collisions = (m_size == 0) ? 0 : (float)m_total_collisions / (float)m_size; - m_load_factor = (float)m_size / (float)m_capacity; - m_removed_load_factor = (float)m_removed_amount / (float)m_capacity; + average_collisions_ = (size_ == 0) ? 
0 : (float)total_collisions_ / (float)size_; + load_factor_ = (float)size_ / (float)capacity_; + removed_load_factor_ = (float)removed_amount_ / (float)capacity_; } void print(StringRef name = "") { std::cout << "Hash Table Stats: " << name << "\n"; - std::cout << " Address: " << m_address << "\n"; - std::cout << " Total Slots: " << m_capacity << "\n"; - std::cout << " Occupied Slots: " << m_size << " (" << m_load_factor * 100.0f << " %)\n"; - std::cout << " Removed Slots: " << m_removed_amount << " (" << m_removed_load_factor * 100.0f + std::cout << " Address: " << address_ << "\n"; + std::cout << " Total Slots: " << capacity_ << "\n"; + std::cout << " Occupied Slots: " << size_ << " (" << load_factor_ * 100.0f << " %)\n"; + std::cout << " Removed Slots: " << removed_amount_ << " (" << removed_load_factor_ * 100.0f << " %)\n"; char memory_size_str[15]; - BLI_str_format_byte_unit(memory_size_str, m_size_in_bytes, true); + BLI_str_format_byte_unit(memory_size_str, size_in_bytes_, true); std::cout << " Size: ~" << memory_size_str << "\n"; - std::cout << " Size per Slot: " << m_size_per_element << " bytes\n"; + std::cout << " Size per Slot: " << size_per_element_ << " bytes\n"; - std::cout << " Average Collisions: " << m_average_collisions << "\n"; - for (uint32_t collision_count : m_keys_by_collision_count.index_range()) { + std::cout << " Average Collisions: " << average_collisions_ << "\n"; + for (int64_t collision_count : keys_by_collision_count_.index_range()) { std::cout << " " << collision_count - << " Collisions: " << m_keys_by_collision_count[collision_count] << "\n"; + << " Collisions: " << keys_by_collision_count_[collision_count] << "\n"; } } }; diff --git a/source/blender/blenlib/BLI_index_mask.hh b/source/blender/blenlib/BLI_index_mask.hh index cc1bf05f936..ff271faa0c2 100644 --- a/source/blender/blenlib/BLI_index_mask.hh +++ b/source/blender/blenlib/BLI_index_mask.hh @@ -46,7 +46,7 @@ namespace blender { class IndexMask { private: /* The underlying 
reference to sorted integers. */ - Span<uint> m_indices; + Span<int64_t> indices_; public: /* Creates an IndexMask that contains no indices. */ @@ -57,10 +57,10 @@ class IndexMask { * This constructor asserts that the given integers are in ascending order and that there are no * duplicates. */ - IndexMask(Span<uint> indices) : m_indices(indices) + IndexMask(Span<int64_t> indices) : indices_(indices) { #ifdef DEBUG - for (uint i = 1; i < indices.size(); i++) { + for (int64_t i = 1; i < indices.size(); i++) { BLI_assert(indices[i - 1] < indices[i]); } #endif @@ -70,7 +70,7 @@ class IndexMask { * Use this method when you know that no indices are skipped. It is more efficient than preparing * an integer array all the time. */ - IndexMask(IndexRange range) : m_indices(range.as_span()) + IndexMask(IndexRange range) : indices_(range.as_span()) { } @@ -84,58 +84,58 @@ class IndexMask { * Do this: * do_something_with_an_index_mask({3, 4, 5}); */ - IndexMask(const std::initializer_list<uint> &indices) : IndexMask(Span<uint>(indices)) + IndexMask(const std::initializer_list<int64_t> &indices) : IndexMask(Span<int64_t>(indices)) { } /** * Creates an IndexMask that references the indices [0, n-1]. */ - explicit IndexMask(uint n) : IndexMask(IndexRange(n)) + explicit IndexMask(int64_t n) : IndexMask(IndexRange(n)) { } - operator Span<uint>() const + operator Span<int64_t>() const { - return m_indices; + return indices_; } - const uint *begin() const + const int64_t *begin() const { - return m_indices.begin(); + return indices_.begin(); } - const uint *end() const + const int64_t *end() const { - return m_indices.end(); + return indices_.end(); } /** * Returns the n-th index referenced by this IndexMask. The `index_mask` method returns an * IndexRange containing all indices that can be used as parameter here. 
*/ - uint operator[](uint n) const + int64_t operator[](int64_t n) const { - return m_indices[n]; + return indices_[n]; } /** * Returns the minimum size an array has to have, if the integers in this IndexMask are going to * be used as indices in that array. */ - uint min_array_size() const + int64_t min_array_size() const { - if (m_indices.size() == 0) { + if (indices_.size() == 0) { return 0; } else { - return m_indices.last() + 1; + return indices_.last() + 1; } } - Span<uint> indices() const + Span<int64_t> indices() const { - return m_indices; + return indices_; } /** @@ -143,7 +143,7 @@ class IndexMask { */ bool is_range() const { - return m_indices.size() > 0 && m_indices.last() - m_indices.first() == m_indices.size() - 1; + return indices_.size() > 0 && indices_.last() - indices_.first() == indices_.size() - 1; } /** @@ -153,7 +153,7 @@ class IndexMask { IndexRange as_range() const { BLI_assert(this->is_range()); - return IndexRange{m_indices.first(), m_indices.size()}; + return IndexRange{indices_.first(), indices_.size()}; } /** @@ -167,12 +167,12 @@ class IndexMask { { if (this->is_range()) { IndexRange range = this->as_range(); - for (uint i : range) { + for (int64_t i : range) { callback(i); } } else { - for (uint i : m_indices) { + for (int64_t i : indices_) { callback(i); } } @@ -187,23 +187,23 @@ class IndexMask { */ IndexRange index_range() const { - return m_indices.index_range(); + return indices_.index_range(); } /** * Returns the largest index that is referenced by this IndexMask. */ - uint last() const + int64_t last() const { - return m_indices.last(); + return indices_.last(); } /** * Returns the number of indices referenced by this IndexMask. 
*/ - uint size() const + int64_t size() const { - return m_indices.size(); + return indices_.size(); } }; diff --git a/source/blender/blenlib/BLI_index_range.hh b/source/blender/blenlib/BLI_index_range.hh index 25192429a5d..7c813f58b2c 100644 --- a/source/blender/blenlib/BLI_index_range.hh +++ b/source/blender/blenlib/BLI_index_range.hh @@ -27,29 +27,29 @@ * I'd argue that the second loop is more readable and less error prone than the first one. That is * not necessarily always the case, but often it is. * - * for (uint i = 0; i < 10; i++) { - * for (uint j = 0; j < 20; j++) { - * for (uint k = 0; k < 30; k++) { + * for (int64_t i = 0; i < 10; i++) { + * for (int64_t j = 0; j < 20; j++) { + * for (int64_t k = 0; k < 30; k++) { * - * for (uint i : IndexRange(10)) { - * for (uint j : IndexRange(20)) { - * for (uint k : IndexRange(30)) { + * for (int64_t i : IndexRange(10)) { + * for (int64_t j : IndexRange(20)) { + * for (int64_t k : IndexRange(30)) { * * Some containers like blender::Vector have an index_range() method. This will return the * IndexRange that contains all indices that can be used to access the container. This is * particularly useful when you want to iterate over the indices and the elements (much like * Python's enumerate(), just worse). Again, I think the second example here is better: * - * for (uint i = 0; i < my_vector_with_a_long_name.size(); i++) { + * for (int64_t i = 0; i < my_vector_with_a_long_name.size(); i++) { * do_something(i, my_vector_with_a_long_name[i]); * - * for (uint i : my_vector_with_a_long_name.index_range()) { + * for (int64_t i : my_vector_with_a_long_name.index_range()) { * do_something(i, my_vector_with_a_long_name[i]); * * Ideally this could be could be even closer to Python's enumerate(). We might get that in the * future with newer C++ versions. * - * One other important feature is the as_span method. This method returns an Span<uint> + * One other important feature is the as_span method. 
This method returns an Span<int64_t> * that contains the interval as individual numbers. */ @@ -70,68 +70,72 @@ template<typename T> class Span; class IndexRange { private: - uint m_start = 0; - uint m_size = 0; + int64_t start_ = 0; + int64_t size_ = 0; public: IndexRange() = default; - explicit IndexRange(uint size) : m_start(0), m_size(size) + explicit IndexRange(int64_t size) : start_(0), size_(size) { + BLI_assert(size >= 0); } - IndexRange(uint start, uint size) : m_start(start), m_size(size) + IndexRange(int64_t start, int64_t size) : start_(start), size_(size) { + BLI_assert(start >= 0); + BLI_assert(size >= 0); } template<typename T> - IndexRange(const tbb::blocked_range<T> &range) : m_start(range.begin()), m_size(range.size()) + IndexRange(const tbb::blocked_range<T> &range) : start_(range.begin()), size_(range.size()) { } class Iterator { private: - uint m_current; + int64_t current_; public: - Iterator(uint current) : m_current(current) + Iterator(int64_t current) : current_(current) { } Iterator &operator++() { - m_current++; + current_++; return *this; } bool operator!=(const Iterator &iterator) const { - return m_current != iterator.m_current; + return current_ != iterator.current_; } - uint operator*() const + int64_t operator*() const { - return m_current; + return current_; } }; Iterator begin() const { - return Iterator(m_start); + return Iterator(start_); } Iterator end() const { - return Iterator(m_start + m_size); + return Iterator(start_ + size_); } /** * Access an element in the range. 
*/ - uint operator[](uint index) const + int64_t operator[](int64_t index) const { + BLI_assert(index >= 0); BLI_assert(index < this->size()); - return m_start + index; + return start_ + index; } /** @@ -139,84 +143,88 @@ class IndexRange { */ friend bool operator==(IndexRange a, IndexRange b) { - return (a.m_size == b.m_size) && (a.m_start == b.m_start || a.m_size == 0); + return (a.size_ == b.size_) && (a.start_ == b.start_ || a.size_ == 0); } /** * Get the amount of numbers in the range. */ - uint size() const + int64_t size() const { - return m_size; + return size_; } /** * Create a new range starting at the end of the current one. */ - IndexRange after(uint n) const + IndexRange after(int64_t n) const { - return IndexRange(m_start + m_size, n); + BLI_assert(n >= 0); + return IndexRange(start_ + size_, n); } /** * Create a new range that ends at the start of the current one. */ - IndexRange before(uint n) const + IndexRange before(int64_t n) const { - return IndexRange(m_start - n, n); + BLI_assert(n >= 0); + return IndexRange(start_ - n, n); } /** * Get the first element in the range. * Asserts when the range is empty. */ - uint first() const + int64_t first() const { BLI_assert(this->size() > 0); - return m_start; + return start_; } /** * Get the last element in the range. * Asserts when the range is empty. */ - uint last() const + int64_t last() const { BLI_assert(this->size() > 0); - return m_start + m_size - 1; + return start_ + size_ - 1; } /** * Get the element one after the end. The returned value is undefined when the range is empty. */ - uint one_after_last() const + int64_t one_after_last() const { - return m_start + m_size; + return start_ + size_; } /** * Get the first element in the range. The returned value is undefined when the range is empty. */ - uint start() const + int64_t start() const { - return m_start; + return start_; } /** * Returns true when the range contains a certain number, otherwise false. 
*/ - bool contains(uint value) const + bool contains(int64_t value) const { - return value >= m_start && value < m_start + m_size; + return value >= start_ && value < start_ + size_; } /** - * Returns a new range, that contains a subinterval of the current one. + * Returns a new range, that contains a sub-interval of the current one. */ - IndexRange slice(uint start, uint size) const + IndexRange slice(int64_t start, int64_t size) const { - uint new_start = m_start + start; - BLI_assert(new_start + size <= m_start + m_size || size == 0); + BLI_assert(start >= 0); + BLI_assert(size >= 0); + int64_t new_start = start_ + start; + BLI_assert(new_start + size <= start_ + size_ || size == 0); return IndexRange(new_start, size); } IndexRange slice(IndexRange range) const @@ -227,7 +235,7 @@ class IndexRange { /** * Get read-only access to a memory buffer that contains the range as actual numbers. */ - Span<uint> as_span() const; + Span<int64_t> as_span() const; friend std::ostream &operator<<(std::ostream &stream, IndexRange range) { diff --git a/source/blender/blenlib/BLI_kdopbvh.h b/source/blender/blenlib/BLI_kdopbvh.h index 70fa633eeac..9e4e30181b9 100644 --- a/source/blender/blenlib/BLI_kdopbvh.h +++ b/source/blender/blenlib/BLI_kdopbvh.h @@ -174,6 +174,8 @@ BVHTreeOverlap *BLI_bvhtree_overlap(const BVHTree *tree1, BVHTree_OverlapCallback callback, void *userdata); +int *BLI_bvhtree_intersect_plane(BVHTree *tree, float plane[4], uint *r_intersect_tot); + int BLI_bvhtree_get_len(const BVHTree *tree); int BLI_bvhtree_get_tree_type(const BVHTree *tree); float BLI_bvhtree_get_epsilon(const BVHTree *tree); diff --git a/source/blender/blenlib/BLI_linear_allocator.hh b/source/blender/blenlib/BLI_linear_allocator.hh index f968f9f15ce..39a3ed27f42 100644 --- a/source/blender/blenlib/BLI_linear_allocator.hh +++ b/source/blender/blenlib/BLI_linear_allocator.hh @@ -33,30 +33,30 @@ namespace blender { template<typename Allocator = GuardedAllocator> class LinearAllocator : 
NonCopyable, NonMovable { private: - Allocator m_allocator; - Vector<void *> m_owned_buffers; - Vector<Span<char>> m_unused_borrowed_buffers; + Allocator allocator_; + Vector<void *> owned_buffers_; + Vector<Span<char>> unused_borrowed_buffers_; - uintptr_t m_current_begin; - uintptr_t m_current_end; - uint m_next_min_alloc_size; + uintptr_t current_begin_; + uintptr_t current_end_; + int64_t next_min_alloc_size_; #ifdef DEBUG - uint m_debug_allocated_amount = 0; + int64_t debug_allocated_amount_ = 0; #endif public: LinearAllocator() { - m_current_begin = 0; - m_current_end = 0; - m_next_min_alloc_size = 64; + current_begin_ = 0; + current_end_ = 0; + next_min_alloc_size_ = 64; } ~LinearAllocator() { - for (void *ptr : m_owned_buffers) { - m_allocator.deallocate(ptr); + for (void *ptr : owned_buffers_) { + allocator_.deallocate(ptr); } } @@ -66,21 +66,23 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya * * The alignment has to be a power of 2. */ - void *allocate(uint size, uint alignment) + void *allocate(const int64_t size, const int64_t alignment) { + BLI_assert(size >= 0); BLI_assert(alignment >= 1); BLI_assert(is_power_of_2_i(alignment)); #ifdef DEBUG - m_debug_allocated_amount += size; + debug_allocated_amount_ += size; #endif - uintptr_t alignment_mask = alignment - 1; - uintptr_t potential_allocation_begin = (m_current_begin + alignment_mask) & ~alignment_mask; - uintptr_t potential_allocation_end = potential_allocation_begin + size; + const uintptr_t alignment_mask = alignment - 1; + const uintptr_t potential_allocation_begin = (current_begin_ + alignment_mask) & + ~alignment_mask; + const uintptr_t potential_allocation_end = potential_allocation_begin + size; - if (potential_allocation_end <= m_current_end) { - m_current_begin = potential_allocation_end; + if (potential_allocation_end <= current_end_) { + current_begin_ = potential_allocation_end; return (void *)potential_allocation_begin; } else { @@ -104,7 +106,7 @@ 
template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya * * This method only allocates memory and does not construct the instance. */ - template<typename T> MutableSpan<T> allocate_array(uint size) + template<typename T> MutableSpan<T> allocate_array(int64_t size) { return MutableSpan<T>((T *)this->allocate(sizeof(T) * size, alignof(T)), size); } @@ -140,22 +142,22 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya */ StringRefNull copy_string(StringRef str) { - uint alloc_size = str.size() + 1; + const int64_t alloc_size = str.size() + 1; char *buffer = (char *)this->allocate(alloc_size, 1); str.copy(buffer, alloc_size); return StringRefNull((const char *)buffer); } - MutableSpan<void *> allocate_elements_and_pointer_array(uint element_amount, - uint element_size, - uint element_alignment) + MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount, + int64_t element_size, + int64_t element_alignment) { void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *)); void *elements_buffer = this->allocate(element_amount * element_size, element_alignment); MutableSpan<void *> pointers((void **)pointer_buffer, element_amount); void *next_element_buffer = elements_buffer; - for (uint i : IndexRange(element_amount)) { + for (int64_t i : IndexRange(element_amount)) { pointers[i] = next_element_buffer; next_element_buffer = POINTER_OFFSET(next_element_buffer, element_size); } @@ -164,14 +166,14 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya } template<typename T, typename... Args> - Span<T *> construct_elements_and_pointer_array(uint n, Args &&... args) + Span<T *> construct_elements_and_pointer_array(int64_t n, Args &&... 
args) { MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array( n, sizeof(T), alignof(T)); MutableSpan<T *> pointers = void_pointers.cast<T *>(); - for (uint i : IndexRange(n)) { - new (pointers[i]) T(std::forward<Args>(args)...); + for (int64_t i : IndexRange(n)) { + new ((void *)pointers[i]) T(std::forward<Args>(args)...); } return pointers; @@ -183,7 +185,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya */ void provide_buffer(void *buffer, uint size) { - m_unused_borrowed_buffers.append(Span<char>((char *)buffer, size)); + unused_borrowed_buffers_.append(Span<char>((char *)buffer, size)); } template<size_t Size, size_t Alignment> @@ -193,25 +195,26 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya } private: - void allocate_new_buffer(uint min_allocation_size) + void allocate_new_buffer(int64_t min_allocation_size) { - for (uint i : m_unused_borrowed_buffers.index_range()) { - Span<char> buffer = m_unused_borrowed_buffers[i]; + for (int64_t i : unused_borrowed_buffers_.index_range()) { + Span<char> buffer = unused_borrowed_buffers_[i]; if (buffer.size() >= min_allocation_size) { - m_unused_borrowed_buffers.remove_and_reorder(i); - m_current_begin = (uintptr_t)buffer.begin(); - m_current_end = (uintptr_t)buffer.end(); + unused_borrowed_buffers_.remove_and_reorder(i); + current_begin_ = (uintptr_t)buffer.begin(); + current_end_ = (uintptr_t)buffer.end(); return; } } - uint size_in_bytes = power_of_2_min_u(std::max(min_allocation_size, m_next_min_alloc_size)); - m_next_min_alloc_size = size_in_bytes * 2; + const int64_t size_in_bytes = power_of_2_min_u( + std::max(min_allocation_size, next_min_alloc_size_)); + next_min_alloc_size_ = size_in_bytes * 2; - void *buffer = m_allocator.allocate(size_in_bytes, 8, AT); - m_owned_buffers.append(buffer); - m_current_begin = (uintptr_t)buffer; - m_current_end = m_current_begin + size_in_bytes; + void *buffer = 
allocator_.allocate(size_in_bytes, 8, AT); + owned_buffers_.append(buffer); + current_begin_ = (uintptr_t)buffer; + current_end_ = current_begin_ + size_in_bytes; } }; diff --git a/source/blender/blenlib/BLI_linklist.h b/source/blender/blenlib/BLI_linklist.h index 06796d6592a..324da859af1 100644 --- a/source/blender/blenlib/BLI_linklist.h +++ b/source/blender/blenlib/BLI_linklist.h @@ -55,6 +55,7 @@ int BLI_linklist_count(const LinkNode *list) ATTR_WARN_UNUSED_RESULT; int BLI_linklist_index(const LinkNode *list, void *ptr) ATTR_WARN_UNUSED_RESULT; LinkNode *BLI_linklist_find(LinkNode *list, int index) ATTR_WARN_UNUSED_RESULT; +LinkNode *BLI_linklist_find_last(LinkNode *list) ATTR_WARN_UNUSED_RESULT; void BLI_linklist_reverse(LinkNode **listp) ATTR_NONNULL(1); diff --git a/source/blender/blenlib/BLI_listbase_wrapper.hh b/source/blender/blenlib/BLI_listbase_wrapper.hh index a77e2d66458..46f4a9d49fa 100644 --- a/source/blender/blenlib/BLI_listbase_wrapper.hh +++ b/source/blender/blenlib/BLI_listbase_wrapper.hh @@ -20,10 +20,10 @@ /** \file * \ingroup bli * - * `blender::ListBaseWrapper` is a typed wrapper for the ListBase struct. That makes it safer and + * `blender::ListBaseWrapper` is a typed wrapper for the #ListBase struct. That makes it safer and * more convenient to use in C++ in some cases. However, if you find yourself iterating over a * linked list a lot, consider to convert it into a vector for further processing. This improves - * performance and debugability. + * performance and debug-ability. 
*/ #include "BLI_listbase.h" @@ -33,10 +33,10 @@ namespace blender { template<typename T> class ListBaseWrapper { private: - ListBase *m_listbase; + ListBase *listbase_; public: - ListBaseWrapper(ListBase *listbase) : m_listbase(listbase) + ListBaseWrapper(ListBase *listbase) : listbase_(listbase) { BLI_assert(listbase); } @@ -47,17 +47,17 @@ template<typename T> class ListBaseWrapper { class Iterator { private: - ListBase *m_listbase; - T *m_current; + ListBase *listbase_; + T *current_; public: - Iterator(ListBase *listbase, T *current) : m_listbase(listbase), m_current(current) + Iterator(ListBase *listbase, T *current) : listbase_(listbase), current_(current) { } Iterator &operator++() { - m_current = m_current->next; + current_ = current_->next; return *this; } @@ -70,35 +70,35 @@ template<typename T> class ListBaseWrapper { bool operator!=(const Iterator &iterator) const { - return m_current != iterator.m_current; + return current_ != iterator.current_; } T *operator*() const { - return m_current; + return current_; } }; Iterator begin() const { - return Iterator(m_listbase, (T *)m_listbase->first); + return Iterator(listbase_, (T *)listbase_->first); } Iterator end() const { - return Iterator(m_listbase, nullptr); + return Iterator(listbase_, nullptr); } T get(uint index) const { - void *ptr = BLI_findlink(m_listbase, index); + void *ptr = BLI_findlink(listbase_, index); BLI_assert(ptr); return (T *)ptr; } - uint index_of(const T *value) const + int64_t index_of(const T *value) const { - uint index = 0; + int64_t index = 0; for (T *ptr : *this) { if (ptr == value) { return index; @@ -106,7 +106,7 @@ template<typename T> class ListBaseWrapper { index++; } BLI_assert(false); - return 0; + return -1; } }; diff --git a/source/blender/blenlib/BLI_map.hh b/source/blender/blenlib/BLI_map.hh index 9737367ebca..dd375272fdb 100644 --- a/source/blender/blenlib/BLI_map.hh +++ b/source/blender/blenlib/BLI_map.hh @@ -67,13 +67,13 @@ * interface as blender::Map. 
This is useful for benchmarking. */ +#include <optional> #include <unordered_map> #include "BLI_array.hh" #include "BLI_hash.hh" #include "BLI_hash_tables.hh" #include "BLI_map_slots.hh" -#include "BLI_optional.hh" #include "BLI_probing_strategies.hh" namespace blender { @@ -92,13 +92,10 @@ template< * The minimum number of elements that can be stored in this Map without doing a heap * allocation. This is useful when you expect to have many small maps. However, keep in mind * that (unlike vector) initializing a map has a O(n) cost in the number of slots. - * - * When Key or Value are large, the small buffer optimization is disabled by default to avoid - * large unexpected allocations on the stack. It can still be enabled explicitely though. */ - uint32_t InlineBufferCapacity = (sizeof(Key) + sizeof(Value) < 100) ? 4 : 0, + int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(Key) + sizeof(Value)), /** - * The strategy used to deal with collistions. They are defined in BLI_probing_strategies.hh. + * The strategy used to deal with collisions. They are defined in BLI_probing_strategies.hh. */ typename ProbingStrategy = DefaultProbingStrategy, /** @@ -129,30 +126,30 @@ class Map { * Slots are either empty, occupied or removed. The number of occupied slots can be computed by * subtracting the removed slots from the occupied-and-removed slots. */ - uint32_t m_removed_slots; - uint32_t m_occupied_and_removed_slots; + int64_t removed_slots_; + int64_t occupied_and_removed_slots_; /** * The maximum number of slots that can be used (either occupied or removed) until the set has to * grow. This is the total number of slots times the max load factor. */ - uint32_t m_usable_slots; + int64_t usable_slots_; /** * The number of slots minus one. This is a bit mask that can be used to turn any integer into a * valid slot index efficiently. */ - uint32_t m_slot_mask; + uint64_t slot_mask_; /** This is called to hash incoming keys. 
*/ - Hash m_hash; + Hash hash_; /** This is called to check equality of two keys. */ - IsEqual m_is_equal; + IsEqual is_equal_; /** The max load factor is 1/2 = 50% by default. */ #define LOAD_FACTOR 1, 2 - LoadFactor m_max_load_factor = LoadFactor(LOAD_FACTOR); + LoadFactor max_load_factor_ = LoadFactor(LOAD_FACTOR); using SlotArray = Array<Slot, LoadFactor::compute_total_slots(InlineBufferCapacity, LOAD_FACTOR), Allocator>; #undef LOAD_FACTOR @@ -161,12 +158,12 @@ class Map { * This is the array that contains the actual slots. There is always at least one empty slot and * the size of the array is a power of two. */ - SlotArray m_slots; + SlotArray slots_; /** Iterate over a slot index sequence for a given hash. */ #define MAP_SLOT_PROBING_BEGIN(HASH, R_SLOT) \ - SLOT_PROBING_BEGIN (ProbingStrategy, HASH, m_slot_mask, SLOT_INDEX) \ - auto &R_SLOT = m_slots[SLOT_INDEX]; + SLOT_PROBING_BEGIN (ProbingStrategy, HASH, slot_mask_, SLOT_INDEX) \ + auto &R_SLOT = slots_[SLOT_INDEX]; #define MAP_SLOT_PROBING_END() SLOT_PROBING_END() public: @@ -176,13 +173,13 @@ class Map { * operation is performed on the first insertion. 
*/ Map() - : m_removed_slots(0), - m_occupied_and_removed_slots(0), - m_usable_slots(0), - m_slot_mask(0), - m_hash(), - m_is_equal(), - m_slots(1) + : removed_slots_(0), + occupied_and_removed_slots_(0), + usable_slots_(0), + slot_mask_(0), + hash_(), + is_equal_(), + slots_(1) { } @@ -191,13 +188,13 @@ class Map { Map(const Map &other) = default; Map(Map &&other) noexcept - : m_removed_slots(other.m_removed_slots), - m_occupied_and_removed_slots(other.m_occupied_and_removed_slots), - m_usable_slots(other.m_usable_slots), - m_slot_mask(other.m_slot_mask), - m_hash(std::move(other.m_hash)), - m_is_equal(std::move(other.m_is_equal)), - m_slots(std::move(other.m_slots)) + : removed_slots_(other.removed_slots_), + occupied_and_removed_slots_(other.occupied_and_removed_slots_), + usable_slots_(other.usable_slots_), + slot_mask_(other.slot_mask_), + hash_(std::move(other.hash_)), + is_equal_(std::move(other.is_equal_)), + slots_(std::move(other.slots_)) { other.~Map(); new (&other) Map(); @@ -233,19 +230,19 @@ class Map { */ void add_new(const Key &key, const Value &value) { - this->add_new__impl(key, value, m_hash(key)); + this->add_new__impl(key, value, hash_(key)); } void add_new(const Key &key, Value &&value) { - this->add_new__impl(key, std::move(value), m_hash(key)); + this->add_new__impl(key, std::move(value), hash_(key)); } void add_new(Key &&key, const Value &value) { - this->add_new__impl(std::move(key), value, m_hash(key)); + this->add_new__impl(std::move(key), value, hash_(key)); } void add_new(Key &&key, Value &&value) { - this->add_new__impl(std::move(key), std::move(value), m_hash(key)); + this->add_new__impl(std::move(key), std::move(value), hash_(key)); } /** @@ -271,15 +268,11 @@ class Map { { return this->add_as(std::move(key), std::move(value)); } - - /** - * Same as `add`, but accepts other key types that are supported by the hash function. 
- */ template<typename ForwardKey, typename ForwardValue> bool add_as(ForwardKey &&key, ForwardValue &&value) { return this->add__impl( - std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), m_hash(key)); + std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), hash_(key)); } /** @@ -305,15 +298,11 @@ class Map { { return this->add_overwrite_as(std::move(key), std::move(value)); } - - /** - * Same as `add_overwrite`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey, typename ForwardValue> bool add_overwrite_as(ForwardKey &&key, ForwardValue &&value) { return this->add_overwrite__impl( - std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), m_hash(key)); + std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), hash_(key)); } /** @@ -325,13 +314,9 @@ class Map { { return this->contains_as(key); } - - /** - * Same as `contains`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> bool contains_as(const ForwardKey &key) const { - return this->contains__impl(key, m_hash(key)); + return this->contains__impl(key, hash_(key)); } /** @@ -344,13 +329,9 @@ class Map { { return this->remove_as(key); } - - /** - * Same as `remove`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> bool remove_as(const ForwardKey &key) { - return this->remove__impl(key, m_hash(key)); + return this->remove__impl(key, hash_(key)); } /** @@ -361,14 +342,9 @@ class Map { { this->remove_contained_as(key); } - - /** - * Same as `remove_contained`, but accepts other key types that are supported by the hash - * function. 
- */ template<typename ForwardKey> void remove_contained_as(const ForwardKey &key) { - this->remove_contained__impl(key, m_hash(key)); + this->remove_contained__impl(key, hash_(key)); } /** @@ -379,30 +355,22 @@ class Map { { return this->pop_as(key); } - - /** - * Same as `pop`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> Value pop_as(const ForwardKey &key) { - return this->pop__impl(key, m_hash(key)); + return this->pop__impl(key, hash_(key)); } /** * Get the value that is stored for the given key and remove it from the map. If the key is not * in the map, a value-less optional is returned. */ - Optional<Value> pop_try(const Key &key) + std::optional<Value> pop_try(const Key &key) { return this->pop_try_as(key); } - - /** - * Same as `pop_try`, but accepts other key types that are supported by the hash function. - */ - template<typename ForwardKey> Optional<Value> pop_try_as(const ForwardKey &key) + template<typename ForwardKey> std::optional<Value> pop_try_as(const ForwardKey &key) { - return this->pop_try__impl(key, m_hash(key)); + return this->pop_try__impl(key, hash_(key)); } /** @@ -417,14 +385,10 @@ class Map { { return this->pop_default_as(key, std::move(default_value)); } - - /** - * Same as `pop_default`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey, typename ForwardValue> Value pop_default_as(const ForwardKey &key, ForwardValue &&default_value) { - return this->pop_default__impl(key, std::forward<ForwardValue>(default_value), m_hash(key)); + return this->pop_default__impl(key, std::forward<ForwardValue>(default_value), hash_(key)); } /** @@ -460,17 +424,13 @@ class Map { { return this->add_or_modify_as(std::move(key), create_value, modify_value); } - - /** - * Same as `add_or_modify`, but accepts other key types that are supported by the hash function. 
- */ template<typename ForwardKey, typename CreateValueF, typename ModifyValueF> auto add_or_modify_as(ForwardKey &&key, const CreateValueF &create_value, const ModifyValueF &modify_value) -> decltype(create_value(nullptr)) { return this->add_or_modify__impl( - std::forward<Key>(key), create_value, modify_value, m_hash(key)); + std::forward<ForwardKey>(key), create_value, modify_value, hash_(key)); } /** @@ -487,17 +447,13 @@ class Map { { return this->lookup_ptr_as(key); } - - /** - * Same as `lookup_ptr`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> const Value *lookup_ptr_as(const ForwardKey &key) const { - return this->lookup_ptr__impl(key, m_hash(key)); + return this->lookup_ptr__impl(key, hash_(key)); } template<typename ForwardKey> Value *lookup_ptr_as(const ForwardKey &key) { - return const_cast<Value *>(this->lookup_ptr__impl(key, m_hash(key))); + return const_cast<Value *>(this->lookup_ptr__impl(key, hash_(key))); } /** @@ -512,10 +468,6 @@ class Map { { return this->lookup_as(key); } - - /** - * Same as `lookup`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> const Value &lookup_as(const ForwardKey &key) const { const Value *ptr = this->lookup_ptr_as(key); @@ -537,10 +489,6 @@ class Map { { return this->lookup_default_as(key, default_value); } - - /** - * Same as `lookup_default`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey, typename ForwardValue> Value lookup_default_as(const ForwardKey &key, ForwardValue &&default_value) const { @@ -573,16 +521,11 @@ class Map { { return this->lookup_or_add_as(std::move(key), std::move(value)); } - - /** - * Same as `lookup_or_add`, but accepts other key types that are supported by the hash - * function. 
- */ template<typename ForwardKey, typename ForwardValue> Value &lookup_or_add_as(ForwardKey &&key, ForwardValue &&value) { return this->lookup_or_add__impl( - std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), m_hash(key)); + std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), hash_(key)); } /** @@ -602,15 +545,10 @@ class Map { { return this->lookup_or_add_cb_as(std::move(key), create_value); } - - /** - * Same as `lookup_or_add_cb`, but accepts other key types that are supported by the hash - * function. - */ template<typename ForwardKey, typename CreateValueF> Value &lookup_or_add_cb_as(ForwardKey &&key, const CreateValueF &create_value) { - return this->lookup_or_add_cb__impl(std::forward<ForwardKey>(key), create_value, m_hash(key)); + return this->lookup_or_add_cb__impl(std::forward<ForwardKey>(key), create_value, hash_(key)); } /** @@ -625,11 +563,6 @@ class Map { { return this->lookup_or_add_default_as(std::move(key)); } - - /** - * Same as `lookup_or_add_default`, but accepts other key types that are supported by the hash - * function. - */ template<typename ForwardKey> Value &lookup_or_add_default_as(ForwardKey &&key) { return this->lookup_or_add_cb_as(std::forward<ForwardKey>(key), []() { return Value(); }); @@ -641,9 +574,9 @@ class Map { */ template<typename FuncT> void foreach_item(const FuncT &func) const { - uint32_t size = this->size(); - for (uint32_t i = 0; i < size; i++) { - const Slot &slot = m_slots[i]; + int64_t size = slots_.size(); + for (int64_t i = 0; i < size; i++) { + const Slot &slot = slots_[i]; if (slot.is_occupied()) { const Key &key = *slot.key(); const Value &value = *slot.value(); @@ -657,21 +590,19 @@ class Map { * This uses the "curiously recurring template pattern" (CRTP). 
 */ template<typename SubIterator> struct BaseIterator { - Slot *m_slots; - uint32_t m_total_slots; - uint32_t m_current_slot; - - BaseIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot) - : m_slots(const_cast<Slot *>(slots)), - m_total_slots(total_slots), - m_current_slot(current_slot) + Slot *slots_; + int64_t total_slots_; + int64_t current_slot_; + + BaseIterator(const Slot *slots, int64_t total_slots, int64_t current_slot) + : slots_(const_cast<Slot *>(slots)), total_slots_(total_slots), current_slot_(current_slot) { } BaseIterator &operator++() { - while (++m_current_slot < m_total_slots) { - if (m_slots[m_current_slot].is_occupied()) { + while (++current_slot_ < total_slots_) { + if (slots_[current_slot_].is_occupied()) { break; } } @@ -680,16 +611,16 @@ class Map { friend bool operator!=(const BaseIterator &a, const BaseIterator &b) { - BLI_assert(a.m_slots == b.m_slots); - BLI_assert(a.m_total_slots == b.m_total_slots); - return a.m_current_slot != b.m_current_slot; + BLI_assert(a.slots_ == b.slots_); + BLI_assert(a.total_slots_ == b.total_slots_); + return a.current_slot_ != b.current_slot_; } SubIterator begin() const { - for (uint32_t i = 0; i < m_total_slots; i++) { - if (m_slots[i].is_occupied()) { - return SubIterator(m_slots, m_total_slots, i); + for (int64_t i = 0; i < total_slots_; i++) { + if (slots_[i].is_occupied()) { + return SubIterator(slots_, total_slots_, i); } } return this->end(); @@ -697,18 +628,18 @@ class Map { SubIterator end() const { - return SubIterator(m_slots, m_total_slots, m_total_slots); + return SubIterator(slots_, total_slots_, total_slots_); } Slot &current_slot() const { - return m_slots[m_current_slot]; + return slots_[current_slot_]; } }; class KeyIterator final : public BaseIterator<KeyIterator> { public: - KeyIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot) + KeyIterator(const Slot *slots, int64_t total_slots, int64_t current_slot) : BaseIterator<KeyIterator>(slots, total_slots, 
current_slot) { } @@ -721,7 +652,7 @@ class Map { class ValueIterator final : public BaseIterator<ValueIterator> { public: - ValueIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot) + ValueIterator(const Slot *slots, int64_t total_slots, int64_t current_slot) : BaseIterator<ValueIterator>(slots, total_slots, current_slot) { } @@ -734,7 +665,7 @@ class Map { class MutableValueIterator final : public BaseIterator<MutableValueIterator> { public: - MutableValueIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot) + MutableValueIterator(const Slot *slots, int64_t total_slots, int64_t current_slot) : BaseIterator<MutableValueIterator>(slots, total_slots, current_slot) { } @@ -745,18 +676,28 @@ class Map { } }; + struct Item { + const Key &key; + const Value &value; + }; + + struct MutableItem { + const Key &key; + Value &value; + + operator Item() const + { + return Item{key, value}; + } + }; + class ItemIterator final : public BaseIterator<ItemIterator> { public: - ItemIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot) + ItemIterator(const Slot *slots, int64_t total_slots, int64_t current_slot) : BaseIterator<ItemIterator>(slots, total_slots, current_slot) { } - struct Item { - const Key &key; - const Value &value; - }; - Item operator*() const { const Slot &slot = this->current_slot(); @@ -766,17 +707,12 @@ class Map { class MutableItemIterator final : public BaseIterator<MutableItemIterator> { public: - MutableItemIterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot) + MutableItemIterator(const Slot *slots, int64_t total_slots, int64_t current_slot) : BaseIterator<MutableItemIterator>(slots, total_slots, current_slot) { } - struct Item { - const Key &key; - Value &value; - }; - - Item operator*() const + MutableItem operator*() const { Slot &slot = this->current_slot(); return {*slot.key(), *slot.value()}; @@ -789,7 +725,7 @@ class Map { */ KeyIterator keys() const { - return 
KeyIterator(m_slots.data(), m_slots.size(), 0); + return KeyIterator(slots_.data(), slots_.size(), 0); } /** @@ -798,7 +734,7 @@ class Map { */ ValueIterator values() const { - return ValueIterator(m_slots.data(), m_slots.size(), 0); + return ValueIterator(slots_.data(), slots_.size(), 0); } /** @@ -807,7 +743,7 @@ class Map { */ MutableValueIterator values() { - return MutableValueIterator(m_slots.data(), m_slots.size(), 0); + return MutableValueIterator(slots_.data(), slots_.size(), 0); } /** @@ -817,7 +753,7 @@ class Map { */ ItemIterator items() const { - return ItemIterator(m_slots.data(), m_slots.size(), 0); + return ItemIterator(slots_.data(), slots_.size(), 0); } /** @@ -829,7 +765,7 @@ class Map { */ MutableItemIterator items() { - return MutableItemIterator(m_slots.data(), m_slots.size(), 0); + return MutableItemIterator(slots_.data(), slots_.size(), 0); } /** @@ -838,15 +774,15 @@ class Map { void print_stats(StringRef name = "") const { HashTableStats stats(*this, this->keys()); - stats.print(); + stats.print(name); } /** * Return the number of key-value-pairs that are stored in the map. */ - uint32_t size() const + int64_t size() const { - return m_occupied_and_removed_slots - m_removed_slots; + return occupied_and_removed_slots_ - removed_slots_; } /** @@ -856,29 +792,29 @@ class Map { */ bool is_empty() const { - return m_occupied_and_removed_slots == m_removed_slots; + return occupied_and_removed_slots_ == removed_slots_; } /** * Returns the number of available slots. This is mostly for debugging purposes. */ - uint32_t capacity() const + int64_t capacity() const { - return m_slots.size(); + return slots_.size(); } /** * Returns the amount of removed slots in the set. This is mostly for debugging purposes. */ - uint32_t removed_amount() const + int64_t removed_amount() const { - return m_removed_slots; + return removed_slots_; } /** * Returns the bytes required per element. This is mostly for debugging purposes. 
*/ - uint32_t size_per_element() const + int64_t size_per_element() const { return sizeof(Slot); } @@ -887,18 +823,18 @@ class Map { * Returns the approximate memory requirements of the map in bytes. This becomes more exact the * larger the map becomes. */ - uint32_t size_in_bytes() const + int64_t size_in_bytes() const { - return sizeof(Slot) * m_slots.size(); + return (int64_t)(sizeof(Slot) * slots_.size()); } /** * Potentially resize the map such that the specified number of elements can be added without * another grow operation. */ - void reserve(uint32_t n) + void reserve(int64_t n) { - if (m_usable_slots < n) { + if (usable_slots_ < n) { this->realloc_and_reinsert(n); } } @@ -916,35 +852,36 @@ class Map { * Get the number of collisions that the probing strategy has to go through to find the key or * determine that it is not in the map. */ - uint32_t count_collisions(const Key &key) const + int64_t count_collisions(const Key &key) const { - return this->count_collisions__impl(key, m_hash(key)); + return this->count_collisions__impl(key, hash_(key)); } private: - BLI_NOINLINE void realloc_and_reinsert(uint32_t min_usable_slots) + BLI_NOINLINE void realloc_and_reinsert(int64_t min_usable_slots) { - uint32_t total_slots, usable_slots; - m_max_load_factor.compute_total_and_usable_slots( + int64_t total_slots, usable_slots; + max_load_factor_.compute_total_and_usable_slots( SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots); - uint32_t new_slot_mask = total_slots - 1; + BLI_assert(total_slots >= 1); + const uint64_t new_slot_mask = (uint64_t)total_slots - 1; /** * Optimize the case when the map was empty beforehand. We can avoid some copies here. 
*/ if (this->size() == 0) { - m_slots.~Array(); - new (&m_slots) SlotArray(total_slots); - m_removed_slots = 0; - m_occupied_and_removed_slots = 0; - m_usable_slots = usable_slots; - m_slot_mask = new_slot_mask; + slots_.~Array(); + new (&slots_) SlotArray(total_slots); + removed_slots_ = 0; + occupied_and_removed_slots_ = 0; + usable_slots_ = usable_slots; + slot_mask_ = new_slot_mask; return; } SlotArray new_slots(total_slots); - for (Slot &slot : m_slots) { + for (Slot &slot : slots_) { if (slot.is_occupied()) { this->add_after_grow_and_destruct_old(slot, new_slots, new_slot_mask); } @@ -952,19 +889,19 @@ class Map { /* All occupied slots have been destructed already and empty/removed slots are assumed to be * trivially destructible. */ - m_slots.clear_without_destruct(); - m_slots = std::move(new_slots); - m_occupied_and_removed_slots -= m_removed_slots; - m_usable_slots = usable_slots; - m_removed_slots = 0; - m_slot_mask = new_slot_mask; + slots_.clear_without_destruct(); + slots_ = std::move(new_slots); + occupied_and_removed_slots_ -= removed_slots_; + usable_slots_ = usable_slots; + removed_slots_ = 0; + slot_mask_ = new_slot_mask; } void add_after_grow_and_destruct_old(Slot &old_slot, SlotArray &new_slots, - uint32_t new_slot_mask) + uint64_t new_slot_mask) { - uint32_t hash = old_slot.get_hash(Hash()); + uint64_t hash = old_slot.get_hash(Hash()); SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) { Slot &slot = new_slots[slot_index]; if (slot.is_empty()) { @@ -975,13 +912,13 @@ class Map { SLOT_PROBING_END(); } - template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint32_t hash) const + template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint64_t hash) const { MAP_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { return false; } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return true; } } @@ -989,12 +926,12 @@ class Map { } template<typename 
ForwardKey, typename ForwardValue> - void add_new__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash) + void add_new__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash) { BLI_assert(!this->contains_as(key)); this->ensure_can_add(); - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; MAP_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { @@ -1006,29 +943,29 @@ class Map { } template<typename ForwardKey, typename ForwardValue> - bool add__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash) + bool add__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash) { this->ensure_can_add(); MAP_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { slot.occupy(std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), hash); - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; return true; } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return false; } } MAP_SLOT_PROBING_END(); } - template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint64_t hash) { MAP_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { slot.remove(); - m_removed_slots++; + removed_slots_++; return true; } if (slot.is_empty()) { @@ -1038,14 +975,14 @@ class Map { MAP_SLOT_PROBING_END(); } - template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint64_t hash) { BLI_assert(this->contains_as(key)); - m_removed_slots++; + removed_slots_++; MAP_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { slot.remove(); return; } @@ -1053,14 +990,14 @@ class Map { MAP_SLOT_PROBING_END(); } - template<typename ForwardKey> Value 
pop__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> Value pop__impl(const ForwardKey &key, uint64_t hash) { BLI_assert(this->contains_as(key)); - m_removed_slots++; + removed_slots_++; MAP_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { Value value = std::move(*slot.value()); slot.remove(); return value; @@ -1069,13 +1006,14 @@ class Map { MAP_SLOT_PROBING_END(); } - template<typename ForwardKey> Optional<Value> pop_try__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> + std::optional<Value> pop_try__impl(const ForwardKey &key, uint64_t hash) { MAP_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { - Optional<Value> value = std::move(*slot.value()); + if (slot.contains(key, is_equal_, hash)) { + std::optional<Value> value = std::move(*slot.value()); slot.remove(); - m_removed_slots++; + removed_slots_++; return value; } if (slot.is_empty()) { @@ -1086,13 +1024,13 @@ class Map { } template<typename ForwardKey, typename ForwardValue> - Value pop_default__impl(const ForwardKey &key, ForwardValue &&default_value, uint32_t hash) + Value pop_default__impl(const ForwardKey &key, ForwardValue &&default_value, uint64_t hash) { MAP_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { Value value = std::move(*slot.value()); slot.remove(); - m_removed_slots++; + removed_slots_++; return value; } if (slot.is_empty()) { @@ -1106,23 +1044,23 @@ class Map { auto add_or_modify__impl(ForwardKey &&key, const CreateValueF &create_value, const ModifyValueF &modify_value, - uint32_t hash) -> decltype(create_value(nullptr)) + uint64_t hash) -> decltype(create_value(nullptr)) { using CreateReturnT = decltype(create_value(nullptr)); using ModifyReturnT = decltype(modify_value(nullptr)); - BLI_STATIC_ASSERT((std::is_same<CreateReturnT, ModifyReturnT>::value), + 
BLI_STATIC_ASSERT((std::is_same_v<CreateReturnT, ModifyReturnT>), "Both callbacks should return the same type."); this->ensure_can_add(); MAP_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; slot.occupy_without_value(std::forward<ForwardKey>(key), hash); Value *value_ptr = slot.value(); return create_value(value_ptr); } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { Value *value_ptr = slot.value(); return modify_value(value_ptr); } @@ -1131,17 +1069,17 @@ class Map { } template<typename ForwardKey, typename CreateValueF> - Value &lookup_or_add_cb__impl(ForwardKey &&key, const CreateValueF &create_value, uint32_t hash) + Value &lookup_or_add_cb__impl(ForwardKey &&key, const CreateValueF &create_value, uint64_t hash) { this->ensure_can_add(); MAP_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { slot.occupy(std::forward<ForwardKey>(key), create_value(), hash); - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; return *slot.value(); } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return *slot.value(); } } @@ -1149,17 +1087,17 @@ class Map { } template<typename ForwardKey, typename ForwardValue> - Value &lookup_or_add__impl(ForwardKey &&key, ForwardValue &&value, uint32_t hash) + Value &lookup_or_add__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash) { this->ensure_can_add(); MAP_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { slot.occupy(std::forward<ForwardKey>(key), std::forward<ForwardValue>(value), hash); - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; return *slot.value(); } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return *slot.value(); } } @@ -1167,10 +1105,10 @@ class Map { } template<typename ForwardKey, typename ForwardValue> - bool add_overwrite__impl(ForwardKey &&key, ForwardValue 
&&value, uint32_t hash) + bool add_overwrite__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash) { auto create_func = [&](Value *ptr) { - new (ptr) Value(std::forward<ForwardValue>(value)); + new ((void *)ptr) Value(std::forward<ForwardValue>(value)); return true; }; auto modify_func = [&](Value *ptr) { @@ -1182,13 +1120,13 @@ class Map { } template<typename ForwardKey> - const Value *lookup_ptr__impl(const ForwardKey &key, uint32_t hash) const + const Value *lookup_ptr__impl(const ForwardKey &key, uint64_t hash) const { MAP_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { return nullptr; } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return slot.value(); } } @@ -1196,12 +1134,12 @@ class Map { } template<typename ForwardKey> - uint32_t count_collisions__impl(const ForwardKey &key, uint32_t hash) const + int64_t count_collisions__impl(const ForwardKey &key, uint64_t hash) const { - uint32_t collisions = 0; + int64_t collisions = 0; MAP_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return collisions; } if (slot.is_empty()) { @@ -1214,73 +1152,88 @@ class Map { void ensure_can_add() { - if (m_occupied_and_removed_slots >= m_usable_slots) { + if (occupied_and_removed_slots_ >= usable_slots_) { this->realloc_and_reinsert(this->size() + 1); - BLI_assert(m_occupied_and_removed_slots < m_usable_slots); + BLI_assert(occupied_and_removed_slots_ < usable_slots_); } } }; /** + * Same as a normal Map, but does not use Blender's guarded allocator. This is useful when + * allocating memory with static storage duration. 
+ */ +template<typename Key, + typename Value, + int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(Key) + + sizeof(Value)), + typename ProbingStrategy = DefaultProbingStrategy, + typename Hash = DefaultHash<Key>, + typename IsEqual = DefaultEquality, + typename Slot = typename DefaultMapSlot<Key, Value>::type> +using RawMap = + Map<Key, Value, InlineBufferCapacity, ProbingStrategy, Hash, IsEqual, Slot, RawAllocator>; + +/** * A wrapper for std::unordered_map with the API of blender::Map. This can be used for * benchmarking. */ template<typename Key, typename Value> class StdUnorderedMapWrapper { private: using MapType = std::unordered_map<Key, Value, blender::DefaultHash<Key>>; - MapType m_map; + MapType map_; public: - uint32_t size() const + int64_t size() const { - return (uint32_t)m_map.size(); + return (int64_t)map_.size(); } bool is_empty() const { - return m_map.empty(); + return map_.empty(); } - void reserve(uint32_t n) + void reserve(int64_t n) { - m_map.reserve(n); + map_.reserve(n); } template<typename ForwardKey, typename ForwardValue> void add_new(ForwardKey &&key, ForwardValue &&value) { - m_map.insert({std::forward<ForwardKey>(key), std::forward<ForwardValue>(value)}); + map_.insert({std::forward<ForwardKey>(key), std::forward<ForwardValue>(value)}); } template<typename ForwardKey, typename ForwardValue> bool add(ForwardKey &&key, ForwardValue &&value) { - return m_map.insert({std::forward<ForwardKey>(key), std::forward<ForwardValue>(value)}).second; + return map_.insert({std::forward<ForwardKey>(key), std::forward<ForwardValue>(value)}).second; } bool contains(const Key &key) const { - return m_map.find(key) != m_map.end(); + return map_.find(key) != map_.end(); } bool remove(const Key &key) { - return (bool)m_map.erase(key); + return (bool)map_.erase(key); } Value &lookup(const Key &key) { - return m_map.find(key)->second; + return map_.find(key)->second; } const Value &lookup(const Key &key) const { - return 
m_map.find(key)->second; + return map_.find(key)->second; } void clear() { - m_map.clear(); + map_.clear(); } void print_stats(StringRef UNUSED(name) = "") const diff --git a/source/blender/blenlib/BLI_map_slots.hh b/source/blender/blenlib/BLI_map_slots.hh index 9ea2c4cad89..b5360795a13 100644 --- a/source/blender/blenlib/BLI_map_slots.hh +++ b/source/blender/blenlib/BLI_map_slots.hh @@ -52,9 +52,9 @@ template<typename Key, typename Value> class SimpleMapSlot { Removed = 2, }; - State m_state; - AlignedBuffer<sizeof(Key), alignof(Key)> m_key_buffer; - AlignedBuffer<sizeof(Value), alignof(Value)> m_value_buffer; + State state_; + TypedBuffer<Key> key_buffer_; + TypedBuffer<Value> value_buffer_; public: /** @@ -62,7 +62,7 @@ template<typename Key, typename Value> class SimpleMapSlot { */ SimpleMapSlot() { - m_state = Empty; + state_ = Empty; } /** @@ -70,9 +70,9 @@ template<typename Key, typename Value> class SimpleMapSlot { */ ~SimpleMapSlot() { - if (m_state == Occupied) { - this->key()->~Key(); - this->value()->~Value(); + if (state_ == Occupied) { + key_buffer_.ref().~Key(); + value_buffer_.ref().~Value(); } } @@ -82,24 +82,24 @@ template<typename Key, typename Value> class SimpleMapSlot { */ SimpleMapSlot(const SimpleMapSlot &other) { - m_state = other.m_state; - if (other.m_state == Occupied) { - new (this->key()) Key(*other.key()); - new (this->value()) Value(*other.value()); + state_ = other.state_; + if (other.state_ == Occupied) { + new (&key_buffer_) Key(*other.key_buffer_); + new (&value_buffer_) Value(*other.value_buffer_); } } /** - * The move construtor has to copy the state. If the other slot was occupied, the key and value + * The move constructor has to copy the state. If the other slot was occupied, the key and value * from the other have to moved as well. The other slot stays in the state it was in before. Its * optionally stored key and value remain in a moved-from state. 
*/ SimpleMapSlot(SimpleMapSlot &&other) noexcept { - m_state = other.m_state; - if (other.m_state == Occupied) { - new (this->key()) Key(std::move(*other.key())); - new (this->value()) Value(std::move(*other.value())); + state_ = other.state_; + if (other.state_ == Occupied) { + new (&key_buffer_) Key(std::move(*other.key_buffer_)); + new (&value_buffer_) Value(std::move(*other.value_buffer_)); } } @@ -108,7 +108,7 @@ template<typename Key, typename Value> class SimpleMapSlot { */ Key *key() { - return (Key *)m_key_buffer.ptr(); + return key_buffer_; } /** @@ -116,7 +116,7 @@ template<typename Key, typename Value> class SimpleMapSlot { */ const Key *key() const { - return (const Key *)m_key_buffer.ptr(); + return key_buffer_; } /** @@ -124,7 +124,7 @@ template<typename Key, typename Value> class SimpleMapSlot { */ Value *value() { - return (Value *)m_value_buffer.ptr(); + return value_buffer_; } /** @@ -132,7 +132,7 @@ template<typename Key, typename Value> class SimpleMapSlot { */ const Value *value() const { - return (const Value *)m_value_buffer.ptr(); + return value_buffer_; } /** @@ -140,7 +140,7 @@ template<typename Key, typename Value> class SimpleMapSlot { */ bool is_occupied() const { - return m_state == Occupied; + return state_ == Occupied; } /** @@ -148,32 +148,32 @@ template<typename Key, typename Value> class SimpleMapSlot { */ bool is_empty() const { - return m_state == Empty; + return state_ == Empty; } /** * Returns the hash of the currently stored key. In this simple map slot implementation, we just * computed the hash here. Other implementations might store the hash in the slot instead. */ - template<typename Hash> uint32_t get_hash(const Hash &hash) + template<typename Hash> uint64_t get_hash(const Hash &hash) { BLI_assert(this->is_occupied()); - return hash(*this->key()); + return hash(*key_buffer_); } /** * Move the other slot into this slot and destruct it. 
We do destruction here, because this way * we can avoid a comparison with the state, since we know the slot is occupied. */ - void relocate_occupied_here(SimpleMapSlot &other, uint32_t UNUSED(hash)) + void relocate_occupied_here(SimpleMapSlot &other, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); BLI_assert(other.is_occupied()); - m_state = Occupied; - new (this->key()) Key(std::move(*other.key())); - new (this->value()) Value(std::move(*other.value())); - other.key()->~Key(); - other.value()->~Value(); + state_ = Occupied; + new (&key_buffer_) Key(std::move(*other.key_buffer_)); + new (&value_buffer_) Value(std::move(*other.value_buffer_)); + other.key_buffer_.ref().~Key(); + other.value_buffer_.ref().~Value(); } /** @@ -181,10 +181,10 @@ template<typename Key, typename Value> class SimpleMapSlot { * key. The hash can be used by other slot implementations to determine inequality faster. */ template<typename ForwardKey, typename IsEqual> - bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const + bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const { - if (m_state == Occupied) { - return is_equal(key, *this->key()); + if (state_ == Occupied) { + return is_equal(key, *key_buffer_); } return false; } @@ -194,22 +194,22 @@ template<typename Key, typename Value> class SimpleMapSlot { * constructed by calling the constructor with the given key/value as parameter. */ template<typename ForwardKey, typename ForwardValue> - void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash) + void occupy(ForwardKey &&key, ForwardValue &&value, uint64_t hash) { BLI_assert(!this->is_occupied()); this->occupy_without_value(std::forward<ForwardKey>(key), hash); - new (this->value()) Value(std::forward<ForwardValue>(value)); + new (&value_buffer_) Value(std::forward<ForwardValue>(value)); } /** * Change the state of this slot from empty/removed to occupied, but leave the value * uninitialized. 
The caller is responsible to construct the value afterwards. */ - template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash)) + template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); - m_state = Occupied; - new (this->key()) Key(std::forward<ForwardKey>(key)); + state_ = Occupied; + new (&key_buffer_) Key(std::forward<ForwardKey>(key)); } /** @@ -219,9 +219,9 @@ template<typename Key, typename Value> class SimpleMapSlot { void remove() { BLI_assert(this->is_occupied()); - m_state = Removed; - this->key()->~Key(); - this->value()->~Value(); + state_ = Removed; + key_buffer_.ref().~Key(); + value_buffer_.ref().~Value(); } }; @@ -235,107 +235,107 @@ template<typename Key, typename Value> class SimpleMapSlot { */ template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot { private: - Key m_key = KeyInfo::get_empty(); - AlignedBuffer<sizeof(Value), alignof(Value)> m_value_buffer; + Key key_ = KeyInfo::get_empty(); + TypedBuffer<Value> value_buffer_; public: IntrusiveMapSlot() = default; ~IntrusiveMapSlot() { - if (KeyInfo::is_not_empty_or_removed(m_key)) { - this->value()->~Value(); + if (KeyInfo::is_not_empty_or_removed(key_)) { + value_buffer_.ref().~Value(); } } - IntrusiveMapSlot(const IntrusiveMapSlot &other) : m_key(other.m_key) + IntrusiveMapSlot(const IntrusiveMapSlot &other) : key_(other.key_) { - if (KeyInfo::is_not_empty_or_removed(m_key)) { - new (this->value()) Value(*other.value()); + if (KeyInfo::is_not_empty_or_removed(key_)) { + new (&value_buffer_) Value(*other.value_buffer_); } } - IntrusiveMapSlot(IntrusiveMapSlot &&other) noexcept : m_key(other.m_key) + IntrusiveMapSlot(IntrusiveMapSlot &&other) noexcept : key_(other.key_) { - if (KeyInfo::is_not_empty_or_removed(m_key)) { - new (this->value()) Value(std::move(*other.value())); + if (KeyInfo::is_not_empty_or_removed(key_)) { + new (&value_buffer_) 
Value(std::move(*other.value_buffer_)); } } Key *key() { - return &m_key; + return &key_; } const Key *key() const { - return &m_key; + return &key_; } Value *value() { - return (Value *)m_value_buffer.ptr(); + return value_buffer_; } const Value *value() const { - return (const Value *)m_value_buffer.ptr(); + return value_buffer_; } bool is_occupied() const { - return KeyInfo::is_not_empty_or_removed(m_key); + return KeyInfo::is_not_empty_or_removed(key_); } bool is_empty() const { - return KeyInfo::is_empty(m_key); + return KeyInfo::is_empty(key_); } - template<typename Hash> uint32_t get_hash(const Hash &hash) + template<typename Hash> uint64_t get_hash(const Hash &hash) { BLI_assert(this->is_occupied()); - return hash(*this->key()); + return hash(key_); } - void relocate_occupied_here(IntrusiveMapSlot &other, uint32_t UNUSED(hash)) + void relocate_occupied_here(IntrusiveMapSlot &other, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); BLI_assert(other.is_occupied()); - m_key = std::move(other.m_key); - new (this->value()) Value(std::move(*other.value())); - other.m_key.~Key(); - other.value()->~Value(); + key_ = std::move(other.key_); + new (&value_buffer_) Value(std::move(*other.value_buffer_)); + other.key_.~Key(); + other.value_buffer_.ref().~Value(); } template<typename ForwardKey, typename IsEqual> - bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const + bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const { BLI_assert(KeyInfo::is_not_empty_or_removed(key)); - return is_equal(key, m_key); + return is_equal(key, key_); } template<typename ForwardKey, typename ForwardValue> - void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash) + void occupy(ForwardKey &&key, ForwardValue &&value, uint64_t hash) { BLI_assert(!this->is_occupied()); BLI_assert(KeyInfo::is_not_empty_or_removed(key)); this->occupy_without_value(std::forward<ForwardKey>(key), hash); - new 
(this->value()) Value(std::forward<ForwardValue>(value)); + new (&value_buffer_) Value(std::forward<ForwardValue>(value)); } - template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash)) + template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); BLI_assert(KeyInfo::is_not_empty_or_removed(key)); - m_key = std::forward<ForwardKey>(key); + key_ = std::forward<ForwardKey>(key); } void remove() { BLI_assert(this->is_occupied()); - KeyInfo::remove(m_key); - this->value()->~Value(); + KeyInfo::remove(key_); + value_buffer_.ref().~Value(); } }; diff --git a/source/blender/blenlib/BLI_math_base_safe.h b/source/blender/blenlib/BLI_math_base_safe.h new file mode 100644 index 00000000000..88a08c3cbc7 --- /dev/null +++ b/source/blender/blenlib/BLI_math_base_safe.h @@ -0,0 +1,50 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __BLI_MATH_BASE_SAFE_H__ +#define __BLI_MATH_BASE_SAFE_H__ + +/** \file + * \ingroup bli + * + * This file provides safe alternatives to common math functions like sqrt, powf. + * In this context "safe" means that the output is not NaN if the input is not NaN. 
+ */ + +#include "BLI_math_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +MINLINE float safe_divide(float a, float b); +MINLINE float safe_modf(float a, float b); +MINLINE float safe_logf(float a, float base); +MINLINE float safe_sqrtf(float a); +MINLINE float safe_inverse_sqrtf(float a); +MINLINE float safe_asinf(float a); +MINLINE float safe_acosf(float a); +MINLINE float safe_powf(float base, float exponent); + +#ifdef __cplusplus +} +#endif + +#if BLI_MATH_DO_INLINE +# include "intern/math_base_safe_inline.c" +#endif + +#endif /* __BLI_MATH_BASE_SAFE_H__ */ diff --git a/source/blender/blenlib/BLI_math_color.h b/source/blender/blenlib/BLI_math_color.h index ba95da4092e..48f8e7d31d9 100644 --- a/source/blender/blenlib/BLI_math_color.h +++ b/source/blender/blenlib/BLI_math_color.h @@ -148,8 +148,12 @@ void blackbody_temperature_to_rgb_table(float *r_table, int width, float min, fl /********* lift/gamma/gain / ASC-CDL conversion ***********/ -void lift_gamma_gain_to_asc_cdl( - float *lift, float *gamma, float *gain, float *offset, float *slope, float *power); +void lift_gamma_gain_to_asc_cdl(const float *lift, + const float *gamma, + const float *gain, + float *offset, + float *slope, + float *power); #if BLI_MATH_DO_INLINE # include "intern/math_color_inline.c" diff --git a/source/blender/blenlib/BLI_math_geom.h b/source/blender/blenlib/BLI_math_geom.h index 563bcad5d14..3a24209b07c 100644 --- a/source/blender/blenlib/BLI_math_geom.h +++ b/source/blender/blenlib/BLI_math_geom.h @@ -190,6 +190,10 @@ float dist_squared_to_projected_aabb_simple(const float projmat[4][4], const float bbmin[3], const float bbmax[3]); +float closest_to_ray_v3(float r_close[3], + const float p[3], + const float ray_orig[3], + const float ray_dir[3]); float closest_to_line_v2(float r_close[2], const float p[2], const float l1[2], const float l2[2]); double closest_to_line_v2_db(double r_close[2], const double p[2], @@ -450,11 +454,11 @@ bool isect_ray_seg_v2(const float 
ray_origin[2], float *r_lambda, float *r_u); -bool isect_ray_seg_v3(const float ray_origin[3], - const float ray_direction[3], - const float v0[3], - const float v1[3], - float *r_lambda); +bool isect_ray_line_v3(const float ray_origin[3], + const float ray_direction[3], + const float v0[3], + const float v1[3], + float *r_lambda); /* point in polygon */ bool isect_point_poly_v2(const float pt[2], @@ -667,6 +671,13 @@ void projmat_dimensions(const float projmat[4][4], float *r_top, float *r_near, float *r_far); +void projmat_dimensions_db(const float projmat[4][4], + double *r_left, + double *r_right, + double *r_bottom, + double *r_top, + double *r_near, + double *r_far); void projmat_from_subregion(const float projmat[4][4], const int win_size[2], diff --git a/source/blender/blenlib/BLI_math_matrix.h b/source/blender/blenlib/BLI_math_matrix.h index 2d11797bc34..33fcd750aee 100644 --- a/source/blender/blenlib/BLI_math_matrix.h +++ b/source/blender/blenlib/BLI_math_matrix.h @@ -64,7 +64,7 @@ void swap_m3m3(float A[3][3], float B[3][3]); void swap_m4m4(float A[4][4], float B[4][4]); /* Build index shuffle matrix */ -void shuffle_m4(float R[4][4], int index[4]); +void shuffle_m4(float R[4][4], const int index[4]); /******************************** Arithmetic *********************************/ diff --git a/source/blender/blenlib/BLI_math_vector.h b/source/blender/blenlib/BLI_math_vector.h index d46c02a961c..362ab3769a0 100644 --- a/source/blender/blenlib/BLI_math_vector.h +++ b/source/blender/blenlib/BLI_math_vector.h @@ -133,6 +133,7 @@ MINLINE void sub_v3_v3v3_db(double r[3], const double a[3], const double b[3]); MINLINE void sub_v4_v4(float r[4], const float a[4]); MINLINE void sub_v4_v4v4(float r[4], const float a[4], const float b[4]); +MINLINE void sub_v2db_v2fl_v2fl(double r[2], const float a[2], const float b[2]); MINLINE void sub_v3db_v3fl_v3fl(double r[3], const float a[3], const float b[3]); MINLINE void mul_v2_fl(float r[2], float f); @@ -205,6 +206,7 @@ 
MINLINE double dot_v3db_v3fl(const double a[3], const float b[3]) ATTR_WARN_UNUS MINLINE double dot_v3v3_db(const double a[3], const double b[3]) ATTR_WARN_UNUSED_RESULT; MINLINE float cross_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT; +MINLINE double cross_v2v2_db(const double a[2], const double b[2]) ATTR_WARN_UNUSED_RESULT; MINLINE void cross_v3_v3v3(float r[3], const float a[3], const float b[3]); MINLINE void cross_v3_v3v3_hi_prec(float r[3], const float a[3], const float b[3]); MINLINE void cross_v3_v3v3_db(double r[3], const double a[3], const double b[3]); @@ -221,6 +223,7 @@ MINLINE float len_manhattan_v2(const float v[2]) ATTR_WARN_UNUSED_RESULT; MINLINE int len_manhattan_v2_int(const int v[2]) ATTR_WARN_UNUSED_RESULT; MINLINE float len_manhattan_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT; MINLINE float len_v2(const float a[2]) ATTR_WARN_UNUSED_RESULT; +MINLINE double len_v2_db(const double v[2]) ATTR_WARN_UNUSED_RESULT; MINLINE float len_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT; MINLINE double len_v2v2_db(const double a[2], const double b[2]) ATTR_WARN_UNUSED_RESULT; MINLINE float len_v2v2_int(const int v1[2], const int v2[2]); diff --git a/source/blender/blenlib/BLI_memory_utils.hh b/source/blender/blenlib/BLI_memory_utils.hh index de9fc956bfb..9f65fe0742e 100644 --- a/source/blender/blenlib/BLI_memory_utils.hh +++ b/source/blender/blenlib/BLI_memory_utils.hh @@ -19,61 +19,86 @@ /** \file * \ingroup bli + * Some of the functions below have very similar alternatives in the standard library. However, it + * is rather annoying to use those when debugging. Therefore, some more specialized and easier to + * debug functions are provided here. */ #include <memory> +#include <new> +#include <type_traits> #include "BLI_utildefines.h" namespace blender { /** - * Call the default constructor on n consecutive elements. For trivially constructible types, this - * does nothing. 
+ * Call the destructor on n consecutive values. For trivially destructible types, this does + * nothing. + * + * Exception Safety: Destructors shouldn't throw exceptions. * * Before: - * ptr: uninitialized - * After: * ptr: initialized + * After: + * ptr: uninitialized */ -template<typename T> void default_construct_n(T *ptr, uint n) +template<typename T> void destruct_n(T *ptr, int64_t n) { + BLI_assert(n >= 0); + + static_assert(std::is_nothrow_destructible_v<T>, + "This should be true for all types. Destructors are noexcept by default."); + /* This is not strictly necessary, because the loop below will be optimized away anyway. It is - * nice to make behavior this explicitely, though. */ - if (std::is_trivially_constructible<T>::value) { + * nice to make behavior this explicitly, though. */ + if (std::is_trivially_destructible_v<T>) { return; } - for (uint i = 0; i < n; i++) { - new (ptr + i) T; + for (int64_t i = 0; i < n; i++) { + ptr[i].~T(); } } /** - * Call the destructor on n consecutive values. For trivially destructible types, this does - * nothing. + * Call the default constructor on n consecutive elements. For trivially constructible types, this + * does nothing. + * + * Exception Safety: Strong. * * Before: - * ptr: initialized - * After: * ptr: uninitialized + * After: + * ptr: initialized */ -template<typename T> void destruct_n(T *ptr, uint n) +template<typename T> void default_construct_n(T *ptr, int64_t n) { + BLI_assert(n >= 0); + /* This is not strictly necessary, because the loop below will be optimized away anyway. It is - * nice to make behavior this explicitely, though. */ - if (std::is_trivially_destructible<T>::value) { + * nice to make behavior this explicitly, though. */ + if (std::is_trivially_constructible_v<T>) { return; } - for (uint i = 0; i < n; i++) { - ptr[i].~T(); + int64_t current = 0; + try { + for (; current < n; current++) { + new ((void *)(ptr + current)) T; + } + } + catch (...) 
{ + destruct_n(ptr, current); + throw; } } /** * Copy n values from src to dst. * + * Exception Safety: Basic. + * * Before: * src: initialized * dst: initialized @@ -81,9 +106,11 @@ template<typename T> void destruct_n(T *ptr, uint n) * src: initialized * dst: initialized */ -template<typename T> void initialized_copy_n(const T *src, uint n, T *dst) +template<typename T> void initialized_copy_n(const T *src, int64_t n, T *dst) { - for (uint i = 0; i < n; i++) { + BLI_assert(n >= 0); + + for (int64_t i = 0; i < n; i++) { dst[i] = src[i]; } } @@ -91,6 +118,36 @@ template<typename T> void initialized_copy_n(const T *src, uint n, T *dst) /** * Copy n values from src to dst. * + * Exception Safety: Strong. + * + * Before: + * src: initialized + * dst: uninitialized + * After: + * src: initialized + * dst: initialized + */ +template<typename T> void uninitialized_copy_n(const T *src, int64_t n, T *dst) +{ + BLI_assert(n >= 0); + + int64_t current = 0; + try { + for (; current < n; current++) { + new ((void *)(dst + current)) T(src[current]); + } + } + catch (...) { + destruct_n(dst, current); + throw; + } +} + +/** + * Convert n values from type `From` to type `To`. + * + * Exception Safety: Strong. + * * Before: * src: initialized * dst: uninitialized @@ -98,16 +155,28 @@ template<typename T> void initialized_copy_n(const T *src, uint n, T *dst) * src: initialized * dst: initialized */ -template<typename T> void uninitialized_copy_n(const T *src, uint n, T *dst) +template<typename From, typename To> +void uninitialized_convert_n(const From *src, int64_t n, To *dst) { - for (uint i = 0; i < n; i++) { - new (dst + i) T(src[i]); + BLI_assert(n >= 0); + + int64_t current = 0; + try { + for (; current < n; current++) { + new ((void *)(dst + current)) To((To)src[current]); + } + } + catch (...) { + destruct_n(dst, current); + throw; } } /** * Move n values from src to dst. * + * Exception Safety: Basic. 
+ * * Before: * src: initialized * dst: initialized @@ -115,9 +184,11 @@ template<typename T> void uninitialized_copy_n(const T *src, uint n, T *dst) * src: initialized, moved-from * dst: initialized */ -template<typename T> void initialized_move_n(T *src, uint n, T *dst) +template<typename T> void initialized_move_n(T *src, int64_t n, T *dst) { - for (uint i = 0; i < n; i++) { + BLI_assert(n >= 0); + + for (int64_t i = 0; i < n; i++) { dst[i] = std::move(src[i]); } } @@ -125,6 +196,8 @@ template<typename T> void initialized_move_n(T *src, uint n, T *dst) /** * Move n values from src to dst. * + * Exception Safety: Basic. + * * Before: * src: initialized * dst: uninitialized @@ -132,10 +205,19 @@ template<typename T> void initialized_move_n(T *src, uint n, T *dst) * src: initialized, moved-from * dst: initialized */ -template<typename T> void uninitialized_move_n(T *src, uint n, T *dst) +template<typename T> void uninitialized_move_n(T *src, int64_t n, T *dst) { - for (uint i = 0; i < n; i++) { - new (dst + i) T(std::move(src[i])); + BLI_assert(n >= 0); + + int64_t current = 0; + try { + for (; current < n; current++) { + new ((void *)(dst + current)) T(std::move(src[current])); + } + } + catch (...) { + destruct_n(dst, current); + throw; } } @@ -143,6 +225,8 @@ template<typename T> void uninitialized_move_n(T *src, uint n, T *dst) * Relocate n values from src to dst. Relocation is a move followed by destruction of the src * value. * + * Exception Safety: Basic. 
+ * * Before: * src: initialized * dst: initialized @@ -150,8 +234,10 @@ template<typename T> void uninitialized_move_n(T *src, uint n, T *dst) * src: uninitialized * dst: initialized */ -template<typename T> void initialized_relocate_n(T *src, uint n, T *dst) +template<typename T> void initialized_relocate_n(T *src, int64_t n, T *dst) { + BLI_assert(n >= 0); + initialized_move_n(src, n, dst); destruct_n(src, n); } @@ -160,15 +246,19 @@ template<typename T> void initialized_relocate_n(T *src, uint n, T *dst) * Relocate n values from src to dst. Relocation is a move followed by destruction of the src * value. * + * Exception Safety: Basic. + * * Before: * src: initialized - * dst: uinitialized + * dst: uninitialized * After: * src: uninitialized * dst: initialized */ -template<typename T> void uninitialized_relocate_n(T *src, uint n, T *dst) +template<typename T> void uninitialized_relocate_n(T *src, int64_t n, T *dst) { + BLI_assert(n >= 0); + uninitialized_move_n(src, n, dst); destruct_n(src, n); } @@ -176,14 +266,18 @@ template<typename T> void uninitialized_relocate_n(T *src, uint n, T *dst) /** * Copy the value to n consecutive elements. * + * Exception Safety: Basic. + * * Before: * dst: initialized * After: * dst: initialized */ -template<typename T> void initialized_fill_n(T *dst, uint n, const T &value) +template<typename T> void initialized_fill_n(T *dst, int64_t n, const T &value) { - for (uint i = 0; i < n; i++) { + BLI_assert(n >= 0); + + for (int64_t i = 0; i < n; i++) { dst[i] = value; } } @@ -191,24 +285,27 @@ template<typename T> void initialized_fill_n(T *dst, uint n, const T &value) /** * Copy the value to n consecutive elements. * + * Exception Safety: Strong. 
+ * * Before: * dst: uninitialized * After: * dst: initialized */ -template<typename T> void uninitialized_fill_n(T *dst, uint n, const T &value) +template<typename T> void uninitialized_fill_n(T *dst, int64_t n, const T &value) { - for (uint i = 0; i < n; i++) { - new (dst + i) T(value); - } -} + BLI_assert(n >= 0); -/** - * The same as std::unique_ptr. This can be removed when we start using C++14. - */ -template<typename T, typename... Args> std::unique_ptr<T> make_unique(Args &&... args) -{ - return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); + int64_t current = 0; + try { + for (; current < n; current++) { + new ((void *)(dst + current)) T(value); + } + } + catch (...) { + destruct_n(dst, current); + throw; + } } template<typename T> struct DestructValueAtAddress { @@ -225,30 +322,112 @@ template<typename T> struct DestructValueAtAddress { template<typename T> using destruct_ptr = std::unique_ptr<T, DestructValueAtAddress<T>>; /** - * An `AlignedBuffer` is simply a byte array with the given size and alignment. The buffer will + * An `AlignedBuffer` is a byte array with at least the given size and alignment. The buffer will * not be initialized by the default constructor. - * - * This can be used to reserve memory for C++ objects whose lifetime is different from the - * lifetime of the object they are embedded in. It's used by containers with small buffer - * optimization and hash table implementations. */ template<size_t Size, size_t Alignment> class alignas(Alignment) AlignedBuffer { private: /* Don't create an empty array. This causes problems with some compilers. */ - char m_buffer[(Size > 0) ? Size : 1]; + char buffer_[(Size > 0) ? 
Size : 1]; public: + operator void *() + { + return (void *)buffer_; + } + + operator const void *() const + { + return (void *)buffer_; + } + void *ptr() { - return (void *)m_buffer; + return (void *)buffer_; } const void *ptr() const { - return (const void *)m_buffer; + return (const void *)buffer_; + } +}; + +/** + * This can be used to reserve memory for C++ objects whose lifetime is different from the + * lifetime of the object they are embedded in. It's used by containers with small buffer + * optimization and hash table implementations. + */ +template<typename T, int64_t Size = 1> class TypedBuffer { + private: + AlignedBuffer<sizeof(T) * (size_t)Size, alignof(T)> buffer_; + + public: + operator T *() + { + return (T *)&buffer_; + } + + operator const T *() const + { + return (const T *)&buffer_; } + + T &operator*() + { + return *(T *)&buffer_; + } + + const T &operator*() const + { + return *(const T *)&buffer_; + } + + T *ptr() + { + return (T *)&buffer_; + } + + const T *ptr() const + { + return (const T *)&buffer_; + } + + T &ref() + { + return *(T *)&buffer_; + } + + const T &ref() const + { + return *(const T *)&buffer_; + } +}; + +/** + * This can be used by container constructors. A parameter of this type should be used to indicate + * that the constructor does not construct the elements. + */ +class NoInitialization { }; +/** + * Helper variable that checks if a pointer type can be converted into another pointer type without + * issues. Possible issues are casting away const and casting a pointer to a child class. + * Adding const or casting to a parent class is fine. + */ +template<typename From, typename To> +inline constexpr bool is_convertible_pointer_v = + std::is_convertible_v<From, To> &&std::is_pointer_v<From> &&std::is_pointer_v<To>; + +/** + * Inline buffers for small-object-optimization should be disable by default. Otherwise we might + * get large unexpected allocations on the stack. 
+ */
+inline constexpr int64_t default_inline_buffer_capacity(size_t element_size)
+{
+  return ((int64_t)element_size < 100) ? 4 : 0;
+}
+
 }  // namespace blender
 
 #endif /* __BLI_MEMORY_UTILS_HH__ */
diff --git a/source/blender/blenlib/BLI_multi_value_map.hh b/source/blender/blenlib/BLI_multi_value_map.hh
new file mode 100644
index 00000000000..c20c4ef9677
--- /dev/null
+++ b/source/blender/blenlib/BLI_multi_value_map.hh
@@ -0,0 +1,134 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __BLI_MULTI_VALUE_MAP_HH__
+#define __BLI_MULTI_VALUE_MAP_HH__
+
+/** \file
+ * \ingroup bli
+ *
+ * A `blender::MultiValueMap<Key, Value>` is an unordered associative container that stores
+ * key-value pairs. It is different from `blender::Map` in that it can store multiple values for
+ * the same key. The list of values that corresponds to a specific key can contain duplicates
+ * and their order is maintained.
+ *
+ * This data structure is different from a `std::multimap`, because a multimap can store the same
+ * key more than once and MultiValueMap can't.
+ *
+ * Currently, this class exists mainly for convenience. There are no performance benefits over
+ * using Map<Key, Vector<Value>>. In the future, a better implementation for this data structure
+ * can be developed.
+ */ + +#include "BLI_map.hh" +#include "BLI_vector.hh" + +namespace blender { + +template<typename Key, typename Value> class MultiValueMap { + private: + using MapType = Map<Key, Vector<Value>>; + MapType map_; + + public: + /** + * Add a new value for the given key. If the map contains the key already, the value will be + * appended to the list of corresponding values. + */ + void add(const Key &key, const Value &value) + { + this->add_as(key, value); + } + void add(const Key &key, Value &&value) + { + this->add_as(key, std::move(value)); + } + void add(Key &&key, const Value &value) + { + this->add_as(std::move(key), value); + } + void add(Key &&key, Value &&value) + { + this->add_as(std::move(key), std::move(value)); + } + template<typename ForwardKey, typename ForwardValue> + void add_as(ForwardKey &&key, ForwardValue &&value) + { + Vector<Value> &vector = map_.lookup_or_add_default_as(std::forward<ForwardKey>(key)); + vector.append(std::forward<ForwardValue>(value)); + } + + /** + * Add all given values to the key. + */ + void add_multiple(const Key &key, Span<Value> values) + { + this->add_multiple_as(key, values); + } + void add_multiple(Key &&key, Span<Value> values) + { + this->add_multiple_as(std::move(key), values); + } + template<typename ForwardKey> void add_multiple_as(ForwardKey &&key, Span<Value> values) + { + Vector<Value> &vector = map_.lookup_or_add_default_as(std::forward<ForwardKey>(key)); + vector.extend(values); + } + + /** + * Get a span to all the values that are stored for the given key. + */ + Span<Value> lookup(const Key &key) const + { + return this->lookup_as(key); + } + template<typename ForwardKey> Span<Value> lookup_as(const ForwardKey &key) const + { + const Vector<Value> *vector = map_.lookup_ptr_as(key); + if (vector != nullptr) { + return vector->as_span(); + } + return {}; + } + + /** + * Note: This signature will change when the implementation changes. 
+ */ + typename MapType::ItemIterator items() const + { + return map_.items(); + } + + /** + * Note: This signature will change when the implementation changes. + */ + typename MapType::KeyIterator keys() const + { + return map_.keys(); + } + + /** + * Note: This signature will change when the implementation changes. + */ + typename MapType::ValueIterator values() const + { + return map_.values(); + } +}; + +} // namespace blender + +#endif /* __BLI_MULTI_VALUE_MAP_HH__ */ diff --git a/source/blender/blenlib/BLI_optional.hh b/source/blender/blenlib/BLI_optional.hh deleted file mode 100644 index b5f98d6fa97..00000000000 --- a/source/blender/blenlib/BLI_optional.hh +++ /dev/null @@ -1,189 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -/** \file - * \ingroup bli - * - * Simple version of std::optional, which is only available since C++17. 
- */ - -#ifndef __BLI_OPTIONAL_HH__ -#define __BLI_OPTIONAL_HH__ - -#include "BLI_memory_utils.hh" -#include "BLI_utildefines.h" - -#include <algorithm> -#include <memory> - -namespace blender { - -template<typename T> class Optional { - private: - AlignedBuffer<sizeof(T), alignof(T)> m_storage; - bool m_set; - - public: - Optional() : m_set(false) - { - } - - ~Optional() - { - this->reset(); - } - - Optional(const T &value) : Optional() - { - this->set(value); - } - - Optional(T &&value) : Optional() - { - this->set(std::forward<T>(value)); - } - - Optional(const Optional &other) : Optional() - { - if (other.has_value()) { - this->set(other.value()); - } - } - - Optional(Optional &&other) : Optional() - { - if (other.has_value()) { - this->set(std::move(other.value())); - } - } - - Optional &operator=(const Optional &other) - { - if (this == &other) { - return *this; - } - if (other.has_value()) { - this->set(other.value()); - } - else { - this->reset(); - } - return *this; - } - - Optional &operator=(Optional &&other) - { - if (this == &other) { - return *this; - } - if (other.has_value()) { - this->set(std::move(other.value())); - } - else { - this->reset(); - } - return *this; - } - - bool has_value() const - { - return m_set; - } - - const T &value() const - { - BLI_assert(m_set); - return *this->value_ptr(); - } - - T &value() - { - BLI_assert(m_set); - return *this->value_ptr(); - } - - void set(const T &value) - { - if (m_set) { - this->value() = value; - } - else { - new (this->value_ptr()) T(value); - m_set = true; - } - } - - void set(T &&value) - { - if (m_set) { - this->value() = std::move(value); - } - else { - new (this->value_ptr()) T(std::move(value)); - m_set = true; - } - } - - void set_new(const T &value) - { - BLI_assert(!m_set); - new (this->value_ptr()) T(value); - m_set = true; - } - - void set_new(T &&value) - { - BLI_assert(!m_set); - new (this->value_ptr()) T(std::move(value)); - m_set = true; - } - - void reset() - { - if (m_set) { - 
this->value_ptr()->~T(); - m_set = false; - } - } - - T extract() - { - BLI_assert(m_set); - T value = std::move(this->value()); - this->reset(); - return value; - } - - T *operator->() - { - return this->value_ptr(); - } - - T &operator*() - { - return *this->value_ptr(); - } - - private: - T *value_ptr() const - { - return (T *)m_storage.ptr(); - } -}; - -} /* namespace blender */ - -#endif /* __BLI_OPTIONAL_HH__ */ diff --git a/source/blender/blenlib/BLI_probing_strategies.hh b/source/blender/blenlib/BLI_probing_strategies.hh index 29ebe28aff9..0e5338fa6ed 100644 --- a/source/blender/blenlib/BLI_probing_strategies.hh +++ b/source/blender/blenlib/BLI_probing_strategies.hh @@ -25,17 +25,17 @@ * values based on an initial hash value. * * A probing strategy has to implement the following methods: - * - Constructor(uint32_t hash): Start a new probing sequence based on the given hash. - * - get() const -> uint32_t: Get the current value in the sequence. + * - Constructor(uint64_t hash): Start a new probing sequence based on the given hash. + * - get() const -> uint64_t: Get the current value in the sequence. * - next() -> void: Update the internal state, so that the next value can be accessed with get(). - * - linear_steps() -> uint32_t: Returns number of linear probing steps that should be done. + * - linear_steps() -> int64_t: Returns number of linear probing steps that should be done. * * Using linear probing steps between larger jumps can result in better performance, due to * improved cache usage. It's a way of getting the benefits or linear probing without the * clustering issues. However, more linear steps can also make things slower when the initial hash * produces many collisions. * - * Every probing strategy has to guarantee, that every possible uint32_t is returned eventually. + * Every probing strategy has to guarantee, that every possible uint64_t is returned eventually. * This is necessary for correctness. 
If this is not the case, empty slots might not be found. * * The SLOT_PROBING_BEGIN and SLOT_PROBING_END macros can be used to implement a loop that iterates @@ -65,24 +65,24 @@ namespace blender { */ class LinearProbingStrategy { private: - uint32_t m_hash; + uint64_t hash_; public: - LinearProbingStrategy(uint32_t hash) : m_hash(hash) + LinearProbingStrategy(const uint64_t hash) : hash_(hash) { } void next() { - m_hash++; + hash_++; } - uint32_t get() const + uint64_t get() const { - return m_hash; + return hash_; } - uint32_t linear_steps() const + int64_t linear_steps() const { return UINT32_MAX; } @@ -101,28 +101,28 @@ class LinearProbingStrategy { */ class QuadraticProbingStrategy { private: - uint32_t m_original_hash; - uint32_t m_current_hash; - uint32_t m_iteration; + uint64_t original_hash_; + uint64_t current_hash_; + uint64_t iteration_; public: - QuadraticProbingStrategy(uint32_t hash) - : m_original_hash(hash), m_current_hash(hash), m_iteration(1) + QuadraticProbingStrategy(const uint64_t hash) + : original_hash_(hash), current_hash_(hash), iteration_(1) { } void next() { - m_current_hash = m_original_hash + ((m_iteration * m_iteration + m_iteration) >> 1); - m_iteration++; + current_hash_ = original_hash_ + ((iteration_ * iteration_ + iteration_) >> 1); + iteration_++; } - uint32_t get() const + uint64_t get() const { - return m_current_hash; + return current_hash_; } - uint32_t linear_steps() const + int64_t linear_steps() const { return 1; } @@ -138,13 +138,13 @@ class QuadraticProbingStrategy { * PreShuffle: When true, the initial call to next() will be done to the constructor. This can help * when the hash function has put little information into the lower bits. 
*/ -template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy { +template<uint64_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy { private: - uint32_t m_hash; - uint32_t m_perturb; + uint64_t hash_; + uint64_t perturb_; public: - PythonProbingStrategy(uint32_t hash) : m_hash(hash), m_perturb(hash) + PythonProbingStrategy(const uint64_t hash) : hash_(hash), perturb_(hash) { if (PreShuffle) { this->next(); @@ -153,16 +153,16 @@ template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingS void next() { - m_perturb >>= 5; - m_hash = 5 * m_hash + 1 + m_perturb; + perturb_ >>= 5; + hash_ = 5 * hash_ + 1 + perturb_; } - uint32_t get() const + uint64_t get() const { - return m_hash; + return hash_; } - uint32_t linear_steps() const + int64_t linear_steps() const { return LinearSteps; } @@ -173,13 +173,13 @@ template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingS * method. This way more bits are taken into account earlier. After a couple of collisions (that * should happen rarely), it will fallback to a sequence that hits every slot. 
*/ -template<uint32_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy { +template<uint64_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy { private: - uint32_t m_hash; - uint32_t m_perturb; + uint64_t hash_; + uint64_t perturb_; public: - ShuffleProbingStrategy(uint32_t hash) : m_hash(hash), m_perturb(hash) + ShuffleProbingStrategy(const uint64_t hash) : hash_(hash), perturb_(hash) { if (PreShuffle) { this->next(); @@ -188,21 +188,21 @@ template<uint32_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbing void next() { - if (m_perturb != 0) { - m_perturb >>= 10; - m_hash = ((m_hash >> 16) ^ m_hash) * 0x45d9f3b + m_perturb; + if (perturb_ != 0) { + perturb_ >>= 10; + hash_ = ((hash_ >> 16) ^ hash_) * 0x45d9f3b + perturb_; } else { - m_hash = 5 * m_hash + 1; + hash_ = 5 * hash_ + 1; } } - uint32_t get() const + uint64_t get() const { - return m_hash; + return hash_; } - uint32_t linear_steps() const + int64_t linear_steps() const { return LinearSteps; } @@ -233,10 +233,10 @@ using DefaultProbingStrategy = PythonProbingStrategy<>; #define SLOT_PROBING_BEGIN(PROBING_STRATEGY, HASH, MASK, R_SLOT_INDEX) \ PROBING_STRATEGY probing_strategy(HASH); \ do { \ - uint32_t linear_offset = 0; \ - uint32_t current_hash = probing_strategy.get(); \ + int64_t linear_offset = 0; \ + uint64_t current_hash = probing_strategy.get(); \ do { \ - uint32_t R_SLOT_INDEX = (current_hash + linear_offset) & MASK; + int64_t R_SLOT_INDEX = (int64_t)((current_hash + (uint64_t)linear_offset) & MASK); #define SLOT_PROBING_END() \ } while (++linear_offset < probing_strategy.linear_steps()); \ diff --git a/source/blender/blenlib/BLI_rand.h b/source/blender/blenlib/BLI_rand.h index ae78ea3af16..c55bbd26db5 100644 --- a/source/blender/blenlib/BLI_rand.h +++ b/source/blender/blenlib/BLI_rand.h @@ -105,12 +105,12 @@ int BLI_rng_thread_rand(RNG_THREAD_ARRAY *rngarr, int thread) ATTR_WARN_UNUSED_R /** Return the _n_th number of the given 
low-discrepancy sequence. */ void BLI_halton_1d(unsigned int prime, double offset, int n, double *r); -void BLI_halton_2d(unsigned int prime[2], double offset[2], int n, double *r); -void BLI_halton_3d(unsigned int prime[3], double offset[3], int n, double *r); +void BLI_halton_2d(const unsigned int prime[2], double offset[2], int n, double *r); +void BLI_halton_3d(const unsigned int prime[3], double offset[3], int n, double *r); void BLI_hammersley_1d(unsigned int n, double *r); /** Return the whole low-discrepancy sequence up to _n_. */ -void BLI_halton_2d_sequence(unsigned int prime[2], double offset[2], int n, double *r); +void BLI_halton_2d_sequence(const unsigned int prime[2], double offset[2], int n, double *r); void BLI_hammersley_2d_sequence(unsigned int n, double *r); #ifdef __cplusplus diff --git a/source/blender/blenlib/BLI_rand.hh b/source/blender/blenlib/BLI_rand.hh new file mode 100644 index 00000000000..7a98ee0f2bb --- /dev/null +++ b/source/blender/blenlib/BLI_rand.hh @@ -0,0 +1,147 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** \file + * \ingroup bli + */ + +#ifndef __BLI_RAND_HH__ +#define __BLI_RAND_HH__ + +#include "BLI_float2.hh" +#include "BLI_float3.hh" +#include "BLI_math.h" +#include "BLI_span.hh" +#include "BLI_utildefines.h" + +namespace blender { + +class RandomNumberGenerator { + private: + uint64_t x_; + + public: + RandomNumberGenerator(uint32_t seed = 0) + { + this->seed(seed); + } + + /** + * Set the seed for future random numbers. + */ + void seed(uint32_t seed) + { + constexpr uint64_t lowseed = 0x330E; + x_ = (((uint64_t)seed) << 16) | lowseed; + } + + void seed_random(uint32_t seed); + + uint32_t get_uint32() + { + this->step(); + return (uint32_t)(x_ >> 17); + } + + int32_t get_int32() + { + this->step(); + return (int32_t)(x_ >> 17); + } + + /** + * \return Random value (0..N), but never N. + */ + int32_t get_int32(int32_t max_exclusive) + { + BLI_assert(max_exclusive > 0); + return this->get_int32() % max_exclusive; + } + + /** + * \return Random value (0..1), but never 1.0. + */ + double get_double() + { + return (double)this->get_int32() / 0x80000000; + } + + /** + * \return Random value (0..1), but never 1.0. + */ + float get_float() + { + return (float)this->get_int32() / 0x80000000; + } + + template<typename T> void shuffle(MutableSpan<T> values) + { + /* Cannot shuffle arrays of this size yet. */ + BLI_assert(values.size() <= INT32_MAX); + + for (int i = values.size() - 1; i >= 2; i--) { + int j = this->get_int32(i); + if (i != j) { + std::swap(values[i], values[j]); + } + } + } + + /** + * Compute uniformly distributed barycentric coordinates. 
+ */ + float3 get_barycentric_coordinates() + { + float rand1 = this->get_float(); + float rand2 = this->get_float(); + + if (rand1 + rand2 > 1.0f) { + rand1 = 1.0f - rand1; + rand2 = 1.0f - rand2; + } + + return float3(rand1, rand2, 1.0f - rand1 - rand2); + } + + float2 get_unit_float2(); + float3 get_unit_float3(); + float2 get_triangle_sample(float2 v1, float2 v2, float2 v3); + void get_bytes(MutableSpan<char> r_bytes); + + /** + * Simulate getting \a n random values. + */ + void skip(int64_t n) + { + while (n--) { + this->step(); + } + } + + private: + void step() + { + constexpr uint64_t multiplier = 0x5DEECE66Dll; + constexpr uint64_t addend = 0xB; + constexpr uint64_t mask = 0x0000FFFFFFFFFFFFll; + + x_ = (multiplier * x_ + addend) & mask; + } +}; + +} // namespace blender + +#endif /* __BLI_RAND_HH__ */ diff --git a/source/blender/blenlib/BLI_rect.h b/source/blender/blenlib/BLI_rect.h index b1faae03583..14d18308ed6 100644 --- a/source/blender/blenlib/BLI_rect.h +++ b/source/blender/blenlib/BLI_rect.h @@ -63,6 +63,7 @@ void BLI_rcti_translate(struct rcti *rect, int x, int y); void BLI_rcti_recenter(struct rcti *rect, int x, int y); void BLI_rctf_recenter(struct rctf *rect, float x, float y); void BLI_rcti_resize(struct rcti *rect, int x, int y); +void BLI_rcti_pad(struct rcti *rect, int pad_x, int pad_y); void BLI_rctf_resize(struct rctf *rect, float x, float y); void BLI_rcti_scale(rcti *rect, const float scale); void BLI_rctf_scale(rctf *rect, const float scale); diff --git a/source/blender/blenlib/BLI_resource_collector.hh b/source/blender/blenlib/BLI_resource_collector.hh new file mode 100644 index 00000000000..e1be87d8af2 --- /dev/null +++ b/source/blender/blenlib/BLI_resource_collector.hh @@ -0,0 +1,151 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later 
version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __BLI_RESOURCE_COLLECTOR_HH__ +#define __BLI_RESOURCE_COLLECTOR_HH__ + +/** \file + * \ingroup bli + * + * A ResourceCollector holds an arbitrary set of resources, that will be destructed and/or freed + * when the ResourceCollector is destructed. This is useful when some object has to take ownership + * of other objects, but it does not know the type of those other objects. + * + * Resources owned by the ResourceCollector will be freed in reverse order. That allows resources + * that are added later to depend on resources that have been added before. + */ + +#include "BLI_linear_allocator.hh" +#include "BLI_utility_mixins.hh" +#include "BLI_vector.hh" + +namespace blender { + +class ResourceCollector : NonCopyable, NonMovable { + private: + struct ResourceData { + void *data; + void (*free)(void *data); + const char *debug_name; + }; + + LinearAllocator<> m_allocator; + Vector<ResourceData> m_resources; + + public: + ResourceCollector() = default; + + ~ResourceCollector() + { + /* Free in reversed order. */ + for (int64_t i = m_resources.size(); i--;) { + ResourceData &data = m_resources[i]; + data.free(data.data); + } + } + + /** + * Pass ownership of the resource to the ResourceCollector. It will be destructed and freed when + * the collector is destructed. 
+   */
+  template<typename T> void add(std::unique_ptr<T> resource, const char *name)
+  {
+    BLI_assert(resource.get() != nullptr);
+    this->add(
+        resource.release(),
+        [](void *data) {
+          T *typed_data = reinterpret_cast<T *>(data);
+          delete typed_data;
+        },
+        name);
+  }
+
+  /**
+   * Pass ownership of the resource to the ResourceCollector. It will be destructed when the
+   * collector is destructed.
+   */
+  template<typename T> void add(destruct_ptr<T> resource, const char *name)
+  {
+    /* There is no need to keep track of such types. */
+    if (std::is_trivially_destructible_v<T>) {
+      resource.release();
+      return;
+    }
+
+    BLI_assert(resource.get() != nullptr);
+    this->add(
+        resource.release(),
+        [](void *data) {
+          T *typed_data = reinterpret_cast<T *>(data);
+          typed_data->~T();
+        },
+        name);
+  }
+
+  /**
+   * Pass ownership of some resource to the ResourceCollector. The given free function will be
+   * called when the collector is destructed.
+   */
+  void add(void *userdata, void (*free)(void *), const char *name)
+  {
+    ResourceData data;
+    data.debug_name = name;
+    data.data = userdata;
+    data.free = free;
+    m_resources.append(data);
+  }
+
+  /**
+   * Returns a reference to a linear allocator that is owned by the ResourceCollector. Memory
+   * allocated through this allocator will be freed when the collector is destructed.
+   */
+  LinearAllocator<> &linear_allocator()
+  {
+    return m_allocator;
+  }
+
+  /**
+   * Utility method to construct an instance of type T that will be owned by the ResourceCollector.
+   */
+  template<typename T, typename... Args> T &construct(const char *name, Args &&... args)
+  {
+    T *value = m_allocator.construct<T>(std::forward<Args>(args)...);
+    this->add(destruct_ptr<T>(value), name);
+    return *value;
+  }
+
+  /**
+   * Print the names of all the resources that are owned by this ResourceCollector. This can be
+   * useful for debugging.
+ */ + void print(StringRef name) const + { + if (m_resources.size() == 0) { + std::cout << "\"" << name << "\" has no resources.\n"; + return; + } + else { + std::cout << "Resources for \"" << name << "\":\n"; + for (const ResourceData &data : m_resources) { + std::cout << " " << data.data << ": " << data.debug_name << '\n'; + } + } + } +}; + +} // namespace blender + +#endif /* __BLI_RESOURCE_COLLECTOR_HH__ */ diff --git a/source/blender/blenlib/BLI_set.hh b/source/blender/blenlib/BLI_set.hh index ece9fb05d8c..90adea69e06 100644 --- a/source/blender/blenlib/BLI_set.hh +++ b/source/blender/blenlib/BLI_set.hh @@ -30,7 +30,7 @@ * Every slot is in one of three states: empty, occupied or removed. If a slot is occupied, it * contains an instance of the key type. * - * Benchmarking and comparing hash tables is hard, because many factors influence the result. The + * Bench-marking and comparing hash tables is hard, because many factors influence the result. The * performance of a hash table depends on the combination of the hash function, probing strategy, * max load factor, key type, slot type and the data distribution. This implementation is designed * to be relatively fast by default in all cases. However, it also offers many customization @@ -49,21 +49,21 @@ * - Small buffer optimization is enabled by default, if the key is not too large. * - The methods `add_new` and `remove_contained` should be used instead of `add` and `remove` * whenever appropriate. Assumptions and intention are described better this way. - * - Lookups can be performed using types other than Key without conversion. For that use the - * methods ending with `_as`. The template parameters Hash and IsEqual have to support the other + * - Look-ups can be performed using types other than Key without conversion. For that use the + * methods ending with `_as`. The template parameters Hash and #IsEqual have to support the other * key type. 
This can greatly improve performance when the set contains strings. - * - The default constructor is cheap, even when a large InlineBufferCapacity is used. A large + * - The default constructor is cheap, even when a large #InlineBufferCapacity is used. A large * slot array will only be initialized when the first key is added. * - The `print_stats` method can be used to get information about the distribution of keys and * memory usage of the set. * - The method names don't follow the std::unordered_set names in many cases. Searching for such * names in this file will usually let you discover the new name. - * - There is a StdUnorderedSetWrapper class, that wraps std::unordered_set and gives it the same - * interface as blender::Set. This is useful for benchmarking. + * - There is a #StdUnorderedSetWrapper class, that wraps std::unordered_set and gives it the same + * interface as blender::Set. This is useful for bench-marking. * * Possible Improvements: - * - Use a branchless loop over slots in grow function (measured ~10% performance improvement when - * the distribution of occupied slots is suffiently random). + * - Use a branch-less loop over slots in grow function (measured ~10% performance improvement when + * the distribution of occupied slots is sufficiently random). * - Support max load factor customization. * - Improve performance with large data sets through software prefetching. I got fairly * significant improvements in simple tests (~30% faster). It still needs to be investigated how @@ -89,11 +89,8 @@ template< * The minimum number of elements that can be stored in this Set without doing a heap * allocation. This is useful when you expect to have many small sets. However, keep in mind * that (unlike vector) initializing a set has a O(n) cost in the number of slots. - * - * When Key is large, the small buffer optimization is disabled by default to avoid large - * unexpected allocations on the stack. It can still be enabled explicitely though. 
*/ - uint32_t InlineBufferCapacity = (sizeof(Key) < 100) ? 4 : 0, + int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(Key)), /** * The strategy used to deal with collisions. They are defined in BLI_probing_strategies.hh. */ @@ -128,30 +125,30 @@ class Set { * Slots are either empty, occupied or removed. The number of occupied slots can be computed by * subtracting the removed slots from the occupied-and-removed slots. */ - uint32_t m_removed_slots; - uint32_t m_occupied_and_removed_slots; + int64_t removed_slots_; + int64_t occupied_and_removed_slots_; /** * The maximum number of slots that can be used (either occupied or removed) until the set has to * grow. This is the total number of slots times the max load factor. */ - uint32_t m_usable_slots; + int64_t usable_slots_; /** * The number of slots minus one. This is a bit mask that can be used to turn any integer into a * valid slot index efficiently. */ - uint32_t m_slot_mask; + uint64_t slot_mask_; /** This is called to hash incoming keys. */ - Hash m_hash; + Hash hash_; /** This is called to check equality of two keys. */ - IsEqual m_is_equal; + IsEqual is_equal_; /** The max load factor is 1/2 = 50% by default. */ #define LOAD_FACTOR 1, 2 - LoadFactor m_max_load_factor = LoadFactor(LOAD_FACTOR); + LoadFactor max_load_factor_ = LoadFactor(LOAD_FACTOR); using SlotArray = Array<Slot, LoadFactor::compute_total_slots(InlineBufferCapacity, LOAD_FACTOR), Allocator>; #undef LOAD_FACTOR @@ -160,12 +157,12 @@ class Set { * This is the array that contains the actual slots. There is always at least one empty slot and * the size of the array is a power of two. */ - SlotArray m_slots; + SlotArray slots_; /** Iterate over a slot index sequence for a given hash. 
*/ #define SET_SLOT_PROBING_BEGIN(HASH, R_SLOT) \ - SLOT_PROBING_BEGIN (ProbingStrategy, HASH, m_slot_mask, SLOT_INDEX) \ - auto &R_SLOT = m_slots[SLOT_INDEX]; + SLOT_PROBING_BEGIN (ProbingStrategy, HASH, slot_mask_, SLOT_INDEX) \ + auto &R_SLOT = slots_[SLOT_INDEX]; #define SET_SLOT_PROBING_END() SLOT_PROBING_END() public: @@ -175,11 +172,11 @@ class Set { * grow operation is performed on the first insertion. */ Set() - : m_removed_slots(0), - m_occupied_and_removed_slots(0), - m_usable_slots(0), - m_slot_mask(0), - m_slots(1) + : removed_slots_(0), + occupied_and_removed_slots_(0), + usable_slots_(0), + slot_mask_(0), + slots_(1) { } @@ -196,13 +193,13 @@ class Set { Set(const Set &other) = default; Set(Set &&other) noexcept - : m_removed_slots(other.m_removed_slots), - m_occupied_and_removed_slots(other.m_occupied_and_removed_slots), - m_usable_slots(other.m_usable_slots), - m_slot_mask(other.m_slot_mask), - m_hash(std::move(other.m_hash)), - m_is_equal(std::move(other.m_is_equal)), - m_slots(std::move(other.m_slots)) + : removed_slots_(other.removed_slots_), + occupied_and_removed_slots_(other.occupied_and_removed_slots_), + usable_slots_(other.usable_slots_), + slot_mask_(other.slot_mask_), + hash_(std::move(other.hash_)), + is_equal_(std::move(other.is_equal_)), + slots_(std::move(other.slots_)) { other.~Set(); new (&other) Set(); @@ -239,11 +236,11 @@ class Set { */ void add_new(const Key &key) { - this->add_new__impl(key, m_hash(key)); + this->add_new__impl(key, hash_(key)); } void add_new(Key &&key) { - this->add_new__impl(std::move(key), m_hash(key)); + this->add_new__impl(std::move(key), hash_(key)); } /** @@ -260,13 +257,9 @@ class Set { { return this->add_as(std::move(key)); } - - /** - * Same as `add`, but accepts other key types that are supported by the hash function. 
- */ template<typename ForwardKey> bool add_as(ForwardKey &&key) { - return this->add__impl(std::forward<ForwardKey>(key), m_hash(key)); + return this->add__impl(std::forward<ForwardKey>(key), hash_(key)); } /** @@ -303,13 +296,53 @@ class Set { { return this->contains_as(key); } + template<typename ForwardKey> bool contains_as(const ForwardKey &key) const + { + return this->contains__impl(key, hash_(key)); + } /** - * Same as `contains`, but accepts other key types that are supported by the hash function. + * Returns the key that is stored in the set that compares equal to the given key. This invokes + * undefined behavior when the key is not in the set. */ - template<typename ForwardKey> bool contains_as(const ForwardKey &key) const + const Key &lookup_key(const Key &key) const + { + return this->lookup_key_as(key); + } + template<typename ForwardKey> const Key &lookup_key_as(const ForwardKey &key) const + { + return this->lookup_key__impl(key, hash_(key)); + } + + /** + * Returns the key that is stored in the set that compares equal to the given key. If the key is + * not in the set, the given default value is returned instead. + */ + const Key &lookup_key_default(const Key &key, const Key &default_value) const { - return this->contains__impl(key, m_hash(key)); + return this->lookup_key_default_as(key, default_value); + } + template<typename ForwardKey> + const Key &lookup_key_default_as(const ForwardKey &key, const Key &default_key) const + { + const Key *ptr = this->lookup_key_ptr__impl(key, hash_(key)); + if (ptr == nullptr) { + return default_key; + } + return *ptr; + } + + /** + * Returns a pointer to the key that is stored in the set that compares equal to the given key. + * If the key is not in the set, nullptr is returned instead. 
+ */ + const Key *lookup_key_ptr(const Key &key) const + { + return this->lookup_key_ptr_as(key); + } + template<typename ForwardKey> const Key *lookup_key_ptr_as(const ForwardKey &key) const + { + return this->lookup_key_ptr__impl(key, hash_(key)); } /** @@ -321,13 +354,9 @@ class Set { { return this->remove_as(key); } - - /** - * Same as `remove`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> bool remove_as(const ForwardKey &key) { - return this->remove__impl(key, m_hash(key)); + return this->remove__impl(key, hash_(key)); } /** @@ -337,14 +366,9 @@ class Set { { this->remove_contained_as(key); } - - /** - * Same as `remove_contained`, but accepts other key types that are supported by the hash - * function. - */ template<typename ForwardKey> void remove_contained_as(const ForwardKey &key) { - this->remove_contained__impl(key, m_hash(key)); + this->remove_contained__impl(key, hash_(key)); } /** @@ -356,20 +380,20 @@ class Set { */ class Iterator { private: - const Slot *m_slots; - uint32_t m_total_slots; - uint32_t m_current_slot; + const Slot *slots_; + int64_t total_slots_; + int64_t current_slot_; public: - Iterator(const Slot *slots, uint32_t total_slots, uint32_t current_slot) - : m_slots(slots), m_total_slots(total_slots), m_current_slot(current_slot) + Iterator(const Slot *slots, int64_t total_slots, int64_t current_slot) + : slots_(slots), total_slots_(total_slots), current_slot_(current_slot) { } Iterator &operator++() { - while (++m_current_slot < m_total_slots) { - if (m_slots[m_current_slot].is_occupied()) { + while (++current_slot_ < total_slots_) { + if (slots_[current_slot_].is_occupied()) { break; } } @@ -378,22 +402,22 @@ class Set { const Key &operator*() const { - return *m_slots[m_current_slot].key(); + return *slots_[current_slot_].key(); } friend bool operator!=(const Iterator &a, const Iterator &b) { - BLI_assert(a.m_slots == b.m_slots); - BLI_assert(a.m_total_slots == 
b.m_total_slots); - return a.m_current_slot != b.m_current_slot; + BLI_assert(a.slots_ == b.slots_); + BLI_assert(a.total_slots_ == b.total_slots_); + return a.current_slot_ != b.current_slot_; } }; Iterator begin() const { - for (uint32_t i = 0; i < m_slots.size(); i++) { - if (m_slots[i].is_occupied()) { - return Iterator(m_slots.data(), m_slots.size(), i); + for (int64_t i = 0; i < slots_.size(); i++) { + if (slots_[i].is_occupied()) { + return Iterator(slots_.data(), slots_.size(), i); } } return this->end(); @@ -401,7 +425,7 @@ class Set { Iterator end() const { - return Iterator(m_slots.data(), m_slots.size(), m_slots.size()); + return Iterator(slots_.data(), slots_.size(), slots_.size()); } /** @@ -410,16 +434,16 @@ class Set { void print_stats(StringRef name = "") const { HashTableStats stats(*this, *this); - stats.print(); + stats.print(name); } /** * Get the number of collisions that the probing strategy has to go through to find the key or * determine that it is not in the set. */ - uint32_t count_collisions(const Key &key) const + int64_t count_collisions(const Key &key) const { - return this->count_collisions__impl(key, m_hash(key)); + return this->count_collisions__impl(key, hash_(key)); } /** @@ -433,7 +457,7 @@ class Set { /** * Creates a new slot array and reinserts all keys inside of that. This method can be used to get - * rid of dummy slots. Also this is useful for benchmarking the grow function. + * rid of removed slots. Also this is useful for benchmarking the grow function. */ void rehash() { @@ -443,9 +467,9 @@ class Set { /** * Returns the number of keys stored in the set. 
*/ - uint32_t size() const + int64_t size() const { - return m_occupied_and_removed_slots - m_removed_slots; + return occupied_and_removed_slots_ - removed_slots_; } /** @@ -453,29 +477,29 @@ class Set { */ bool is_empty() const { - return m_occupied_and_removed_slots == m_removed_slots; + return occupied_and_removed_slots_ == removed_slots_; } /** * Returns the number of available slots. This is mostly for debugging purposes. */ - uint32_t capacity() const + int64_t capacity() const { - return m_slots.size(); + return slots_.size(); } /** * Returns the amount of removed slots in the set. This is mostly for debugging purposes. */ - uint32_t removed_amount() const + int64_t removed_amount() const { - return m_removed_slots; + return removed_slots_; } /** * Returns the bytes required per element. This is mostly for debugging purposes. */ - uint32_t size_per_element() const + int64_t size_per_element() const { return sizeof(Slot); } @@ -484,18 +508,18 @@ class Set { * Returns the approximate memory requirements of the set in bytes. This is more correct for * larger sets. */ - uint32_t size_in_bytes() const + int64_t size_in_bytes() const { - return sizeof(Slot) * m_slots.size(); + return sizeof(Slot) * slots_.size(); } /** * Potentially resize the set such that it can hold the specified number of keys without another * grow operation. 
*/ - void reserve(uint32_t n) + void reserve(const int64_t n) { - if (m_usable_slots < n) { + if (usable_slots_ < n) { this->realloc_and_reinsert(n); } } @@ -527,30 +551,31 @@ class Set { } private: - BLI_NOINLINE void realloc_and_reinsert(uint32_t min_usable_slots) + BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots) { - uint32_t total_slots, usable_slots; - m_max_load_factor.compute_total_and_usable_slots( + int64_t total_slots, usable_slots; + max_load_factor_.compute_total_and_usable_slots( SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots); - uint32_t new_slot_mask = total_slots - 1; + BLI_assert(total_slots >= 1); + const uint64_t new_slot_mask = (uint64_t)total_slots - 1; /** * Optimize the case when the set was empty beforehand. We can avoid some copies here. */ if (this->size() == 0) { - m_slots.~Array(); - new (&m_slots) SlotArray(total_slots); - m_removed_slots = 0; - m_occupied_and_removed_slots = 0; - m_usable_slots = usable_slots; - m_slot_mask = new_slot_mask; + slots_.~Array(); + new (&slots_) SlotArray(total_slots); + removed_slots_ = 0; + occupied_and_removed_slots_ = 0; + usable_slots_ = usable_slots; + slot_mask_ = new_slot_mask; return; } /* The grown array that we insert the keys into. */ SlotArray new_slots(total_slots); - for (Slot &slot : m_slots) { + for (Slot &slot : slots_) { if (slot.is_occupied()) { this->add_after_grow_and_destruct_old(slot, new_slots, new_slot_mask); } @@ -558,19 +583,19 @@ class Set { /* All occupied slots have been destructed already and empty/removed slots are assumed to be * trivially destructible. 
*/ - m_slots.clear_without_destruct(); - m_slots = std::move(new_slots); - m_occupied_and_removed_slots -= m_removed_slots; - m_usable_slots = usable_slots; - m_removed_slots = 0; - m_slot_mask = new_slot_mask; + slots_.clear_without_destruct(); + slots_ = std::move(new_slots); + occupied_and_removed_slots_ -= removed_slots_; + usable_slots_ = usable_slots; + removed_slots_ = 0; + slot_mask_ = new_slot_mask; } void add_after_grow_and_destruct_old(Slot &old_slot, SlotArray &new_slots, - uint32_t new_slot_mask) + const uint64_t new_slot_mask) { - uint32_t hash = old_slot.get_hash(Hash()); + const uint64_t hash = old_slot.get_hash(Hash()); SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) { Slot &slot = new_slots[slot_index]; @@ -582,20 +607,48 @@ class Set { SLOT_PROBING_END(); } - template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint32_t hash) const + template<typename ForwardKey> + bool contains__impl(const ForwardKey &key, const uint64_t hash) const { SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { return false; } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return true; } } SET_SLOT_PROBING_END(); } - template<typename ForwardKey> void add_new__impl(ForwardKey &&key, uint32_t hash) + template<typename ForwardKey> + const Key &lookup_key__impl(const ForwardKey &key, const uint64_t hash) const + { + BLI_assert(this->contains_as(key)); + + SET_SLOT_PROBING_BEGIN (hash, slot) { + if (slot.contains(key, is_equal_, hash)) { + return *slot.key(); + } + } + SET_SLOT_PROBING_END(); + } + + template<typename ForwardKey> + const Key *lookup_key_ptr__impl(const ForwardKey &key, const uint64_t hash) const + { + SET_SLOT_PROBING_BEGIN (hash, slot) { + if (slot.contains(key, is_equal_, hash)) { + return slot.key(); + } + if (slot.is_empty()) { + return nullptr; + } + } + SET_SLOT_PROBING_END(); + } + + template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const 
uint64_t hash) { BLI_assert(!this->contains_as(key)); @@ -604,36 +657,36 @@ class Set { SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { slot.occupy(std::forward<ForwardKey>(key), hash); - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; return; } } SET_SLOT_PROBING_END(); } - template<typename ForwardKey> bool add__impl(ForwardKey &&key, uint32_t hash) + template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint64_t hash) { this->ensure_can_add(); SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { slot.occupy(std::forward<ForwardKey>(key), hash); - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; return true; } - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return false; } } SET_SLOT_PROBING_END(); } - template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint64_t hash) { SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { slot.remove(); - m_removed_slots++; + removed_slots_++; return true; } if (slot.is_empty()) { @@ -643,13 +696,14 @@ class Set { SET_SLOT_PROBING_END(); } - template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> + void remove_contained__impl(const ForwardKey &key, const uint64_t hash) { BLI_assert(this->contains_as(key)); - m_removed_slots++; + removed_slots_++; SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { slot.remove(); return; } @@ -658,12 +712,12 @@ class Set { } template<typename ForwardKey> - uint32_t count_collisions__impl(const ForwardKey &key, uint32_t hash) const + int64_t count_collisions__impl(const ForwardKey &key, const uint64_t hash) const { - uint32_t collisions = 0; + int64_t 
collisions = 0; SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash)) { + if (slot.contains(key, is_equal_, hash)) { return collisions; } if (slot.is_empty()) { @@ -676,9 +730,9 @@ class Set { void ensure_can_add() { - if (m_occupied_and_removed_slots >= m_usable_slots) { + if (occupied_and_removed_slots_ >= usable_slots_) { this->realloc_and_reinsert(this->size() + 1); - BLI_assert(m_occupied_and_removed_slots < m_usable_slots); + BLI_assert(occupied_and_removed_slots_ < usable_slots_); } } }; @@ -690,80 +744,92 @@ class Set { template<typename Key> class StdUnorderedSetWrapper { private: using SetType = std::unordered_set<Key, blender::DefaultHash<Key>>; - SetType m_set; + SetType set_; public: - uint32_t size() const + int64_t size() const { - return (uint32_t)m_set.size(); + return (int64_t)set_.size(); } bool is_empty() const { - return m_set.empty(); + return set_.empty(); } - void reserve(uint32_t n) + void reserve(int64_t n) { - m_set.reserve(n); + set_.reserve(n); } void add_new(const Key &key) { - m_set.insert(key); + set_.insert(key); } void add_new(Key &&key) { - m_set.insert(std::move(key)); + set_.insert(std::move(key)); } bool add(const Key &key) { - return m_set.insert(key).second; + return set_.insert(key).second; } bool add(Key &&key) { - return m_set.insert(std::move(key)).second; + return set_.insert(std::move(key)).second; } void add_multiple(Span<Key> keys) { for (const Key &key : keys) { - m_set.insert(key); + set_.insert(key); } } bool contains(const Key &key) const { - return m_set.find(key) != m_set.end(); + return set_.find(key) != set_.end(); } bool remove(const Key &key) { - return (bool)m_set.erase(key); + return (bool)set_.erase(key); } void remove_contained(const Key &key) { - return m_set.erase(key); + return set_.erase(key); } void clear() { - m_set.clear(); + set_.clear(); } typename SetType::iterator begin() const { - return m_set.begin(); + return set_.begin(); } typename SetType::iterator end() const 
{ - return m_set.end(); + return set_.end(); } }; +/** + * Same as a normal Set, but does not use Blender's guarded allocator. This is useful when + * allocating memory with static storage duration. + */ +template<typename Key, + int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(Key)), + typename ProbingStrategy = DefaultProbingStrategy, + typename Hash = DefaultHash<Key>, + typename IsEqual = DefaultEquality, + typename Slot = typename DefaultSetSlot<Key>::type> +using RawSet = Set<Key, InlineBufferCapacity, ProbingStrategy, Hash, IsEqual, Slot, RawAllocator>; + } // namespace blender #endif /* __BLI_SET_HH__ */ diff --git a/source/blender/blenlib/BLI_set_slots.hh b/source/blender/blenlib/BLI_set_slots.hh index 15f56f2450e..b78ed37f534 100644 --- a/source/blender/blenlib/BLI_set_slots.hh +++ b/source/blender/blenlib/BLI_set_slots.hh @@ -50,8 +50,8 @@ template<typename Key> class SimpleSetSlot { Removed = 2, }; - State m_state; - AlignedBuffer<sizeof(Key), alignof(Key)> m_buffer; + State state_; + TypedBuffer<Key> key_buffer_; public: /** @@ -59,7 +59,7 @@ template<typename Key> class SimpleSetSlot { */ SimpleSetSlot() { - m_state = Empty; + state_ = Empty; } /** @@ -67,8 +67,8 @@ template<typename Key> class SimpleSetSlot { */ ~SimpleSetSlot() { - if (m_state == Occupied) { - this->key()->~Key(); + if (state_ == Occupied) { + key_buffer_.ref().~Key(); } } @@ -78,9 +78,9 @@ template<typename Key> class SimpleSetSlot { */ SimpleSetSlot(const SimpleSetSlot &other) { - m_state = other.m_state; - if (other.m_state == Occupied) { - new (this->key()) Key(*other.key()); + state_ = other.state_; + if (other.state_ == Occupied) { + new (&key_buffer_) Key(*other.key_buffer_); } } @@ -91,9 +91,9 @@ template<typename Key> class SimpleSetSlot { */ SimpleSetSlot(SimpleSetSlot &&other) noexcept { - m_state = other.m_state; - if (other.m_state == Occupied) { - new (this->key()) Key(std::move(*other.key())); + state_ = other.state_; + if (other.state_ == 
Occupied) { + new (&key_buffer_) Key(std::move(*other.key_buffer_)); } } @@ -102,7 +102,7 @@ template<typename Key> class SimpleSetSlot { */ Key *key() { - return (Key *)m_buffer.ptr(); + return key_buffer_; } /** @@ -110,7 +110,7 @@ template<typename Key> class SimpleSetSlot { */ const Key *key() const { - return (const Key *)m_buffer.ptr(); + return key_buffer_; } /** @@ -118,7 +118,7 @@ template<typename Key> class SimpleSetSlot { */ bool is_occupied() const { - return m_state == Occupied; + return state_ == Occupied; } /** @@ -126,30 +126,30 @@ template<typename Key> class SimpleSetSlot { */ bool is_empty() const { - return m_state == Empty; + return state_ == Empty; } /** * Return the hash of the currently stored key. In this simple set slot implementation, we just * compute the hash here. Other implementations might store the hash in the slot instead. */ - template<typename Hash> uint32_t get_hash(const Hash &hash) const + template<typename Hash> uint64_t get_hash(const Hash &hash) const { BLI_assert(this->is_occupied()); - return hash(*this->key()); + return hash(*key_buffer_); } /** * Move the other slot into this slot and destruct it. We do destruction here, because this way * we can avoid a comparison with the state, since we know the slot is occupied. */ - void relocate_occupied_here(SimpleSetSlot &other, uint32_t UNUSED(hash)) + void relocate_occupied_here(SimpleSetSlot &other, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); BLI_assert(other.is_occupied()); - m_state = Occupied; - new (this->key()) Key(std::move(*other.key())); - other.key()->~Key(); + state_ = Occupied; + new (&key_buffer_) Key(std::move(*other.key_buffer_)); + other.key_buffer_.ref().~Key(); } /** @@ -157,10 +157,10 @@ template<typename Key> class SimpleSetSlot { * key. The hash is used by other slot implementations to determine inequality faster. 
*/ template<typename ForwardKey, typename IsEqual> - bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const + bool contains(const ForwardKey &key, const IsEqual &is_equal, uint64_t UNUSED(hash)) const { - if (m_state == Occupied) { - return is_equal(key, *this->key()); + if (state_ == Occupied) { + return is_equal(key, *key_buffer_); } return false; } @@ -169,11 +169,11 @@ template<typename Key> class SimpleSetSlot { * Change the state of this slot from empty/removed to occupied. The key has to be constructed * by calling the constructor with the given key as parameter. */ - template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t UNUSED(hash)) + template<typename ForwardKey> void occupy(ForwardKey &&key, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); - m_state = Occupied; - new (this->key()) Key(std::forward<ForwardKey>(key)); + state_ = Occupied; + new (&key_buffer_) Key(std::forward<ForwardKey>(key)); } /** @@ -182,8 +182,8 @@ template<typename Key> class SimpleSetSlot { void remove() { BLI_assert(this->is_occupied()); - m_state = Removed; - this->key()->~Key(); + state_ = Removed; + key_buffer_.ref().~Key(); } }; @@ -199,102 +199,102 @@ template<typename Key> class HashedSetSlot { Removed = 2, }; - uint32_t m_hash; - State m_state; - AlignedBuffer<sizeof(Key), alignof(Key)> m_buffer; + uint64_t hash_; + State state_; + TypedBuffer<Key> key_buffer_; public: HashedSetSlot() { - m_state = Empty; + state_ = Empty; } ~HashedSetSlot() { - if (m_state == Occupied) { - this->key()->~Key(); + if (state_ == Occupied) { + key_buffer_.ref().~Key(); } } HashedSetSlot(const HashedSetSlot &other) { - m_state = other.m_state; - if (other.m_state == Occupied) { - m_hash = other.m_hash; - new (this->key()) Key(*other.key()); + state_ = other.state_; + if (other.state_ == Occupied) { + hash_ = other.hash_; + new (&key_buffer_) Key(*other.key_buffer_); } } HashedSetSlot(HashedSetSlot &&other) noexcept { - m_state = 
other.m_state; - if (other.m_state == Occupied) { - m_hash = other.m_hash; - new (this->key()) Key(std::move(*other.key())); + state_ = other.state_; + if (other.state_ == Occupied) { + hash_ = other.hash_; + new (&key_buffer_) Key(std::move(*other.key_buffer_)); } } Key *key() { - return (Key *)m_buffer.ptr(); + return key_buffer_; } const Key *key() const { - return (const Key *)m_buffer.ptr(); + return key_buffer_; } bool is_occupied() const { - return m_state == Occupied; + return state_ == Occupied; } bool is_empty() const { - return m_state == Empty; + return state_ == Empty; } - template<typename Hash> uint32_t get_hash(const Hash &UNUSED(hash)) const + template<typename Hash> uint64_t get_hash(const Hash &UNUSED(hash)) const { BLI_assert(this->is_occupied()); - return m_hash; + return hash_; } - void relocate_occupied_here(HashedSetSlot &other, uint32_t hash) + void relocate_occupied_here(HashedSetSlot &other, const uint64_t hash) { BLI_assert(!this->is_occupied()); BLI_assert(other.is_occupied()); - m_state = Occupied; - m_hash = hash; - new (this->key()) Key(std::move(*other.key())); - other.key()->~Key(); + state_ = Occupied; + hash_ = hash; + new (&key_buffer_) Key(std::move(*other.key_buffer_)); + other.key_buffer_.ref().~Key(); } template<typename ForwardKey, typename IsEqual> - bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t hash) const + bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint64_t hash) const { - /* m_hash might be uninitialized here, but that is ok. 
*/ + if (hash_ == hash) { + if (state_ == Occupied) { + return is_equal(key, *key_buffer_); } } return false; } - template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t hash) + template<typename ForwardKey> void occupy(ForwardKey &&key, const uint64_t hash) { BLI_assert(!this->is_occupied()); - m_state = Occupied; - m_hash = hash; - new (this->key()) Key(std::forward<ForwardKey>(key)); + state_ = Occupied; + hash_ = hash; + new (&key_buffer_) Key(std::forward<ForwardKey>(key)); } void remove() { BLI_assert(this->is_occupied()); - m_state = Removed; - this->key()->~Key(); + state_ = Removed; + key_buffer_.ref().~Key(); } }; @@ -308,7 +308,7 @@ template<typename Key> class HashedSetSlot { */ template<typename Key, typename KeyInfo> class IntrusiveSetSlot { private: - Key m_key = KeyInfo::get_empty(); + Key key_ = KeyInfo::get_empty(); public: IntrusiveSetSlot() = default; @@ -318,57 +318,57 @@ template<typename Key, typename KeyInfo> class IntrusiveSetSlot { Key *key() { - return &m_key; + return &key_; } const Key *key() const { - return &m_key; + return &key_; } bool is_occupied() const { - return KeyInfo::is_not_empty_or_removed(m_key); + return KeyInfo::is_not_empty_or_removed(key_); } bool is_empty() const { - return KeyInfo::is_empty(m_key); + return KeyInfo::is_empty(key_); } - template<typename Hash> uint32_t get_hash(const Hash &hash) const + template<typename Hash> uint64_t get_hash(const Hash &hash) const { BLI_assert(this->is_occupied()); - return hash(m_key); + return hash(key_); } - void relocate_occupied_here(IntrusiveSetSlot &other, uint32_t UNUSED(hash)) + void relocate_occupied_here(IntrusiveSetSlot &other, const uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); BLI_assert(other.is_occupied()); - m_key = std::move(other.m_key); - other.m_key.~Key(); + key_ = std::move(other.key_); + other.key_.~Key(); } template<typename ForwardKey, typename IsEqual> - bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t 
UNUSED(hash)) const + bool contains(const ForwardKey &key, const IsEqual &is_equal, const uint64_t UNUSED(hash)) const { BLI_assert(KeyInfo::is_not_empty_or_removed(key)); - return is_equal(m_key, key); + return is_equal(key_, key); } - template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t UNUSED(hash)) + template<typename ForwardKey> void occupy(ForwardKey &&key, const uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); BLI_assert(KeyInfo::is_not_empty_or_removed(key)); - m_key = std::forward<ForwardKey>(key); + key_ = std::forward<ForwardKey>(key); } void remove() { BLI_assert(this->is_occupied()); - KeyInfo::remove(m_key); + KeyInfo::remove(key_); } }; diff --git a/source/blender/blenlib/BLI_span.hh b/source/blender/blenlib/BLI_span.hh index ce4e90d5e16..81b86f647f6 100644 --- a/source/blender/blenlib/BLI_span.hh +++ b/source/blender/blenlib/BLI_span.hh @@ -87,8 +87,8 @@ namespace blender { */ template<typename T> class Span { private: - const T *m_start = nullptr; - uint m_size = 0; + const T *data_ = nullptr; + int64_t size_ = 0; public: /** @@ -96,8 +96,15 @@ template<typename T> class Span { */ Span() = default; - Span(const T *start, uint size) : m_start(start), m_size(size) + Span(const T *start, int64_t size) : data_(start), size_(size) { + BLI_assert(size >= 0); + } + + template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr> + Span(const U *start, int64_t size) : data_((const T *)start), size_(size) + { + BLI_assert(size >= 0); } /** @@ -111,11 +118,11 @@ template<typename T> class Span { * Span<int> span = {1, 2, 3, 4}; * call_function_with_array(span); */ - Span(const std::initializer_list<T> &list) : Span(list.begin(), (uint)list.size()) + Span(const std::initializer_list<T> &list) : Span(list.begin(), (int64_t)list.size()) { } - Span(const std::vector<T> &vector) : Span(vector.data(), (uint)vector.size()) + Span(const std::vector<T> &vector) : Span(vector.data(), (int64_t)vector.size()) { } @@ 
-128,9 +135,8 @@ template<typename T> class Span { * Span<T *> -> Span<const T *> * Span<Derived *> -> Span<Base *> */ - template<typename U, - typename std::enable_if<std::is_convertible<U *, T>::value>::type * = nullptr> - Span(Span<U *> array) : Span((T *)array.data(), array.size()) + template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr> + Span(Span<U> array) : data_((T *)array.data()), size_(array.size()) { } @@ -138,10 +144,12 @@ template<typename T> class Span { * Returns a contiguous part of the array. This invokes undefined behavior when the slice does * not stay within the bounds of the array. */ - Span slice(uint start, uint size) const + Span slice(int64_t start, int64_t size) const { + BLI_assert(start >= 0); + BLI_assert(size >= 0); BLI_assert(start + size <= this->size() || size == 0); - return Span(m_start + start, size); + return Span(data_ + start, size); } Span slice(IndexRange range) const @@ -153,8 +161,9 @@ template<typename T> class Span { * Returns a new Span with n elements removed from the beginning. This invokes undefined * behavior when the array is too small. */ - Span drop_front(uint n) const + Span drop_front(int64_t n) const { + BLI_assert(n >= 0); BLI_assert(n <= this->size()); return this->slice(n, this->size() - n); } @@ -163,8 +172,9 @@ template<typename T> class Span { * Returns a new Span with n elements removed from the beginning. This invokes undefined * behavior when the array is too small. */ - Span drop_back(uint n) const + Span drop_back(int64_t n) const { + BLI_assert(n >= 0); BLI_assert(n <= this->size()); return this->slice(0, this->size() - n); } @@ -173,8 +183,9 @@ template<typename T> class Span { * Returns a new Span that only contains the first n elements. This invokes undefined * behavior when the array is too small. 
*/ - Span take_front(uint n) const + Span take_front(int64_t n) const { + BLI_assert(n >= 0); BLI_assert(n <= this->size()); return this->slice(0, n); } @@ -183,8 +194,9 @@ template<typename T> class Span { * Returns a new Span that only contains the last n elements. This invokes undefined * behavior when the array is too small. */ - Span take_back(uint n) const + Span take_back(int64_t n) const { + BLI_assert(n >= 0); BLI_assert(n <= this->size()); return this->slice(this->size() - n, n); } @@ -195,35 +207,36 @@ template<typename T> class Span { */ const T *data() const { - return m_start; + return data_; } const T *begin() const { - return m_start; + return data_; } const T *end() const { - return m_start + m_size; + return data_ + size_; } /** * Access an element in the array. This invokes undefined behavior when the index is out of * bounds. */ - const T &operator[](uint index) const + const T &operator[](int64_t index) const { - BLI_assert(index < m_size); - return m_start[index]; + BLI_assert(index >= 0); + BLI_assert(index < size_); + return data_[index]; } /** * Returns the number of elements in the referenced array. */ - uint size() const + int64_t size() const { - return m_size; + return size_; } /** @@ -231,15 +244,15 @@ template<typename T> class Span { */ bool is_empty() const { - return m_size == 0; + return size_ == 0; } /** * Returns the number of bytes referenced by this Span. */ - uint size_in_bytes() const + int64_t size_in_bytes() const { - return sizeof(T) * m_size; + return sizeof(T) * size_; } /** @@ -269,9 +282,9 @@ template<typename T> class Span { * Does a linear search to count how often the value is in the array. * Returns the number of occurrences. 
*/ - uint count(const T &value) const + int64_t count(const T &value) const { - uint counter = 0; + int64_t counter = 0; for (const T &element : *this) { if (element == value) { counter++; @@ -286,8 +299,8 @@ template<typename T> class Span { */ const T &first() const { - BLI_assert(m_size > 0); - return m_start[0]; + BLI_assert(size_ > 0); + return data_[0]; } /** @@ -296,18 +309,18 @@ template<typename T> class Span { */ const T &last() const { - BLI_assert(m_size > 0); - return m_start[m_size - 1]; + BLI_assert(size_ > 0); + return data_[size_ - 1]; } /** * Returns the element at the given index. If the index is out of range, return the fallback * value. */ - T get(uint index, const T &fallback) const + T get(int64_t index, const T &fallback) const { - if (index < m_size) { - return m_start[index]; + if (index < size_ && index >= 0) { + return data_[index]; } return fallback; } @@ -320,12 +333,12 @@ template<typename T> class Span { { /* The size should really be smaller than that. If it is not, the calling code should be * changed. */ - BLI_assert(m_size < 1000); + BLI_assert(size_ < 1000); - for (uint i = 0; i < m_size; i++) { - const T &value = m_start[i]; - for (uint j = i + 1; j < m_size; j++) { - if (value == m_start[j]) { + for (int64_t i = 0; i < size_; i++) { + const T &value = data_[i]; + for (int64_t j = i + 1; j < size_; j++) { + if (value == data_[j]) { return true; } } @@ -342,10 +355,10 @@ template<typename T> class Span { { /* The size should really be smaller than that. If it is not, the calling code should be * changed. */ - BLI_assert(m_size < 1000); + BLI_assert(size_ < 1000); - for (uint i = 0; i < m_size; i++) { - const T &value = m_start[i]; + for (int64_t i = 0; i < size_; i++) { + const T &value = data_[i]; if (other.contains(value)) { return true; } @@ -357,20 +370,20 @@ template<typename T> class Span { * Returns the index of the first occurrence of the given value. This invokes undefined behavior * when the value is not in the array. 
*/ - uint first_index(const T &search_value) const + int64_t first_index(const T &search_value) const { - int index = this->first_index_try(search_value); + const int64_t index = this->first_index_try(search_value); BLI_assert(index >= 0); - return (uint)index; + return (int64_t)index; } /** * Returns the index of the first occurrence of the given value or -1 if it does not exist. */ - int first_index_try(const T &search_value) const + int64_t first_index_try(const T &search_value) const { - for (uint i = 0; i < m_size; i++) { - if (m_start[i] == search_value) { + for (int64_t i = 0; i < size_; i++) { + if (data_[i] == search_value) { return i; } } @@ -383,7 +396,7 @@ template<typename T> class Span { */ IndexRange index_range() const { - return IndexRange(m_size); + return IndexRange(size_); } /** @@ -391,9 +404,9 @@ template<typename T> class Span { */ template<typename NewT> Span<NewT> cast() const { - BLI_assert((m_size * sizeof(T)) % sizeof(NewT) == 0); - uint new_size = m_size * sizeof(T) / sizeof(NewT); - return Span<NewT>(reinterpret_cast<const NewT *>(m_start), new_size); + BLI_assert((size_ * sizeof(T)) % sizeof(NewT) == 0); + int64_t new_size = size_ * sizeof(T) / sizeof(NewT); + return Span<NewT>(reinterpret_cast<const NewT *>(data_), new_size); } /** @@ -402,7 +415,7 @@ template<typename T> class Span { */ template<typename PrintLineF> void print_as_lines(std::string name, PrintLineF print_line) const { - std::cout << "Span: " << name << " \tSize:" << m_size << '\n'; + std::cout << "Span: " << name << " \tSize:" << size_ << '\n'; for (const T &value : *this) { std::cout << " "; print_line(value); @@ -426,28 +439,13 @@ template<typename T> class Span { */ template<typename T> class MutableSpan { private: - T *m_start; - uint m_size; + T *data_; + int64_t size_; public: MutableSpan() = default; - MutableSpan(T *start, uint size) : m_start(start), m_size(size) - { - } - - /** - * Reference an initializer_list. 
Note that the data in the initializer_list is only valid until - * the expression containing it is fully computed. - * - * Do: - * call_function_with_array({1, 2, 3, 4}); - * - * Don't: - * MutableSpan<int> span = {1, 2, 3, 4}; - * call_function_with_array(span); - */ - MutableSpan(std::initializer_list<T> &list) : MutableSpan(list.begin(), list.size()) + MutableSpan(T *start, const int64_t size) : data_(start), size_(size) { } @@ -461,15 +459,15 @@ template<typename T> class MutableSpan { operator Span<T>() const { - return Span<T>(m_start, m_size); + return Span<T>(data_, size_); } /** * Returns the number of elements in the array. */ - uint size() const + int64_t size() const { - return m_size; + return size_; } /** @@ -477,18 +475,18 @@ template<typename T> class MutableSpan { */ void fill(const T &value) { - initialized_fill_n(m_start, m_size, value); + initialized_fill_n(data_, size_, value); } /** * Replace a subset of all elements with the given value. This invokes undefined behavior when * one of the indices is out of bounds. */ - void fill_indices(Span<uint> indices, const T &value) + void fill_indices(Span<int64_t> indices, const T &value) { - for (uint i : indices) { - BLI_assert(i < m_size); - m_start[i] = value; + for (int64_t i : indices) { + BLI_assert(i < size_); + data_[i] = value; } } @@ -498,40 +496,40 @@ template<typename T> class MutableSpan { */ T *data() const { - return m_start; + return data_; } T *begin() const { - return m_start; + return data_; } T *end() const { - return m_start + m_size; + return data_ + size_; } - T &operator[](uint index) const + T &operator[](const int64_t index) const { BLI_assert(index < this->size()); - return m_start[index]; + return data_[index]; } /** * Returns a contiguous part of the array. This invokes undefined behavior when the slice would * go out of bounds. 
*/ - MutableSpan slice(uint start, uint length) const + MutableSpan slice(const int64_t start, const int64_t length) const { BLI_assert(start + length <= this->size()); - return MutableSpan(m_start + start, length); + return MutableSpan(data_ + start, length); } /** * Returns a new MutableSpan with n elements removed from the beginning. This invokes * undefined behavior when the array is too small. */ - MutableSpan drop_front(uint n) const + MutableSpan drop_front(const int64_t n) const { BLI_assert(n <= this->size()); return this->slice(n, this->size() - n); @@ -541,7 +539,7 @@ template<typename T> class MutableSpan { * Returns a new MutableSpan with n elements removed from the end. This invokes undefined * behavior when the array is too small. */ - MutableSpan drop_back(uint n) const + MutableSpan drop_back(const int64_t n) const { BLI_assert(n <= this->size()); return this->slice(0, this->size() - n); @@ -551,7 +549,7 @@ template<typename T> class MutableSpan { * Returns a new MutableSpan that only contains the first n elements. This invokes undefined * behavior when the array is too small. */ - MutableSpan take_front(uint n) const + MutableSpan take_front(const int64_t n) const { BLI_assert(n <= this->size()); return this->slice(0, n); @@ -561,7 +559,7 @@ template<typename T> class MutableSpan { * Return a new MutableSpan that only contains the last n elements. This invokes undefined * behavior when the array is too small. 
*/ - MutableSpan take_back(uint n) const + MutableSpan take_back(const int64_t n) const { BLI_assert(n <= this->size()); return this->slice(this->size() - n, n); @@ -573,7 +571,7 @@ template<typename T> class MutableSpan { */ Span<T> as_span() const { - return Span<T>(m_start, m_size); + return Span<T>(data_, size_); } /** @@ -582,7 +580,7 @@ template<typename T> class MutableSpan { */ IndexRange index_range() const { - return IndexRange(m_size); + return IndexRange(size_); } /** @@ -591,8 +589,34 @@ template<typename T> class MutableSpan { */ T &last() const { - BLI_assert(m_size > 0); - return m_start[m_size - 1]; + BLI_assert(size_ > 0); + return data_[size_ - 1]; + } + + /** + * Does a linear search to count how often the value is in the array. + * Returns the number of occurrences. + */ + int64_t count(const T &value) const + { + int64_t counter = 0; + for (const T &element : *this) { + if (element == value) { + counter++; + } + } + return counter; + } + + /** + * Copy all values from another span into this span. This invokes undefined behavior when the + * destination contains uninitialized data and T is not trivially copy constructible. + * The size of both spans is expected to be the same. + */ + void copy_from(Span<T> values) + { + BLI_assert(size_ == values.size()); + initialized_copy_n(values.data(), size_, data_); } /** @@ -600,28 +624,20 @@ template<typename T> class MutableSpan { */ template<typename NewT> MutableSpan<NewT> cast() const { - BLI_assert((m_size * sizeof(T)) % sizeof(NewT) == 0); - uint new_size = m_size * sizeof(T) / sizeof(NewT); - return MutableSpan<NewT>(reinterpret_cast<NewT *>(m_start), new_size); + BLI_assert((size_ * sizeof(T)) % sizeof(NewT) == 0); + int64_t new_size = size_ * sizeof(T) / sizeof(NewT); + return MutableSpan<NewT>(reinterpret_cast<NewT *>(data_), new_size); } }; /** - * Shorthand to make use of automatic template parameter deduction. 
- */ -template<typename T> Span<T> ref_c_array(const T *array, uint size) -{ - return Span<T>(array, size); -} - -/** * Utilities to check that arrays have the same size in debug builds. */ template<typename T1, typename T2> void assert_same_size(const T1 &v1, const T2 &v2) { UNUSED_VARS_NDEBUG(v1, v2); #ifdef DEBUG - uint size = v1.size(); + int64_t size = v1.size(); BLI_assert(size == v1.size()); BLI_assert(size == v2.size()); #endif @@ -632,7 +648,7 @@ void assert_same_size(const T1 &v1, const T2 &v2, const T3 &v3) { UNUSED_VARS_NDEBUG(v1, v2, v3); #ifdef DEBUG - uint size = v1.size(); + int64_t size = v1.size(); BLI_assert(size == v1.size()); BLI_assert(size == v2.size()); BLI_assert(size == v3.size()); diff --git a/source/blender/blenlib/BLI_stack.hh b/source/blender/blenlib/BLI_stack.hh index 030d9c84c8e..75ae9df79a4 100644 --- a/source/blender/blenlib/BLI_stack.hh +++ b/source/blender/blenlib/BLI_stack.hh @@ -60,7 +60,7 @@ template<typename T> struct StackChunk { /** Pointer to one element past the end of the referenced buffer. */ T *capacity_end; - uint capacity() const + int64_t capacity() const { return capacity_end - begin; } @@ -73,11 +73,8 @@ template< * The number of values that can be stored in this stack, without doing a heap allocation. * Sometimes it can make sense to increase this value a lot. The memory in the inline buffer is * not initialized when it is not needed. - * - * When T is large, the small buffer optimization is disabled by default to avoid large - * unexpected allocations on the stack. It can still be enabled explicitely though. */ - uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0, + int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(T)), /** * The allocator used by this stack. Should rarely be changed, except when you don't want that * MEM_* is used internally. @@ -91,48 +88,46 @@ class Stack { * Points to one element after top-most value in the stack. 
* * Invariant: - * If m_size == 0 - * then: m_top == m_inline_chunk.begin - * else: &peek() == m_top - 1; + * If size_ == 0 + * then: top_ == inline_chunk_.begin + * else: &peek() == top_ - 1; */ - T *m_top; + T *top_; - /** Points to the chunk that references the memory pointed to by m_top. */ - Chunk *m_top_chunk; + /** Points to the chunk that references the memory pointed to by top_. */ + Chunk *top_chunk_; /** * Number of elements in the entire stack. The sum of initialized element counts in the chunks. */ - uint m_size; + int64_t size_; /** The buffer used to implement small object optimization. */ - AlignedBuffer<sizeof(T) * InlineBufferCapacity, alignof(T)> m_inline_buffer; + TypedBuffer<T, InlineBufferCapacity> inline_buffer_; /** * A chunk referencing the inline buffer. This is always the bottom-most chunk. - * So m_inline_chunk.below == nullptr. + * So inline_chunk_.below == nullptr. */ - Chunk m_inline_chunk; + Chunk inline_chunk_; /** Used for allocations when the inline buffer is not large enough. */ - Allocator m_allocator; + Allocator allocator_; public: /** * Initialize an empty stack. No heap allocation is done. 
*/ - Stack(Allocator allocator = {}) : m_allocator(allocator) + Stack(Allocator allocator = {}) : allocator_(allocator) { - T *inline_buffer = this->inline_buffer(); - - m_inline_chunk.below = nullptr; - m_inline_chunk.above = nullptr; - m_inline_chunk.begin = inline_buffer; - m_inline_chunk.capacity_end = inline_buffer + InlineBufferCapacity; - - m_top = inline_buffer; - m_top_chunk = &m_inline_chunk; - m_size = 0; + inline_chunk_.below = nullptr; + inline_chunk_.above = nullptr; + inline_chunk_.begin = inline_buffer_; + inline_chunk_.capacity_end = inline_buffer_ + InlineBufferCapacity; + + top_ = inline_buffer_; + top_chunk_ = &inline_chunk_; + size_ = 0; } /** @@ -157,46 +152,49 @@ class Stack { { } - Stack(const Stack &other) : Stack(other.m_allocator) + Stack(const Stack &other) : Stack(other.allocator_) { - for (const Chunk *chunk = &other.m_inline_chunk; chunk; chunk = chunk->above) { + for (const Chunk *chunk = &other.inline_chunk_; chunk; chunk = chunk->above) { const T *begin = chunk->begin; - const T *end = (chunk == other.m_top_chunk) ? other.m_top : chunk->capacity_end; + const T *end = (chunk == other.top_chunk_) ? 
other.top_ : chunk->capacity_end; this->push_multiple(Span<T>(begin, end - begin)); } } - Stack(Stack &&other) noexcept : Stack(other.m_allocator) + Stack(Stack &&other) noexcept : Stack(other.allocator_) { - uninitialized_relocate_n(other.inline_buffer(), - std::min(other.m_size, InlineBufferCapacity), - this->inline_buffer()); + uninitialized_relocate_n<T>( + other.inline_buffer_, std::min(other.size_, InlineBufferCapacity), inline_buffer_); - m_inline_chunk.above = other.m_inline_chunk.above; - m_size = other.m_size; + inline_chunk_.above = other.inline_chunk_.above; + size_ = other.size_; - if (m_size <= InlineBufferCapacity) { - m_top_chunk = &m_inline_chunk; - m_top = this->inline_buffer() + m_size; + if (inline_chunk_.above != nullptr) { + inline_chunk_.above->below = &inline_chunk_; + } + + if (size_ <= InlineBufferCapacity) { + top_chunk_ = &inline_chunk_; + top_ = inline_buffer_ + size_; } else { - m_top_chunk = other.m_top_chunk; - m_top = other.m_top; + top_chunk_ = other.top_chunk_; + top_ = other.top_; } - other.m_size = 0; - other.m_inline_chunk.above = nullptr; - other.m_top_chunk = &other.m_inline_chunk; - other.m_top = other.m_top_chunk->begin; + other.size_ = 0; + other.inline_chunk_.above = nullptr; + other.top_chunk_ = &other.inline_chunk_; + other.top_ = other.top_chunk_->begin; } ~Stack() { this->destruct_all_elements(); Chunk *above_chunk; - for (Chunk *chunk = m_inline_chunk.above; chunk; chunk = above_chunk) { + for (Chunk *chunk = inline_chunk_.above; chunk; chunk = above_chunk) { above_chunk = chunk->above; - m_allocator.deallocate(chunk); + allocator_.deallocate(chunk); } } @@ -229,21 +227,21 @@ class Stack { */ void push(const T &value) { - if (m_top == m_top_chunk->capacity_end) { + if (top_ == top_chunk_->capacity_end) { this->activate_next_chunk(1); } - new (m_top) T(value); - m_top++; - m_size++; + new (top_) T(value); + top_++; + size_++; } void push(T &&value) { - if (m_top == m_top_chunk->capacity_end) { + if (top_ == 
top_chunk_->capacity_end) { this->activate_next_chunk(1); } - new (m_top) T(std::move(value)); - m_top++; - m_size++; + new (top_) T(std::move(value)); + top_++; + size_++; } /** @@ -252,16 +250,16 @@ class Stack { */ T pop() { - BLI_assert(m_size > 0); - m_top--; - T value = std::move(*m_top); - m_top->~T(); - m_size--; - - if (m_top == m_top_chunk->begin) { - if (m_top_chunk->below != nullptr) { - m_top_chunk = m_top_chunk->below; - m_top = m_top_chunk->capacity_end; + BLI_assert(size_ > 0); + top_--; + T value = std::move(*top_); + top_->~T(); + size_--; + + if (top_ == top_chunk_->begin) { + if (top_chunk_->below != nullptr) { + top_chunk_ = top_chunk_->below; + top_ = top_chunk_->capacity_end; } } return value; @@ -273,15 +271,15 @@ class Stack { */ T &peek() { - BLI_assert(m_size > 0); - BLI_assert(m_top > m_top_chunk->begin); - return *(m_top - 1); + BLI_assert(size_ > 0); + BLI_assert(top_ > top_chunk_->begin); + return *(top_ - 1); } const T &peek() const { - BLI_assert(m_size > 0); - BLI_assert(m_top > m_top_chunk->begin); - return *(m_top - 1); + BLI_assert(size_ > 0); + BLI_assert(top_ > top_chunk_->begin); + return *(top_ - 1); } /** @@ -293,19 +291,19 @@ class Stack { { Span<T> remaining_values = values; while (!remaining_values.is_empty()) { - if (m_top == m_top_chunk->capacity_end) { + if (top_ == top_chunk_->capacity_end) { this->activate_next_chunk(remaining_values.size()); } - uint remaining_capacity = m_top_chunk->capacity_end - m_top; - uint amount = std::min(remaining_values.size(), remaining_capacity); - uninitialized_copy_n(remaining_values.data(), amount, m_top); - m_top += amount; + const int64_t remaining_capacity = top_chunk_->capacity_end - top_; + const int64_t amount = std::min(remaining_values.size(), remaining_capacity); + uninitialized_copy_n(remaining_values.data(), amount, top_); + top_ += amount; remaining_values = remaining_values.drop_front(amount); } - m_size += values.size(); + size_ += values.size(); } /** @@ -313,15 
+311,15 @@ class Stack { */ bool is_empty() const { - return m_size == 0; + return size_ == 0; } /** * Returns the number of elements in the stack. */ - uint size() const + int64_t size() const { - return m_size; + return size_; } /** @@ -331,31 +329,26 @@ class Stack { void clear() { this->destruct_all_elements(); - m_top_chunk = &m_inline_chunk; - m_top = m_top_chunk->begin; + top_chunk_ = &inline_chunk_; + top_ = top_chunk_->begin; } private: - T *inline_buffer() const - { - return (T *)m_inline_buffer.ptr(); - } - /** - * Changes m_top_chunk to point to a new chunk that is above the current one. The new chunk might + * Changes top_chunk_ to point to a new chunk that is above the current one. The new chunk might * be smaller than the given size_hint. This happens when a chunk that has been allocated before * is reused. The size of the new chunk will be at least one. * * This invokes undefined behavior when the currently active chunk is not full. */ - void activate_next_chunk(uint size_hint) + void activate_next_chunk(const int64_t size_hint) { - BLI_assert(m_top == m_top_chunk->capacity_end); - if (m_top_chunk->above == nullptr) { - uint new_capacity = std::max(size_hint, m_top_chunk->capacity() * 2 + 10); + BLI_assert(top_ == top_chunk_->capacity_end); + if (top_chunk_->above == nullptr) { + const int64_t new_capacity = std::max(size_hint, top_chunk_->capacity() * 2 + 10); /* Do a single memory allocation for the Chunk and the array it references. 
*/ - void *buffer = m_allocator.allocate( + void *buffer = allocator_.allocate( sizeof(Chunk) + sizeof(T) * new_capacity + alignof(T), alignof(Chunk), AT); void *chunk_buffer = buffer; void *data_buffer = (void *)(((uintptr_t)buffer + sizeof(Chunk) + alignof(T) - 1) & @@ -365,19 +358,19 @@ class Stack { new_chunk->begin = (T *)data_buffer; new_chunk->capacity_end = new_chunk->begin + new_capacity; new_chunk->above = nullptr; - new_chunk->below = m_top_chunk; - m_top_chunk->above = new_chunk; + new_chunk->below = top_chunk_; + top_chunk_->above = new_chunk; } - m_top_chunk = m_top_chunk->above; - m_top = m_top_chunk->begin; + top_chunk_ = top_chunk_->above; + top_ = top_chunk_->begin; } void destruct_all_elements() { - for (T *value = m_top_chunk->begin; value != m_top; value++) { + for (T *value = top_chunk_->begin; value != top_; value++) { value->~T(); } - for (Chunk *chunk = m_top_chunk->below; chunk; chunk = chunk->below) { + for (Chunk *chunk = top_chunk_->below; chunk; chunk = chunk->below) { for (T *value = chunk->begin; value != chunk->capacity_end; value++) { value->~T(); } @@ -385,6 +378,13 @@ class Stack { } }; +/** + * Same as a normal Stack, but does not use Blender's guarded allocator. This is useful when + * allocating memory with static storage duration. 
+ */ +template<typename T, int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(T))> +using RawStack = Stack<T, InlineBufferCapacity, RawAllocator>; + } /* namespace blender */ #endif /* __BLI_STACK_HH__ */ diff --git a/source/blender/blenlib/BLI_string_ref.hh b/source/blender/blenlib/BLI_string_ref.hh index 073137fe175..06fc66f6b55 100644 --- a/source/blender/blenlib/BLI_string_ref.hh +++ b/source/blender/blenlib/BLI_string_ref.hh @@ -59,10 +59,10 @@ class StringRef; */ class StringRefBase { protected: - const char *m_data; - uint m_size; + const char *data_; + int64_t size_; - StringRefBase(const char *data, uint size) : m_data(data), m_size(size) + StringRefBase(const char *data, const int64_t size) : data_(data), size_(size) { } @@ -70,9 +70,9 @@ class StringRefBase { /** * Return the (byte-)length of the referenced string, without any null-terminator. */ - uint size() const + int64_t size() const { - return m_size; + return size_; } /** @@ -80,31 +80,31 @@ class StringRefBase { */ const char *data() const { - return m_data; + return data_; } operator Span<char>() const { - return Span<char>(m_data, m_size); + return Span<char>(data_, size_); } /** - * Implicitely convert to std::string. This is convenient in most cases, but you have to be a bit + * Implicitly convert to std::string. This is convenient in most cases, but you have to be a bit * careful not to convert to std::string accidentally. */ operator std::string() const { - return std::string(m_data, m_size); + return std::string(data_, (size_t)size_); } const char *begin() const { - return m_data; + return data_; } const char *end() const { - return m_data + m_size; + return data_ + size_; } /** @@ -114,17 +114,17 @@ class StringRefBase { */ void unsafe_copy(char *dst) const { - memcpy(dst, m_data, m_size); - dst[m_size] = '\0'; + memcpy(dst, data_, (size_t)size_); + dst[size_] = '\0'; } /** * Copy the string into a buffer. The copied string will be null-terminated. 
This invokes * undefined behavior when dst_size is too small. (Should we define the behavior?) */ - void copy(char *dst, uint dst_size) const + void copy(char *dst, const int64_t dst_size) const { - if (m_size < dst_size) { + if (size_ < dst_size) { this->unsafe_copy(dst); } else { @@ -137,7 +137,7 @@ class StringRefBase { * Copy the string into a char array. The copied string will be null-terminated. This invokes * undefined behavior when dst is too small. */ - template<uint N> void copy(char (&dst)[N]) + template<size_t N> void copy(char (&dst)[N]) { this->copy(dst, N); } @@ -152,7 +152,7 @@ class StringRefBase { */ bool endswith(StringRef suffix) const; - StringRef substr(uint start, uint size) const; + StringRef substr(int64_t start, const int64_t size) const; }; /** @@ -168,37 +168,48 @@ class StringRefNull : public StringRefBase { /** * Construct a StringRefNull from a null terminated c-string. The pointer must not point to NULL. */ - StringRefNull(const char *str) : StringRefBase(str, (uint)strlen(str)) + StringRefNull(const char *str) : StringRefBase(str, (int64_t)strlen(str)) { BLI_assert(str != NULL); - BLI_assert(m_data[m_size] == '\0'); + BLI_assert(data_[size_] == '\0'); } /** * Construct a StringRefNull from a null terminated c-string. This invokes undefined behavior * when the given size is not the correct size of the string. */ - StringRefNull(const char *str, uint size) : StringRefBase(str, size) + StringRefNull(const char *str, const int64_t size) : StringRefBase(str, size) { - BLI_assert((uint)strlen(str) == size); + BLI_assert((int64_t)strlen(str) == size); } /** * Reference a std::string. Remember that when the std::string is destructed, the StringRefNull * will point to uninitialized memory. */ - StringRefNull(const std::string &str) : StringRefNull(str.data()) + StringRefNull(const std::string &str) : StringRefNull(str.c_str()) { } /** * Get the char at the given index. 
*/ - char operator[](uint index) const + char operator[](const int64_t index) const { + BLI_assert(index >= 0); /* Use '<=' instead of just '<', so that the null character can be accessed as well. */ - BLI_assert(index <= m_size); - return m_data[index]; + BLI_assert(index <= size_); + return data_[index]; + } + + /** + * Returns the beginning of a null-terminated char array. + * + * This is like ->data(), but can only be called on a StringRefNull. + */ + const char *c_str() const + { + return data_; } }; @@ -221,11 +232,11 @@ class StringRef : public StringRefBase { /** * Create a StringRef from a null-terminated c-string. */ - StringRef(const char *str) : StringRefBase(str, str ? (uint)strlen(str) : 0) + StringRef(const char *str) : StringRefBase(str, str ? (int64_t)strlen(str) : 0) { } - StringRef(const char *str, uint length) : StringRefBase(str, length) + StringRef(const char *str, const int64_t length) : StringRefBase(str, length) { } @@ -234,7 +245,7 @@ class StringRef : public StringRefBase { * second point points to a smaller address than the first one. */ StringRef(const char *begin, const char *one_after_end) - : StringRefBase(begin, (uint)(one_after_end - begin)) + : StringRefBase(begin, (int64_t)(one_after_end - begin)) { BLI_assert(begin <= one_after_end); } @@ -243,17 +254,18 @@ class StringRef : public StringRefBase { * Reference a std::string. Remember that when the std::string is destructed, the StringRef * will point to uninitialized memory. */ - StringRef(const std::string &str) : StringRefBase(str.data(), (uint)str.size()) + StringRef(const std::string &str) : StringRefBase(str.data(), (int64_t)str.size()) { } /** * Return a new StringRef that does not contain the first n chars. 
*/ - StringRef drop_prefix(uint n) const + StringRef drop_prefix(const int64_t n) const { - BLI_assert(n <= m_size); - return StringRef(m_data + n, m_size - n); + BLI_assert(n >= 0); + BLI_assert(n <= size_); + return StringRef(data_ + n, size_ - n); } /** @@ -269,10 +281,11 @@ class StringRef : public StringRefBase { /** * Get the char at the given index. */ - char operator[](uint index) const + char operator[](int64_t index) const { - BLI_assert(index < m_size); - return m_data[index]; + BLI_assert(index >= 0); + BLI_assert(index < size_); + return data_[index]; } }; @@ -287,13 +300,13 @@ inline std::ostream &operator<<(std::ostream &stream, StringRef ref) inline std::ostream &operator<<(std::ostream &stream, StringRefNull ref) { - stream << std::string(ref.data(), ref.size()); + stream << std::string(ref.data(), (size_t)ref.size()); return stream; } /** - * Adding two StringRefs will allocate an std::string. This is not efficient, but convenient in - * most cases. + * Adding two #StringRefs will allocate an std::string. + * This is not efficient, but convenient in most cases. 
*/ inline std::string operator+(StringRef a, StringRef b) { @@ -305,7 +318,7 @@ inline bool operator==(StringRef a, StringRef b) if (a.size() != b.size()) { return false; } - return STREQLEN(a.data(), b.data(), a.size()); + return STREQLEN(a.data(), b.data(), (size_t)a.size()); } inline bool operator!=(StringRef a, StringRef b) @@ -318,11 +331,11 @@ inline bool operator!=(StringRef a, StringRef b) */ inline bool StringRefBase::startswith(StringRef prefix) const { - if (m_size < prefix.m_size) { + if (size_ < prefix.size_) { return false; } - for (uint i = 0; i < prefix.m_size; i++) { - if (m_data[i] != prefix.m_data[i]) { + for (int64_t i = 0; i < prefix.size_; i++) { + if (data_[i] != prefix.data_[i]) { return false; } } @@ -334,12 +347,12 @@ inline bool StringRefBase::startswith(StringRef prefix) const */ inline bool StringRefBase::endswith(StringRef suffix) const { - if (m_size < suffix.m_size) { + if (size_ < suffix.size_) { return false; } - uint offset = m_size - suffix.m_size; - for (uint i = 0; i < suffix.m_size; i++) { - if (m_data[offset + i] != suffix.m_data[i]) { + const int64_t offset = size_ - suffix.size_; + for (int64_t i = 0; i < suffix.size_; i++) { + if (data_[offset + i] != suffix.data_[i]) { return false; } } @@ -347,12 +360,14 @@ inline bool StringRefBase::endswith(StringRef suffix) const } /** - * Return a new StringRef containing only a substring of the original string. + * Return a new #StringRef containing only a sub-string of the original string. 
*/ -inline StringRef StringRefBase::substr(uint start, uint size) const +inline StringRef StringRefBase::substr(const int64_t start, const int64_t size) const { - BLI_assert(start + size <= m_size); - return StringRef(m_data + start, size); + BLI_assert(size >= 0); + BLI_assert(start >= 0); + BLI_assert(start + size <= size_); + return StringRef(data_ + start, size); } } // namespace blender diff --git a/source/blender/blenlib/BLI_threads.h b/source/blender/blenlib/BLI_threads.h index 03fe27c10ed..920a0a8f650 100644 --- a/source/blender/blenlib/BLI_threads.h +++ b/source/blender/blenlib/BLI_threads.h @@ -28,10 +28,6 @@ #include "BLI_sys_types.h" -#ifdef __APPLE__ -# include <libkern/OSAtomic.h> -#endif - #ifdef __cplusplus extern "C" { #endif @@ -100,10 +96,18 @@ void BLI_mutex_unlock(ThreadMutex *mutex); /* Spin Lock */ -#if defined(__APPLE__) -typedef OSSpinLock SpinLock; +/* By default we use TBB for spin lock on all platforms. When building without + * TBB fall-back to spin lock implementation which is native to the platform. + * + * On macOS we use mutex lock instead of spin since the spin lock has been + * deprecated in SDK 10.12 and is discouraged from use. 
*/ + +#ifdef WITH_TBB +typedef uint32_t SpinLock; +#elif defined(__APPLE__) +typedef ThreadMutex SpinLock; #elif defined(_MSC_VER) -typedef volatile int SpinLock; +typedef volatile unsigned int SpinLock; #else typedef pthread_spinlock_t SpinLock; #endif diff --git a/source/blender/blenlib/BLI_timeit.hh b/source/blender/blenlib/BLI_timeit.hh index 711a7f16ab4..f0968587597 100644 --- a/source/blender/blenlib/BLI_timeit.hh +++ b/source/blender/blenlib/BLI_timeit.hh @@ -23,8 +23,7 @@ #include "BLI_sys_types.h" -namespace blender { -namespace Timeit { +namespace blender::timeit { using Clock = std::chrono::steady_clock; using TimePoint = Clock::time_point; @@ -34,29 +33,28 @@ void print_duration(Nanoseconds duration); class ScopedTimer { private: - std::string m_name; - TimePoint m_start; + std::string name_; + TimePoint start_; public: - ScopedTimer(std::string name) : m_name(std::move(name)) + ScopedTimer(std::string name) : name_(std::move(name)) { - m_start = Clock::now(); + start_ = Clock::now(); } ~ScopedTimer() { - TimePoint end = Clock::now(); - Nanoseconds duration = end - m_start; + const TimePoint end = Clock::now(); + const Nanoseconds duration = end - start_; - std::cout << "Timer '" << m_name << "' took "; + std::cout << "Timer '" << name_ << "' took "; print_duration(duration); std::cout << '\n'; } }; -} // namespace Timeit -} // namespace blender +} // namespace blender::timeit -#define SCOPED_TIMER(name) blender::Timeit::ScopedTimer scoped_timer(name) +#define SCOPED_TIMER(name) blender::timeit::ScopedTimer scoped_timer(name) #endif /* __BLI_TIMEIT_HH__ */ diff --git a/source/blender/blenlib/BLI_utildefines.h b/source/blender/blenlib/BLI_utildefines.h index 1f28f7e80c5..2699f2498ac 100644 --- a/source/blender/blenlib/BLI_utildefines.h +++ b/source/blender/blenlib/BLI_utildefines.h @@ -627,11 +627,11 @@ extern bool BLI_memory_is_zero(const void *arr, const size_t arr_size); /** \name String Macros * \{ */ -/* Macro to convert a value to string in the 
preprocessor - * STRINGIFY_ARG: gives the argument as a string - * STRINGIFY_APPEND: appends any argument 'b' onto the string argument 'a', - * used by STRINGIFY because some preprocessors warn about zero arguments - * STRINGIFY: gives the argument's value as a string */ +/* Macro to convert a value to string in the pre-processor: + * - `STRINGIFY_ARG`: gives the argument as a string + * - `STRINGIFY_APPEND`: appends any argument 'b' onto the string argument 'a', + * used by `STRINGIFY` because some preprocessors warn about zero arguments + * - `STRINGIFY`: gives the argument's value as a string. */ #define STRINGIFY_ARG(x) "" #x #define STRINGIFY_APPEND(a, b) "" a #b #define STRINGIFY(x) STRINGIFY_APPEND("", x) @@ -755,6 +755,43 @@ extern bool BLI_memory_is_zero(const void *arr, const size_t arr_size); /** \} */ /* -------------------------------------------------------------------- */ +/** \name C++ Macros + * \{ */ + +#ifdef __cplusplus + +/* Useful to port C code using enums to C++ where enums are strongly typed. + * To use after the enum declaration. */ +# define ENUM_OPERATORS(_enum_type) \ + inline constexpr _enum_type operator|(_enum_type a, _enum_type b) \ + { \ + return a = static_cast<_enum_type>(static_cast<int>(a) | b); \ + } \ + inline constexpr _enum_type operator&(_enum_type a, _enum_type b) \ + { \ + return a = static_cast<_enum_type>(static_cast<int>(a) & b); \ + } \ + inline constexpr _enum_type operator~(_enum_type a) \ + { \ + return a = static_cast<_enum_type>(~static_cast<int>(a)); \ + } \ + inline _enum_type &operator|=(_enum_type &a, _enum_type b) \ + { \ + return a = static_cast<_enum_type>(static_cast<int>(a) | b); \ + } \ + inline _enum_type &operator&=(_enum_type &a, _enum_type b) \ + { \ + return a = static_cast<_enum_type>(static_cast<int>(a) & b); \ + } + +#else +/* Output nothing. 
*/ +# define ENUM_OPERATORS(_type) +#endif + +/** \} */ + +/* -------------------------------------------------------------------- */ /** \name Misc Macros * \{ */ diff --git a/source/blender/blenlib/BLI_vector.hh b/source/blender/blenlib/BLI_vector.hh index b2b2da0a4b0..7eac511bf4a 100644 --- a/source/blender/blenlib/BLI_vector.hh +++ b/source/blender/blenlib/BLI_vector.hh @@ -68,9 +68,9 @@ template< * not initialized when it is not needed. * * When T is large, the small buffer optimization is disabled by default to avoid large - * unexpected allocations on the stack. It can still be enabled explicitely though. + * unexpected allocations on the stack. It can still be enabled explicitly though. */ - uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0, + int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(T)), /** * The allocator used by this vector. Should rarely be changed, except when you don't want that * MEM_* is used internally. @@ -79,38 +79,38 @@ template< class Vector { private: /** - * Use pointers instead of storing the size explicitely. This reduces the number of instructions + * Use pointers instead of storing the size explicitly. This reduces the number of instructions * in `append`. * * The pointers might point to the memory in the inline buffer. */ - T *m_begin; - T *m_end; - T *m_capacity_end; + T *begin_; + T *end_; + T *capacity_end_; /** Used for allocations when the inline buffer is too small. */ - Allocator m_allocator; + Allocator allocator_; /** A placeholder buffer that will remain uninitialized until it is used. */ - AlignedBuffer<(uint)sizeof(T) * InlineBufferCapacity, (uint)alignof(T)> m_inline_buffer; + TypedBuffer<T, InlineBufferCapacity> inline_buffer_; /** - * Store the size of the vector explicitely in debug builds. Otherwise you'd always have to call + * Store the size of the vector explicitly in debug builds. 
Otherwise you'd always have to call * the `size` function or do the math to compute it from the pointers manually. This is rather * annoying. Knowing the size of a vector is often quite essential when debugging some code. */ #ifndef NDEBUG - uint m_debug_size; -# define UPDATE_VECTOR_SIZE(ptr) (ptr)->m_debug_size = (uint)((ptr)->m_end - (ptr)->m_begin) + int64_t debug_size_; +# define UPDATE_VECTOR_SIZE(ptr) (ptr)->debug_size_ = (int64_t)((ptr)->end_ - (ptr)->begin_) #else # define UPDATE_VECTOR_SIZE(ptr) ((void)0) #endif /** - * Be a friend with other vector instanciations. This is necessary to implement some memory + * Be a friend with other vector instantiations. This is necessary to implement some memory * management logic. */ - template<typename OtherT, uint OtherInlineBufferCapacity, typename OtherAllocator> + template<typename OtherT, int64_t OtherInlineBufferCapacity, typename OtherAllocator> friend class Vector; public: @@ -118,11 +118,11 @@ class Vector { * Create an empty vector. * This does not do any memory allocation. */ - Vector() + Vector(Allocator allocator = {}) : allocator_(allocator) { - m_begin = this->inline_buffer(); - m_end = m_begin; - m_capacity_end = m_begin + InlineBufferCapacity; + begin_ = inline_buffer_; + end_ = begin_; + capacity_end_ = begin_ + InlineBufferCapacity; UPDATE_VECTOR_SIZE(this); } @@ -131,7 +131,7 @@ class Vector { * The elements will be default constructed. * If T is trivially constructible, the elements in the vector are not touched. */ - explicit Vector(uint size) : Vector() + explicit Vector(int64_t size) : Vector() { this->resize(size); } @@ -139,37 +139,48 @@ class Vector { /** * Create a vector filled with a specific value. */ - Vector(uint size, const T &value) : Vector() + Vector(int64_t size, const T &value) : Vector() { + this->resize(size, value); + } + + /** + * Create a vector from an array ref. The values in the vector are copy constructed. 
+ */ + template<typename U, typename std::enable_if_t<std::is_convertible_v<U, T>> * = nullptr> + Vector(Span<U> values, Allocator allocator = {}) : Vector(allocator) + { + const int64_t size = values.size(); this->reserve(size); this->increase_size_by_unchecked(size); - blender::uninitialized_fill_n(m_begin, size, value); + uninitialized_convert_n<U, T>(values.data(), size, begin_); } /** - * Create a vector that contains copys of the values in the initialized list. + * Create a vector that contains copies of the values in the initialized list. * * This allows you to write code like: * Vector<int> vec = {3, 4, 5}; */ + template<typename U, typename std::enable_if_t<std::is_convertible_v<U, T>> * = nullptr> + Vector(const std::initializer_list<U> &values) : Vector(Span<U>(values)) + { + } + Vector(const std::initializer_list<T> &values) : Vector(Span<T>(values)) { } - /** - * Create a vector from an array ref. The values in the vector are copy constructed. - */ - Vector(Span<T> values) : Vector() + template<typename U, + size_t N, + typename std::enable_if_t<std::is_convertible_v<U, T>> * = nullptr> + Vector(const std::array<U, N> &values) : Vector(Span(values)) { - uint size = values.size(); - this->reserve(size); - this->increase_size_by_unchecked(size); - blender::uninitialized_copy_n(values.data(), size, m_begin); } /** - * Create a vector from any container. It must be possible to use the container in a range-for - * loop. + * Create a vector from any container. It must be possible to use the container in a + * range-for loop. */ template<typename ContainerT> static Vector FromContainer(const ContainerT &container) { @@ -198,91 +209,69 @@ class Vector { * Create a copy of another vector. The other vector will not be changed. If the other vector has * less than InlineBufferCapacity elements, no allocation will be made. 
*/ - Vector(const Vector &other) : m_allocator(other.m_allocator) + Vector(const Vector &other) : Vector(other.as_span(), other.allocator_) { - this->init_copy_from_other_vector(other); } /** * Create a copy of a vector with a different InlineBufferCapacity. This needs to be handled * separately, so that the other one is a valid copy constructor. */ - template<uint OtherInlineBufferCapacity> + template<int64_t OtherInlineBufferCapacity> Vector(const Vector<T, OtherInlineBufferCapacity, Allocator> &other) - : m_allocator(other.m_allocator) + : Vector(other.as_span(), other.allocator_) { - this->init_copy_from_other_vector(other); } /** * Steal the elements from another vector. This does not do an allocation. The other vector will * have zero elements afterwards. */ - template<uint OtherInlineBufferCapacity> + template<int64_t OtherInlineBufferCapacity> Vector(Vector<T, OtherInlineBufferCapacity, Allocator> &&other) noexcept - : m_allocator(other.m_allocator) + : allocator_(other.allocator_) { - uint size = other.size(); + const int64_t size = other.size(); if (other.is_inline()) { if (size <= InlineBufferCapacity) { /* Copy between inline buffers. */ - m_begin = this->inline_buffer(); - m_end = m_begin + size; - m_capacity_end = m_begin + InlineBufferCapacity; - uninitialized_relocate_n(other.m_begin, size, m_begin); + begin_ = inline_buffer_; + end_ = begin_ + size; + capacity_end_ = begin_ + InlineBufferCapacity; + uninitialized_relocate_n(other.begin_, size, begin_); } else { /* Copy from inline buffer to newly allocated buffer. 
*/ - uint capacity = size; - m_begin = (T *)m_allocator.allocate(sizeof(T) * capacity, alignof(T), AT); - m_end = m_begin + size; - m_capacity_end = m_begin + capacity; - uninitialized_relocate_n(other.m_begin, size, m_begin); + const int64_t capacity = size; + begin_ = (T *)allocator_.allocate(sizeof(T) * (size_t)capacity, alignof(T), AT); + end_ = begin_ + size; + capacity_end_ = begin_ + capacity; + uninitialized_relocate_n(other.begin_, size, begin_); } } else { /* Steal the pointer. */ - m_begin = other.m_begin; - m_end = other.m_end; - m_capacity_end = other.m_capacity_end; + begin_ = other.begin_; + end_ = other.end_; + capacity_end_ = other.capacity_end_; } - other.m_begin = other.inline_buffer(); - other.m_end = other.m_begin; - other.m_capacity_end = other.m_begin + OtherInlineBufferCapacity; + other.begin_ = other.inline_buffer_; + other.end_ = other.begin_; + other.capacity_end_ = other.begin_ + OtherInlineBufferCapacity; UPDATE_VECTOR_SIZE(this); UPDATE_VECTOR_SIZE(&other); } ~Vector() { - destruct_n(m_begin, this->size()); + destruct_n(begin_, this->size()); if (!this->is_inline()) { - m_allocator.deallocate(m_begin); + allocator_.deallocate(begin_); } } - operator Span<T>() const - { - return Span<T>(m_begin, this->size()); - } - - operator MutableSpan<T>() - { - return MutableSpan<T>(m_begin, this->size()); - } - - Span<T> as_span() const - { - return *this; - } - - MutableSpan<T> as_mutable_span() - { - return *this; - } - Vector &operator=(const Vector &other) { if (this == &other) { @@ -310,11 +299,61 @@ class Vector { } /** + * Get the value at the given index. This invokes undefined behavior when the index is out of + * bounds. 
+ */ + const T &operator[](int64_t index) const + { + BLI_assert(index >= 0); + BLI_assert(index < this->size()); + return begin_[index]; + } + + T &operator[](int64_t index) + { + BLI_assert(index >= 0); + BLI_assert(index < this->size()); + return begin_[index]; + } + + operator Span<T>() const + { + return Span<T>(begin_, this->size()); + } + + operator MutableSpan<T>() + { + return MutableSpan<T>(begin_, this->size()); + } + + template<typename U, typename std::enable_if_t<is_convertible_pointer_v<T, U>> * = nullptr> + operator Span<U>() const + { + return Span<U>(begin_, this->size()); + } + + template<typename U, typename std::enable_if_t<is_convertible_pointer_v<T, U>> * = nullptr> + operator MutableSpan<U>() + { + return MutableSpan<U>(begin_, this->size()); + } + + Span<T> as_span() const + { + return *this; + } + + MutableSpan<T> as_mutable_span() + { + return *this; + } + + /** * Make sure that enough memory is allocated to hold min_capacity elements. * This won't necessarily make an allocation when min_capacity is small. * The actual size of the vector does not change. */ - void reserve(uint min_capacity) + void reserve(const int64_t min_capacity) { if (min_capacity > this->capacity()) { this->realloc_to_at_least(min_capacity); @@ -327,17 +366,18 @@ class Vector { * destructed. If new_size is larger than the old size, the new elements at the end are default * constructed. If T is trivially constructible, the memory is not touched by this function. 
*/ - void resize(uint new_size) + void resize(const int64_t new_size) { - uint old_size = this->size(); + BLI_assert(new_size >= 0); + const int64_t old_size = this->size(); if (new_size > old_size) { this->reserve(new_size); - default_construct_n(m_begin + old_size, new_size - old_size); + default_construct_n(begin_ + old_size, new_size - old_size); } else { - destruct_n(m_begin + new_size, old_size - new_size); + destruct_n(begin_ + new_size, old_size - new_size); } - m_end = m_begin + new_size; + end_ = begin_ + new_size; UPDATE_VECTOR_SIZE(this); } @@ -347,17 +387,18 @@ class Vector { * destructed. If new_size is larger than the old size, the new elements will be copy constructed * from the given value. */ - void resize(uint new_size, const T &value) + void resize(const int64_t new_size, const T &value) { - uint old_size = this->size(); + BLI_assert(new_size >= 0); + const int64_t old_size = this->size(); if (new_size > old_size) { this->reserve(new_size); - uninitialized_fill_n(m_begin + old_size, new_size - old_size, value); + uninitialized_fill_n(begin_ + old_size, new_size - old_size, value); } else { - destruct_n(m_begin + new_size, old_size - new_size); + destruct_n(begin_ + new_size, old_size - new_size); } - m_end = m_begin + new_size; + end_ = begin_ + new_size; UPDATE_VECTOR_SIZE(this); } @@ -367,8 +408,8 @@ class Vector { */ void clear() { - destruct_n(m_begin, this->size()); - m_end = m_begin; + destruct_n(begin_, this->size()); + end_ = begin_; UPDATE_VECTOR_SIZE(this); } @@ -378,14 +419,14 @@ class Vector { */ void clear_and_make_inline() { - destruct_n(m_begin, this->size()); + destruct_n(begin_, this->size()); if (!this->is_inline()) { - m_allocator.deallocate(m_begin); + allocator_.deallocate(begin_); } - m_begin = this->inline_buffer(); - m_end = m_begin; - m_capacity_end = m_begin + InlineBufferCapacity; + begin_ = inline_buffer_; + end_ = begin_; + capacity_end_ = begin_ + InlineBufferCapacity; UPDATE_VECTOR_SIZE(this); } @@ -410,9 +451,9 @@ 
class Vector { * Append the value to the vector and return the index that can be used to access the newly * added value. */ - uint append_and_get_index(const T &value) + int64_t append_and_get_index(const T &value) { - uint index = this->size(); + const int64_t index = this->size(); this->append(value); return index; } @@ -436,16 +477,16 @@ class Vector { */ void append_unchecked(const T &value) { - BLI_assert(m_end < m_capacity_end); - new (m_end) T(value); - m_end++; + BLI_assert(end_ < capacity_end_); + new (end_) T(value); + end_++; UPDATE_VECTOR_SIZE(this); } void append_unchecked(T &&value) { - BLI_assert(m_end < m_capacity_end); - new (m_end) T(std::move(value)); - m_end++; + BLI_assert(end_ < capacity_end_); + new (end_) T(std::move(value)); + end_++; UPDATE_VECTOR_SIZE(this); } @@ -453,10 +494,11 @@ class Vector { * Insert the same element n times at the end of the vector. * This might result in a reallocation internally. */ - void append_n_times(const T &value, uint n) + void append_n_times(const T &value, const int64_t n) { + BLI_assert(n >= 0); this->reserve(this->size() + n); - blender::uninitialized_fill_n(m_end, n, value); + blender::uninitialized_fill_n(end_, n, value); this->increase_size_by_unchecked(n); } @@ -466,10 +508,10 @@ class Vector { * useful when you want to call constructors in the vector yourself. This should only be done in * very rare cases and has to be justified every time. 
*/ - void increase_size_by_unchecked(uint n) + void increase_size_by_unchecked(const int64_t n) { - BLI_assert(m_end + n <= m_capacity_end); - m_end += n; + BLI_assert(end_ + n <= capacity_end_); + end_ += n; UPDATE_VECTOR_SIZE(this); } @@ -482,7 +524,7 @@ class Vector { { this->extend(array.data(), array.size()); } - void extend(const T *start, uint amount) + void extend(const T *start, int64_t amount) { this->reserve(this->size() + amount); this->extend_unchecked(start, amount); @@ -508,52 +550,37 @@ class Vector { { this->extend_unchecked(array.data(), array.size()); } - void extend_unchecked(const T *start, uint amount) + void extend_unchecked(const T *start, int64_t amount) { - BLI_assert(m_begin + amount <= m_capacity_end); - blender::uninitialized_copy_n(start, amount, m_end); - m_end += amount; + BLI_assert(amount >= 0); + BLI_assert(begin_ + amount <= capacity_end_); + blender::uninitialized_copy_n(start, amount, end_); + end_ += amount; UPDATE_VECTOR_SIZE(this); } /** * Return a reference to the last element in the vector. - * This will assert when the vector is empty. + * This invokes undefined behavior when the vector is empty. */ const T &last() const { BLI_assert(this->size() > 0); - return *(m_end - 1); + return *(end_ - 1); } T &last() { BLI_assert(this->size() > 0); - return *(m_end - 1); - } - - /** - * Replace every element with a new value. - */ - void fill(const T &value) - { - initialized_fill_n(m_begin, this->size(), value); - } - - /** - * Copy the value to all positions specified by the indices array. - */ - void fill_indices(Span<uint> indices, const T &value) - { - MutableSpan<T>(*this).fill_indices(indices, value); + return *(end_ - 1); } /** * Return how many values are currently stored in the vector. 
*/ - uint size() const + int64_t size() const { - BLI_assert(m_debug_size == (uint)(m_end - m_begin)); - return (uint)(m_end - m_begin); + BLI_assert(debug_size_ == (int64_t)(end_ - begin_)); + return (int64_t)(end_ - begin_); } /** @@ -563,7 +590,7 @@ class Vector { */ bool is_empty() const { - return m_begin == m_end; + return begin_ == end_; } /** @@ -573,8 +600,8 @@ class Vector { void remove_last() { BLI_assert(!this->is_empty()); - m_end--; - m_end->~T(); + end_--; + end_->~T(); UPDATE_VECTOR_SIZE(this); } @@ -587,9 +614,9 @@ class Vector { T pop_last() { BLI_assert(!this->is_empty()); - m_end--; - T value = std::move(*m_end); - m_end->~T(); + end_--; + T value = std::move(*end_); + end_->~T(); UPDATE_VECTOR_SIZE(this); return value; } @@ -598,26 +625,27 @@ class Vector { * Delete any element in the vector. The empty space will be filled by the previously last * element. This takes O(1) time. */ - void remove_and_reorder(uint index) + void remove_and_reorder(const int64_t index) { + BLI_assert(index >= 0); BLI_assert(index < this->size()); - T *element_to_remove = m_begin + index; - m_end--; - if (element_to_remove < m_end) { - *element_to_remove = std::move(*m_end); + T *element_to_remove = begin_ + index; + end_--; + if (element_to_remove < end_) { + *element_to_remove = std::move(*end_); } - m_end->~T(); + end_->~T(); UPDATE_VECTOR_SIZE(this); } /** - * Finds the first occurence of the value, removes it and copies the last element to the hole in + * Finds the first occurrence of the value, removes it and copies the last element to the hole in * the vector. This takes O(n) time. */ void remove_first_occurrence_and_reorder(const T &value) { - uint index = this->first_index_of(value); - this->remove_and_reorder((uint)index); + const int64_t index = this->first_index_of(value); + this->remove_and_reorder(index); } /** @@ -627,15 +655,16 @@ class Vector { * * This is similar to std::vector::erase. 
*/ - void remove(uint index) + void remove(const int64_t index) { + BLI_assert(index >= 0); BLI_assert(index < this->size()); - uint last_index = this->size() - 1; - for (uint i = index; i < last_index; i++) { - m_begin[i] = std::move(m_begin[i + 1]); + const int64_t last_index = this->size() - 1; + for (int64_t i = index; i < last_index; i++) { + begin_[i] = std::move(begin_[i + 1]); } - m_begin[last_index].~T(); - m_end--; + begin_[last_index].~T(); + end_--; UPDATE_VECTOR_SIZE(this); } @@ -643,11 +672,11 @@ class Vector { * Do a linear search to find the value in the vector. * When found, return the first index, otherwise return -1. */ - int first_index_of_try(const T &value) const + int64_t first_index_of_try(const T &value) const { - for (T *current = m_begin; current != m_end; current++) { + for (const T *current = begin_; current != end_; current++) { if (*current == value) { - return (int)(current - m_begin); + return (int64_t)(current - begin_); } } return -1; @@ -657,11 +686,11 @@ class Vector { * Do a linear search to find the value in the vector and return the found index. This invokes * undefined behavior when the value is not in the vector. */ - uint first_index_of(const T &value) const + int64_t first_index_of(const T &value) const { - int index = this->first_index_of_try(value); + const int64_t index = this->first_index_of_try(value); BLI_assert(index >= 0); - return (uint)index; + return index; } /** @@ -674,19 +703,11 @@ class Vector { } /** - * Get the value at the given index. This invokes undefined behavior when the index is out of - * bounds. + * Copies the given value to every element in the vector. 
*/ - const T &operator[](uint index) const + void fill(const T &value) const { - BLI_assert(index < this->size()); - return m_begin[index]; - } - - T &operator[](uint index) - { - BLI_assert(index < this->size()); - return m_begin[index]; + initialized_fill_n(begin_, this->size(), value); } /** @@ -694,7 +715,7 @@ class Vector { */ T *data() { - return m_begin; + return begin_; } /** @@ -702,34 +723,34 @@ class Vector { */ const T *data() const { - return m_begin; + return begin_; } T *begin() { - return m_begin; + return begin_; } T *end() { - return m_end; + return end_; } const T *begin() const { - return m_begin; + return begin_; } const T *end() const { - return m_end; + return end_; } /** * Get the current capacity of the vector, i.e. the maximum number of elements the vector can * hold, before it has to reallocate. */ - uint capacity() const + int64_t capacity() const { - return (uint)(m_capacity_end - m_begin); + return (int64_t)(capacity_end_ - begin_); } /** @@ -737,7 +758,7 @@ class Vector { * Obviously, this should only be used when you actually need the index in the loop. 
* * Example: - * for (uint i : myvector.index_range()) { + * for (int64_t i : myvector.index_range()) { * do_something(i, my_vector[i]); * } */ @@ -754,7 +775,7 @@ class Vector { std::cout << "Vector Stats: " << name << "\n"; std::cout << " Address: " << this << "\n"; std::cout << " Elements: " << this->size() << "\n"; - std::cout << " Capacity: " << (m_capacity_end - m_begin) << "\n"; + std::cout << " Capacity: " << (capacity_end_ - begin_) << "\n"; std::cout << " Inline Capacity: " << InlineBufferCapacity << "\n"; char memory_size_str[15]; @@ -763,26 +784,19 @@ class Vector { } private: - T *inline_buffer() const - { - return (T *)m_inline_buffer.ptr(); - } - bool is_inline() const { - return m_begin == this->inline_buffer(); + return begin_ == inline_buffer_; } void ensure_space_for_one() { - if (UNLIKELY(m_end >= m_capacity_end)) { + if (UNLIKELY(end_ >= capacity_end_)) { this->realloc_to_at_least(this->size() + 1); } - std::vector<int> a; - a.push_back(4); } - BLI_NOINLINE void realloc_to_at_least(uint min_capacity) + BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity) { if (this->capacity() >= min_capacity) { return; @@ -790,59 +804,32 @@ class Vector { /* At least double the size of the previous allocation. Otherwise consecutive calls to grow can * cause a reallocation every time even though min_capacity only increments. 
*/ - uint min_new_capacity = this->capacity() * 2; + const int64_t min_new_capacity = this->capacity() * 2; - uint new_capacity = std::max(min_capacity, min_new_capacity); - uint size = this->size(); + const int64_t new_capacity = std::max(min_capacity, min_new_capacity); + const int64_t size = this->size(); - T *new_array = (T *)m_allocator.allocate(new_capacity * (uint)sizeof(T), alignof(T), AT); - uninitialized_relocate_n(m_begin, size, new_array); + T *new_array = (T *)allocator_.allocate((size_t)new_capacity * sizeof(T), alignof(T), AT); + uninitialized_relocate_n(begin_, size, new_array); if (!this->is_inline()) { - m_allocator.deallocate(m_begin); + allocator_.deallocate(begin_); } - m_begin = new_array; - m_end = m_begin + size; - m_capacity_end = m_begin + new_capacity; - } - - /** - * Initialize all properties, except for m_allocator, which has to be initialized beforehand. - */ - template<uint OtherInlineBufferCapacity> - void init_copy_from_other_vector(const Vector<T, OtherInlineBufferCapacity, Allocator> &other) - { - m_allocator = other.m_allocator; - - uint size = other.size(); - uint capacity = size; - - if (size <= InlineBufferCapacity) { - m_begin = this->inline_buffer(); - capacity = InlineBufferCapacity; - } - else { - m_begin = (T *)m_allocator.allocate(sizeof(T) * size, alignof(T), AT); - capacity = size; - } - - m_end = m_begin + size; - m_capacity_end = m_begin + capacity; - - uninitialized_copy_n(other.data(), size, m_begin); - UPDATE_VECTOR_SIZE(this); + begin_ = new_array; + end_ = begin_ + size; + capacity_end_ = begin_ + new_capacity; } }; #undef UPDATE_VECTOR_SIZE /** - * Use when the vector is used in the local scope of a function. It has a larger inline storage by - * default to make allocations less likely. + * Same as a normal Vector, but does not use Blender's guarded allocator. This is useful when + * allocating memory with static storage duration. 
*/ -template<typename T, uint InlineBufferCapacity = 20> -using ScopedVector = Vector<T, InlineBufferCapacity, GuardedAllocator>; +template<typename T, int64_t InlineBufferCapacity = default_inline_buffer_capacity(sizeof(T))> +using RawVector = Vector<T, InlineBufferCapacity, RawAllocator>; } /* namespace blender */ diff --git a/source/blender/blenlib/BLI_vector_adaptor.hh b/source/blender/blenlib/BLI_vector_adaptor.hh new file mode 100644 index 00000000000..cadffc0b445 --- /dev/null +++ b/source/blender/blenlib/BLI_vector_adaptor.hh @@ -0,0 +1,105 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __BLI_VECTOR_ADAPTOR_HH__ +#define __BLI_VECTOR_ADAPTOR_HH__ + +/** \file + * \ingroup bli + * + * A `blender::VectorAdaptor` is a container with a fixed maximum size and does not own the + * underlying memory. When an adaptor is constructed, you have to provide it with an uninitialized + * array that will be filled when elements are added to the vector. The vector adaptor is not able + * to grow. Therefore, it is undefined behavior to add more elements than fit into the provided + * buffer. 
+ */ + +#include "BLI_span.hh" + +namespace blender { + +template<typename T> class VectorAdaptor { + private: + T *begin_; + T *end_; + T *capacity_end_; + + public: + VectorAdaptor() : begin_(nullptr), end_(nullptr), capacity_end_(nullptr) + { + } + + VectorAdaptor(T *data, int64_t capacity, int64_t size = 0) + : begin_(data), end_(data + size), capacity_end_(data + capacity) + { + } + + VectorAdaptor(MutableSpan<T> span) : VectorAdaptor(span.data(), span.size(), 0) + { + } + + void append(const T &value) + { + BLI_assert(end_ < capacity_end_); + new (end_) T(value); + end_++; + } + + void append(T &&value) + { + BLI_assert(end_ < capacity_end_); + new (end_) T(std::move(value)); + end_++; + } + + void append_n_times(const T &value, int64_t n) + { + BLI_assert(end_ + n <= capacity_end_); + uninitialized_fill_n(end_, n, value); + end_ += n; + } + + void extend(Span<T> values) + { + BLI_assert(end_ + values.size() <= capacity_end_); + uninitialized_copy_n(values.data(), values.size(), end_); + end_ += values.size(); + } + + int64_t capacity() const + { + return capacity_end_ - begin_; + } + + int64_t size() const + { + return end_ - begin_; + } + + bool is_empty() const + { + return begin_ == end_; + } + + bool is_full() const + { + return end_ == capacity_end_; + } +}; + +} // namespace blender + +#endif /* __BLI_VECTOR_ADAPTOR_HH__ */ diff --git a/source/blender/blenlib/BLI_vector_set.hh b/source/blender/blenlib/BLI_vector_set.hh index d330d3c3247..f007d41118f 100644 --- a/source/blender/blenlib/BLI_vector_set.hh +++ b/source/blender/blenlib/BLI_vector_set.hh @@ -27,7 +27,7 @@ * * All core operations (add, remove and contains) can be done in O(1) amortized expected time. * - * Using a VectorSet instead of a normal Set can be benefitial in any of the following + * Using a VectorSet instead of a normal Set can be beneficial in any of the following * circumstances: * - The insertion order is important. * - Iteration over all keys has to be fast. 
@@ -106,30 +106,30 @@ class VectorSet { * Slots are either empty, occupied or removed. The number of occupied slots can be computed by * subtracting the removed slots from the occupied-and-removed slots. */ - uint32_t m_removed_slots; - uint32_t m_occupied_and_removed_slots; + int64_t removed_slots_; + int64_t occupied_and_removed_slots_; /** * The maximum number of slots that can be used (either occupied or removed) until the set has to * grow. This is the total number of slots times the max load factor. */ - uint32_t m_usable_slots; + int64_t usable_slots_; /** * The number of slots minus one. This is a bit mask that can be used to turn any integer into a * valid slot index efficiently. */ - uint32_t m_slot_mask; + uint64_t slot_mask_; /** This is called to hash incoming keys. */ - Hash m_hash; + Hash hash_; /** This is called to check equality of two keys. */ - IsEqual m_is_equal; + IsEqual is_equal_; /** The max load factor is 1/2 = 50% by default. */ #define LOAD_FACTOR 1, 2 - LoadFactor m_max_load_factor = LoadFactor(LOAD_FACTOR); + LoadFactor max_load_factor_ = LoadFactor(LOAD_FACTOR); using SlotArray = Array<Slot, LoadFactor::compute_total_slots(4, LOAD_FACTOR), Allocator>; #undef LOAD_FACTOR @@ -137,19 +137,19 @@ class VectorSet { * This is the array that contains the actual slots. There is always at least one empty slot and * the size of the array is a power of two. */ - SlotArray m_slots; + SlotArray slots_; /** * Pointer to an array that contains all keys. The keys are sorted by insertion order as long as * no keys are removed. The first set->size() elements in this array are initialized. The - * capacity of the array is m_usable_slots. + * capacity of the array is usable_slots_. */ - Key *m_keys; + Key *keys_; /** Iterate over a slot index sequence for a given hash. 
*/ #define VECTOR_SET_SLOT_PROBING_BEGIN(HASH, R_SLOT) \ - SLOT_PROBING_BEGIN (ProbingStrategy, HASH, m_slot_mask, SLOT_INDEX) \ - auto &R_SLOT = m_slots[SLOT_INDEX]; + SLOT_PROBING_BEGIN (ProbingStrategy, HASH, slot_mask_, SLOT_INDEX) \ + auto &R_SLOT = slots_[SLOT_INDEX]; #define VECTOR_SET_SLOT_PROBING_END() SLOT_PROBING_END() public: @@ -159,12 +159,12 @@ class VectorSet { * is performed on the first insertion. */ VectorSet() - : m_removed_slots(0), - m_occupied_and_removed_slots(0), - m_usable_slots(0), - m_slot_mask(0), - m_slots(1), - m_keys(nullptr) + : removed_slots_(0), + occupied_and_removed_slots_(0), + usable_slots_(0), + slot_mask_(0), + slots_(1), + keys_(nullptr) { } @@ -178,37 +178,37 @@ class VectorSet { ~VectorSet() { - destruct_n(m_keys, this->size()); - if (m_keys != nullptr) { - this->deallocate_keys_array(m_keys); + destruct_n(keys_, this->size()); + if (keys_ != nullptr) { + this->deallocate_keys_array(keys_); } } VectorSet(const VectorSet &other) - : m_removed_slots(other.m_removed_slots), - m_occupied_and_removed_slots(other.m_occupied_and_removed_slots), - m_usable_slots(other.m_usable_slots), - m_slot_mask(other.m_slot_mask), - m_slots(other.m_slots) + : removed_slots_(other.removed_slots_), + occupied_and_removed_slots_(other.occupied_and_removed_slots_), + usable_slots_(other.usable_slots_), + slot_mask_(other.slot_mask_), + slots_(other.slots_) { - m_keys = this->allocate_keys_array(m_usable_slots); - uninitialized_copy_n(other.m_keys, other.size(), m_keys); + keys_ = this->allocate_keys_array(usable_slots_); + uninitialized_copy_n(other.keys_, other.size(), keys_); } VectorSet(VectorSet &&other) noexcept - : m_removed_slots(other.m_removed_slots), - m_occupied_and_removed_slots(other.m_occupied_and_removed_slots), - m_usable_slots(other.m_usable_slots), - m_slot_mask(other.m_slot_mask), - m_slots(std::move(other.m_slots)), - m_keys(other.m_keys) + : removed_slots_(other.removed_slots_), + 
occupied_and_removed_slots_(other.occupied_and_removed_slots_), + usable_slots_(other.usable_slots_), + slot_mask_(other.slot_mask_), + slots_(std::move(other.slots_)), + keys_(other.keys_) { - other.m_removed_slots = 0; - other.m_occupied_and_removed_slots = 0; - other.m_usable_slots = 0; - other.m_slot_mask = 0; - other.m_slots = SlotArray(1); - other.m_keys = nullptr; + other.removed_slots_ = 0; + other.occupied_and_removed_slots_ = 0; + other.usable_slots_ = 0; + other.slot_mask_ = 0; + other.slots_ = SlotArray(1); + other.keys_ = nullptr; } VectorSet &operator=(const VectorSet &other) @@ -236,17 +236,43 @@ class VectorSet { } /** + * Get the key stored at the given position in the vector. + */ + const Key &operator[](const int64_t index) const + { + BLI_assert(index >= 0); + BLI_assert(index <= this->size()); + return keys_[index]; + } + + operator Span<Key>() const + { + return Span<Key>(keys_, this->size()); + } + + /** + * Get an Span referencing the keys vector. The referenced memory buffer is only valid as + * long as the vector set is not changed. + * + * The keys must not be changed, because this would change their hash value. + */ + Span<Key> as_span() const + { + return *this; + } + + /** * Add a new key to the vector set. This invokes undefined behavior when the key is in the set * already. When you know for certain that a key is not in the set yet, use this method for * better performance. This also expresses the intent better. */ void add_new(const Key &key) { - this->add_new__impl(key, m_hash(key)); + this->add_new__impl(key, hash_(key)); } void add_new(Key &&key) { - this->add_new__impl(std::move(key), m_hash(key)); + this->add_new__impl(std::move(key), hash_(key)); } /** @@ -263,13 +289,9 @@ class VectorSet { { return this->add_as(std::move(key)); } - - /** - * Same as `add`, but accepts other key types that are supported by the hash function. 
- */ template<typename ForwardKey> bool add_as(ForwardKey &&key) { - return this->add__impl(std::forward<ForwardKey>(key), m_hash(key)); + return this->add__impl(std::forward<ForwardKey>(key), hash_(key)); } /** @@ -295,13 +317,9 @@ class VectorSet { { return this->contains_as(key); } - - /** - * Same as `contains`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> bool contains_as(const ForwardKey &key) const { - return this->contains__impl(key, m_hash(key)); + return this->contains__impl(key, hash_(key)); } /** @@ -314,13 +332,9 @@ class VectorSet { { return this->remove_as(key); } - - /** - * Same as `remove`, but accepts other key types that are supported by the hash function. - */ template<typename ForwardKey> bool remove_as(const ForwardKey &key) { - return this->remove__impl(key, m_hash(key)); + return this->remove__impl(key, hash_(key)); } /** @@ -331,14 +345,9 @@ class VectorSet { { this->remove_contained_as(key); } - - /** - * Same as `remove_contained`, but accepts other key types that are supported by the hash - * function. - */ template<typename ForwardKey> void remove_contained_as(const ForwardKey &key) { - this->remove_contained__impl(key, m_hash(key)); + this->remove_contained__impl(key, hash_(key)); } /** @@ -354,34 +363,26 @@ class VectorSet { * Return the location of the key in the vector. It is assumed, that the key is in the vector * set. If this is not necessarily the case, use `index_of_try`. */ - uint32_t index_of(const Key &key) const + int64_t index_of(const Key &key) const { return this->index_of_as(key); } - - /** - * Same as `index_of`, but accepts other key types that are supported by the hash function. 
- */ - template<typename ForwardKey> uint32_t index_of_as(const ForwardKey &key) const + template<typename ForwardKey> int64_t index_of_as(const ForwardKey &key) const { - return this->index_of__impl(key, m_hash(key)); + return this->index_of__impl(key, hash_(key)); } /** * Return the location of the key in the vector. If the key is not in the set, -1 is returned. * If you know for sure that the key is in the set, it is better to use `index_of` instead. */ - int32_t index_of_try(const Key &key) const + int64_t index_of_try(const Key &key) const { - return (int32_t)this->index_of_try_as(key); + return this->index_of_try_as(key); } - - /** - * Same as `index_of_try`, but accepts other key types that are supported by the hash function. - */ - template<typename ForwardKey> int32_t index_of_try_as(const ForwardKey &key) const + template<typename ForwardKey> int64_t index_of_try_as(const ForwardKey &key) const { - return this->index_of_try__impl(key, m_hash(key)); + return this->index_of_try__impl(key, hash_(key)); } /** @@ -389,42 +390,17 @@ class VectorSet { */ const Key *data() const { - return m_keys; + return keys_; } const Key *begin() const { - return m_keys; + return keys_; } const Key *end() const { - return m_keys + this->size(); - } - - /** - * Get the key stored at the given position in the vector. - */ - const Key &operator[](uint32_t index) const - { - BLI_assert(index <= this->size()); - return m_keys[index]; - } - - operator Span<Key>() const - { - return Span<Key>(m_keys, this->size()); - } - - /** - * Get an Span referencing the keys vector. The referenced memory buffer is only valid as - * long as the vector set is not changed. - * - * The keys must not be changed, because this would change their hash value. 
- */ - Span<Key> as_span() const - { - return *this; + return keys_ + this->size(); } /** @@ -433,15 +409,15 @@ class VectorSet { void print_stats(StringRef name = "") const { HashTableStats stats(*this, this->as_span()); - stats.print(); + stats.print(name); } /** * Returns the number of keys stored in the vector set. */ - uint32_t size() const + int64_t size() const { - return m_occupied_and_removed_slots - m_removed_slots; + return occupied_and_removed_slots_ - removed_slots_; } /** @@ -449,29 +425,29 @@ class VectorSet { */ bool is_empty() const { - return m_occupied_and_removed_slots == m_removed_slots; + return occupied_and_removed_slots_ == removed_slots_; } /** * Returns the number of available slots. This is mostly for debugging purposes. */ - uint32_t capacity() const + int64_t capacity() const { - return m_slots.size(); + return slots_.size(); } /** * Returns the amount of removed slots in the set. This is mostly for debugging purposes. */ - uint32_t removed_amount() const + int64_t removed_amount() const { - return m_removed_slots; + return removed_slots_; } /** * Returns the bytes required per element. This is mostly for debugging purposes. */ - uint32_t size_per_element() const + int64_t size_per_element() const { return sizeof(Slot) + sizeof(Key); } @@ -480,17 +456,17 @@ class VectorSet { * Returns the approximate memory requirements of the set in bytes. This is more correct for * larger sets. */ - uint32_t size_in_bytes() const + int64_t size_in_bytes() const { - return sizeof(Slot) * m_slots.size() + sizeof(Key) * m_usable_slots; + return (int64_t)(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_); } /** * Potentially resize the vector set such that it can hold n elements without doing another grow. 
*/ - void reserve(uint32_t n) + void reserve(const int64_t n) { - if (m_usable_slots < n) { + if (usable_slots_ < n) { this->realloc_and_reinsert(n); } } @@ -499,60 +475,61 @@ class VectorSet { * Get the number of collisions that the probing strategy has to go through to find the key or * determine that it is not in the set. */ - uint32_t count_collisions(const Key &key) const + int64_t count_collisions(const Key &key) const { - return this->count_collisions__impl(key, m_hash(key)); + return this->count_collisions__impl(key, hash_(key)); } private: - BLI_NOINLINE void realloc_and_reinsert(uint32_t min_usable_slots) + BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots) { - uint32_t total_slots, usable_slots; - m_max_load_factor.compute_total_and_usable_slots( + int64_t total_slots, usable_slots; + max_load_factor_.compute_total_and_usable_slots( SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots); - uint32_t new_slot_mask = total_slots - 1; + BLI_assert(total_slots >= 1); + const uint64_t new_slot_mask = (uint64_t)total_slots - 1; /* Optimize the case when the set was empty beforehand. We can avoid some copies here. 
*/ if (this->size() == 0) { - m_slots.~Array(); - new (&m_slots) SlotArray(total_slots); - m_removed_slots = 0; - m_occupied_and_removed_slots = 0; - m_usable_slots = usable_slots; - m_slot_mask = new_slot_mask; - m_keys = this->allocate_keys_array(usable_slots); + slots_.~Array(); + new (&slots_) SlotArray(total_slots); + removed_slots_ = 0; + occupied_and_removed_slots_ = 0; + usable_slots_ = usable_slots; + slot_mask_ = new_slot_mask; + keys_ = this->allocate_keys_array(usable_slots); return; } SlotArray new_slots(total_slots); - for (Slot &slot : m_slots) { + for (Slot &slot : slots_) { if (slot.is_occupied()) { this->add_after_grow_and_destruct_old(slot, new_slots, new_slot_mask); } } Key *new_keys = this->allocate_keys_array(usable_slots); - uninitialized_relocate_n(m_keys, this->size(), new_keys); - this->deallocate_keys_array(m_keys); + uninitialized_relocate_n(keys_, this->size(), new_keys); + this->deallocate_keys_array(keys_); /* All occupied slots have been destructed already and empty/removed slots are assumed to be * trivially destructible. 
*/ - m_slots.clear_without_destruct(); - m_slots = std::move(new_slots); - m_keys = new_keys; - m_occupied_and_removed_slots -= m_removed_slots; - m_usable_slots = usable_slots; - m_removed_slots = 0; - m_slot_mask = new_slot_mask; + slots_.clear_without_destruct(); + slots_ = std::move(new_slots); + keys_ = new_keys; + occupied_and_removed_slots_ -= removed_slots_; + usable_slots_ = usable_slots; + removed_slots_ = 0; + slot_mask_ = new_slot_mask; } void add_after_grow_and_destruct_old(Slot &old_slot, SlotArray &new_slots, - uint32_t new_slot_mask) + const uint64_t new_slot_mask) { - const Key &key = m_keys[old_slot.index()]; - uint32_t hash = old_slot.get_hash(key, Hash()); + const Key &key = keys_[old_slot.index()]; + const uint64_t hash = old_slot.get_hash(key, Hash()); SLOT_PROBING_BEGIN (ProbingStrategy, hash, new_slot_mask, slot_index) { Slot &slot = new_slots[slot_index]; @@ -564,20 +541,21 @@ class VectorSet { SLOT_PROBING_END(); } - template<typename ForwardKey> bool contains__impl(const ForwardKey &key, uint32_t hash) const + template<typename ForwardKey> + bool contains__impl(const ForwardKey &key, const uint64_t hash) const { VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { return false; } - if (slot.contains(key, m_is_equal, hash, m_keys)) { + if (slot.contains(key, is_equal_, hash, keys_)) { return true; } } VECTOR_SET_SLOT_PROBING_END(); } - template<typename ForwardKey> void add_new__impl(ForwardKey &&key, uint32_t hash) + template<typename ForwardKey> void add_new__impl(ForwardKey &&key, const uint64_t hash) { BLI_assert(!this->contains_as(key)); @@ -585,41 +563,42 @@ class VectorSet { VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { - uint32_t index = this->size(); - new (m_keys + index) Key(std::forward<ForwardKey>(key)); + int64_t index = this->size(); + new (keys_ + index) Key(std::forward<ForwardKey>(key)); slot.occupy(index, hash); - m_occupied_and_removed_slots++; + occupied_and_removed_slots_++; 
return; } } VECTOR_SET_SLOT_PROBING_END(); } - template<typename ForwardKey> bool add__impl(ForwardKey &&key, uint32_t hash) + template<typename ForwardKey> bool add__impl(ForwardKey &&key, const uint64_t hash) { this->ensure_can_add(); VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.is_empty()) { - uint32_t index = this->size(); - new (m_keys + index) Key(std::forward<ForwardKey>(key)); - m_occupied_and_removed_slots++; + int64_t index = this->size(); + new (keys_ + index) Key(std::forward<ForwardKey>(key)); + occupied_and_removed_slots_++; slot.occupy(index, hash); return true; } - if (slot.contains(key, m_is_equal, hash, m_keys)) { + if (slot.contains(key, is_equal_, hash, keys_)) { return false; } } VECTOR_SET_SLOT_PROBING_END(); } - template<typename ForwardKey> uint32_t index_of__impl(const ForwardKey &key, uint32_t hash) const + template<typename ForwardKey> + int64_t index_of__impl(const ForwardKey &key, const uint64_t hash) const { BLI_assert(this->contains_as(key)); VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash, m_keys)) { + if (slot.contains(key, is_equal_, hash, keys_)) { return slot.index(); } } @@ -627,11 +606,11 @@ class VectorSet { } template<typename ForwardKey> - int32_t index_of_try__impl(const ForwardKey &key, uint32_t hash) const + int64_t index_of_try__impl(const ForwardKey &key, const uint64_t hash) const { VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash, m_keys)) { - return (int32_t)slot.index(); + if (slot.contains(key, is_equal_, hash, keys_)) { + return slot.index(); } if (slot.is_empty()) { return -1; @@ -644,12 +623,12 @@ class VectorSet { { BLI_assert(this->size() > 0); - uint32_t index_to_pop = this->size() - 1; - Key key = std::move(m_keys[index_to_pop]); - m_keys[index_to_pop].~Key(); - uint32_t hash = m_hash(key); + const int64_t index_to_pop = this->size() - 1; + Key key = std::move(keys_[index_to_pop]); + keys_[index_to_pop].~Key(); + const 
uint64_t hash = hash_(key); - m_removed_slots++; + removed_slots_++; VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.has_index(index_to_pop)) { @@ -660,10 +639,10 @@ class VectorSet { VECTOR_SET_SLOT_PROBING_END(); } - template<typename ForwardKey> bool remove__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> bool remove__impl(const ForwardKey &key, const uint64_t hash) { VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash, m_keys)) { + if (slot.contains(key, is_equal_, hash, keys_)) { this->remove_key_internal(slot); return true; } @@ -674,12 +653,13 @@ class VectorSet { VECTOR_SET_SLOT_PROBING_END(); } - template<typename ForwardKey> void remove_contained__impl(const ForwardKey &key, uint32_t hash) + template<typename ForwardKey> + void remove_contained__impl(const ForwardKey &key, const uint64_t hash) { BLI_assert(this->contains_as(key)); VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash, m_keys)) { + if (slot.contains(key, is_equal_, hash, keys_)) { this->remove_key_internal(slot); return; } @@ -689,24 +669,24 @@ class VectorSet { void remove_key_internal(Slot &slot) { - uint32_t index_to_remove = slot.index(); - uint32_t size = this->size(); - uint32_t last_element_index = size - 1; + int64_t index_to_remove = slot.index(); + int64_t size = this->size(); + int64_t last_element_index = size - 1; if (index_to_remove < last_element_index) { - m_keys[index_to_remove] = std::move(m_keys[last_element_index]); - this->update_slot_index(m_keys[index_to_remove], last_element_index, index_to_remove); + keys_[index_to_remove] = std::move(keys_[last_element_index]); + this->update_slot_index(keys_[index_to_remove], last_element_index, index_to_remove); } - m_keys[last_element_index].~Key(); + keys_[last_element_index].~Key(); slot.remove(); - m_removed_slots++; + removed_slots_++; return; } - void update_slot_index(const Key &key, uint32_t old_index, uint32_t new_index) 
+ void update_slot_index(const Key &key, const int64_t old_index, const int64_t new_index) { - uint32_t hash = m_hash(key); + uint64_t hash = hash_(key); VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { if (slot.has_index(old_index)) { slot.update_index(new_index); @@ -717,12 +697,12 @@ class VectorSet { } template<typename ForwardKey> - uint32_t count_collisions__impl(const ForwardKey &key, uint32_t hash) const + int64_t count_collisions__impl(const ForwardKey &key, const uint64_t hash) const { - uint32_t collisions = 0; + int64_t collisions = 0; VECTOR_SET_SLOT_PROBING_BEGIN (hash, slot) { - if (slot.contains(key, m_is_equal, hash, m_keys)) { + if (slot.contains(key, is_equal_, hash, keys_)) { return collisions; } if (slot.is_empty()) { @@ -735,23 +715,34 @@ class VectorSet { void ensure_can_add() { - if (m_occupied_and_removed_slots >= m_usable_slots) { + if (occupied_and_removed_slots_ >= usable_slots_) { this->realloc_and_reinsert(this->size() + 1); - BLI_assert(m_occupied_and_removed_slots < m_usable_slots); + BLI_assert(occupied_and_removed_slots_ < usable_slots_); } } - Key *allocate_keys_array(uint32_t size) + Key *allocate_keys_array(const int64_t size) { - return (Key *)m_slots.allocator().allocate((uint32_t)sizeof(Key) * size, alignof(Key), AT); + return (Key *)slots_.allocator().allocate(sizeof(Key) * (size_t)size, alignof(Key), AT); } void deallocate_keys_array(Key *keys) { - m_slots.allocator().deallocate(keys); + slots_.allocator().deallocate(keys); } }; +/** + * Same as a normal VectorSet, but does not use Blender's guarded allocator. This is useful when + * allocating memory with static storage duration. 
+ */ +template<typename Key, + typename ProbingStrategy = DefaultProbingStrategy, + typename Hash = DefaultHash<Key>, + typename IsEqual = DefaultEquality, + typename Slot = typename DefaultVectorSetSlot<Key>::type> +using RawVectorSet = VectorSet<Key, ProbingStrategy, Hash, IsEqual, Slot, RawAllocator>; + } // namespace blender #endif /* __BLI_VECTOR_SET_HH__ */ diff --git a/source/blender/blenlib/BLI_vector_set_slots.hh b/source/blender/blenlib/BLI_vector_set_slots.hh index 25148866b6c..49e6d4daedb 100644 --- a/source/blender/blenlib/BLI_vector_set_slots.hh +++ b/source/blender/blenlib/BLI_vector_set_slots.hh @@ -53,7 +53,7 @@ template<typename Key> class SimpleVectorSetSlot { /** * After the default constructor has run, the slot has to be in the empty state. */ - int32_t m_state = s_is_empty; + int64_t state_ = s_is_empty; public: /** @@ -61,7 +61,7 @@ template<typename Key> class SimpleVectorSetSlot { */ bool is_occupied() const { - return m_state >= 0; + return state_ >= 0; } /** @@ -69,16 +69,16 @@ template<typename Key> class SimpleVectorSetSlot { */ bool is_empty() const { - return m_state == s_is_empty; + return state_ == s_is_empty; } /** * Return the stored index. It is assumed that the slot is occupied. */ - uint32_t index() const + int64_t index() const { BLI_assert(this->is_occupied()); - return (uint32_t)m_state; + return state_; } /** @@ -88,11 +88,11 @@ template<typename Key> class SimpleVectorSetSlot { template<typename ForwardKey, typename IsEqual> bool contains(const ForwardKey &key, const IsEqual &is_equal, - uint32_t UNUSED(hash), + uint64_t UNUSED(hash), const Key *keys) const { - if (m_state >= 0) { - return is_equal(key, keys[m_state]); + if (state_ >= 0) { + return is_equal(key, keys[state_]); } return false; } @@ -102,31 +102,31 @@ template<typename Key> class SimpleVectorSetSlot { * we can avoid a comparison with the state, since we know the slot is occupied. For this * specific slot implementation, this does not make a difference. 
*/ - void relocate_occupied_here(SimpleVectorSetSlot &other, uint32_t UNUSED(hash)) + void relocate_occupied_here(SimpleVectorSetSlot &other, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); BLI_assert(other.is_occupied()); - m_state = other.m_state; + state_ = other.state_; } /** * Change the state of this slot from empty/removed to occupied. The hash can be used by other * slot implementations. */ - void occupy(uint32_t index, uint32_t UNUSED(hash)) + void occupy(int64_t index, uint64_t UNUSED(hash)) { BLI_assert(!this->is_occupied()); - m_state = (int32_t)index; + state_ = index; } /** * The key has changed its position in the vector, so the index has to be updated. This method * can assume that the slot is currently occupied. */ - void update_index(uint32_t index) + void update_index(int64_t index) { BLI_assert(this->is_occupied()); - m_state = (int32_t)index; + state_ = index; } /** @@ -135,22 +135,22 @@ template<typename Key> class SimpleVectorSetSlot { void remove() { BLI_assert(this->is_occupied()); - m_state = s_is_removed; + state_ = s_is_removed; } /** * Return true if this slot is currently occupied and its corresponding key has the given index. */ - bool has_index(uint32_t index) const + bool has_index(int64_t index) const { - return (uint32_t)m_state == index; + return state_ == index; } /** * Return the hash of the currently stored key. In this simple set slot implementation, we just * compute the hash here. Other implementations might store the hash in the slot instead. 
*/ - template<typename Hash> uint32_t get_hash(const Key &key, const Hash &hash) const + template<typename Hash> uint64_t get_hash(const Key &key, const Hash &hash) const { BLI_assert(this->is_occupied()); return hash(key); diff --git a/source/blender/blenlib/BLI_voxel.h b/source/blender/blenlib/BLI_voxel.h index 220ab9b3705..82854c57928 100644 --- a/source/blender/blenlib/BLI_voxel.h +++ b/source/blender/blenlib/BLI_voxel.h @@ -35,10 +35,13 @@ extern "C" { (int64_t)(z) * (int64_t)(res)[0] * (int64_t)(res)[1]) /* all input coordinates must be in bounding box 0.0 - 1.0 */ -float BLI_voxel_sample_nearest(float *data, const int res[3], const float co[3]); -float BLI_voxel_sample_trilinear(float *data, const int res[3], const float co[3]); -float BLI_voxel_sample_triquadratic(float *data, const int res[3], const float co[3]); -float BLI_voxel_sample_tricubic(float *data, const int res[3], const float co[3], int bspline); +float BLI_voxel_sample_nearest(const float *data, const int res[3], const float co[3]); +float BLI_voxel_sample_trilinear(const float *data, const int res[3], const float co[3]); +float BLI_voxel_sample_triquadratic(const float *data, const int res[3], const float co[3]); +float BLI_voxel_sample_tricubic(const float *data, + const int res[3], + const float co[3], + int bspline); #ifdef __cplusplus } diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt index 69df0505dfe..4997917a93f 100644 --- a/source/blender/blenlib/CMakeLists.txt +++ b/source/blender/blenlib/CMakeLists.txt @@ -86,6 +86,7 @@ set(SRC intern/listbase.c intern/math_base.c intern/math_base_inline.c + intern/math_base_safe_inline.c intern/math_bits_inline.c intern/math_color.c intern/math_color_blend_inline.c @@ -105,7 +106,7 @@ set(SRC intern/polyfill_2d.c intern/polyfill_2d_beautify.c intern/quadric.c - intern/rand.c + intern/rand.cc intern/rct.c intern/scanfill.c intern/scanfill_utils.c @@ -124,7 +125,7 @@ set(SRC intern/task_pool.cc 
intern/task_range.cc intern/task_scheduler.cc - intern/threads.c + intern/threads.cc intern/time.c intern/timecode.c intern/timeit.cc @@ -163,6 +164,7 @@ set(SRC BLI_convexhull_2d.h BLI_delaunay_2d.h BLI_dial_2d.h + BLI_disjoint_set.hh BLI_dlrbTree.h BLI_dot_export.hh BLI_dot_export_attribute_enums.hh @@ -197,6 +199,7 @@ set(SRC BLI_kdtree.h BLI_kdtree_impl.h BLI_lasso_2d.h + BLI_linear_allocator.hh BLI_link_utils.h BLI_linklist.h BLI_linklist_lockfree.h @@ -207,6 +210,7 @@ set(SRC BLI_map_slots.hh BLI_math.h BLI_math_base.h + BLI_math_base_safe.h BLI_math_bits.h BLI_math_color.h BLI_math_color_blend.h @@ -225,14 +229,15 @@ set(SRC BLI_memory_utils.hh BLI_mempool.h BLI_noise.h - BLI_optional.hh BLI_path_util.h BLI_polyfill_2d.h BLI_polyfill_2d_beautify.h BLI_probing_strategies.hh BLI_quadric.h BLI_rand.h + BLI_rand.hh BLI_rect.h + BLI_resource_collector.hh BLI_scanfill.h BLI_set.hh BLI_set_slots.hh @@ -262,6 +267,7 @@ set(SRC BLI_utility_mixins.hh BLI_uvproject.h BLI_vector.hh + BLI_vector_adaptor.hh BLI_vector_set.hh BLI_vector_set_slots.hh BLI_vfontdata.h @@ -325,6 +331,7 @@ endif() # no need to compile object files for inline headers. 
set_source_files_properties( intern/math_base_inline.c + intern/math_base_safe_inline.c intern/math_bits_inline.c intern/math_color_blend_inline.c intern/math_color_inline.c @@ -334,3 +341,29 @@ set_source_files_properties( ) blender_add_lib(bf_blenlib "${SRC}" "${INC}" "${INC_SYS}" "${LIB}") + +if(WITH_GTESTS) + set(TEST_SRC + tests/BLI_array_test.cc + tests/BLI_disjoint_set_test.cc + tests/BLI_edgehash_test.cc + tests/BLI_index_mask_test.cc + tests/BLI_index_range_test.cc + tests/BLI_linear_allocator_test.cc + tests/BLI_map_test.cc + tests/BLI_math_base_safe_test.cc + tests/BLI_memory_utils_test.cc + tests/BLI_multi_value_map_test.cc + tests/BLI_set_test.cc + tests/BLI_span_test.cc + tests/BLI_stack_cxx_test.cc + tests/BLI_string_ref_test.cc + tests/BLI_vector_set_test.cc + tests/BLI_vector_test.cc + ) + set(TEST_LIB + bf_blenlib + ) + include(GTestTesting) + blender_add_test_lib(bf_bli_tests "${TEST_SRC}" "${INC};${TEST_INC}" "${INC_SYS}" "${LIB};${TEST_LIB}") +endif() diff --git a/source/blender/blenlib/intern/BLI_ghash_utils.c b/source/blender/blenlib/intern/BLI_ghash_utils.c index 83bf0373ae7..d6a4b24682f 100644 --- a/source/blender/blenlib/intern/BLI_ghash_utils.c +++ b/source/blender/blenlib/intern/BLI_ghash_utils.c @@ -86,25 +86,6 @@ bool BLI_ghashutil_uinthash_v4_cmp(const void *a, const void *b) return (memcmp(a, b, sizeof(uint[4])) != 0); } -uint BLI_ghashutil_uinthash_v2(const uint key[2]) -{ - uint hash; - hash = key[0]; - hash *= 37; - hash += key[1]; - return hash; -} - -uint BLI_ghashutil_uinthash_v2_murmur(const uint key[2]) -{ - return BLI_hash_mm2((const unsigned char *)key, sizeof(int) * 2 /* sizeof(key) */, 0); -} - -bool BLI_ghashutil_uinthash_v2_cmp(const void *a, const void *b) -{ - return (memcmp(a, b, sizeof(uint[2])) != 0); -} - uint BLI_ghashutil_uinthash(uint key) { key += ~(key << 16); diff --git a/source/blender/blenlib/intern/BLI_index_range.cc b/source/blender/blenlib/intern/BLI_index_range.cc index 910e418a29b..43c6265a17d 100644 
--- a/source/blender/blenlib/intern/BLI_index_range.cc +++ b/source/blender/blenlib/intern/BLI_index_range.cc @@ -24,28 +24,28 @@ namespace blender { -static Vector<Array<uint, 0, RawAllocator>, 1, RawAllocator> arrays; -static uint current_array_size = 0; -static uint *current_array = nullptr; +static RawVector<RawArray<int64_t, 0>> arrays; +static int64_t current_array_size = 0; +static int64_t *current_array = nullptr; static std::mutex current_array_mutex; -Span<uint> IndexRange::as_span() const +Span<int64_t> IndexRange::as_span() const { - uint min_required_size = m_start + m_size; + int64_t min_required_size = start_ + size_; if (min_required_size <= current_array_size) { - return Span<uint>(current_array + m_start, m_size); + return Span<int64_t>(current_array + start_, size_); } std::lock_guard<std::mutex> lock(current_array_mutex); if (min_required_size <= current_array_size) { - return Span<uint>(current_array + m_start, m_size); + return Span<int64_t>(current_array + start_, size_); } - uint new_size = std::max<uint>(1000, power_of_2_max_u(min_required_size)); - Array<uint, 0, RawAllocator> new_array(new_size); - for (uint i = 0; i < new_size; i++) { + int64_t new_size = std::max<int64_t>(1000, power_of_2_max_u(min_required_size)); + RawArray<int64_t, 0> new_array(new_size); + for (int64_t i = 0; i < new_size; i++) { new_array[i] = i; } arrays.append(std::move(new_array)); @@ -54,7 +54,7 @@ Span<uint> IndexRange::as_span() const std::atomic_thread_fence(std::memory_order_seq_cst); current_array_size = new_size; - return Span<uint>(current_array + m_start, m_size); + return Span<int64_t>(current_array + start_, size_); } } // namespace blender diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c index da67baf0ead..a3f93ccc753 100644 --- a/source/blender/blenlib/intern/BLI_kdopbvh.c +++ b/source/blender/blenlib/intern/BLI_kdopbvh.c @@ -94,7 +94,7 @@ typedef struct BVHNode { struct BVHTree { BVHNode 
**nodes; BVHNode *nodearray; /* pre-alloc branch nodes */ - BVHNode **nodechild; /* pre-alloc childs for nodes */ + BVHNode **nodechild; /* pre-alloc children for nodes */ float *nodebv; /* pre-alloc bounding-volumes for nodes */ float epsilon; /* epslion is used for inflation of the k-dop */ int totleaf; /* leafs */ @@ -169,6 +169,12 @@ typedef struct BVHNearestProjectedData { } BVHNearestProjectedData; +typedef struct BVHIntersectPlaneData { + const BVHTree *tree; + float plane[4]; + struct BLI_Stack *intersect; /* Store indexes. */ +} BVHIntersectPlaneData; + /** \} */ /** @@ -769,7 +775,7 @@ static void non_recursive_bvh_div_nodes_task_cb(void *__restrict userdata, * This functions builds an optimal implicit tree from the given leafs. * Where optimal stands for: * - The resulting tree will have the smallest number of branches; - * - At most only one branch will have NULL childs; + * - At most only one branch will have NULL children; * - All leafs will be stored at level N or N+1. * * This function creates an implicit tree on branches_array, @@ -777,7 +783,7 @@ static void non_recursive_bvh_div_nodes_task_cb(void *__restrict userdata, * * The tree is built per depth levels. First branches at depth 1.. then branches at depth 2.. etc.. * The reason is that we can build level N+1 from level N without any data dependencies.. - * thus it allows to use multithread building. + * thus it allows to use multi-thread building. * * To archive this is necessary to find how much leafs are accessible from a certain branch, * #BVHBuildHelper, #implicit_needed_branches and #implicit_leafs_index @@ -1032,12 +1038,14 @@ bool BLI_bvhtree_update_node( return true; } -/* call BLI_bvhtree_update_node() first for every node/point/triangle */ +/** + * Call #BLI_bvhtree_update_node() first for every node/point/triangle. 
+ */ void BLI_bvhtree_update_tree(BVHTree *tree) { /* Update bottom=>top - * TRICKY: the way we build the tree all the childs have an index greater than the parent - * This allows us todo a bottom up update by starting on the bigger numbered branch */ + * TRICKY: the way we build the tree all the children have an index greater than the parent + * This allows us todo a bottom up update by starting on the bigger numbered branch. */ BVHNode **root = tree->nodes + tree->totleaf; BVHNode **index = tree->nodes + tree->totleaf + tree->totbranch - 1; @@ -1391,6 +1399,71 @@ BVHTreeOverlap *BLI_bvhtree_overlap( /** \} */ /* -------------------------------------------------------------------- */ +/** \name BLI_bvhtree_intersect_plane + * \{ */ + +static bool tree_intersect_plane_test(const float *bv, const float plane[4]) +{ + /* TODO(germano): Support other kdop geometries. */ + const float bb_min[3] = {bv[0], bv[2], bv[4]}; + const float bb_max[3] = {bv[1], bv[3], bv[5]}; + float bb_near[3], bb_far[3]; + aabb_get_near_far_from_plane(plane, bb_min, bb_max, bb_near, bb_far); + if ((plane_point_side_v3(plane, bb_near) > 0.0f) != + (plane_point_side_v3(plane, bb_far) > 0.0f)) { + return true; + } + + return false; +} + +static void bvhtree_intersect_plane_dfs_recursive(BVHIntersectPlaneData *__restrict data, + const BVHNode *node) +{ + if (tree_intersect_plane_test(node->bv, data->plane)) { + /* check if node is a leaf */ + if (!node->totnode) { + int *intersect = BLI_stack_push_r(data->intersect); + *intersect = node->index; + } + else { + for (int j = 0; j < data->tree->tree_type; j++) { + if (node->children[j]) { + bvhtree_intersect_plane_dfs_recursive(data, node->children[j]); + } + } + } + } +} + +int *BLI_bvhtree_intersect_plane(BVHTree *tree, float plane[4], uint *r_intersect_tot) +{ + int *intersect = NULL; + size_t total = 0; + + if (tree->totleaf) { + BVHIntersectPlaneData data; + data.tree = tree; + copy_v4_v4(data.plane, plane); + data.intersect = 
BLI_stack_new(sizeof(int), __func__); + + BVHNode *root = tree->nodes[tree->totleaf]; + bvhtree_intersect_plane_dfs_recursive(&data, root); + + total = BLI_stack_count(data.intersect); + if (total) { + intersect = MEM_mallocN(sizeof(int) * total, __func__); + BLI_stack_pop_n(data.intersect, intersect, (uint)total); + } + BLI_stack_free(data.intersect); + } + *r_intersect_tot = (uint)total; + return intersect; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ /** \name BLI_bvhtree_find_nearest * \{ */ @@ -2309,7 +2382,7 @@ static bool bvhtree_walk_dfs_recursive(BVHTree_WalkData *walk_data, const BVHNod } /** - * This is a generic function to perform a depth first search on the BVHTree + * This is a generic function to perform a depth first search on the #BVHTree * where the search order and nodes traversed depend on callbacks passed in. * * \param tree: Tree to walk. @@ -2317,7 +2390,7 @@ static bool bvhtree_walk_dfs_recursive(BVHTree_WalkData *walk_data, const BVHNod * \param walk_leaf_cb: Callback to test leaf nodes, callback must store its own result, * returning false exits early. * \param walk_order_cb: Callback that indicates which direction to search, - * either from the node with the lower or higher k-dop axis value. + * either from the node with the lower or higher K-DOP axis value. * \param userdata: Argument passed to all callbacks. 
*/ void BLI_bvhtree_walk_dfs(BVHTree *tree, diff --git a/source/blender/blenlib/intern/BLI_linklist.c b/source/blender/blenlib/intern/BLI_linklist.c index 5020e06d0b3..dc5d20ece99 100644 --- a/source/blender/blenlib/intern/BLI_linklist.c +++ b/source/blender/blenlib/intern/BLI_linklist.c @@ -74,6 +74,16 @@ LinkNode *BLI_linklist_find(LinkNode *list, int index) return NULL; } +LinkNode *BLI_linklist_find_last(LinkNode *list) +{ + if (list) { + while (list->next) { + list = list->next; + } + } + return list; +} + void BLI_linklist_reverse(LinkNode **listp) { LinkNode *rhead = NULL, *cur = *listp; diff --git a/source/blender/blenlib/intern/bitmap.c b/source/blender/blenlib/intern/bitmap.c index d24047397fb..54edcaec2c8 100644 --- a/source/blender/blenlib/intern/bitmap.c +++ b/source/blender/blenlib/intern/bitmap.c @@ -20,7 +20,7 @@ /** \file * \ingroup bli * - * Utility functions for variable size bitmasks. + * Utility functions for variable size bit-masks. */ #include <limits.h> diff --git a/source/blender/blenlib/intern/delaunay_2d.c b/source/blender/blenlib/intern/delaunay_2d.c index 4e0cd3a78dc..5f663dcb2e1 100644 --- a/source/blender/blenlib/intern/delaunay_2d.c +++ b/source/blender/blenlib/intern/delaunay_2d.c @@ -405,7 +405,8 @@ static CDTEdge *add_vert_to_symedge_edge(CDT_state *cdt, CDTVert *v, SymEdge *se return e; } -/* Connect the verts of se1 and se2, assuming that currently those two SymEdges are on +/** + * Connect the verts of se1 and se2, assuming that currently those two #SymEdges are on * the outer boundary (have face == outer_face) of two components that are isolated from * each other. */ @@ -479,7 +480,7 @@ static CDTEdge *split_edge(CDT_state *cdt, SymEdge *se, double lambda) * the deleted edge will be the one that was e's face. * There will be now an unused face, marked by setting its deleted flag, * and an unused #CDTEdge, marked by setting the next and rot pointers of - * its SymEdges to NULL. + * its #SymEdge(s) to NULL. * <pre> * . v2 . 
* / \ / \ @@ -951,7 +952,7 @@ static void initial_triangulation(CDT_state *cdt) } #endif - /* Now dedup according to user-defined epsilon. + /* Now de-duplicate according to user-defined epsilon. * We will merge a vertex into an earlier-indexed vertex * that is within epsilon (Euclidean distance). * Merges may cascade. So we may end up merging two things @@ -1017,7 +1018,9 @@ static void initial_triangulation(CDT_state *cdt) MEM_freeN(sites); } -/** Use LinkNode linked list as stack of SymEdges, allocating from cdt->listpool. */ +/** + * Use #LinkNode linked list as stack of #SymEdges, allocating from `cdt->listpool` . + */ typedef LinkNode *Stack; BLI_INLINE void push(Stack *stack, SymEdge *se, CDT_state *cdt) @@ -1153,22 +1156,22 @@ static double tri_orient(const SymEdge *t) * in the path we will take to insert an edge constraint. * Each such point will either be * (a) a vertex or - * (b) a fraction lambda (0 < lambda < 1) along some SymEdge.] + * (b) a fraction lambda (0 < lambda < 1) along some #SymEdge.] * * In general, lambda=0 indicates case a and lambda != 0 indicates case be. * The 'in' edge gives the destination attachment point of a diagonal from the previous crossing, * and the 'out' edge gives the origin attachment point of a diagonal to the next crossing. * But in some cases, 'in' and 'out' are undefined or not needed, and will be NULL. * - * For case (a), 'vert' will be the vertex, and lambda will be 0, and 'in' will be the SymEdge from - * 'vert' that has as face the one that you go through to get to this vertex. If you go exactly - * along an edge then we set 'in' to NULL, since it won't be needed. The first crossing will have - * 'in' = NULL. We set 'out' to the SymEdge that has the face we go though to get to the next - * crossing, or, if the next crossing is a case (a), then it is the edge that goes to that next - * vertex. 'out' wlll be NULL for the last one. 
+ * For case (a), 'vert' will be the vertex, and lambda will be 0, and 'in' will be the #SymEdge + * from 'vert' that has as face the one that you go through to get to this vertex. If you go + * exactly along an edge then we set 'in' to NULL, since it won't be needed. The first crossing + * will have 'in' = NULL. We set 'out' to the #SymEdge that has the face we go though to get to the + * next crossing, or, if the next crossing is a case (a), then it is the edge that goes to that + * next vertex. 'out' wlll be NULL for the last one. * * For case (b), vert will be NULL at first, and later filled in with the created split vertex, - * and 'in' will be the SymEdge that we go through, and lambda will be between 0 and 1, + * and 'in' will be the #SymEdge that we go through, and lambda will be between 0 and 1, * the fraction from in's vert to in->next's vert to put the split vertex. * 'out' is not needed in this case, since the attachment point will be the sym of the first * half of the split edge. @@ -1231,8 +1234,8 @@ static void fill_crossdata_for_through_vert(CDTVert *v, /** * As part of finding crossings, we found a case where orient tests say that the next crossing - * is on the SymEdge t, while intersecting with the ray from curco to v2. - * Find the intersection point and fill in the CrossData for that point. + * is on the #SymEdge t, while intersecting with the ray from \a curco to \a v2. + * Find the intersection point and fill in the #CrossData for that point. * It may turn out that when doing the intersection, we get an answer that says that * this case is better handled as through-vertex case instead, so we may do that. 
* In the latter case, we want to avoid a situation where the current crossing is on an edge @@ -1442,12 +1445,12 @@ static bool get_next_crossing_from_vert(CDT_state *cdt, } /** - * As part of finding the crossings of a ray to v2, find the next crossing after 'cd', assuming + * As part of finding the crossings of a ray to 'v2', find the next crossing after 'cd', assuming * 'cd' represents a crossing that goes through a an edge, not at either end of that edge. * - * We have the triangle vb-va-vc, where va and vb are the split edge and vc is the third vertex on - * that new side of the edge (should be closer to v2). The next crossing should be through vc or - * intersecting vb-vc or va-vc. + * We have the triangle 'vb-va-vc', where va and vb are the split edge and 'vc' is the third vertex + * on that new side of the edge (should be closer to v2). The next crossing should be through 'vc' + * or intersecting 'vb-vc' or 'va-vc'. */ static void get_next_crossing_from_edge(CDT_state *cdt, CrossData *cd, @@ -4317,7 +4320,7 @@ static void exactinit(void) */ static int fast_expansion_sum_zeroelim( - int elen, double *e, int flen, double *f, double *h) /* h cannot be e or f. */ + int elen, const double *e, int flen, const double *f, double *h) /* h cannot be e or f. */ { double Q; INEXACT double Qnew; @@ -4402,7 +4405,7 @@ static int fast_expansion_sum_zeroelim( */ static int scale_expansion_zeroelim(int elen, - double *e, + const double *e, double b, double *h) /* e and h cannot be the same. */ { @@ -4448,7 +4451,7 @@ static int scale_expansion_zeroelim(int elen, * See either version of my paper for details. 
*/ -static double estimate(int elen, double *e) +static double estimate(int elen, const double *e) { double Q; int eindex; diff --git a/source/blender/blenlib/intern/dot_export.cc b/source/blender/blenlib/intern/dot_export.cc index a2cf843c473..48b6dc826d0 100644 --- a/source/blender/blenlib/intern/dot_export.cc +++ b/source/blender/blenlib/intern/dot_export.cc @@ -18,8 +18,7 @@ #include "BLI_dot_export.hh" -namespace blender { -namespace DotExport { +namespace blender::dot { /* Graph Building ************************************************/ @@ -27,8 +26,8 @@ namespace DotExport { Node &Graph::new_node(StringRef label) { Node *node = new Node(*this); - m_nodes.append(std::unique_ptr<Node>(node)); - m_top_level_nodes.add_new(node); + nodes_.append(std::unique_ptr<Node>(node)); + top_level_nodes_.add_new(node); node->set_attribute("label", label); return *node; } @@ -36,8 +35,8 @@ Node &Graph::new_node(StringRef label) Cluster &Graph::new_cluster(StringRef label) { Cluster *cluster = new Cluster(*this); - m_clusters.append(std::unique_ptr<Cluster>(cluster)); - m_top_level_clusters.add_new(cluster); + clusters_.append(std::unique_ptr<Cluster>(cluster)); + top_level_clusters_.add_new(cluster); cluster->set_attribute("label", label); return *cluster; } @@ -45,55 +44,55 @@ Cluster &Graph::new_cluster(StringRef label) UndirectedEdge &UndirectedGraph::new_edge(NodePort a, NodePort b) { UndirectedEdge *edge = new UndirectedEdge(a, b); - m_edges.append(std::unique_ptr<UndirectedEdge>(edge)); + edges_.append(std::unique_ptr<UndirectedEdge>(edge)); return *edge; } DirectedEdge &DirectedGraph::new_edge(NodePort from, NodePort to) { DirectedEdge *edge = new DirectedEdge(from, to); - m_edges.append(std::unique_ptr<DirectedEdge>(edge)); + edges_.append(std::unique_ptr<DirectedEdge>(edge)); return *edge; } void Cluster::set_parent_cluster(Cluster *new_parent) { - if (m_parent == new_parent) { + if (parent_ == new_parent) { return; } - else if (m_parent == nullptr) { - 
m_graph.m_top_level_clusters.remove(this); - new_parent->m_children.add_new(this); + else if (parent_ == nullptr) { + graph_.top_level_clusters_.remove(this); + new_parent->children_.add_new(this); } else if (new_parent == nullptr) { - m_parent->m_children.remove(this); - m_graph.m_top_level_clusters.add_new(this); + parent_->children_.remove(this); + graph_.top_level_clusters_.add_new(this); } else { - m_parent->m_children.remove(this); - new_parent->m_children.add_new(this); + parent_->children_.remove(this); + new_parent->children_.add_new(this); } - m_parent = new_parent; + parent_ = new_parent; } void Node::set_parent_cluster(Cluster *cluster) { - if (m_cluster == cluster) { + if (cluster_ == cluster) { return; } - else if (m_cluster == nullptr) { - m_graph.m_top_level_nodes.remove(this); - cluster->m_nodes.add_new(this); + else if (cluster_ == nullptr) { + graph_.top_level_nodes_.remove(this); + cluster->nodes_.add_new(this); } else if (cluster == nullptr) { - m_cluster->m_nodes.remove(this); - m_graph.m_top_level_nodes.add_new(this); + cluster_->nodes_.remove(this); + graph_.top_level_nodes_.add_new(this); } else { - m_cluster->m_nodes.remove(this); - cluster->m_nodes.add_new(this); + cluster_->nodes_.remove(this); + cluster->nodes_.add_new(this); } - m_cluster = cluster; + cluster_ = cluster; } /* Utility methods @@ -101,7 +100,7 @@ void Node::set_parent_cluster(Cluster *cluster) void Graph::set_random_cluster_bgcolors() { - for (Cluster *cluster : m_top_level_clusters) { + for (Cluster *cluster : top_level_clusters_) { cluster->set_random_cluster_bgcolors(); } } @@ -113,7 +112,7 @@ void Cluster::set_random_cluster_bgcolors() float value = 0.8f; this->set_attribute("bgcolor", color_attr_from_hsv(hue, staturation, value)); - for (Cluster *cluster : m_children) { + for (Cluster *cluster : children_) { cluster->set_random_cluster_bgcolors(); } } @@ -128,7 +127,7 @@ std::string DirectedGraph::to_dot_string() const this->export__declare_nodes_and_clusters(ss); 
ss << "\n"; - for (const std::unique_ptr<DirectedEdge> &edge : m_edges) { + for (const std::unique_ptr<DirectedEdge> &edge : edges_) { edge->export__as_edge_statement(ss); ss << "\n"; } @@ -144,7 +143,7 @@ std::string UndirectedGraph::to_dot_string() const this->export__declare_nodes_and_clusters(ss); ss << "\n"; - for (const std::unique_ptr<UndirectedEdge> &edge : m_edges) { + for (const std::unique_ptr<UndirectedEdge> &edge : edges_) { edge->export__as_edge_statement(ss); ss << "\n"; } @@ -156,14 +155,14 @@ std::string UndirectedGraph::to_dot_string() const void Graph::export__declare_nodes_and_clusters(std::stringstream &ss) const { ss << "graph "; - m_attributes.export__as_bracket_list(ss); + attributes_.export__as_bracket_list(ss); ss << "\n\n"; - for (Node *node : m_top_level_nodes) { + for (Node *node : top_level_nodes_) { node->export__as_declaration(ss); } - for (Cluster *cluster : m_top_level_clusters) { + for (Cluster *cluster : top_level_clusters_) { cluster->export__declare_nodes_and_clusters(ss); } } @@ -173,14 +172,14 @@ void Cluster::export__declare_nodes_and_clusters(std::stringstream &ss) const ss << "subgraph cluster_" << (uintptr_t)this << " {\n"; ss << "graph "; - m_attributes.export__as_bracket_list(ss); + attributes_.export__as_bracket_list(ss); ss << "\n\n"; - for (Node *node : m_nodes) { + for (Node *node : nodes_) { node->export__as_declaration(ss); } - for (Cluster *cluster : m_children) { + for (Cluster *cluster : children_) { cluster->export__declare_nodes_and_clusters(ss); } @@ -189,26 +188,26 @@ void Cluster::export__declare_nodes_and_clusters(std::stringstream &ss) const void DirectedEdge::export__as_edge_statement(std::stringstream &ss) const { - m_a.to_dot_string(ss); + a_.to_dot_string(ss); ss << " -> "; - m_b.to_dot_string(ss); + b_.to_dot_string(ss); ss << " "; - m_attributes.export__as_bracket_list(ss); + attributes_.export__as_bracket_list(ss); } void UndirectedEdge::export__as_edge_statement(std::stringstream &ss) const { - 
m_a.to_dot_string(ss); + a_.to_dot_string(ss); ss << " -- "; - m_b.to_dot_string(ss); + b_.to_dot_string(ss); ss << " "; - m_attributes.export__as_bracket_list(ss); + attributes_.export__as_bracket_list(ss); } void AttributeList::export__as_bracket_list(std::stringstream &ss) const { ss << "["; - m_attributes.foreach_item([&](StringRef key, StringRef value) { + attributes_.foreach_item([&](StringRef key, StringRef value) { if (StringRef(value).startswith("<")) { /* Don't draw the quotes, this is an html-like value. */ ss << key << "=" << value << ", "; @@ -229,15 +228,15 @@ void Node::export__as_declaration(std::stringstream &ss) const { this->export__as_id(ss); ss << " "; - m_attributes.export__as_bracket_list(ss); + attributes_.export__as_bracket_list(ss); ss << "\n"; } void NodePort::to_dot_string(std::stringstream &ss) const { - m_node->export__as_id(ss); - if (m_port_name.has_value()) { - ss << ":" << m_port_name.value(); + node_->export__as_id(ss); + if (port_name_.has_value()) { + ss << ":" << *port_name_; } } @@ -252,7 +251,7 @@ NodeWithSocketsRef::NodeWithSocketsRef(Node &node, StringRef name, Span<std::string> input_names, Span<std::string> output_names) - : m_node(&node) + : node_(&node) { std::stringstream ss; @@ -264,8 +263,8 @@ NodeWithSocketsRef::NodeWithSocketsRef(Node &node, ss << "</b></td></tr>"; /* Sockets */ - uint socket_max_amount = std::max(input_names.size(), output_names.size()); - for (uint i = 0; i < socket_max_amount; i++) { + int socket_max_amount = std::max(input_names.size(), output_names.size()); + for (int i = 0; i < socket_max_amount; i++) { ss << "<tr>"; if (i < input_names.size()) { StringRef name = input_names[i]; @@ -297,9 +296,8 @@ NodeWithSocketsRef::NodeWithSocketsRef(Node &node, ss << "</table>>"; - m_node->set_attribute("label", ss.str()); - m_node->set_shape(Attr_shape::Rectangle); + node_->set_attribute("label", ss.str()); + node_->set_shape(Attr_shape::Rectangle); } -} // namespace DotExport -} // namespace blender +} 
// namespace blender::dot diff --git a/source/blender/blenlib/intern/edgehash.c b/source/blender/blenlib/intern/edgehash.c index 556c0a65fc5..56529581dd3 100644 --- a/source/blender/blenlib/intern/edgehash.c +++ b/source/blender/blenlib/intern/edgehash.c @@ -376,7 +376,7 @@ bool BLI_edgehash_ensure_p(EdgeHash *eh, uint v0, uint v1, void ***r_value) * Remove \a key (v0, v1) from \a eh, or return false if the key wasn't found. * * \param v0, v1: The key to remove. - * \param valfreefp: Optional callback to free the value. + * \param free_value: Optional callback to free the value. * \return true if \a key was removed from \a eh. */ bool BLI_edgehash_remove(EdgeHash *eh, uint v0, uint v1, EdgeHashFreeFP free_value) diff --git a/source/blender/blenlib/intern/expr_pylike_eval.c b/source/blender/blenlib/intern/expr_pylike_eval.c index d1d84dab3f7..f8618c54ea4 100644 --- a/source/blender/blenlib/intern/expr_pylike_eval.c +++ b/source/blender/blenlib/intern/expr_pylike_eval.c @@ -72,6 +72,8 @@ typedef enum eOpCode { OPCODE_FUNC1, /* 2 argument function call: (a b -> func2(a,b)) */ OPCODE_FUNC2, + /* 3 argument function call: (a b c -> func3(a,b,c)) */ + OPCODE_FUNC3, /* Parameter access: (-> params[ival]) */ OPCODE_PARAMETER, /* Minimum of multiple inputs: (a b c... 
-> min); ival = arg count */ @@ -92,6 +94,7 @@ typedef enum eOpCode { typedef double (*UnaryOpFunc)(double); typedef double (*BinaryOpFunc)(double, double); +typedef double (*TernaryOpFunc)(double, double, double); typedef struct ExprOp { eOpCode opcode; @@ -104,6 +107,7 @@ typedef struct ExprOp { void *ptr; UnaryOpFunc func1; BinaryOpFunc func2; + TernaryOpFunc func3; } arg; } ExprOp; @@ -216,6 +220,11 @@ eExprPyLike_EvalStatus BLI_expr_pylike_eval(ExprPyLike_Parsed *expr, stack[sp - 2] = ops[pc].arg.func2(stack[sp - 2], stack[sp - 1]); sp--; break; + case OPCODE_FUNC3: + FAIL_IF(sp < 3); + stack[sp - 3] = ops[pc].arg.func3(stack[sp - 3], stack[sp - 2], stack[sp - 1]); + sp -= 2; + break; case OPCODE_MIN: FAIL_IF(sp < ops[pc].arg.ival); for (int j = 1; j < ops[pc].arg.ival; j++, sp--) { @@ -326,6 +335,35 @@ static double op_degrees(double arg) return arg * 180.0 / M_PI; } +static double op_log2(double a, double b) +{ + return log(a) / log(b); +} + +static double op_lerp(double a, double b, double x) +{ + return a * (1.0 - x) + b * x; +} + +static double op_clamp(double arg) +{ + CLAMP(arg, 0.0, 1.0); + return arg; +} + +static double op_clamp3(double arg, double minv, double maxv) +{ + CLAMP(arg, minv, maxv); + return arg; +} + +static double op_smoothstep(double a, double b, double x) +{ + double t = (x - a) / (b - a); + CLAMP(t, 0.0, 1.0); + return t * t * (3.0 - 2.0 * t); +} + static double op_not(double a) { return a ? 
0.0 : 1.0; @@ -390,6 +428,7 @@ static BuiltinOpDef builtin_ops[] = { {"floor", OPCODE_FUNC1, floor}, {"ceil", OPCODE_FUNC1, ceil}, {"trunc", OPCODE_FUNC1, trunc}, + {"round", OPCODE_FUNC1, round}, {"int", OPCODE_FUNC1, trunc}, {"sin", OPCODE_FUNC1, sin}, {"cos", OPCODE_FUNC1, cos}, @@ -400,9 +439,14 @@ static BuiltinOpDef builtin_ops[] = { {"atan2", OPCODE_FUNC2, atan2}, {"exp", OPCODE_FUNC1, exp}, {"log", OPCODE_FUNC1, log}, + {"log", OPCODE_FUNC2, op_log2}, {"sqrt", OPCODE_FUNC1, sqrt}, {"pow", OPCODE_FUNC2, pow}, {"fmod", OPCODE_FUNC2, fmod}, + {"lerp", OPCODE_FUNC3, op_lerp}, + {"clamp", OPCODE_FUNC1, op_clamp}, + {"clamp", OPCODE_FUNC3, op_clamp3}, + {"smoothstep", OPCODE_FUNC3, op_smoothstep}, {NULL, OPCODE_CONST, NULL}, }; @@ -514,6 +558,22 @@ static void parse_set_jump(ExprParseState *state, int jump) state->ops[jump - 1].jmp_offset = state->ops_count - jump; } +/* Returns the required argument count of the given function call code. */ +static int opcode_arg_count(eOpCode code) +{ + switch (code) { + case OPCODE_FUNC1: + return 1; + case OPCODE_FUNC2: + return 2; + case OPCODE_FUNC3: + return 3; + default: + BLI_assert(!"unexpected opcode"); + return -1; + } +} + /* Add a function call operation, applying constant folding when possible. */ static bool parse_add_func(ExprParseState *state, eOpCode code, int args, void *funcptr) { @@ -560,6 +620,27 @@ static bool parse_add_func(ExprParseState *state, eOpCode code, int args, void * } break; + case OPCODE_FUNC3: + CHECK_ERROR(args == 3); + + if (jmp_gap >= 3 && prev_ops[-3].opcode == OPCODE_CONST && + prev_ops[-2].opcode == OPCODE_CONST && prev_ops[-1].opcode == OPCODE_CONST) { + TernaryOpFunc func = funcptr; + + /* volatile because some compilers overly aggressive optimize this call out. + * see D6012 for details. 
*/ + volatile double result = func( + prev_ops[-3].arg.dval, prev_ops[-2].arg.dval, prev_ops[-1].arg.dval); + + if (fetestexcept(FE_DIVBYZERO | FE_INVALID) == 0) { + prev_ops[-3].arg.dval = result; + state->ops_count -= 2; + state->stack_ptr -= 2; + return true; + } + } + break; + default: BLI_assert(false); return false; @@ -755,6 +836,17 @@ static bool parse_unary(ExprParseState *state) if (STREQ(state->tokenbuf, builtin_ops[i].name)) { int args = parse_function_args(state); + /* Search for other arg count versions if necessary. */ + if (args != opcode_arg_count(builtin_ops[i].op)) { + for (int j = i + 1; builtin_ops[j].name; j++) { + if (opcode_arg_count(builtin_ops[j].op) == args && + STREQ(builtin_ops[j].name, builtin_ops[i].name)) { + i = j; + break; + } + } + } + return parse_add_func(state, builtin_ops[i].op, args, builtin_ops[i].funcptr); } } diff --git a/source/blender/blenlib/intern/fileops.c b/source/blender/blenlib/intern/fileops.c index b9133edcae3..e61cbd318fc 100644 --- a/source/blender/blenlib/intern/fileops.c +++ b/source/blender/blenlib/intern/fileops.c @@ -177,8 +177,9 @@ size_t BLI_gzip_mem_to_file_at_pos( strm.zfree = Z_NULL; strm.opaque = Z_NULL; ret = deflateInit(&strm, compression_level); - if (ret != Z_OK) + if (ret != Z_OK) { return 0; + } strm.avail_in = len; strm.next_in = (Bytef *)buf; @@ -224,8 +225,9 @@ size_t BLI_ungzip_file_to_mem_at_pos(void *buf, size_t len, FILE *file, size_t g strm.avail_in = 0; strm.next_in = Z_NULL; ret = inflateInit(&strm); - if (ret != Z_OK) + if (ret != Z_OK) { return 0; + } do { strm.avail_in = fread(in, 1, chunk, file); @@ -558,7 +560,7 @@ int BLI_move(const char *file, const char *to) /* windows doesn't support moving to a directory * it has to be 'mv filename filename' and not - * 'mv filename destdir' */ + * 'mv filename destination_directory' */ BLI_strncpy(str, to, sizeof(str)); /* points 'to' to a directory ? 
*/ diff --git a/source/blender/blenlib/intern/math_base_safe_inline.c b/source/blender/blenlib/intern/math_base_safe_inline.c new file mode 100644 index 00000000000..5600382e395 --- /dev/null +++ b/source/blender/blenlib/intern/math_base_safe_inline.c @@ -0,0 +1,79 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MATH_BASE_SAFE_INLINE_C__ +#define __MATH_BASE_SAFE_INLINE_C__ + +#include "BLI_math_base_safe.h" +#include "BLI_utildefines.h" + +#ifdef __cplusplus +extern "C" { +#endif + +MINLINE float safe_divide(float a, float b) +{ + return (b != 0.0f) ? a / b : 0.0f; +} + +MINLINE float safe_modf(float a, float b) +{ + return (b != 0.0f) ? fmodf(a, b) : 0.0f; +} + +MINLINE float safe_logf(float a, float base) +{ + if (UNLIKELY(a <= 0.0f || base <= 0.0f)) { + return 0.0f; + } + return safe_divide(logf(a), logf(base)); +} + +MINLINE float safe_sqrtf(float a) +{ + return sqrtf(MAX2(a, 0.0f)); +} + +MINLINE float safe_inverse_sqrtf(float a) +{ + return (a > 0.0f) ? 
1.0f / sqrtf(a) : 0.0f; +} + +MINLINE float safe_asinf(float a) +{ + CLAMP(a, -1.0f, 1.0f); + return asinf(a); +} + +MINLINE float safe_acosf(float a) +{ + CLAMP(a, -1.0f, 1.0f); + return acosf(a); +} + +MINLINE float safe_powf(float base, float exponent) +{ + if (UNLIKELY(base < 0.0f && exponent != (int)exponent)) { + return 0.0f; + } + return powf(base, exponent); +} + +#ifdef __cplusplus +} +#endif + +#endif /* __MATH_BASE_SAFE_INLINE_C__ */ diff --git a/source/blender/blenlib/intern/math_bits_inline.c b/source/blender/blenlib/intern/math_bits_inline.c index 8f8f257f1e7..e7a7b17e1e4 100644 --- a/source/blender/blenlib/intern/math_bits_inline.c +++ b/source/blender/blenlib/intern/math_bits_inline.c @@ -63,7 +63,7 @@ MINLINE unsigned int bitscan_reverse_uint(unsigned int a) #ifdef _MSC_VER unsigned long clz; _BitScanReverse(&clz, a); - return clz; + return 31 - clz; #else return (unsigned int)__builtin_clz(a); #endif diff --git a/source/blender/blenlib/intern/math_color.c b/source/blender/blenlib/intern/math_color.c index 625849c01df..651a062e3d5 100644 --- a/source/blender/blenlib/intern/math_color.c +++ b/source/blender/blenlib/intern/math_color.c @@ -503,8 +503,12 @@ int constrain_rgb(float *r, float *g, float *b) /* ********************** lift/gamma/gain / ASC-CDL conversion ********************************* */ -void lift_gamma_gain_to_asc_cdl( - float *lift, float *gamma, float *gain, float *offset, float *slope, float *power) +void lift_gamma_gain_to_asc_cdl(const float *lift, + const float *gamma, + const float *gain, + float *offset, + float *slope, + float *power) { int c; for (c = 0; c < 3; c++) { diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c index e7c1fc8c2d9..937bf8b1ae6 100644 --- a/source/blender/blenlib/intern/math_geom.c +++ b/source/blender/blenlib/intern/math_geom.c @@ -630,7 +630,7 @@ float dist_squared_ray_to_seg_v3(const float ray_origin[3], float *r_depth) { float lambda, depth; - if 
(isect_ray_seg_v3(ray_origin, ray_direction, v0, v1, &lambda)) { + if (isect_ray_line_v3(ray_origin, ray_direction, v0, v1, &lambda)) { if (lambda <= 0.0f) { copy_v3_v3(r_point, v0); } @@ -2129,11 +2129,11 @@ bool isect_ray_seg_v2(const float ray_origin[2], return false; } -bool isect_ray_seg_v3(const float ray_origin[3], - const float ray_direction[3], - const float v0[3], - const float v1[3], - float *r_lambda) +bool isect_ray_line_v3(const float ray_origin[3], + const float ray_direction[3], + const float v0[3], + const float v1[3], + float *r_lambda) { float a[3], t[3], n[3]; sub_v3_v3v3(a, v1, v0); @@ -3248,19 +3248,27 @@ bool isect_ray_aabb_v3_simple(const float orig[3], } } -/* find closest point to p on line through (l1, l2) and return lambda, - * where (0 <= lambda <= 1) when cp is in the line segment (l1, l2) +float closest_to_ray_v3(float r_close[3], + const float p[3], + const float ray_orig[3], + const float ray_dir[3]) +{ + float h[3], lambda; + sub_v3_v3v3(h, p, ray_orig); + lambda = dot_v3v3(ray_dir, h) / dot_v3v3(ray_dir, ray_dir); + madd_v3_v3v3fl(r_close, ray_orig, ray_dir, lambda); + return lambda; +} + +/** + * Find closest point to p on line through (l1, l2) and return lambda, + * where (0 <= lambda <= 1) when cp is in the line segment (l1, l2). 
*/ float closest_to_line_v3(float r_close[3], const float p[3], const float l1[3], const float l2[3]) { - float h[3], u[3], lambda; + float u[3]; sub_v3_v3v3(u, l2, l1); - sub_v3_v3v3(h, p, l1); - lambda = dot_v3v3(u, h) / dot_v3v3(u, u); - r_close[0] = l1[0] + u[0] * lambda; - r_close[1] = l1[1] + u[1] * lambda; - r_close[2] = l1[2] + u[2] * lambda; - return lambda; + return closest_to_ray_v3(r_close, p, l1, u); } float closest_to_line_v2(float r_close[2], const float p[2], const float l1[2], const float l2[2]) @@ -4168,8 +4176,8 @@ int interp_sparse_array(float *array, const int list_size, const float skipval) #define DIR_V2_SET(d_len, va, vb) \ { \ - sub_v2_v2v2((d_len)->dir, va, vb); \ - (d_len)->len = len_v2((d_len)->dir); \ + sub_v2db_v2fl_v2fl((d_len)->dir, va, vb); \ + (d_len)->len = len_v2_db((d_len)->dir); \ } \ (void)0 @@ -4177,8 +4185,8 @@ struct Float3_Len { float dir[3], len; }; -struct Float2_Len { - float dir[2], len; +struct Double2_Len { + double dir[2], len; }; /* Mean value weights - smooth interpolation weights for polygons with @@ -4201,21 +4209,30 @@ static float mean_value_half_tan_v3(const struct Float3_Len *d_curr, return 0.0f; } -static float mean_value_half_tan_v2(const struct Float2_Len *d_curr, - const struct Float2_Len *d_next) +/** + * Mean value weights - same as #mean_value_half_tan_v3 but for 2D vectors. + * + * \note When interpolating a 2D polygon, a point can be considered "outside" + * the polygon's bounds. Thus, when the point is very distant and the vectors + * have relatively close values, the precision problems are evident since they + * do not indicate a point "inside" the polygon. + * To resolve this, doubles are used. + */ +static double mean_value_half_tan_v2_db(const struct Double2_Len *d_curr, + const struct Double2_Len *d_next) { - /* different from the 3d version but still correct */ - const float area = cross_v2v2(d_curr->dir, d_next->dir); + /* Different from the 3d version but still correct. 
*/ + const double area = cross_v2v2_db(d_curr->dir, d_next->dir); /* Compare against zero since 'FLT_EPSILON' can be too large, see: T73348. */ - if (LIKELY(area != 0.0f)) { - const float dot = dot_v2v2(d_curr->dir, d_next->dir); - const float len = d_curr->len * d_next->len; - const float result = (len - dot) / area; + if (LIKELY(area != 0.0)) { + const double dot = dot_v2v2_db(d_curr->dir, d_next->dir); + const double len = d_curr->len * d_next->len; + const double result = (len - dot) / area; if (isfinite(result)) { return result; } } - return 0.0f; + return 0.0; } void interp_weights_poly_v3(float *w, float v[][3], const int n, const float co[3]) @@ -4257,7 +4274,7 @@ void interp_weights_poly_v3(float *w, float v[][3], const int n, const float co[ * to borders of face. * In that case, do simple linear interpolation between the two edge vertices */ - /* 'd_next.len' is infact 'd_curr.len', just avoid copy to begin with */ + /* 'd_next.len' is in fact 'd_curr.len', just avoid copy to begin with */ if (UNLIKELY(d_next.len < eps)) { ix_flag = IS_POINT_IX; break; @@ -4320,11 +4337,11 @@ void interp_weights_poly_v2(float *w, float v[][2], const int n, const float co[ const float eps_sq = eps * eps; const float *v_curr, *v_next; - float ht_prev, ht; /* half tangents */ + double ht_prev, ht; /* half tangents */ float totweight = 0.0f; int i_curr, i_next; char ix_flag = 0; - struct Float2_Len d_curr, d_next; + struct Double2_Len d_curr, d_next; /* loop over 'i_next' */ i_curr = n - 1; @@ -4335,14 +4352,14 @@ void interp_weights_poly_v2(float *w, float v[][2], const int n, const float co[ DIR_V2_SET(&d_curr, v_curr - 2 /* v[n - 2] */, co); DIR_V2_SET(&d_next, v_curr /* v[n - 1] */, co); - ht_prev = mean_value_half_tan_v2(&d_curr, &d_next); + ht_prev = mean_value_half_tan_v2_db(&d_curr, &d_next); while (i_next < n) { /* Mark Mayer et al algorithm that is used here does not operate well if vertex is close * to borders of face. 
In that case, * do simple linear interpolation between the two edge vertices */ - /* 'd_next.len' is infact 'd_curr.len', just avoid copy to begin with */ + /* 'd_next.len' is in fact 'd_curr.len', just avoid copy to begin with */ if (UNLIKELY(d_next.len < eps)) { ix_flag = IS_POINT_IX; break; @@ -4354,8 +4371,8 @@ void interp_weights_poly_v2(float *w, float v[][2], const int n, const float co[ d_curr = d_next; DIR_V2_SET(&d_next, v_next, co); - ht = mean_value_half_tan_v2(&d_curr, &d_next); - w[i_curr] = (ht_prev + ht) / d_curr.len; + ht = mean_value_half_tan_v2_db(&d_curr, &d_next); + w[i_curr] = (float)((ht_prev + ht) / d_curr.len); totweight += w[i_curr]; /* step */ @@ -4872,6 +4889,37 @@ void projmat_dimensions(const float projmat[4][4], } } +void projmat_dimensions_db(const float projmat_fl[4][4], + double *r_left, + double *r_right, + double *r_bottom, + double *r_top, + double *r_near, + double *r_far) +{ + double projmat[4][4]; + copy_m4d_m4(projmat, projmat_fl); + + bool is_persp = projmat[3][3] == 0.0f; + + if (is_persp) { + *r_left = (projmat[2][0] - 1.0) / projmat[0][0]; + *r_right = (projmat[2][0] + 1.0) / projmat[0][0]; + *r_bottom = (projmat[2][1] - 1.0) / projmat[1][1]; + *r_top = (projmat[2][1] + 1.0) / projmat[1][1]; + *r_near = projmat[3][2] / (projmat[2][2] - 1.0); + *r_far = projmat[3][2] / (projmat[2][2] + 1.0); + } + else { + *r_left = (-projmat[3][0] - 1.0) / projmat[0][0]; + *r_right = (-projmat[3][0] + 1.0) / projmat[0][0]; + *r_bottom = (-projmat[3][1] - 1.0) / projmat[1][1]; + *r_top = (-projmat[3][1] + 1.0) / projmat[1][1]; + *r_near = (projmat[3][2] + 1.0) / projmat[2][2]; + *r_far = (projmat[3][2] - 1.0) / projmat[2][2]; + } +} + /** * Creates a projection matrix for a small region of the viewport. 
* diff --git a/source/blender/blenlib/intern/math_matrix.c b/source/blender/blenlib/intern/math_matrix.c index 9e398239bc7..fadd7d83444 100644 --- a/source/blender/blenlib/intern/math_matrix.c +++ b/source/blender/blenlib/intern/math_matrix.c @@ -248,7 +248,7 @@ void swap_m4m4(float m1[4][4], float m2[4][4]) } } -void shuffle_m4(float R[4][4], int index[4]) +void shuffle_m4(float R[4][4], const int index[4]) { zero_m4(R); for (int k = 0; k < 4; k++) { @@ -2388,6 +2388,22 @@ void interp_m3_m3m3(float R[3][3], const float A[3][3], const float B[3][3], con mat3_polar_decompose(A, U_A, P_A); mat3_polar_decompose(B, U_B, P_B); + /* Quaternions cannot represent an axis flip. If such a singularity is detected, choose a + * different decomposition of the matrix that still satisfies A = U_A * P_A but which has a + * positive determinant and thus no axis flips. This resolves T77154. + * + * Note that a flip of two axes is just a rotation of 180 degrees around the third axis, and + * three flipped axes are just an 180 degree rotation + a single axis flip. It is thus sufficient + * to solve this problem for single axis flips. */ + if (determinant_m3_array(U_A) < 0) { + mul_m3_fl(U_A, -1.0f); + mul_m3_fl(P_A, -1.0f); + } + if (determinant_m3_array(U_B) < 0) { + mul_m3_fl(U_B, -1.0f); + mul_m3_fl(P_B, -1.0f); + } + mat3_to_quat(quat_A, U_A); mat3_to_quat(quat_B, U_B); interp_qt_qtqt(quat, quat_A, quat_B, t); diff --git a/source/blender/blenlib/intern/math_rotation.c b/source/blender/blenlib/intern/math_rotation.c index a91cdabe3ab..a2f7cc24dd3 100644 --- a/source/blender/blenlib/intern/math_rotation.c +++ b/source/blender/blenlib/intern/math_rotation.c @@ -543,8 +543,8 @@ void rotation_between_quats_to_quat(float q[4], const float q1[4], const float q * * \param q: input quaternion. * \param axis: twist axis in [0,1,2] - * \param r_swing[out]: if not NULL, receives the swing quaternion. - * \param r_twist[out]: if not NULL, receives the twist quaternion. 
+ * \param r_swing: if not NULL, receives the swing quaternion. + * \param r_twist: if not NULL, receives the twist quaternion. * \returns twist angle. */ float quat_split_swing_and_twist(const float q[4], int axis, float r_swing[4], float r_twist[4]) @@ -2127,7 +2127,7 @@ void mul_v3m3_dq(float co[3], float mat[3][3], DualQuat *dq) co[1] = (co[1] + t[1]) * len2; co[2] = (co[2] + t[2]) * len2; - /* compute crazyspace correction mat */ + /* Compute crazy-space correction matrix. */ if (mat) { if (dq->scale_weight) { copy_m3_m4(scalemat, dq->scale); diff --git a/source/blender/blenlib/intern/math_vector.c b/source/blender/blenlib/intern/math_vector.c index 6ec7c960d6b..7f1840228e2 100644 --- a/source/blender/blenlib/intern/math_vector.c +++ b/source/blender/blenlib/intern/math_vector.c @@ -307,7 +307,7 @@ void mid_v3_v3_array(float r[3], const float (*vec_arr)[3], const uint nbr) /** * Specialized function for calculating normals. - * fastpath for: + * Fast-path for: * * \code{.c} * add_v3_v3v3(r, a, b); diff --git a/source/blender/blenlib/intern/math_vector_inline.c b/source/blender/blenlib/intern/math_vector_inline.c index ca405907bdd..1b47832589e 100644 --- a/source/blender/blenlib/intern/math_vector_inline.c +++ b/source/blender/blenlib/intern/math_vector_inline.c @@ -509,6 +509,12 @@ MINLINE void sub_v3_v3v3_db(double r[3], const double a[3], const double b[3]) r[2] = a[2] - b[2]; } +MINLINE void sub_v2db_v2fl_v2fl(double r[2], const float a[2], const float b[2]) +{ + r[0] = (double)a[0] - (double)b[0]; + r[1] = (double)a[1] - (double)b[1]; +} + MINLINE void sub_v3db_v3fl_v3fl(double r[3], const float a[3], const float b[3]) { r[0] = (double)a[0] - (double)b[0]; @@ -917,6 +923,11 @@ MINLINE float cross_v2v2(const float a[2], const float b[2]) return a[0] * b[1] - a[1] * b[0]; } +MINLINE double cross_v2v2_db(const double a[2], const double b[2]) +{ + return a[0] * b[1] - a[1] * b[0]; +} + MINLINE void cross_v3_v3v3(float r[3], const float a[3], const float b[3]) 
{ BLI_assert(r != a && r != b); @@ -997,6 +1008,11 @@ MINLINE float len_v2(const float v[2]) return sqrtf(v[0] * v[0] + v[1] * v[1]); } +MINLINE double len_v2_db(const double v[2]) +{ + return sqrt(v[0] * v[0] + v[1] * v[1]); +} + MINLINE float len_v2v2(const float v1[2], const float v2[2]) { float x, y; diff --git a/source/blender/blenlib/intern/noise.c b/source/blender/blenlib/intern/noise.c index 42b5ba28f5a..1ae1c91a3bd 100644 --- a/source/blender/blenlib/intern/noise.c +++ b/source/blender/blenlib/intern/noise.c @@ -27,7 +27,7 @@ #include "BLI_noise.h" /* local */ -static float noise3_perlin(float vec[3]); +static float noise3_perlin(const float vec[3]); // static float turbulence_perlin(const float point[3], float lofreq, float hifreq); // static float turbulencep(float noisesize, float x, float y, float z, int nr); @@ -779,7 +779,7 @@ static const float g_perlin_data_v3[512 + 2][3] = { } \ (void)0 -static float noise3_perlin(float vec[3]) +static float noise3_perlin(const float vec[3]) { const char *p = g_perlin_data_ub; const float(*g)[3] = g_perlin_data_v3; diff --git a/source/blender/blenlib/intern/path_util.c b/source/blender/blenlib/intern/path_util.c index 2f51b66725b..d912cb8d464 100644 --- a/source/blender/blenlib/intern/path_util.c +++ b/source/blender/blenlib/intern/path_util.c @@ -242,7 +242,7 @@ void BLI_path_normalize(const char *relabase, char *path) /* Note: previous version of following call used an offset of 3 instead of 4, * which meant that the "/../home/me" example actually became "home/me". - * Using offset of 3 gives behavior consistent with the abovementioned + * Using offset of 3 gives behavior consistent with the aforementioned * Python routine. 
*/ memmove(path, path + 3, strlen(path + 3) + 1); } diff --git a/source/blender/blenlib/intern/rand.c b/source/blender/blenlib/intern/rand.cc index ab7a972e010..9bafc422db5 100644 --- a/source/blender/blenlib/intern/rand.c +++ b/source/blender/blenlib/intern/rand.cc @@ -30,6 +30,7 @@ #include "BLI_math.h" #include "BLI_rand.h" +#include "BLI_rand.hh" #include "BLI_threads.h" /* defines BLI_INLINE */ @@ -38,29 +39,22 @@ #include "BLI_strict_flags.h" #include "BLI_sys_types.h" -#define MULTIPLIER 0x5DEECE66Dll -#define MASK 0x0000FFFFFFFFFFFFll -#define MASK_BYTES 2 - -#define ADDEND 0xB -#define LOWSEED 0x330E - -extern unsigned char BLI_noise_hash_uchar_512[512]; /* noise.c */ +extern "C" unsigned char BLI_noise_hash_uchar_512[512]; /* noise.c */ #define hash BLI_noise_hash_uchar_512 /** * Random Number Generator. */ struct RNG { - uint64_t X; + blender::RandomNumberGenerator rng; + + MEM_CXX_CLASS_ALLOC_FUNCS("RNG") }; RNG *BLI_rng_new(unsigned int seed) { - RNG *rng = MEM_mallocN(sizeof(*rng), "rng"); - - BLI_rng_seed(rng, seed); - + RNG *rng = new RNG(); + rng->rng.seed(seed); return rng; } @@ -69,26 +63,24 @@ RNG *BLI_rng_new(unsigned int seed) */ RNG *BLI_rng_new_srandom(unsigned int seed) { - RNG *rng = MEM_mallocN(sizeof(*rng), "rng"); - - BLI_rng_srandom(rng, seed); - + RNG *rng = new RNG(); + rng->rng.seed_random(seed); return rng; } RNG *BLI_rng_copy(RNG *rng) { - return MEM_dupallocN(rng); + return new RNG(*rng); } void BLI_rng_free(RNG *rng) { - MEM_freeN(rng); + delete rng; } void BLI_rng_seed(RNG *rng, unsigned int seed) { - rng->X = (((uint64_t)seed) << 16) | LOWSEED; + rng->rng.seed(seed); } /** @@ -96,67 +88,22 @@ void BLI_rng_seed(RNG *rng, unsigned int seed) */ void BLI_rng_srandom(RNG *rng, unsigned int seed) { - BLI_rng_seed(rng, seed + hash[seed & 255]); - seed = BLI_rng_get_uint(rng); - BLI_rng_seed(rng, seed + hash[seed & 255]); - seed = BLI_rng_get_uint(rng); - BLI_rng_seed(rng, seed + hash[seed & 255]); -} - -BLI_INLINE void rng_step(RNG 
*rng) -{ - rng->X = (MULTIPLIER * rng->X + ADDEND) & MASK; + rng->rng.seed_random(seed); } void BLI_rng_get_char_n(RNG *rng, char *bytes, size_t bytes_len) { - size_t last_len = 0; - size_t trim_len = bytes_len; - -#define RAND_STRIDE (sizeof(rng->X) - MASK_BYTES) - - if (trim_len > RAND_STRIDE) { - last_len = trim_len % RAND_STRIDE; - trim_len = trim_len - last_len; - } - else { - trim_len = 0; - last_len = bytes_len; - } - - const char *data_src = (void *)&(rng->X); - size_t i = 0; - while (i != trim_len) { - BLI_assert(i < trim_len); -#ifdef __BIG_ENDIAN__ - for (size_t j = (RAND_STRIDE + MASK_BYTES) - 1; j != MASK_BYTES - 1; j--) -#else - for (size_t j = 0; j != RAND_STRIDE; j++) -#endif - { - bytes[i++] = data_src[j]; - } - rng_step(rng); - } - if (last_len) { - for (size_t j = 0; j != last_len; j++) { - bytes[i++] = data_src[j]; - } - } - -#undef RAND_STRIDE + rng->rng.get_bytes(blender::MutableSpan(bytes, (int64_t)bytes_len)); } int BLI_rng_get_int(RNG *rng) { - rng_step(rng); - return (int)(rng->X >> 17); + return rng->rng.get_int32(); } unsigned int BLI_rng_get_uint(RNG *rng) { - rng_step(rng); - return (unsigned int)(rng->X >> 17); + return rng->rng.get_uint32(); } /** @@ -164,7 +111,7 @@ unsigned int BLI_rng_get_uint(RNG *rng) */ double BLI_rng_get_double(RNG *rng) { - return (double)BLI_rng_get_int(rng) / 0x80000000; + return rng->rng.get_double(); } /** @@ -172,29 +119,17 @@ double BLI_rng_get_double(RNG *rng) */ float BLI_rng_get_float(RNG *rng) { - return (float)BLI_rng_get_int(rng) / 0x80000000; + return rng->rng.get_float(); } void BLI_rng_get_float_unit_v2(RNG *rng, float v[2]) { - float a = (float)(M_PI * 2.0) * BLI_rng_get_float(rng); - v[0] = cosf(a); - v[1] = sinf(a); + copy_v2_v2(v, rng->rng.get_unit_float2()); } void BLI_rng_get_float_unit_v3(RNG *rng, float v[3]) { - float r; - v[2] = (2.0f * BLI_rng_get_float(rng)) - 1.0f; - if ((r = 1.0f - (v[2] * v[2])) > 0.0f) { - float a = (float)(M_PI * 2.0) * BLI_rng_get_float(rng); - r = sqrtf(r); - 
v[0] = r * cosf(a); - v[1] = r * sinf(a); - } - else { - v[2] = 1.0f; - } + copy_v3_v3(v, rng->rng.get_unit_float3()); } /** @@ -203,27 +138,12 @@ void BLI_rng_get_float_unit_v3(RNG *rng, float v[3]) void BLI_rng_get_tri_sample_float_v2( RNG *rng, const float v1[2], const float v2[2], const float v3[2], float r_pt[2]) { - float u = BLI_rng_get_float(rng); - float v = BLI_rng_get_float(rng); - - float side_u[2], side_v[2]; - - if ((u + v) > 1.0f) { - u = 1.0f - u; - v = 1.0f - v; - } - - sub_v2_v2v2(side_u, v2, v1); - sub_v2_v2v2(side_v, v3, v1); - - copy_v2_v2(r_pt, v1); - madd_v2_v2fl(r_pt, side_u, u); - madd_v2_v2fl(r_pt, side_v, v); + copy_v2_v2(r_pt, rng->rng.get_triangle_sample(v1, v2, v3)); } void BLI_rng_shuffle_array(RNG *rng, void *data, unsigned int elem_size_i, unsigned int elem_tot) { - const size_t elem_size = (size_t)elem_size_i; + const uint elem_size = elem_size_i; unsigned int i = elem_tot; void *temp; @@ -254,9 +174,7 @@ void BLI_rng_shuffle_array(RNG *rng, void *data, unsigned int elem_size_i, unsig */ void BLI_rng_skip(RNG *rng, int n) { - while (n--) { - rng_step(rng); - } + rng->rng.skip((uint)n); } /***/ @@ -326,7 +244,8 @@ struct RNG_THREAD_ARRAY { RNG_THREAD_ARRAY *BLI_rng_threaded_new(void) { unsigned int i; - RNG_THREAD_ARRAY *rngarr = MEM_mallocN(sizeof(RNG_THREAD_ARRAY), "random_array"); + RNG_THREAD_ARRAY *rngarr = (RNG_THREAD_ARRAY *)MEM_mallocN(sizeof(RNG_THREAD_ARRAY), + "random_array"); for (i = 0; i < BLENDER_MAX_THREADS; i++) { BLI_rng_srandom(&rngarr->rng_tab[i], (unsigned int)clock()); @@ -382,7 +301,7 @@ void BLI_halton_1d(unsigned int prime, double offset, int n, double *r) } } -void BLI_halton_2d(unsigned int prime[2], double offset[2], int n, double *r) +void BLI_halton_2d(const unsigned int prime[2], double offset[2], int n, double *r) { const double invprimes[2] = {1.0 / (double)prime[0], 1.0 / (double)prime[1]}; @@ -395,7 +314,7 @@ void BLI_halton_2d(unsigned int prime[2], double offset[2], int n, double *r) } } -void 
BLI_halton_3d(unsigned int prime[3], double offset[3], int n, double *r) +void BLI_halton_3d(const unsigned int prime[3], double offset[3], int n, double *r) { const double invprimes[3] = { 1.0 / (double)prime[0], 1.0 / (double)prime[1], 1.0 / (double)prime[2]}; @@ -409,7 +328,7 @@ void BLI_halton_3d(unsigned int prime[3], double offset[3], int n, double *r) } } -void BLI_halton_2d_sequence(unsigned int prime[2], double offset[2], int n, double *r) +void BLI_halton_2d_sequence(const unsigned int prime[2], double offset[2], int n, double *r) { const double invprimes[2] = {1.0 / (double)prime[0], 1.0 / (double)prime[1]}; @@ -426,7 +345,7 @@ BLI_INLINE double radical_inverse(unsigned int n) { double u = 0; - /* This reverse the bitwise representation + /* This reverse the bit-wise representation * around the decimal point. */ for (double p = 0.5; n; p *= 0.5, n >>= 1) { if (n & 1) { @@ -449,3 +368,99 @@ void BLI_hammersley_2d_sequence(unsigned int n, double *r) r[s * 2 + 1] = radical_inverse(s); } } + +namespace blender { + +/** + * Set a randomized hash of the value as seed. + */ +void RandomNumberGenerator::seed_random(uint32_t seed) +{ + this->seed(seed + hash[seed & 255]); + seed = this->get_uint32(); + this->seed(seed + hash[seed & 255]); + seed = this->get_uint32(); + this->seed(seed + hash[seed & 255]); +} + +float2 RandomNumberGenerator::get_unit_float2() +{ + float a = (float)(M_PI * 2.0) * this->get_float(); + return {cosf(a), sinf(a)}; +} + +float3 RandomNumberGenerator::get_unit_float3() +{ + float z = (2.0f * this->get_float()) - 1.0f; + float r = 1.0f - z * z; + if (r > 0.0f) { + float a = (float)(M_PI * 2.0) * this->get_float(); + r = sqrtf(r); + float x = r * cosf(a); + float y = r * sinf(a); + return {x, y, z}; + } + return {0.0f, 0.0f, 1.0f}; +} + +/** + * Generate a random point inside the given triangle. 
+ */ +float2 RandomNumberGenerator::get_triangle_sample(float2 v1, float2 v2, float2 v3) +{ + float u = this->get_float(); + float v = this->get_float(); + + if (u + v > 1.0f) { + u = 1.0f - u; + v = 1.0f - v; + } + + float2 side_u = v2 - v1; + float2 side_v = v3 - v1; + + float2 sample = v1; + sample += side_u * u; + sample += side_v * v; + return sample; +} + +void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes) +{ + constexpr int64_t mask_bytes = 2; + constexpr int64_t rand_stride = (int64_t)sizeof(x_) - mask_bytes; + + int64_t last_len = 0; + int64_t trim_len = r_bytes.size(); + + if (trim_len > rand_stride) { + last_len = trim_len % rand_stride; + trim_len = trim_len - last_len; + } + else { + trim_len = 0; + last_len = r_bytes.size(); + } + + const char *data_src = (const char *)&x_; + int64_t i = 0; + while (i != trim_len) { + BLI_assert(i < trim_len); +#ifdef __BIG_ENDIAN__ + for (int64_t j = (rand_stride + mask_bytes) - 1; j != mask_bytes - 1; j--) +#else + for (int64_t j = 0; j != rand_stride; j++) +#endif + { + r_bytes[i++] = data_src[j]; + } + this->step(); + } + if (last_len) { + for (int64_t j = 0; j != last_len; j++) { + r_bytes[i++] = data_src[j]; + } + } +} + +} // namespace blender diff --git a/source/blender/blenlib/intern/rct.c b/source/blender/blenlib/intern/rct.c index ad8443683f8..bf3c8730b01 100644 --- a/source/blender/blenlib/intern/rct.c +++ b/source/blender/blenlib/intern/rct.c @@ -635,6 +635,14 @@ void BLI_rcti_resize(rcti *rect, int x, int y) rect->ymax = rect->ymin + y; } +void BLI_rcti_pad(rcti *rect, int pad_x, int pad_y) +{ + rect->xmin -= pad_x; + rect->ymin -= pad_y; + rect->xmax += pad_x; + rect->ymax += pad_y; +} + void BLI_rctf_resize(rctf *rect, float x, float y) { rect->xmin = BLI_rctf_cent_x(rect) - (x * 0.5f); diff --git a/source/blender/blenlib/intern/storage.c b/source/blender/blenlib/intern/storage.c index fbfb258693b..d3191148c90 100644 --- a/source/blender/blenlib/intern/storage.c +++ 
b/source/blender/blenlib/intern/storage.c @@ -53,9 +53,9 @@ # include "BLI_string_utf8.h" # include "BLI_winstuff.h" # include "utfconv.h" +# include <ShObjIdl.h> # include <direct.h> # include <io.h> -# include <shobjidl_core.h> # include <stdbool.h> #else # include <pwd.h> @@ -275,25 +275,26 @@ eFileAttributes BLI_file_attributes(const char *path) ret |= FILE_ATTR_REPARSE_POINT; } -# endif +# else -# ifdef __linux__ UNUSED_VARS(path); /* TODO: * If Immutable set FILE_ATTR_READONLY * If Archived set FILE_ATTR_ARCHIVE */ - # endif - return ret; } #endif /* Return alias/shortcut file target. Apple version is defined in storage_apple.mm */ #ifndef __APPLE__ -bool BLI_file_alias_target(char target[FILE_MAXDIR], const char *filepath) +bool BLI_file_alias_target( + /* This parameter can only be const on non-windows platforms. + * NOLINTNEXTLINE: readability-non-const-parameter. */ + char target[FILE_MAXDIR], + const char *filepath) { # ifdef WIN32 if (!BLI_path_extension_check(filepath, ".lnk")) { @@ -330,9 +331,7 @@ bool BLI_file_alias_target(char target[FILE_MAXDIR], const char *filepath) } return (success && target[0]); -# endif - -# ifdef __linux__ +# else UNUSED_VARS(target, filepath); /* File-based redirection not supported. */ return false; @@ -529,7 +528,7 @@ void *BLI_file_read_binary_as_mem(const char *filepath, size_t pad_bytes, size_t * Return the text file data with: * - Newlines replaced with '\0'. - * - Optionally trim whitespace, replacing trailing ' ' & '\t' with '\0'. + * - Optionally trim white-space, replacing trailing <space> & <tab> with '\0'. 
* * This is an alternative to using #BLI_file_read_as_lines, * allowing us to loop over lines without converting it into a linked list diff --git a/source/blender/blenlib/intern/string.c b/source/blender/blenlib/intern/string.c index ef137c4459e..abdae06acd5 100644 --- a/source/blender/blenlib/intern/string.c +++ b/source/blender/blenlib/intern/string.c @@ -409,7 +409,7 @@ char *BLI_str_quoted_substrN(const char *__restrict str, const char *__restrict /** * string with all instances of substr_old replaced with substr_new, - * Returns a copy of the cstring \a str into a newly mallocN'd + * Returns a copy of the c-string \a str into a newly #MEM_mallocN'd * and returns it. * * \note A rather wasteful string-replacement utility, though this shall do for now... @@ -430,53 +430,49 @@ char *BLI_str_replaceN(const char *__restrict str, BLI_assert(substr_old[0] != '\0'); - /* while we can still find a match for the old substring that we're searching for, - * keep dicing and replacing - */ + /* While we can still find a match for the old sub-string that we're searching for, + * keep dicing and replacing. */ while ((match = strstr(str, substr_old))) { /* the assembly buffer only gets created when we actually need to rebuild the string */ if (ds == NULL) { ds = BLI_dynstr_new(); } - /* if the match position does not match the current position in the string, - * copy the text up to this position and advance the current position in the string - */ + /* If the match position does not match the current position in the string, + * copy the text up to this position and advance the current position in the string. */ if (str != match) { - /* add the segment of the string from str to match to the buffer, - * then restore the value at match */ + /* Add the segment of the string from `str` to match to the buffer, + * then restore the value at match. 
*/ BLI_dynstr_nappend(ds, str, (match - str)); /* now our current position should be set on the start of the match */ str = match; } - /* add the replacement text to the accumulation buffer */ + /* Add the replacement text to the accumulation buffer. */ BLI_dynstr_append(ds, substr_new); - /* advance the current position of the string up to the end of the replaced segment */ + /* Advance the current position of the string up to the end of the replaced segment. */ str += len_old; } - /* finish off and return a new string that has had all occurrences of */ + /* Finish off and return a new string that has had all occurrences of. */ if (ds) { char *str_new; - /* add what's left of the string to the assembly buffer - * - we've been adjusting str to point at the end of the replaced segments - */ + /* Add what's left of the string to the assembly buffer + * - we've been adjusting `str` to point at the end of the replaced segments. */ BLI_dynstr_append(ds, str); - /* convert to new c-string (MEM_malloc'd), and free the buffer */ + /* Convert to new c-string (MEM_malloc'd), and free the buffer. */ str_new = BLI_dynstr_get_cstring(ds); BLI_dynstr_free(ds); return str_new; } else { - /* just create a new copy of the entire string - we avoid going through the assembly buffer - * for what should be a bit more efficiency... - */ + /* Just create a new copy of the entire string - we avoid going through the assembly buffer + * for what should be a bit more efficiency. */ return BLI_strdup(str); } } diff --git a/source/blender/blenlib/intern/system.c b/source/blender/blenlib/intern/system.c index 53db49aa59c..20edbb97561 100644 --- a/source/blender/blenlib/intern/system.c +++ b/source/blender/blenlib/intern/system.c @@ -111,7 +111,11 @@ void BLI_system_backtrace(FILE *fp) /* NOTE: The code for CPU brand string is adopted from Cycles. 
*/ #if !defined(_WIN32) || defined(FREE_WINDOWS) -static void __cpuid(int data[4], int selector) +static void __cpuid( + /* Cannot be const, because it is modified below. + * NOLINTNEXTLINE: readability-non-const-parameter. */ + int data[4], + int selector) { # if defined(__x86_64__) asm("cpuid" : "=a"(data[0]), "=b"(data[1]), "=c"(data[2]), "=d"(data[3]) : "a"(selector)); diff --git a/source/blender/blenlib/intern/task_range.cc b/source/blender/blenlib/intern/task_range.cc index 67d8960434e..27e0fb0ed07 100644 --- a/source/blender/blenlib/intern/task_range.cc +++ b/source/blender/blenlib/intern/task_range.cc @@ -62,7 +62,7 @@ struct RangeTask { } /* Splitting constructor for parallel reduce. */ - RangeTask(RangeTask &other, tbb::split) + RangeTask(RangeTask &other, tbb::split /* unused */) : func(other.func), userdata(other.userdata), settings(other.settings) { init_chunk(settings->userdata_chunk); diff --git a/source/blender/blenlib/intern/threads.c b/source/blender/blenlib/intern/threads.cc index be43c27e945..a8333d0c696 100644 --- a/source/blender/blenlib/intern/threads.c +++ b/source/blender/blenlib/intern/threads.cc @@ -47,6 +47,10 @@ # include <unistd.h> #endif +#ifdef WITH_TBB +# include <tbb/spin_mutex.h> +#endif + #include "atomic_ops.h" #include "numaapi.h" @@ -151,7 +155,7 @@ void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int t { int a; - if (threadbase != NULL && tot > 0) { + if (threadbase != nullptr && tot > 0) { BLI_listbase_clear(threadbase); if (tot > RE_MAX_THREAD) { @@ -162,7 +166,7 @@ void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int t } for (a = 0; a < tot; a++) { - ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot"); + ThreadSlot *tslot = static_cast<ThreadSlot *>(MEM_callocN(sizeof(ThreadSlot), "threadslot")); BLI_addtail(threadbase, tslot); tslot->do_thread = do_thread; tslot->avail = 1; @@ -172,9 +176,9 @@ void BLI_threadpool_init(ListBase *threadbase, void 
*(*do_thread)(void *), int t unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1); if (level == 0) { #ifdef USE_APPLE_OMP_FIX - /* workaround for Apple gcc 4.2.1 omp vs background thread bug, - * we copy gomp thread local storage pointer to setting it again - * inside the thread that we start */ + /* Workaround for Apple gcc 4.2.1 OMP vs background thread bug, + * we copy GOMP thread local storage pointer to setting it again + * inside the thread that we start. */ thread_tls_data = pthread_getspecific(gomp_tls_key); #endif } @@ -183,28 +187,29 @@ void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int t /* amount of available threads */ int BLI_available_threads(ListBase *threadbase) { - ThreadSlot *tslot; int counter = 0; - for (tslot = threadbase->first; tslot; tslot = tslot->next) { + LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { if (tslot->avail) { counter++; } } + return counter; } /* returns thread number, for sample patterns or threadsafe tables */ int BLI_threadpool_available_thread_index(ListBase *threadbase) { - ThreadSlot *tslot; int counter = 0; - for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) { + LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { if (tslot->avail) { return counter; } + ++counter; } + return 0; } @@ -213,8 +218,8 @@ static void *tslot_thread_start(void *tslot_p) ThreadSlot *tslot = (ThreadSlot *)tslot_p; #ifdef USE_APPLE_OMP_FIX - /* workaround for Apple gcc 4.2.1 omp vs background thread bug, - * set gomp thread local storage pointer which was copied beforehand */ + /* Workaround for Apple gcc 4.2.1 OMP vs background thread bug, + * set GOMP thread local storage pointer which was copied beforehand */ pthread_setspecific(gomp_tls_key, thread_tls_data); #endif @@ -228,13 +233,11 @@ int BLI_thread_is_main(void) void BLI_threadpool_insert(ListBase *threadbase, void *callerdata) { - ThreadSlot *tslot; - - for (tslot = threadbase->first; tslot; tslot = tslot->next) { + 
LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { if (tslot->avail) { tslot->avail = 0; tslot->callerdata = callerdata; - pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot); + pthread_create(&tslot->pthread, nullptr, tslot_thread_start, tslot); return; } } @@ -243,12 +246,10 @@ void BLI_threadpool_insert(ListBase *threadbase, void *callerdata) void BLI_threadpool_remove(ListBase *threadbase, void *callerdata) { - ThreadSlot *tslot; - - for (tslot = threadbase->first; tslot; tslot = tslot->next) { + LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { if (tslot->callerdata == callerdata) { - pthread_join(tslot->pthread, NULL); - tslot->callerdata = NULL; + pthread_join(tslot->pthread, nullptr); + tslot->callerdata = nullptr; tslot->avail = 1; } } @@ -256,27 +257,25 @@ void BLI_threadpool_remove(ListBase *threadbase, void *callerdata) void BLI_threadpool_remove_index(ListBase *threadbase, int index) { - ThreadSlot *tslot; int counter = 0; - for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) { + LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { if (counter == index && tslot->avail == 0) { - pthread_join(tslot->pthread, NULL); - tslot->callerdata = NULL; + pthread_join(tslot->pthread, nullptr); + tslot->callerdata = nullptr; tslot->avail = 1; break; } + ++counter; } } void BLI_threadpool_clear(ListBase *threadbase) { - ThreadSlot *tslot; - - for (tslot = threadbase->first; tslot; tslot = tslot->next) { + LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { if (tslot->avail == 0) { - pthread_join(tslot->pthread, NULL); - tslot->callerdata = NULL; + pthread_join(tslot->pthread, nullptr); + tslot->callerdata = nullptr; tslot->avail = 1; } } @@ -284,19 +283,20 @@ void BLI_threadpool_clear(ListBase *threadbase) void BLI_threadpool_end(ListBase *threadbase) { - ThreadSlot *tslot; /* only needed if there's actually some stuff to end * this way we don't end up decrementing thread_levels on an empty threadbase * */ - if (threadbase && 
(BLI_listbase_is_empty(threadbase) == false)) { - for (tslot = threadbase->first; tslot; tslot = tslot->next) { - if (tslot->avail == 0) { - pthread_join(tslot->pthread, NULL); - } + if (threadbase == nullptr || BLI_listbase_is_empty(threadbase)) { + return; + } + + LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { + if (tslot->avail == 0) { + pthread_join(tslot->pthread, nullptr); } - BLI_freelistN(threadbase); } + BLI_freelistN(threadbase); } /* System Information */ @@ -326,7 +326,7 @@ int BLI_system_thread_count(void) mib[0] = CTL_HW; mib[1] = HW_NCPU; len = sizeof(t); - sysctl(mib, 2, &t, &len, NULL, 0); + sysctl(mib, 2, &t, &len, nullptr, 0); # else t = (int)sysconf(_SC_NPROCESSORS_ONLN); # endif @@ -377,7 +377,7 @@ static ThreadMutex *global_mutex_from_type(const int type) return &_view3d_lock; default: BLI_assert(0); - return NULL; + return nullptr; } } @@ -395,7 +395,7 @@ void BLI_thread_unlock(int type) void BLI_mutex_init(ThreadMutex *mutex) { - pthread_mutex_init(mutex, NULL); + pthread_mutex_init(mutex, nullptr); } void BLI_mutex_lock(ThreadMutex *mutex) @@ -420,7 +420,7 @@ void BLI_mutex_end(ThreadMutex *mutex) ThreadMutex *BLI_mutex_alloc(void) { - ThreadMutex *mutex = MEM_callocN(sizeof(ThreadMutex), "ThreadMutex"); + ThreadMutex *mutex = static_cast<ThreadMutex *>(MEM_callocN(sizeof(ThreadMutex), "ThreadMutex")); BLI_mutex_init(mutex); return mutex; } @@ -433,10 +433,24 @@ void BLI_mutex_free(ThreadMutex *mutex) /* Spin Locks */ +#ifdef WITH_TBB +static tbb::spin_mutex *tbb_spin_mutex_cast(SpinLock *spin) +{ + static_assert(sizeof(SpinLock) >= sizeof(tbb::spin_mutex), + "SpinLock must match tbb::spin_mutex"); + static_assert(alignof(SpinLock) % alignof(tbb::spin_mutex) == 0, + "SpinLock must be aligned same as tbb::spin_mutex"); + return reinterpret_cast<tbb::spin_mutex *>(spin); +} +#endif + void BLI_spin_init(SpinLock *spin) { -#if defined(__APPLE__) - *spin = OS_SPINLOCK_INIT; +#ifdef WITH_TBB + tbb::spin_mutex *spin_mutex = 
tbb_spin_mutex_cast(spin); + new (spin_mutex) tbb::spin_mutex(); +#elif defined(__APPLE__) + BLI_mutex_init(spin); #elif defined(_MSC_VER) *spin = 0; #else @@ -446,8 +460,11 @@ void BLI_spin_init(SpinLock *spin) void BLI_spin_lock(SpinLock *spin) { -#if defined(__APPLE__) - OSSpinLockLock(spin); +#ifdef WITH_TBB + tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin); + spin_mutex->lock(); +#elif defined(__APPLE__) + BLI_mutex_lock(spin); #elif defined(_MSC_VER) while (InterlockedExchangeAcquire(spin, 1)) { while (*spin) { @@ -462,8 +479,11 @@ void BLI_spin_lock(SpinLock *spin) void BLI_spin_unlock(SpinLock *spin) { -#if defined(__APPLE__) - OSSpinLockUnlock(spin); +#ifdef WITH_TBB + tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin); + spin_mutex->unlock(); +#elif defined(__APPLE__) + BLI_mutex_unlock(spin); #elif defined(_MSC_VER) _ReadWriteBarrier(); *spin = 0; @@ -472,22 +492,25 @@ void BLI_spin_unlock(SpinLock *spin) #endif } -#if defined(__APPLE__) || defined(_MSC_VER) -void BLI_spin_end(SpinLock *UNUSED(spin)) -{ -} -#else void BLI_spin_end(SpinLock *spin) { +#ifdef WITH_TBB + tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin); + spin_mutex->~spin_mutex(); +#elif defined(__APPLE__) + BLI_mutex_end(spin); +#elif defined(_MSC_VER) + /* Nothing to do, spin is a simple integer type. 
*/ +#else pthread_spin_destroy(spin); -} #endif +} /* Read/Write Mutex Lock */ void BLI_rw_mutex_init(ThreadRWMutex *mutex) { - pthread_rwlock_init(mutex, NULL); + pthread_rwlock_init(mutex, nullptr); } void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode) @@ -512,7 +535,8 @@ void BLI_rw_mutex_end(ThreadRWMutex *mutex) ThreadRWMutex *BLI_rw_mutex_alloc(void) { - ThreadRWMutex *mutex = MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex"); + ThreadRWMutex *mutex = static_cast<ThreadRWMutex *>( + MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex")); BLI_rw_mutex_init(mutex); return mutex; } @@ -533,10 +557,11 @@ struct TicketMutex { TicketMutex *BLI_ticket_mutex_alloc(void) { - TicketMutex *ticket = MEM_callocN(sizeof(TicketMutex), "TicketMutex"); + TicketMutex *ticket = static_cast<TicketMutex *>( + MEM_callocN(sizeof(TicketMutex), "TicketMutex")); - pthread_cond_init(&ticket->cond, NULL); - pthread_mutex_init(&ticket->mutex, NULL); + pthread_cond_init(&ticket->cond, nullptr); + pthread_mutex_init(&ticket->mutex, nullptr); return ticket; } @@ -576,7 +601,7 @@ void BLI_ticket_mutex_unlock(TicketMutex *ticket) void BLI_condition_init(ThreadCondition *cond) { - pthread_cond_init(cond, NULL); + pthread_cond_init(cond, nullptr); } void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex) @@ -619,12 +644,12 @@ ThreadQueue *BLI_thread_queue_init(void) { ThreadQueue *queue; - queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue"); + queue = static_cast<ThreadQueue *>(MEM_callocN(sizeof(ThreadQueue), "ThreadQueue")); queue->queue = BLI_gsqueue_new(sizeof(void *)); - pthread_mutex_init(&queue->mutex, NULL); - pthread_cond_init(&queue->push_cond, NULL); - pthread_cond_init(&queue->finish_cond, NULL); + pthread_mutex_init(&queue->mutex, nullptr); + pthread_cond_init(&queue->push_cond, nullptr); + pthread_cond_init(&queue->finish_cond, nullptr); return queue; } @@ -654,7 +679,7 @@ void BLI_thread_queue_push(ThreadQueue *queue, void *work) void 
*BLI_thread_queue_pop(ThreadQueue *queue) { - void *work = NULL; + void *work = nullptr; /* wait until there is work */ pthread_mutex_lock(&queue->mutex); @@ -691,7 +716,7 @@ static void wait_timeout(struct timespec *timeout, int ms) #else { struct timeval now; - gettimeofday(&now, NULL); + gettimeofday(&now, nullptr); sec = now.tv_sec; usec = now.tv_usec; } @@ -714,7 +739,7 @@ static void wait_timeout(struct timespec *timeout, int ms) void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms) { double t; - void *work = NULL; + void *work = nullptr; struct timespec timeout; t = PIL_check_seconds_timer(); @@ -805,7 +830,7 @@ static bool check_is_threadripper2_alike_topology(void) } is_initialized = true; char *cpu_brand = BLI_cpu_brand_string(); - if (cpu_brand == NULL) { + if (cpu_brand == nullptr) { return false; } if (strstr(cpu_brand, "Threadripper")) { diff --git a/source/blender/blenlib/intern/timecode.c b/source/blender/blenlib/intern/timecode.c index 510b9651961..9586da941a4 100644 --- a/source/blender/blenlib/intern/timecode.c +++ b/source/blender/blenlib/intern/timecode.c @@ -36,7 +36,7 @@ #include "BLI_strict_flags.h" /** - * Generate timecode/frame number string and store in \a str + * Generate time-code/frame number string and store in \a str * * \param str: destination string * \param maxncpy: maximum number of characters to copy ``sizeof(str)`` @@ -44,7 +44,7 @@ * used to specify how detailed we need to be * \param time_seconds: time total time in seconds * \param fps: frames per second, typically from the #FPS macro - * \param timecode_style: enum from eTimecodeStyles + * \param timecode_style: enum from #eTimecodeStyles * \return length of \a str */ diff --git a/source/blender/blenlib/intern/timeit.cc b/source/blender/blenlib/intern/timeit.cc index 7938784da67..9e07e44ca12 100644 --- a/source/blender/blenlib/intern/timeit.cc +++ b/source/blender/blenlib/intern/timeit.cc @@ -16,8 +16,7 @@ #include "BLI_timeit.hh" -namespace blender { -namespace 
Timeit { +namespace blender::timeit { void print_duration(Nanoseconds duration) { @@ -32,5 +31,4 @@ void print_duration(Nanoseconds duration) } } -} // namespace Timeit -} // namespace blender +} // namespace blender::timeit diff --git a/source/blender/blenlib/intern/voronoi_2d.c b/source/blender/blenlib/intern/voronoi_2d.c index 59270c58341..bc11a2c7a1c 100644 --- a/source/blender/blenlib/intern/voronoi_2d.c +++ b/source/blender/blenlib/intern/voronoi_2d.c @@ -213,7 +213,7 @@ static void voronoiParabola_setRight(VoronoiParabola *parabola, VoronoiParabola right->parent = parabola; } -static float voronoi_getY(VoronoiProcess *process, float p[2], float x) +static float voronoi_getY(VoronoiProcess *process, const float p[2], float x) { float ly = process->current_y; diff --git a/source/blender/blenlib/intern/voxel.c b/source/blender/blenlib/intern/voxel.c index c7c794957c2..2c8eb9f5a13 100644 --- a/source/blender/blenlib/intern/voxel.c +++ b/source/blender/blenlib/intern/voxel.c @@ -26,7 +26,7 @@ #include "BLI_strict_flags.h" -BLI_INLINE float D(float *data, const int res[3], int x, int y, int z) +BLI_INLINE float D(const float *data, const int res[3], int x, int y, int z) { CLAMP(x, 0, res[0] - 1); CLAMP(y, 0, res[1] - 1); @@ -36,7 +36,7 @@ BLI_INLINE float D(float *data, const int res[3], int x, int y, int z) /* *** nearest neighbor *** */ /* input coordinates must be in bounding box 0.0 - 1.0 */ -float BLI_voxel_sample_nearest(float *data, const int res[3], const float co[3]) +float BLI_voxel_sample_nearest(const float *data, const int res[3], const float co[3]) { int xi, yi, zi; @@ -65,7 +65,7 @@ BLI_INLINE int64_t _clamp(int a, int b, int c) return (a < b) ? b : ((a > c) ? 
c : a); } -float BLI_voxel_sample_trilinear(float *data, const int res[3], const float co[3]) +float BLI_voxel_sample_trilinear(const float *data, const int res[3], const float co[3]) { if (data) { @@ -106,7 +106,7 @@ float BLI_voxel_sample_trilinear(float *data, const int res[3], const float co[3 return 0.f; } -float BLI_voxel_sample_triquadratic(float *data, const int res[3], const float co[3]) +float BLI_voxel_sample_triquadratic(const float *data, const int res[3], const float co[3]) { if (data) { @@ -161,7 +161,10 @@ float BLI_voxel_sample_triquadratic(float *data, const int res[3], const float c return 0.f; } -float BLI_voxel_sample_tricubic(float *data, const int res[3], const float co[3], int bspline) +float BLI_voxel_sample_tricubic(const float *data, + const int res[3], + const float co[3], + int bspline) { if (data) { diff --git a/source/blender/blenlib/tests/BLI_array_test.cc b/source/blender/blenlib/tests/BLI_array_test.cc new file mode 100644 index 00000000000..7348a6f93f3 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_array_test.cc @@ -0,0 +1,176 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_array.hh" +#include "BLI_strict_flags.h" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(array, DefaultConstructor) +{ + Array<int> array; + EXPECT_EQ(array.size(), 0); + EXPECT_TRUE(array.is_empty()); +} + +TEST(array, SizeConstructor) +{ + Array<int> array(5); + EXPECT_EQ(array.size(), 5); + EXPECT_FALSE(array.is_empty()); +} + +TEST(array, FillConstructor) +{ + Array<int> array(5, 8); + EXPECT_EQ(array.size(), 5); + EXPECT_EQ(array[0], 8); + EXPECT_EQ(array[1], 8); + EXPECT_EQ(array[2], 8); + EXPECT_EQ(array[3], 8); + EXPECT_EQ(array[4], 8); +} + +TEST(array, InitializerListConstructor) +{ + Array<int> array = {4, 5, 6, 7}; + EXPECT_EQ(array.size(), 4); + EXPECT_EQ(array[0], 4); + EXPECT_EQ(array[1], 5); + EXPECT_EQ(array[2], 6); + EXPECT_EQ(array[3], 7); +} + +TEST(array, SpanConstructor) +{ + int stackarray[4] = {6, 7, 8, 
9}; + Span<int> span(stackarray, ARRAY_SIZE(stackarray)); + Array<int> array(span); + EXPECT_EQ(array.size(), 4); + EXPECT_EQ(array[0], 6); + EXPECT_EQ(array[1], 7); + EXPECT_EQ(array[2], 8); + EXPECT_EQ(array[3], 9); +} + +TEST(array, CopyConstructor) +{ + Array<int> array = {5, 6, 7, 8}; + Array<int> new_array(array); + + EXPECT_EQ(array.size(), 4); + EXPECT_EQ(new_array.size(), 4); + EXPECT_NE(array.data(), new_array.data()); + EXPECT_EQ(new_array[0], 5); + EXPECT_EQ(new_array[1], 6); + EXPECT_EQ(new_array[2], 7); + EXPECT_EQ(new_array[3], 8); +} + +TEST(array, MoveConstructor) +{ + Array<int> array = {5, 6, 7, 8}; + Array<int> new_array(std::move(array)); + + EXPECT_EQ(array.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(new_array.size(), 4); + EXPECT_EQ(new_array[0], 5); + EXPECT_EQ(new_array[1], 6); + EXPECT_EQ(new_array[2], 7); + EXPECT_EQ(new_array[3], 8); +} + +TEST(array, CopyAssignment) +{ + Array<int> array = {1, 2, 3}; + Array<int> new_array = {4}; + EXPECT_EQ(new_array.size(), 1); + new_array = array; + EXPECT_EQ(new_array.size(), 3); + EXPECT_EQ(array.size(), 3); + EXPECT_NE(array.data(), new_array.data()); + EXPECT_EQ(new_array[0], 1); + EXPECT_EQ(new_array[1], 2); + EXPECT_EQ(new_array[2], 3); +} + +TEST(array, MoveAssignment) +{ + Array<int> array = {1, 2, 3}; + Array<int> new_array = {4}; + EXPECT_EQ(new_array.size(), 1); + new_array = std::move(array); + EXPECT_EQ(new_array.size(), 3); + EXPECT_EQ(array.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(new_array[0], 1); + EXPECT_EQ(new_array[1], 2); + EXPECT_EQ(new_array[2], 3); +} + +/** + * Tests that the trivially constructible types are not zero-initialized. We do not want that for + * performance reasons. 
+ */ +TEST(array, TrivialTypeSizeConstructor) +{ + Array<char, 1> *array = new Array<char, 1>(1); + char *ptr = &(*array)[0]; + array->~Array(); + + const char magic = 42; + *ptr = magic; + EXPECT_EQ(*ptr, magic); + + new (array) Array<char, 1>(1); + EXPECT_EQ((*array)[0], magic); + EXPECT_EQ(*ptr, magic); + delete array; +} + +struct ConstructibleType { + char value; + + ConstructibleType() + { + value = 42; + } +}; + +TEST(array, NoInitializationSizeConstructor) +{ + using MyArray = Array<ConstructibleType>; + + TypedBuffer<MyArray> buffer; + memset((void *)&buffer, 100, sizeof(MyArray)); + + /* Doing this to avoid some compiler optimization. */ + for (int64_t i : IndexRange(sizeof(MyArray))) { + EXPECT_EQ(((char *)buffer.ptr())[i], 100); + } + + { + MyArray &array = *new (buffer) MyArray(1, NoInitialization()); + EXPECT_EQ(array[0].value, 100); + array.clear_without_destruct(); + array.~Array(); + } + { + MyArray &array = *new (buffer) MyArray(1); + EXPECT_EQ(array[0].value, 42); + array.~Array(); + } +} + +TEST(array, Fill) +{ + Array<int> array(5); + array.fill(3); + EXPECT_EQ(array.size(), 5u); + EXPECT_EQ(array[0], 3); + EXPECT_EQ(array[1], 3); + EXPECT_EQ(array[2], 3); + EXPECT_EQ(array[3], 3); + EXPECT_EQ(array[4], 3); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_disjoint_set_test.cc b/source/blender/blenlib/tests/BLI_disjoint_set_test.cc new file mode 100644 index 00000000000..f30ee610b2a --- /dev/null +++ b/source/blender/blenlib/tests/BLI_disjoint_set_test.cc @@ -0,0 +1,36 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_disjoint_set.hh" +#include "BLI_strict_flags.h" + +#include "testing/testing.h" + +namespace blender::tests { + +TEST(disjoint_set, Test) +{ + DisjointSet disjoint_set(6); + EXPECT_FALSE(disjoint_set.in_same_set(1, 2)); + EXPECT_FALSE(disjoint_set.in_same_set(5, 3)); + EXPECT_TRUE(disjoint_set.in_same_set(2, 2)); + EXPECT_EQ(disjoint_set.find_root(3), 3); + + disjoint_set.join(1, 2); + + 
EXPECT_TRUE(disjoint_set.in_same_set(1, 2)); + EXPECT_FALSE(disjoint_set.in_same_set(0, 1)); + + disjoint_set.join(3, 4); + + EXPECT_FALSE(disjoint_set.in_same_set(2, 3)); + EXPECT_TRUE(disjoint_set.in_same_set(3, 4)); + + disjoint_set.join(1, 4); + + EXPECT_TRUE(disjoint_set.in_same_set(1, 4)); + EXPECT_TRUE(disjoint_set.in_same_set(1, 3)); + EXPECT_TRUE(disjoint_set.in_same_set(2, 4)); + EXPECT_FALSE(disjoint_set.in_same_set(0, 4)); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_edgehash_test.cc b/source/blender/blenlib/tests/BLI_edgehash_test.cc new file mode 100644 index 00000000000..7106033df36 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_edgehash_test.cc @@ -0,0 +1,408 @@ +/* Apache License, Version 2.0 */ + +#include "testing/testing.h" +#include <algorithm> +#include <random> +#include <vector> + +#include "BLI_edgehash.h" +#include "BLI_utildefines.h" + +#define VALUE_1 POINTER_FROM_INT(1) +#define VALUE_2 POINTER_FROM_INT(2) +#define VALUE_3 POINTER_FROM_INT(3) + +TEST(edgehash, InsertIncreasesLength) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + ASSERT_EQ(BLI_edgehash_len(eh), 0); + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, ReinsertNewIncreasesLength) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + ASSERT_EQ(BLI_edgehash_len(eh), 0); + BLI_edgehash_reinsert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, ReinsertExistingDoesNotIncreaseLength) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + ASSERT_EQ(BLI_edgehash_len(eh), 0); + BLI_edgehash_reinsert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + BLI_edgehash_reinsert(eh, 1, 2, VALUE_2); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + BLI_edgehash_reinsert(eh, 2, 1, VALUE_2); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, 
ReinsertCanChangeValue) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_lookup(eh, 1, 2), VALUE_1); + BLI_edgehash_reinsert(eh, 2, 1, VALUE_2); + ASSERT_EQ(BLI_edgehash_lookup(eh, 1, 2), VALUE_2); + BLI_edgehash_reinsert(eh, 1, 2, VALUE_3); + ASSERT_EQ(BLI_edgehash_lookup(eh, 2, 1), VALUE_3); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, LookupExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_lookup(eh, 1, 2), VALUE_1); + ASSERT_EQ(BLI_edgehash_lookup(eh, 2, 1), VALUE_1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, LookupNonExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + ASSERT_EQ(BLI_edgehash_lookup(eh, 1, 2), nullptr); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, LookupNonExistingWithDefault) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + ASSERT_EQ(BLI_edgehash_lookup_default(eh, 1, 2, VALUE_1), VALUE_1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, LookupExistingWithDefault) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_lookup_default(eh, 1, 2, VALUE_2), VALUE_1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, LookupPExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + void *value = VALUE_1; + BLI_edgehash_insert(eh, 1, 2, value); + void **value_p = BLI_edgehash_lookup_p(eh, 1, 2); + ASSERT_EQ(*value_p, VALUE_1); + *value_p = VALUE_2; + ASSERT_EQ(BLI_edgehash_lookup(eh, 1, 2), VALUE_2); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, LookupPNonExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + ASSERT_EQ(BLI_edgehash_lookup_p(eh, 1, 2), nullptr); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, EnsurePNonExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + void **value_p; + bool existed = BLI_edgehash_ensure_p(eh, 1, 2, 
&value_p); + ASSERT_FALSE(existed); + *value_p = VALUE_1; + ASSERT_EQ(BLI_edgehash_lookup(eh, 1, 2), VALUE_1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, EnsurePExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + void **value_p; + bool existed = BLI_edgehash_ensure_p(eh, 1, 2, &value_p); + ASSERT_TRUE(existed); + ASSERT_EQ(*value_p, VALUE_1); + *value_p = VALUE_2; + ASSERT_EQ(BLI_edgehash_lookup(eh, 1, 2), VALUE_2); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, RemoveExistingDecreasesLength) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + bool has_been_removed = BLI_edgehash_remove(eh, 1, 2, nullptr); + ASSERT_EQ(BLI_edgehash_len(eh), 0); + ASSERT_TRUE(has_been_removed); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, RemoveNonExistingDoesNotDecreaseLength) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + bool has_been_removed = BLI_edgehash_remove(eh, 4, 5, nullptr); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + ASSERT_FALSE(has_been_removed); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, PopKeyTwice) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_popkey(eh, 1, 2), VALUE_1); + ASSERT_EQ(BLI_edgehash_popkey(eh, 1, 2), nullptr); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, LookupInvertedIndices) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_EQ(BLI_edgehash_lookup(eh, 2, 1), VALUE_1); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, HasKeyExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + ASSERT_TRUE(BLI_edgehash_haskey(eh, 1, 2)); + ASSERT_TRUE(BLI_edgehash_haskey(eh, 2, 1)); + + BLI_edgehash_free(eh, nullptr); +} + 
+TEST(edgehash, HasKeyNonExisting) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + ASSERT_FALSE(BLI_edgehash_haskey(eh, 1, 2)); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, ClearSetsLengthToZero) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + BLI_edgehash_insert(eh, 1, 2, VALUE_2); + ASSERT_EQ(BLI_edgehash_len(eh), 2); + BLI_edgehash_clear(eh, nullptr); + ASSERT_EQ(BLI_edgehash_len(eh), 0); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, IteratorFindsAllValues) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + BLI_edgehash_insert(eh, 1, 3, VALUE_2); + BLI_edgehash_insert(eh, 1, 4, VALUE_3); + + EdgeHashIterator *ehi = BLI_edgehashIterator_new(eh); + auto a = BLI_edgehashIterator_getValue(ehi); + BLI_edgehashIterator_step(ehi); + auto b = BLI_edgehashIterator_getValue(ehi); + BLI_edgehashIterator_step(ehi); + auto c = BLI_edgehashIterator_getValue(ehi); + BLI_edgehashIterator_step(ehi); + + ASSERT_NE(a, b); + ASSERT_NE(b, c); + ASSERT_NE(a, c); + ASSERT_TRUE(ELEM(a, VALUE_1, VALUE_2, VALUE_3)); + ASSERT_TRUE(ELEM(b, VALUE_1, VALUE_2, VALUE_3)); + ASSERT_TRUE(ELEM(c, VALUE_1, VALUE_2, VALUE_3)); + + BLI_edgehashIterator_free(ehi); + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, IterateIsDone) +{ + EdgeHash *eh = BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + BLI_edgehash_insert(eh, 1, 3, VALUE_2); + BLI_edgehash_insert(eh, 1, 4, VALUE_3); + + EdgeHashIterator *ehi = BLI_edgehashIterator_new(eh); + ASSERT_FALSE(BLI_edgehashIterator_isDone(ehi)); + BLI_edgehashIterator_step(ehi); + ASSERT_FALSE(BLI_edgehashIterator_isDone(ehi)); + BLI_edgehashIterator_step(ehi); + ASSERT_FALSE(BLI_edgehashIterator_isDone(ehi)); + BLI_edgehashIterator_step(ehi); + ASSERT_TRUE(BLI_edgehashIterator_isDone(ehi)); + + BLI_edgehashIterator_free(ehi); + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgehash, DoubleRemove) +{ + EdgeHash *eh = 
BLI_edgehash_new(__func__); + + BLI_edgehash_insert(eh, 1, 2, VALUE_1); + BLI_edgehash_insert(eh, 1, 3, VALUE_2); + BLI_edgehash_insert(eh, 1, 4, VALUE_3); + ASSERT_EQ(BLI_edgehash_len(eh), 3); + + BLI_edgehash_remove(eh, 1, 2, nullptr); + BLI_edgehash_remove(eh, 1, 3, nullptr); + ASSERT_EQ(BLI_edgehash_len(eh), 1); + + BLI_edgehash_free(eh, nullptr); +} + +struct Edge { + uint v1, v2; +}; + +TEST(edgehash, StressTest) +{ + std::srand(0); + int amount = 10000; + + std::vector<Edge> edges; + for (int i = 0; i < amount; i++) { + edges.push_back({(uint)i, amount + (uint)std::rand() % 12345}); + } + + EdgeHash *eh = BLI_edgehash_new(__func__); + + /* first insert all the edges */ + for (int i = 0; i < edges.size(); i++) { + BLI_edgehash_insert(eh, edges[i].v1, edges[i].v2, POINTER_FROM_INT(i)); + } + + std::vector<Edge> shuffled = edges; + std::shuffle(shuffled.begin(), shuffled.end(), std::default_random_engine()); + + /* then remove half of them */ + int remove_until = shuffled.size() / 2; + for (int i = 0; i < remove_until; i++) { + BLI_edgehash_remove(eh, shuffled[i].v2, shuffled[i].v1, nullptr); + } + + ASSERT_EQ(BLI_edgehash_len(eh), edges.size() - remove_until); + + /* check if the right ones have been removed */ + for (int i = 0; i < shuffled.size(); i++) { + bool haskey = BLI_edgehash_haskey(eh, shuffled[i].v1, shuffled[i].v2); + if (i < remove_until) { + ASSERT_FALSE(haskey); + } + else { + ASSERT_TRUE(haskey); + } + } + + /* reinsert all edges */ + for (int i = 0; i < edges.size(); i++) { + BLI_edgehash_reinsert(eh, edges[i].v1, edges[i].v2, POINTER_FROM_INT(i)); + } + + ASSERT_EQ(BLI_edgehash_len(eh), edges.size()); + + /* pop all edges */ + for (int i = 0; i < edges.size(); i++) { + int value = POINTER_AS_INT(BLI_edgehash_popkey(eh, edges[i].v1, edges[i].v2)); + ASSERT_EQ(i, value); + } + + ASSERT_EQ(BLI_edgehash_len(eh), 0); + + BLI_edgehash_free(eh, nullptr); +} + +TEST(edgeset, AddNonExistingIncreasesLength) +{ + EdgeSet *es = BLI_edgeset_new(__func__); 
+ + ASSERT_EQ(BLI_edgeset_len(es), 0); + BLI_edgeset_add(es, 1, 2); + ASSERT_EQ(BLI_edgeset_len(es), 1); + BLI_edgeset_add(es, 1, 3); + ASSERT_EQ(BLI_edgeset_len(es), 2); + BLI_edgeset_add(es, 1, 4); + ASSERT_EQ(BLI_edgeset_len(es), 3); + + BLI_edgeset_free(es); +} + +TEST(edgeset, AddExistingDoesNotIncreaseLength) +{ + EdgeSet *es = BLI_edgeset_new(__func__); + + ASSERT_EQ(BLI_edgeset_len(es), 0); + BLI_edgeset_add(es, 1, 2); + ASSERT_EQ(BLI_edgeset_len(es), 1); + BLI_edgeset_add(es, 2, 1); + ASSERT_EQ(BLI_edgeset_len(es), 1); + BLI_edgeset_add(es, 1, 2); + ASSERT_EQ(BLI_edgeset_len(es), 1); + + BLI_edgeset_free(es); +} + +TEST(edgeset, HasKeyNonExisting) +{ + EdgeSet *es = BLI_edgeset_new(__func__); + + ASSERT_FALSE(BLI_edgeset_haskey(es, 1, 2)); + + BLI_edgeset_free(es); +} + +TEST(edgeset, HasKeyExisting) +{ + EdgeSet *es = BLI_edgeset_new(__func__); + + BLI_edgeset_insert(es, 1, 2); + ASSERT_TRUE(BLI_edgeset_haskey(es, 1, 2)); + + BLI_edgeset_free(es); +} diff --git a/source/blender/blenlib/tests/BLI_index_mask_test.cc b/source/blender/blenlib/tests/BLI_index_mask_test.cc new file mode 100644 index 00000000000..4d6060e51c9 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_index_mask_test.cc @@ -0,0 +1,43 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_index_mask.hh" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(index_mask, DefaultConstructor) +{ + IndexMask mask; + EXPECT_EQ(mask.min_array_size(), 0); + EXPECT_EQ(mask.size(), 0); +} + +TEST(index_mask, ArrayConstructor) +{ + [](IndexMask mask) { + EXPECT_EQ(mask.size(), 4); + EXPECT_EQ(mask.min_array_size(), 8); + EXPECT_FALSE(mask.is_range()); + EXPECT_EQ(mask[0], 3); + EXPECT_EQ(mask[1], 5); + EXPECT_EQ(mask[2], 6); + EXPECT_EQ(mask[3], 7); + }({3, 5, 6, 7}); +} + +TEST(index_mask, RangeConstructor) +{ + IndexMask mask = IndexRange(3, 5); + EXPECT_EQ(mask.size(), 5); + EXPECT_EQ(mask.min_array_size(), 8); + EXPECT_EQ(mask.last(), 7); + EXPECT_TRUE(mask.is_range()); + 
EXPECT_EQ(mask.as_range().first(), 3); + EXPECT_EQ(mask.as_range().last(), 7); + Span<int64_t> indices = mask.indices(); + EXPECT_EQ(indices[0], 3); + EXPECT_EQ(indices[1], 4); + EXPECT_EQ(indices[2], 5); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_index_range_test.cc b/source/blender/blenlib/tests/BLI_index_range_test.cc new file mode 100644 index 00000000000..d472ded0f18 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_index_range_test.cc @@ -0,0 +1,143 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_index_range.hh" +#include "BLI_strict_flags.h" +#include "BLI_vector.hh" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(index_range, DefaultConstructor) +{ + IndexRange range; + EXPECT_EQ(range.size(), 0); + + Vector<int64_t> vector; + for (int64_t value : range) { + vector.append(value); + } + EXPECT_EQ(vector.size(), 0); +} + +TEST(index_range, SingleElementRange) +{ + IndexRange range(4, 1); + EXPECT_EQ(range.size(), 1); + EXPECT_EQ(*range.begin(), 4); + + Vector<int64_t> vector; + for (int64_t value : range) { + vector.append(value); + } + + EXPECT_EQ(vector.size(), 1); + EXPECT_EQ(vector[0], 4); +} + +TEST(index_range, MultipleElementRange) +{ + IndexRange range(6, 4); + EXPECT_EQ(range.size(), 4); + + Vector<int64_t> vector; + for (int64_t value : range) { + vector.append(value); + } + + EXPECT_EQ(vector.size(), 4); + for (int i = 0; i < 4; i++) { + EXPECT_EQ(vector[i], i + 6); + } +} + +TEST(index_range, SubscriptOperator) +{ + IndexRange range(5, 5); + EXPECT_EQ(range[0], 5); + EXPECT_EQ(range[1], 6); + EXPECT_EQ(range[2], 7); +} + +TEST(index_range, Before) +{ + IndexRange range = IndexRange(5, 5).before(3); + EXPECT_EQ(range[0], 2); + EXPECT_EQ(range[1], 3); + EXPECT_EQ(range[2], 4); + EXPECT_EQ(range.size(), 3); +} + +TEST(index_range, After) +{ + IndexRange range = IndexRange(5, 5).after(4); + EXPECT_EQ(range[0], 10); + EXPECT_EQ(range[1], 11); + EXPECT_EQ(range[2], 12); + 
EXPECT_EQ(range[3], 13); + EXPECT_EQ(range.size(), 4); +} + +TEST(index_range, Contains) +{ + IndexRange range = IndexRange(5, 3); + EXPECT_TRUE(range.contains(5)); + EXPECT_TRUE(range.contains(6)); + EXPECT_TRUE(range.contains(7)); + EXPECT_FALSE(range.contains(4)); + EXPECT_FALSE(range.contains(8)); +} + +TEST(index_range, First) +{ + IndexRange range = IndexRange(5, 3); + EXPECT_EQ(range.first(), 5); +} + +TEST(index_range, Last) +{ + IndexRange range = IndexRange(5, 3); + EXPECT_EQ(range.last(), 7); +} + +TEST(index_range, OneAfterEnd) +{ + IndexRange range = IndexRange(5, 3); + EXPECT_EQ(range.one_after_last(), 8); +} + +TEST(index_range, Start) +{ + IndexRange range = IndexRange(6, 2); + EXPECT_EQ(range.start(), 6); +} + +TEST(index_range, Slice) +{ + IndexRange range = IndexRange(5, 15); + IndexRange slice = range.slice(2, 6); + EXPECT_EQ(slice.size(), 6); + EXPECT_EQ(slice.first(), 7); + EXPECT_EQ(slice.last(), 12); +} + +TEST(index_range, SliceRange) +{ + IndexRange range = IndexRange(5, 15); + IndexRange slice = range.slice(IndexRange(3, 5)); + EXPECT_EQ(slice.size(), 5); + EXPECT_EQ(slice.first(), 8); + EXPECT_EQ(slice.last(), 12); +} + +TEST(index_range, AsSpan) +{ + IndexRange range = IndexRange(4, 6); + Span<int64_t> span = range.as_span(); + EXPECT_EQ(span.size(), 6); + EXPECT_EQ(span[0], 4); + EXPECT_EQ(span[1], 5); + EXPECT_EQ(span[2], 6); + EXPECT_EQ(span[3], 7); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_linear_allocator_test.cc b/source/blender/blenlib/tests/BLI_linear_allocator_test.cc new file mode 100644 index 00000000000..44b70d1f55d --- /dev/null +++ b/source/blender/blenlib/tests/BLI_linear_allocator_test.cc @@ -0,0 +1,118 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_linear_allocator.hh" +#include "BLI_strict_flags.h" +#include "testing/testing.h" + +namespace blender::tests { + +static bool is_aligned(void *ptr, uint alignment) +{ + BLI_assert(is_power_of_2_i((int)alignment)); + return 
(POINTER_AS_UINT(ptr) & (alignment - 1)) == 0; +} + +TEST(linear_allocator, AllocationAlignment) +{ + LinearAllocator<> allocator; + + EXPECT_TRUE(is_aligned(allocator.allocate(10, 4), 4)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 4), 4)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 4), 4)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 8), 8)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 4), 4)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 16), 16)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 4), 4)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 64), 64)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 64), 64)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 8), 8)); + EXPECT_TRUE(is_aligned(allocator.allocate(10, 128), 128)); +} + +TEST(linear_allocator, PackedAllocation) +{ + LinearAllocator<> allocator; + blender::AlignedBuffer<256, 32> buffer; + allocator.provide_buffer(buffer); + + uintptr_t ptr1 = (uintptr_t)allocator.allocate(10, 4); /* 0 - 10 */ + uintptr_t ptr2 = (uintptr_t)allocator.allocate(10, 4); /* 12 - 22 */ + uintptr_t ptr3 = (uintptr_t)allocator.allocate(8, 32); /* 32 - 40 */ + uintptr_t ptr4 = (uintptr_t)allocator.allocate(16, 8); /* 40 - 56 */ + uintptr_t ptr5 = (uintptr_t)allocator.allocate(1, 8); /* 56 - 57 */ + uintptr_t ptr6 = (uintptr_t)allocator.allocate(1, 4); /* 60 - 61 */ + uintptr_t ptr7 = (uintptr_t)allocator.allocate(1, 1); /* 61 - 62 */ + + EXPECT_EQ(ptr2 - ptr1, 12); /* 12 - 0 = 12 */ + EXPECT_EQ(ptr3 - ptr2, 20); /* 32 - 12 = 20 */ + EXPECT_EQ(ptr4 - ptr3, 8); /* 40 - 32 = 8 */ + EXPECT_EQ(ptr5 - ptr4, 16); /* 56 - 40 = 16 */ + EXPECT_EQ(ptr6 - ptr5, 4); /* 60 - 56 = 4 */ + EXPECT_EQ(ptr7 - ptr6, 1); /* 61 - 60 = 1 */ +} + +TEST(linear_allocator, CopyString) +{ + LinearAllocator<> allocator; + blender::AlignedBuffer<256, 1> buffer; + allocator.provide_buffer(buffer); + + StringRefNull ref1 = allocator.copy_string("Hello"); + StringRefNull ref2 = allocator.copy_string("World"); + + EXPECT_EQ(ref1, 
"Hello"); + EXPECT_EQ(ref2, "World"); + EXPECT_EQ(ref2.data() - ref1.data(), 6); +} + +TEST(linear_allocator, AllocateArray) +{ + LinearAllocator<> allocator; + + MutableSpan<int> span = allocator.allocate_array<int>(5); + EXPECT_EQ(span.size(), 5); +} + +TEST(linear_allocator, Construct) +{ + LinearAllocator<> allocator; + + std::array<int, 5> values = {1, 2, 3, 4, 5}; + Vector<int> *vector = allocator.construct<Vector<int>>(values); + EXPECT_EQ(vector->size(), 5); + EXPECT_EQ((*vector)[3], 4); + vector->~Vector(); +} + +TEST(linear_allocator, ConstructElementsAndPointerArray) +{ + LinearAllocator<> allocator; + + std::array<int, 7> values = {1, 2, 3, 4, 5, 6, 7}; + Span<Vector<int> *> vectors = allocator.construct_elements_and_pointer_array<Vector<int>>( + 5, values); + + EXPECT_EQ(vectors.size(), 5); + EXPECT_EQ(vectors[3]->size(), 7); + EXPECT_EQ((*vectors[2])[5], 6); + + for (Vector<int> *vector : vectors) { + vector->~Vector(); + } +} + +TEST(linear_allocator, ConstructArrayCopy) +{ + LinearAllocator<> allocator; + + Vector<int> values = {1, 2, 3}; + MutableSpan<int> span1 = allocator.construct_array_copy(values.as_span()); + MutableSpan<int> span2 = allocator.construct_array_copy(values.as_span()); + EXPECT_NE(span1.data(), span2.data()); + EXPECT_EQ(span1.size(), 3); + EXPECT_EQ(span2.size(), 3); + EXPECT_EQ(span1[1], 2); + EXPECT_EQ(span2[2], 3); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_map_test.cc b/source/blender/blenlib/tests/BLI_map_test.cc new file mode 100644 index 00000000000..fe7b0f01279 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_map_test.cc @@ -0,0 +1,590 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_map.hh" +#include "BLI_rand.h" +#include "BLI_set.hh" +#include "BLI_strict_flags.h" +#include "BLI_timeit.hh" +#include "BLI_vector.hh" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(map, DefaultConstructor) +{ + Map<int, float> map; + EXPECT_EQ(map.size(), 0); + 
EXPECT_TRUE(map.is_empty()); +} + +TEST(map, AddIncreasesSize) +{ + Map<int, float> map; + EXPECT_EQ(map.size(), 0); + EXPECT_TRUE(map.is_empty()); + map.add(2, 5.0f); + EXPECT_EQ(map.size(), 1); + EXPECT_FALSE(map.is_empty()); + map.add(6, 2.0f); + EXPECT_EQ(map.size(), 2); + EXPECT_FALSE(map.is_empty()); +} + +TEST(map, Contains) +{ + Map<int, float> map; + EXPECT_FALSE(map.contains(4)); + map.add(5, 6.0f); + EXPECT_FALSE(map.contains(4)); + map.add(4, 2.0f); + EXPECT_TRUE(map.contains(4)); +} + +TEST(map, LookupExisting) +{ + Map<int, float> map; + map.add(2, 6.0f); + map.add(4, 1.0f); + EXPECT_EQ(map.lookup(2), 6.0f); + EXPECT_EQ(map.lookup(4), 1.0f); +} + +TEST(map, LookupNotExisting) +{ + Map<int, float> map; + map.add(2, 4.0f); + map.add(1, 1.0f); + EXPECT_EQ(map.lookup_ptr(0), nullptr); + EXPECT_EQ(map.lookup_ptr(5), nullptr); +} + +TEST(map, AddMany) +{ + Map<int, int> map; + for (int i = 0; i < 100; i++) { + map.add(i * 30, i); + map.add(i * 31, i); + } +} + +TEST(map, PopItem) +{ + Map<int, float> map; + map.add(2, 3.0f); + map.add(1, 9.0f); + EXPECT_TRUE(map.contains(2)); + EXPECT_TRUE(map.contains(1)); + + EXPECT_EQ(map.pop(1), 9.0f); + EXPECT_TRUE(map.contains(2)); + EXPECT_FALSE(map.contains(1)); + + EXPECT_EQ(map.pop(2), 3.0f); + EXPECT_FALSE(map.contains(2)); + EXPECT_FALSE(map.contains(1)); +} + +TEST(map, PopTry) +{ + Map<int, int> map; + map.add(1, 5); + map.add(2, 7); + EXPECT_EQ(map.size(), 2); + std::optional<int> value = map.pop_try(4); + EXPECT_EQ(map.size(), 2); + EXPECT_FALSE(value.has_value()); + value = map.pop_try(2); + EXPECT_EQ(map.size(), 1); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(*value, 7); + EXPECT_EQ(*map.pop_try(1), 5); + EXPECT_EQ(map.size(), 0); +} + +TEST(map, PopDefault) +{ + Map<int, int> map; + map.add(1, 4); + map.add(2, 7); + map.add(3, 8); + EXPECT_EQ(map.size(), 3); + EXPECT_EQ(map.pop_default(4, 10), 10); + EXPECT_EQ(map.size(), 3); + EXPECT_EQ(map.pop_default(1, 10), 4); + EXPECT_EQ(map.size(), 2); + 
EXPECT_EQ(map.pop_default(2, 20), 7); + EXPECT_EQ(map.size(), 1); + EXPECT_EQ(map.pop_default(2, 20), 20); + EXPECT_EQ(map.size(), 1); + EXPECT_EQ(map.pop_default(3, 0), 8); + EXPECT_EQ(map.size(), 0); +} + +TEST(map, PopItemMany) +{ + Map<int, int> map; + for (int i = 0; i < 100; i++) { + map.add_new(i, i); + } + for (int i = 25; i < 80; i++) { + EXPECT_EQ(map.pop(i), i); + } + for (int i = 0; i < 100; i++) { + EXPECT_EQ(map.contains(i), i < 25 || i >= 80); + } +} + +TEST(map, ValueIterator) +{ + Map<int, float> map; + map.add(3, 5.0f); + map.add(1, 2.0f); + map.add(7, -2.0f); + + blender::Set<float> values; + + int iterations = 0; + for (float value : map.values()) { + values.add(value); + iterations++; + } + + EXPECT_EQ(iterations, 3); + EXPECT_TRUE(values.contains(5.0f)); + EXPECT_TRUE(values.contains(-2.0f)); + EXPECT_TRUE(values.contains(2.0f)); +} + +TEST(map, KeyIterator) +{ + Map<int, float> map; + map.add(6, 3.0f); + map.add(2, 4.0f); + map.add(1, 3.0f); + + blender::Set<int> keys; + + int iterations = 0; + for (int key : map.keys()) { + keys.add(key); + iterations++; + } + + EXPECT_EQ(iterations, 3); + EXPECT_TRUE(keys.contains(1)); + EXPECT_TRUE(keys.contains(2)); + EXPECT_TRUE(keys.contains(6)); +} + +TEST(map, ItemIterator) +{ + Map<int, float> map; + map.add(5, 3.0f); + map.add(2, 9.0f); + map.add(1, 0.0f); + + blender::Set<int> keys; + blender::Set<float> values; + + int iterations = 0; + const Map<int, float> &const_map = map; + for (auto item : const_map.items()) { + keys.add(item.key); + values.add(item.value); + iterations++; + } + + EXPECT_EQ(iterations, 3); + EXPECT_TRUE(keys.contains(5)); + EXPECT_TRUE(keys.contains(2)); + EXPECT_TRUE(keys.contains(1)); + EXPECT_TRUE(values.contains(3.0f)); + EXPECT_TRUE(values.contains(9.0f)); + EXPECT_TRUE(values.contains(0.0f)); +} + +TEST(map, MutableValueIterator) +{ + Map<int, int> map; + map.add(3, 6); + map.add(2, 1); + + for (int &value : map.values()) { + value += 10; + } + + 
EXPECT_EQ(map.lookup(3), 16); + EXPECT_EQ(map.lookup(2), 11); +} + +TEST(map, MutableItemIterator) +{ + Map<int, int> map; + map.add(3, 6); + map.add(2, 1); + + for (auto item : map.items()) { + item.value += item.key; + } + + EXPECT_EQ(map.lookup(3), 9.0f); + EXPECT_EQ(map.lookup(2), 3.0f); +} + +TEST(map, MutableItemToItemConversion) +{ + Map<int, int> map; + map.add(3, 6); + map.add(2, 1); + + Vector<int> keys, values; + for (Map<int, int>::Item item : map.items()) { + keys.append(item.key); + values.append(item.value); + } + + EXPECT_EQ(keys.size(), 2); + EXPECT_EQ(values.size(), 2); + EXPECT_TRUE(keys.contains(3)); + EXPECT_TRUE(keys.contains(2)); + EXPECT_TRUE(values.contains(6)); + EXPECT_TRUE(values.contains(1)); +} + +static float return_42() +{ + return 42.0f; +} + +TEST(map, LookupOrAddCB_SeparateFunction) +{ + Map<int, float> map; + EXPECT_EQ(map.lookup_or_add_cb(0, return_42), 42.0f); + EXPECT_EQ(map.lookup(0), 42); + + map.keys(); +} + +TEST(map, LookupOrAddCB_Lambdas) +{ + Map<int, float> map; + auto lambda1 = []() { return 11.0f; }; + EXPECT_EQ(map.lookup_or_add_cb(0, lambda1), 11.0f); + auto lambda2 = []() { return 20.0f; }; + EXPECT_EQ(map.lookup_or_add_cb(1, lambda2), 20.0f); + + EXPECT_EQ(map.lookup_or_add_cb(0, lambda2), 11.0f); + EXPECT_EQ(map.lookup_or_add_cb(1, lambda1), 20.0f); +} + +TEST(map, AddOrModify) +{ + Map<int, float> map; + auto create_func = [](float *value) { + *value = 10.0f; + return true; + }; + auto modify_func = [](float *value) { + *value += 5; + return false; + }; + EXPECT_TRUE(map.add_or_modify(1, create_func, modify_func)); + EXPECT_EQ(map.lookup(1), 10.0f); + EXPECT_FALSE(map.add_or_modify(1, create_func, modify_func)); + EXPECT_EQ(map.lookup(1), 15.0f); +} + +TEST(map, AddOverwrite) +{ + Map<int, float> map; + EXPECT_FALSE(map.contains(3)); + EXPECT_TRUE(map.add_overwrite(3, 6.0f)); + EXPECT_EQ(map.lookup(3), 6.0f); + EXPECT_FALSE(map.add_overwrite(3, 7.0f)); + EXPECT_EQ(map.lookup(3), 7.0f); + EXPECT_FALSE(map.add(3, 
8.0f)); + EXPECT_EQ(map.lookup(3), 7.0f); +} + +TEST(map, LookupOrAddDefault) +{ + Map<int, float> map; + map.lookup_or_add_default(3) = 6; + EXPECT_EQ(map.lookup(3), 6); + map.lookup_or_add_default(5) = 2; + EXPECT_EQ(map.lookup(5), 2); + map.lookup_or_add_default(3) += 4; + EXPECT_EQ(map.lookup(3), 10); +} + +TEST(map, LookupOrAdd) +{ + Map<int, int> map; + EXPECT_EQ(map.lookup_or_add(6, 4), 4); + EXPECT_EQ(map.lookup_or_add(6, 5), 4); + map.lookup_or_add(6, 4) += 10; + EXPECT_EQ(map.lookup(6), 14); +} + +TEST(map, MoveConstructorSmall) +{ + Map<int, float> map1; + map1.add(1, 2.0f); + map1.add(4, 1.0f); + Map<int, float> map2(std::move(map1)); + EXPECT_EQ(map2.size(), 2); + EXPECT_EQ(map2.lookup(1), 2.0f); + EXPECT_EQ(map2.lookup(4), 1.0f); + EXPECT_EQ(map1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(map1.lookup_ptr(4), nullptr); +} + +TEST(map, MoveConstructorLarge) +{ + Map<int, int> map1; + for (int i = 0; i < 100; i++) { + map1.add_new(i, i); + } + Map<int, int> map2(std::move(map1)); + EXPECT_EQ(map2.size(), 100); + EXPECT_EQ(map2.lookup(1), 1); + EXPECT_EQ(map2.lookup(4), 4); + EXPECT_EQ(map1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(map1.lookup_ptr(4), nullptr); +} + +TEST(map, MoveAssignment) +{ + Map<int, float> map1; + map1.add(1, 2.0f); + map1.add(4, 1.0f); + Map<int, float> map2; + map2 = std::move(map1); + EXPECT_EQ(map2.size(), 2); + EXPECT_EQ(map2.lookup(1), 2.0f); + EXPECT_EQ(map2.lookup(4), 1.0f); + EXPECT_EQ(map1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(map1.lookup_ptr(4), nullptr); +} + +TEST(map, CopyAssignment) +{ + Map<int, float> map1; + map1.add(1, 2.0f); + map1.add(4, 1.0f); + Map<int, float> map2; + map2 = map1; + EXPECT_EQ(map2.size(), 2); + EXPECT_EQ(map2.lookup(1), 2.0f); + EXPECT_EQ(map2.lookup(4), 1.0f); + EXPECT_EQ(map1.size(), 2); + EXPECT_EQ(*map1.lookup_ptr(4), 1.0f); +} + +TEST(map, Clear) +{ + Map<int, float> map; + map.add(1, 1.0f); + map.add(2, 5.0f); + + 
EXPECT_EQ(map.size(), 2); + EXPECT_TRUE(map.contains(1)); + EXPECT_TRUE(map.contains(2)); + + map.clear(); + + EXPECT_EQ(map.size(), 0); + EXPECT_FALSE(map.contains(1)); + EXPECT_FALSE(map.contains(2)); +} + +TEST(map, UniquePtrValue) +{ + auto value1 = std::unique_ptr<int>(new int()); + auto value2 = std::unique_ptr<int>(new int()); + auto value3 = std::unique_ptr<int>(new int()); + + int *value1_ptr = value1.get(); + + Map<int, std::unique_ptr<int>> map; + map.add_new(1, std::move(value1)); + map.add(2, std::move(value2)); + map.add_overwrite(3, std::move(value3)); + map.lookup_or_add_cb(4, []() { return std::unique_ptr<int>(new int()); }); + map.add_new(5, std::unique_ptr<int>(new int())); + map.add(6, std::unique_ptr<int>(new int())); + map.add_overwrite(7, std::unique_ptr<int>(new int())); + map.lookup_or_add(8, std::unique_ptr<int>(new int())); + map.pop_default(9, std::unique_ptr<int>(new int())); + + EXPECT_EQ(map.lookup(1).get(), value1_ptr); + EXPECT_EQ(map.lookup_ptr(100), nullptr); +} + +TEST(map, Remove) +{ + Map<int, int> map; + map.add(2, 4); + EXPECT_EQ(map.size(), 1); + EXPECT_FALSE(map.remove(3)); + EXPECT_EQ(map.size(), 1); + EXPECT_TRUE(map.remove(2)); + EXPECT_EQ(map.size(), 0); +} + +TEST(map, PointerKeys) +{ + char a, b, c, d; + + Map<char *, int> map; + EXPECT_TRUE(map.add(&a, 5)); + EXPECT_FALSE(map.add(&a, 4)); + map.add_new(&b, 1); + map.add_new(&c, 1); + EXPECT_EQ(map.size(), 3); + EXPECT_TRUE(map.remove(&b)); + EXPECT_TRUE(map.add(&b, 8)); + EXPECT_FALSE(map.remove(&d)); + EXPECT_TRUE(map.remove(&a)); + EXPECT_TRUE(map.remove(&b)); + EXPECT_TRUE(map.remove(&c)); + EXPECT_TRUE(map.is_empty()); +} + +TEST(map, ConstKeysAndValues) +{ + Map<const std::string, const std::string> map; + map.reserve(10); + map.add("45", "643"); + EXPECT_TRUE(map.contains("45")); + EXPECT_FALSE(map.contains("54")); +} + +TEST(map, ForeachItem) +{ + Map<int, int> map; + map.add(3, 4); + map.add(1, 8); + + Vector<int> keys; + Vector<int> values; + 
map.foreach_item([&](int key, int value) { + keys.append(key); + values.append(value); + }); + + EXPECT_EQ(keys.size(), 2); + EXPECT_EQ(values.size(), 2); + EXPECT_EQ(keys.first_index_of(3), values.first_index_of(4)); + EXPECT_EQ(keys.first_index_of(1), values.first_index_of(8)); +} + +/** + * Set this to 1 to activate the benchmark. It is disabled by default, because it prints a lot. + */ +#if 0 +template<typename MapT> +BLI_NOINLINE void benchmark_random_ints(StringRef name, int amount, int factor) +{ + RNG *rng = BLI_rng_new(0); + Vector<int> values; + for (int i = 0; i < amount; i++) { + values.append(BLI_rng_get_int(rng) * factor); + } + BLI_rng_free(rng); + + MapT map; + { + SCOPED_TIMER(name + " Add"); + for (int value : values) { + map.add(value, value); + } + } + int count = 0; + { + SCOPED_TIMER(name + " Contains"); + for (int value : values) { + count += map.contains(value); + } + } + { + SCOPED_TIMER(name + " Remove"); + for (int value : values) { + count += map.remove(value); + } + } + + /* Print the value for simple error checking and to avoid some compiler optimizations. 
*/ + std::cout << "Count: " << count << "\n"; +} + +TEST(map, Benchmark) +{ + for (int i = 0; i < 3; i++) { + benchmark_random_ints<blender::Map<int, int>>("blender::Map ", 1000000, 1); + benchmark_random_ints<blender::StdUnorderedMapWrapper<int, int>>("std::unordered_map", 1000000, 1); + } + std::cout << "\n"; + for (int i = 0; i < 3; i++) { + uint32_t factor = (3 << 10); + benchmark_random_ints<blender::Map<int, int>>("blender::Map ", 1000000, factor); + benchmark_random_ints<blender::StdUnorderedMapWrapper<int, int>>( + "std::unordered_map", 1000000, factor); + } +} + +/** + * Timer 'blender::Map Add' took 61.7616 ms + * Timer 'blender::Map Contains' took 18.4989 ms + * Timer 'blender::Map Remove' took 20.5864 ms + * Count: 1999755 + * Timer 'std::unordered_map Add' took 188.674 ms + * Timer 'std::unordered_map Contains' took 44.3741 ms + * Timer 'std::unordered_map Remove' took 169.52 ms + * Count: 1999755 + * Timer 'blender::Map Add' took 37.9196 ms + * Timer 'blender::Map Contains' took 16.7361 ms + * Timer 'blender::Map Remove' took 20.9568 ms + * Count: 1999755 + * Timer 'std::unordered_map Add' took 166.09 ms + * Timer 'std::unordered_map Contains' took 40.6133 ms + * Timer 'std::unordered_map Remove' took 142.85 ms + * Count: 1999755 + * Timer 'blender::Map Add' took 37.3053 ms + * Timer 'blender::Map Contains' took 16.6731 ms + * Timer 'blender::Map Remove' took 18.8304 ms + * Count: 1999755 + * Timer 'std::unordered_map Add' took 170.964 ms + * Timer 'std::unordered_map Contains' took 38.1824 ms + * Timer 'std::unordered_map Remove' took 140.263 ms + * Count: 1999755 + * + * Timer 'blender::Map Add' took 50.1131 ms + * Timer 'blender::Map Contains' took 25.0491 ms + * Timer 'blender::Map Remove' took 32.4225 ms + * Count: 1889920 + * Timer 'std::unordered_map Add' took 150.129 ms + * Timer 'std::unordered_map Contains' took 34.6999 ms + * Timer 'std::unordered_map Remove' took 120.907 ms + * Count: 1889920 + * Timer 'blender::Map Add' took 50.4438 ms + 
* Timer 'blender::Map Contains' took 25.2677 ms + * Timer 'blender::Map Remove' took 32.3047 ms + * Count: 1889920 + * Timer 'std::unordered_map Add' took 144.015 ms + * Timer 'std::unordered_map Contains' took 36.3387 ms + * Timer 'std::unordered_map Remove' took 119.109 ms + * Count: 1889920 + * Timer 'blender::Map Add' took 48.6995 ms + * Timer 'blender::Map Contains' took 25.1846 ms + * Timer 'blender::Map Remove' took 33.0283 ms + * Count: 1889920 + * Timer 'std::unordered_map Add' took 143.494 ms + * Timer 'std::unordered_map Contains' took 34.8905 ms + * Timer 'std::unordered_map Remove' took 122.739 ms + * Count: 1889920 + */ + +#endif /* Benchmark */ + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_math_base_safe_test.cc b/source/blender/blenlib/tests/BLI_math_base_safe_test.cc new file mode 100644 index 00000000000..2e3e083cf92 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_math_base_safe_test.cc @@ -0,0 +1,37 @@ +/* Apache License, Version 2.0 */ + +#include "testing/testing.h" + +#include "BLI_math_base_safe.h" + +TEST(math_base, SafePowf) +{ + EXPECT_FLOAT_EQ(safe_powf(4.0f, 3.0f), 64.0f); + EXPECT_FLOAT_EQ(safe_powf(3.2f, 5.6f), 674.2793796f); + EXPECT_FLOAT_EQ(safe_powf(4.0f, -2.0f), 0.0625f); + EXPECT_FLOAT_EQ(safe_powf(6.0f, -3.2f), 0.003235311f); + EXPECT_FLOAT_EQ(safe_powf(-4.0f, 6), 4096.0f); + EXPECT_FLOAT_EQ(safe_powf(-3.0f, 5.5), 0.0f); + EXPECT_FLOAT_EQ(safe_powf(-2.5f, -4.0f), 0.0256f); + EXPECT_FLOAT_EQ(safe_powf(-3.7f, -4.5f), 0.0f); +} + +TEST(math_base, SafeModf) +{ + EXPECT_FLOAT_EQ(safe_modf(3.4, 2.2f), 1.2f); + EXPECT_FLOAT_EQ(safe_modf(3.4, -2.2f), 1.2f); + EXPECT_FLOAT_EQ(safe_modf(-3.4, -2.2f), -1.2f); + EXPECT_FLOAT_EQ(safe_modf(-3.4, 0.0f), 0.0f); + EXPECT_FLOAT_EQ(safe_modf(0.0f, 3.0f), 0.0f); + EXPECT_FLOAT_EQ(safe_modf(55.0f, 10.0f), 5.0f); +} + +TEST(math_base, SafeLogf) +{ + EXPECT_FLOAT_EQ(safe_logf(3.3f, 2.5f), 1.302995247f); + EXPECT_FLOAT_EQ(safe_logf(0.0f, 3.0f), 0.0f); + 
EXPECT_FLOAT_EQ(safe_logf(3.0f, 0.0f), 0.0f); + EXPECT_FLOAT_EQ(safe_logf(-2.0f, 4.3f), 0.0f); + EXPECT_FLOAT_EQ(safe_logf(2.0f, -4.3f), 0.0f); + EXPECT_FLOAT_EQ(safe_logf(-2.0f, -4.3f), 0.0f); +} diff --git a/source/blender/blenlib/tests/BLI_memory_utils_test.cc b/source/blender/blenlib/tests/BLI_memory_utils_test.cc new file mode 100644 index 00000000000..f3cb02b63d7 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_memory_utils_test.cc @@ -0,0 +1,159 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_float3.hh" +#include "BLI_memory_utils.hh" +#include "BLI_strict_flags.h" +#include "testing/testing.h" + +namespace blender::tests { + +struct MyValue { + static inline int alive = 0; + + MyValue() + { + if (alive == 15) { + throw std::exception(); + } + + alive++; + } + + MyValue(const MyValue &UNUSED(other)) + { + if (alive == 15) { + throw std::exception(); + } + + alive++; + } + + ~MyValue() + { + alive--; + } +}; + +TEST(memory_utils, DefaultConstructN_ActuallyCallsConstructor) +{ + constexpr int amount = 10; + TypedBuffer<MyValue, amount> buffer; + + EXPECT_EQ(MyValue::alive, 0); + default_construct_n(buffer.ptr(), amount); + EXPECT_EQ(MyValue::alive, amount); + destruct_n(buffer.ptr(), amount); + EXPECT_EQ(MyValue::alive, 0); +} + +TEST(memory_utils, DefaultConstructN_StrongExceptionSafety) +{ + constexpr int amount = 20; + TypedBuffer<MyValue, amount> buffer; + + EXPECT_EQ(MyValue::alive, 0); + EXPECT_THROW(default_construct_n(buffer.ptr(), amount), std::exception); + EXPECT_EQ(MyValue::alive, 0); +} + +TEST(memory_utils, UninitializedCopyN_ActuallyCopies) +{ + constexpr int amount = 5; + TypedBuffer<MyValue, amount> buffer1; + TypedBuffer<MyValue, amount> buffer2; + + EXPECT_EQ(MyValue::alive, 0); + default_construct_n(buffer1.ptr(), amount); + EXPECT_EQ(MyValue::alive, amount); + uninitialized_copy_n(buffer1.ptr(), amount, buffer2.ptr()); + EXPECT_EQ(MyValue::alive, 2 * amount); + destruct_n(buffer1.ptr(), amount); + EXPECT_EQ(MyValue::alive, 
amount); + destruct_n(buffer2.ptr(), amount); + EXPECT_EQ(MyValue::alive, 0); +} + +TEST(memory_utils, UninitializedCopyN_StrongExceptionSafety) +{ + constexpr int amount = 10; + TypedBuffer<MyValue, amount> buffer1; + TypedBuffer<MyValue, amount> buffer2; + + EXPECT_EQ(MyValue::alive, 0); + default_construct_n(buffer1.ptr(), amount); + EXPECT_EQ(MyValue::alive, amount); + EXPECT_THROW(uninitialized_copy_n(buffer1.ptr(), amount, buffer2.ptr()), std::exception); + EXPECT_EQ(MyValue::alive, amount); + destruct_n(buffer1.ptr(), amount); + EXPECT_EQ(MyValue::alive, 0); +} + +TEST(memory_utils, UninitializedFillN_ActuallyCopies) +{ + constexpr int amount = 10; + TypedBuffer<MyValue, amount> buffer; + + EXPECT_EQ(MyValue::alive, 0); + { + MyValue value; + EXPECT_EQ(MyValue::alive, 1); + uninitialized_fill_n(buffer.ptr(), amount, value); + EXPECT_EQ(MyValue::alive, 1 + amount); + destruct_n(buffer.ptr(), amount); + EXPECT_EQ(MyValue::alive, 1); + } + EXPECT_EQ(MyValue::alive, 0); +} + +TEST(memory_utils, UninitializedFillN_StrongExceptionSafety) +{ + constexpr int amount = 20; + TypedBuffer<MyValue, amount> buffer; + + EXPECT_EQ(MyValue::alive, 0); + { + MyValue value; + EXPECT_EQ(MyValue::alive, 1); + EXPECT_THROW(uninitialized_fill_n(buffer.ptr(), amount, value), std::exception); + EXPECT_EQ(MyValue::alive, 1); + } + EXPECT_EQ(MyValue::alive, 0); +} + +class TestBaseClass { + virtual void mymethod(){}; +}; + +class TestChildClass : public TestBaseClass { + void mymethod() override + { + } +}; + +static_assert(is_convertible_pointer_v<int *, int *>); +static_assert(is_convertible_pointer_v<int *, const int *>); +static_assert(is_convertible_pointer_v<int *, int *const>); +static_assert(is_convertible_pointer_v<int *, const int *const>); +static_assert(!is_convertible_pointer_v<const int *, int *>); +static_assert(!is_convertible_pointer_v<int, int *>); +static_assert(!is_convertible_pointer_v<int *, int>); +static_assert(is_convertible_pointer_v<TestBaseClass *, const 
TestBaseClass *>); +static_assert(!is_convertible_pointer_v<const TestBaseClass *, TestBaseClass *>); +static_assert(is_convertible_pointer_v<TestChildClass *, TestBaseClass *>); +static_assert(!is_convertible_pointer_v<TestBaseClass *, TestChildClass *>); +static_assert(is_convertible_pointer_v<const TestChildClass *, const TestBaseClass *>); +static_assert(!is_convertible_pointer_v<TestBaseClass, const TestChildClass *>); +static_assert(!is_convertible_pointer_v<float3, float *>); +static_assert(!is_convertible_pointer_v<float *, float3>); +static_assert(!is_convertible_pointer_v<int **, int *>); +static_assert(!is_convertible_pointer_v<int *, int **>); +static_assert(is_convertible_pointer_v<int **, int **>); +static_assert(is_convertible_pointer_v<const int **, const int **>); +static_assert(!is_convertible_pointer_v<const int **, int **>); +static_assert(!is_convertible_pointer_v<int *const *, int **>); +static_assert(!is_convertible_pointer_v<int *const *const, int **>); +static_assert(is_convertible_pointer_v<int **, int **const>); +static_assert(is_convertible_pointer_v<int **, int *const *>); +static_assert(is_convertible_pointer_v<int **, int const *const *>); + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_multi_value_map_test.cc b/source/blender/blenlib/tests/BLI_multi_value_map_test.cc new file mode 100644 index 00000000000..7501fbe0d87 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_multi_value_map_test.cc @@ -0,0 +1,109 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_multi_value_map.hh" +#include "BLI_vector.hh" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(multi_value_map, LookupNotExistant) +{ + MultiValueMap<int, int> map; + EXPECT_EQ(map.lookup(5).size(), 0); + map.add(2, 5); + EXPECT_EQ(map.lookup(5).size(), 0); +} + +TEST(multi_value_map, LookupExistant) +{ + MultiValueMap<int, int> map; + map.add(2, 4); + map.add(2, 5); + map.add(3, 6); + + EXPECT_EQ(map.lookup(2).size(), 2); + 
EXPECT_EQ(map.lookup(2)[0], 4); + EXPECT_EQ(map.lookup(2)[1], 5); + + EXPECT_EQ(map.lookup(3).size(), 1); + EXPECT_EQ(map.lookup(3)[0], 6); +} + +TEST(multi_value_map, AddMultiple) +{ + MultiValueMap<int, int> map; + map.add_multiple(2, {4, 5, 6}); + map.add_multiple(2, {1, 2}); + map.add_multiple(5, {7, 5, 3}); + + EXPECT_EQ(map.lookup(2).size(), 5); + EXPECT_EQ(map.lookup(2)[0], 4); + EXPECT_EQ(map.lookup(2)[1], 5); + EXPECT_EQ(map.lookup(2)[2], 6); + EXPECT_EQ(map.lookup(2)[3], 1); + EXPECT_EQ(map.lookup(2)[4], 2); + + EXPECT_EQ(map.lookup(5).size(), 3); + EXPECT_EQ(map.lookup(5)[0], 7); + EXPECT_EQ(map.lookup(5)[1], 5); + EXPECT_EQ(map.lookup(5)[2], 3); +} + +TEST(multi_value_map, Keys) +{ + MultiValueMap<int, int> map; + map.add(5, 7); + map.add(5, 7); + map.add_multiple(2, {6, 7, 8}); + + Vector<int> keys; + for (int key : map.keys()) { + keys.append(key); + } + + EXPECT_EQ(keys.size(), 2); + EXPECT_TRUE(keys.contains(5)); + EXPECT_TRUE(keys.contains(2)); +} + +TEST(multi_value_map, Values) +{ + MultiValueMap<int, int> map; + map.add(3, 5); + map.add_multiple(3, {1, 2}); + map.add(6, 1); + + Vector<Span<int>> values; + for (Span<int> value_span : map.values()) { + values.append(value_span); + } + + EXPECT_EQ(values.size(), 2); +} + +TEST(multi_value_map, Items) +{ + MultiValueMap<int, int> map; + map.add_multiple(4, {1, 2, 3}); + + for (auto &&item : map.items()) { + int key = item.key; + Span<int> values = item.value; + EXPECT_EQ(key, 4); + EXPECT_EQ(values.size(), 3); + EXPECT_EQ(values[0], 1); + EXPECT_EQ(values[1], 2); + EXPECT_EQ(values[2], 3); + } +} + +TEST(multi_value_map, UniquePtr) +{ + /* Mostly testing if it compiles here. 
*/ + MultiValueMap<std::unique_ptr<int>, std::unique_ptr<int>> map; + map.add(std::make_unique<int>(4), std::make_unique<int>(6)); + map.add(std::make_unique<int>(4), std::make_unique<int>(7)); + EXPECT_EQ(map.lookup(std::make_unique<int>(10)).size(), 0); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_set_test.cc b/source/blender/blenlib/tests/BLI_set_test.cc new file mode 100644 index 00000000000..7bd0b258df8 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_set_test.cc @@ -0,0 +1,565 @@ +/* Apache License, Version 2.0 */ + +#include <set> +#include <unordered_set> + +#include "BLI_ghash.h" +#include "BLI_rand.h" +#include "BLI_set.hh" +#include "BLI_strict_flags.h" +#include "BLI_timeit.hh" +#include "BLI_vector.hh" +#include "testing/testing.h" + +namespace blender { +namespace tests { + +TEST(set, DefaultConstructor) +{ + Set<int> set; + EXPECT_EQ(set.size(), 0); + EXPECT_TRUE(set.is_empty()); +} + +TEST(set, ContainsNotExistant) +{ + Set<int> set; + EXPECT_FALSE(set.contains(3)); +} + +TEST(set, ContainsExistant) +{ + Set<int> set; + EXPECT_FALSE(set.contains(5)); + EXPECT_TRUE(set.is_empty()); + set.add(5); + EXPECT_TRUE(set.contains(5)); + EXPECT_FALSE(set.is_empty()); +} + +TEST(set, AddMany) +{ + Set<int> set; + for (int i = 0; i < 100; i++) { + set.add(i); + } + + for (int i = 50; i < 100; i++) { + EXPECT_TRUE(set.contains(i)); + } + for (int i = 100; i < 150; i++) { + EXPECT_FALSE(set.contains(i)); + } +} + +TEST(set, InitializerListConstructor) +{ + Set<int> set = {4, 5, 6}; + EXPECT_EQ(set.size(), 3); + EXPECT_TRUE(set.contains(4)); + EXPECT_TRUE(set.contains(5)); + EXPECT_TRUE(set.contains(6)); + EXPECT_FALSE(set.contains(2)); + EXPECT_FALSE(set.contains(3)); +} + +TEST(set, CopyConstructor) +{ + Set<int> set = {3}; + EXPECT_TRUE(set.contains(3)); + EXPECT_FALSE(set.contains(4)); + + Set<int> set2(set); + set2.add(4); + EXPECT_TRUE(set2.contains(3)); + EXPECT_TRUE(set2.contains(4)); + + 
EXPECT_FALSE(set.contains(4)); +} + +TEST(set, MoveConstructor) +{ + Set<int> set = {1, 2, 3}; + EXPECT_EQ(set.size(), 3); + Set<int> set2(std::move(set)); + EXPECT_EQ(set.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(set2.size(), 3); +} + +TEST(set, CopyAssignment) +{ + Set<int> set = {3}; + EXPECT_TRUE(set.contains(3)); + EXPECT_FALSE(set.contains(4)); + + Set<int> set2; + set2 = set; + set2.add(4); + EXPECT_TRUE(set2.contains(3)); + EXPECT_TRUE(set2.contains(4)); + + EXPECT_FALSE(set.contains(4)); +} + +TEST(set, MoveAssignment) +{ + Set<int> set = {1, 2, 3}; + EXPECT_EQ(set.size(), 3); + Set<int> set2; + set2 = std::move(set); + EXPECT_EQ(set.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(set2.size(), 3); +} + +TEST(set, RemoveContained) +{ + Set<int> set = {3, 4, 5}; + EXPECT_TRUE(set.contains(3)); + EXPECT_TRUE(set.contains(4)); + EXPECT_TRUE(set.contains(5)); + set.remove_contained(4); + EXPECT_TRUE(set.contains(3)); + EXPECT_FALSE(set.contains(4)); + EXPECT_TRUE(set.contains(5)); + set.remove_contained(3); + EXPECT_FALSE(set.contains(3)); + EXPECT_FALSE(set.contains(4)); + EXPECT_TRUE(set.contains(5)); + set.remove_contained(5); + EXPECT_FALSE(set.contains(3)); + EXPECT_FALSE(set.contains(4)); + EXPECT_FALSE(set.contains(5)); +} + +TEST(set, RemoveContainedMany) +{ + Set<int> set; + for (int i = 0; i < 1000; i++) { + set.add(i); + } + for (int i = 100; i < 1000; i++) { + set.remove_contained(i); + } + for (int i = 900; i < 1000; i++) { + set.add(i); + } + + for (int i = 0; i < 1000; i++) { + if (i < 100 || i >= 900) { + EXPECT_TRUE(set.contains(i)); + } + else { + EXPECT_FALSE(set.contains(i)); + } + } +} + +TEST(set, Intersects) +{ + Set<int> a = {3, 4, 5, 6}; + Set<int> b = {1, 2, 5}; + EXPECT_TRUE(Set<int>::Intersects(a, b)); + EXPECT_FALSE(Set<int>::Disjoint(a, b)); +} + +TEST(set, Disjoint) +{ + Set<int> a = {5, 6, 7, 8}; + Set<int> b = {2, 3, 4, 9}; + EXPECT_FALSE(Set<int>::Intersects(a, b)); + 
EXPECT_TRUE(Set<int>::Disjoint(a, b)); +} + +TEST(set, AddMultiple) +{ + Set<int> a; + a.add_multiple({5, 7}); + EXPECT_TRUE(a.contains(5)); + EXPECT_TRUE(a.contains(7)); + EXPECT_FALSE(a.contains(4)); + a.add_multiple({2, 4, 7}); + EXPECT_TRUE(a.contains(4)); + EXPECT_TRUE(a.contains(2)); + EXPECT_EQ(a.size(), 4); +} + +TEST(set, AddMultipleNew) +{ + Set<int> a; + a.add_multiple_new({5, 6}); + EXPECT_TRUE(a.contains(5)); + EXPECT_TRUE(a.contains(6)); +} + +TEST(set, Iterator) +{ + Set<int> set = {1, 3, 2, 5, 4}; + blender::Vector<int> vec; + for (int value : set) { + vec.append(value); + } + EXPECT_EQ(vec.size(), 5); + EXPECT_TRUE(vec.contains(1)); + EXPECT_TRUE(vec.contains(3)); + EXPECT_TRUE(vec.contains(2)); + EXPECT_TRUE(vec.contains(5)); + EXPECT_TRUE(vec.contains(4)); +} + +TEST(set, OftenAddRemoveContained) +{ + Set<int> set; + for (int i = 0; i < 100; i++) { + set.add(42); + EXPECT_EQ(set.size(), 1); + set.remove_contained(42); + EXPECT_EQ(set.size(), 0); + } +} + +TEST(set, UniquePtrValues) +{ + Set<std::unique_ptr<int>> set; + set.add_new(std::unique_ptr<int>(new int())); + auto value1 = std::unique_ptr<int>(new int()); + set.add_new(std::move(value1)); + set.add(std::unique_ptr<int>(new int())); + + EXPECT_EQ(set.size(), 3); +} + +TEST(set, Clear) +{ + Set<int> set = {3, 4, 6, 7}; + EXPECT_EQ(set.size(), 4); + set.clear(); + EXPECT_EQ(set.size(), 0); +} + +TEST(set, StringSet) +{ + Set<std::string> set; + set.add("hello"); + set.add("world"); + EXPECT_EQ(set.size(), 2); + EXPECT_TRUE(set.contains("hello")); + EXPECT_TRUE(set.contains("world")); + EXPECT_FALSE(set.contains("world2")); +} + +TEST(set, PointerSet) +{ + int a, b, c; + Set<int *> set; + set.add(&a); + set.add(&b); + EXPECT_EQ(set.size(), 2); + EXPECT_TRUE(set.contains(&a)); + EXPECT_TRUE(set.contains(&b)); + EXPECT_FALSE(set.contains(&c)); +} + +TEST(set, Remove) +{ + Set<int> set = {1, 2, 3, 4, 5, 6}; + EXPECT_EQ(set.size(), 6); + EXPECT_TRUE(set.remove(2)); + EXPECT_EQ(set.size(), 5); + 
EXPECT_FALSE(set.contains(2)); + EXPECT_FALSE(set.remove(2)); + EXPECT_EQ(set.size(), 5); + EXPECT_TRUE(set.remove(5)); + EXPECT_EQ(set.size(), 4); +} + +struct Type1 { + uint32_t value; +}; + +struct Type2 { + uint32_t value; +}; + +static bool operator==(const Type1 &a, const Type1 &b) +{ + return a.value == b.value; +} +static bool operator==(const Type2 &a, const Type1 &b) +{ + return a.value == b.value; +} + +} // namespace tests + +/* This has to be defined in ::blender namespace. */ +template<> struct DefaultHash<tests::Type1> { + uint32_t operator()(const tests::Type1 &value) const + { + return value.value; + } + + uint32_t operator()(const tests::Type2 &value) const + { + return value.value; + } +}; + +namespace tests { + +TEST(set, ContainsAs) +{ + Set<Type1> set; + set.add(Type1{5}); + EXPECT_TRUE(set.contains_as(Type1{5})); + EXPECT_TRUE(set.contains_as(Type2{5})); + EXPECT_FALSE(set.contains_as(Type1{6})); + EXPECT_FALSE(set.contains_as(Type2{6})); +} + +TEST(set, ContainsAsString) +{ + Set<std::string> set; + set.add("test"); + EXPECT_TRUE(set.contains_as("test")); + EXPECT_TRUE(set.contains_as(StringRef("test"))); + EXPECT_FALSE(set.contains_as("string")); + EXPECT_FALSE(set.contains_as(StringRef("string"))); +} + +TEST(set, RemoveContainedAs) +{ + Set<Type1> set; + set.add(Type1{5}); + EXPECT_TRUE(set.contains_as(Type2{5})); + set.remove_contained_as(Type2{5}); + EXPECT_FALSE(set.contains_as(Type2{5})); +} + +TEST(set, RemoveAs) +{ + Set<Type1> set; + set.add(Type1{5}); + EXPECT_TRUE(set.contains_as(Type2{5})); + set.remove_as(Type2{6}); + EXPECT_TRUE(set.contains_as(Type2{5})); + set.remove_as(Type2{5}); + EXPECT_FALSE(set.contains_as(Type2{5})); + set.remove_as(Type2{5}); + EXPECT_FALSE(set.contains_as(Type2{5})); +} + +TEST(set, AddAs) +{ + Set<std::string> set; + EXPECT_TRUE(set.add_as("test")); + EXPECT_TRUE(set.add_as(StringRef("qwe"))); + EXPECT_FALSE(set.add_as(StringRef("test"))); + EXPECT_FALSE(set.add_as("qwe")); +} + +template<uint N> 
struct EqualityIntModN { + bool operator()(uint a, uint b) const + { + return (a % N) == (b % N); + } +}; + +template<uint N> struct HashIntModN { + uint64_t operator()(uint value) const + { + return value % N; + } +}; + +TEST(set, CustomizeHashAndEquality) +{ + Set<uint, 0, DefaultProbingStrategy, HashIntModN<10>, EqualityIntModN<10>> set; + set.add(4); + EXPECT_TRUE(set.contains(4)); + EXPECT_TRUE(set.contains(14)); + EXPECT_TRUE(set.contains(104)); + EXPECT_FALSE(set.contains(5)); + set.add(55); + EXPECT_TRUE(set.contains(5)); + EXPECT_TRUE(set.contains(14)); + set.remove(1004); + EXPECT_FALSE(set.contains(14)); +} + +TEST(set, IntrusiveIntKey) +{ + Set<int, + 2, + DefaultProbingStrategy, + DefaultHash<int>, + DefaultEquality, + IntegerSetSlot<int, 100, 200>> + set; + EXPECT_TRUE(set.add(4)); + EXPECT_TRUE(set.add(3)); + EXPECT_TRUE(set.add(11)); + EXPECT_TRUE(set.add(8)); + EXPECT_FALSE(set.add(3)); + EXPECT_FALSE(set.add(4)); + EXPECT_TRUE(set.remove(4)); + EXPECT_FALSE(set.remove(7)); + EXPECT_TRUE(set.add(4)); + EXPECT_TRUE(set.remove(4)); +} + +struct MyKeyType { + uint32_t key; + int32_t attached_data; + + uint64_t hash() const + { + return key; + } + + friend bool operator==(const MyKeyType &a, const MyKeyType &b) + { + return a.key == b.key; + } +}; + +TEST(set, LookupKey) +{ + Set<MyKeyType> set; + set.add({1, 10}); + set.add({2, 20}); + EXPECT_EQ(set.lookup_key({1, 30}).attached_data, 10); + EXPECT_EQ(set.lookup_key({2, 0}).attached_data, 20); +} + +TEST(set, LookupKeyDefault) +{ + Set<MyKeyType> set; + set.add({1, 10}); + set.add({2, 20}); + + MyKeyType fallback{5, 50}; + EXPECT_EQ(set.lookup_key_default({1, 66}, fallback).attached_data, 10); + EXPECT_EQ(set.lookup_key_default({4, 40}, fallback).attached_data, 50); +} + +TEST(set, LookupKeyPtr) +{ + Set<MyKeyType> set; + set.add({1, 10}); + set.add({2, 20}); + EXPECT_EQ(set.lookup_key_ptr({1, 50})->attached_data, 10); + EXPECT_EQ(set.lookup_key_ptr({2, 50})->attached_data, 20); + 
EXPECT_EQ(set.lookup_key_ptr({3, 50}), nullptr); +} + +/** + * Set this to 1 to activate the benchmark. It is disabled by default, because it prints a lot. + */ +#if 0 +template<typename SetT> +BLI_NOINLINE void benchmark_random_ints(StringRef name, int amount, int factor) +{ + RNG *rng = BLI_rng_new(0); + Vector<int> values; + for (int i = 0; i < amount; i++) { + values.append(BLI_rng_get_int(rng) * factor); + } + BLI_rng_free(rng); + + SetT set; + { + SCOPED_TIMER(name + " Add"); + for (int value : values) { + set.add(value); + } + } + int count = 0; + { + SCOPED_TIMER(name + " Contains"); + for (int value : values) { + count += set.contains(value); + } + } + { + SCOPED_TIMER(name + " Remove"); + for (int value : values) { + count += set.remove(value); + } + } + + /* Print the value for simple error checking and to avoid some compiler optimizations. */ + std::cout << "Count: " << count << "\n"; +} + +TEST(set, Benchmark) +{ + for (int i = 0; i < 3; i++) { + benchmark_random_ints<blender::Set<int>>("blender::Set ", 100000, 1); + benchmark_random_ints<blender::StdUnorderedSetWrapper<int>>("std::unordered_set", 100000, 1); + } + std::cout << "\n"; + for (int i = 0; i < 3; i++) { + uint32_t factor = (3 << 10); + benchmark_random_ints<blender::Set<int>>("blender::Set ", 100000, factor); + benchmark_random_ints<blender::StdUnorderedSetWrapper<int>>("std::unordered_set", 100000, factor); + } +} + +/** + * Output of the rudimentary benchmark above on my hardware. 
+ * + * Timer 'blender::Set Add' took 5.5573 ms + * Timer 'blender::Set Contains' took 0.807384 ms + * Timer 'blender::Set Remove' took 0.953436 ms + * Count: 199998 + * Timer 'std::unordered_set Add' took 12.551 ms + * Timer 'std::unordered_set Contains' took 2.3323 ms + * Timer 'std::unordered_set Remove' took 5.07082 ms + * Count: 199998 + * Timer 'blender::Set Add' took 2.62526 ms + * Timer 'blender::Set Contains' took 0.407499 ms + * Timer 'blender::Set Remove' took 0.472981 ms + * Count: 199998 + * Timer 'std::unordered_set Add' took 6.26945 ms + * Timer 'std::unordered_set Contains' took 1.17236 ms + * Timer 'std::unordered_set Remove' took 3.77402 ms + * Count: 199998 + * Timer 'blender::Set Add' took 2.59152 ms + * Timer 'blender::Set Contains' took 0.415254 ms + * Timer 'blender::Set Remove' took 0.477559 ms + * Count: 199998 + * Timer 'std::unordered_set Add' took 6.28129 ms + * Timer 'std::unordered_set Contains' took 1.17562 ms + * Timer 'std::unordered_set Remove' took 3.77811 ms + * Count: 199998 + * + * Timer 'blender::Set Add' took 3.16514 ms + * Timer 'blender::Set Contains' took 0.732895 ms + * Timer 'blender::Set Remove' took 1.08171 ms + * Count: 198790 + * Timer 'std::unordered_set Add' took 6.57377 ms + * Timer 'std::unordered_set Contains' took 1.17008 ms + * Timer 'std::unordered_set Remove' took 3.7946 ms + * Count: 198790 + * Timer 'blender::Set Add' took 3.11439 ms + * Timer 'blender::Set Contains' took 0.740159 ms + * Timer 'blender::Set Remove' took 1.06749 ms + * Count: 198790 + * Timer 'std::unordered_set Add' took 6.35597 ms + * Timer 'std::unordered_set Contains' took 1.17713 ms + * Timer 'std::unordered_set Remove' took 3.77826 ms + * Count: 198790 + * Timer 'blender::Set Add' took 3.09876 ms + * Timer 'blender::Set Contains' took 0.742072 ms + * Timer 'blender::Set Remove' took 1.06622 ms + * Count: 198790 + * Timer 'std::unordered_set Add' took 6.4469 ms + * Timer 'std::unordered_set Contains' took 1.16515 ms + * Timer 
'std::unordered_set Remove' took 3.80639 ms + * Count: 198790 + */ + +#endif /* Benchmark */ + +} // namespace tests +} // namespace blender diff --git a/source/blender/blenlib/tests/BLI_span_test.cc b/source/blender/blenlib/tests/BLI_span_test.cc new file mode 100644 index 00000000000..587497624f4 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_span_test.cc @@ -0,0 +1,311 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_span.hh" +#include "BLI_strict_flags.h" +#include "BLI_vector.hh" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(span, FromSmallVector) +{ + Vector<int> a = {1, 2, 3}; + Span<int> a_span = a; + EXPECT_EQ(a_span.size(), 3); + EXPECT_EQ(a_span[0], 1); + EXPECT_EQ(a_span[1], 2); + EXPECT_EQ(a_span[2], 3); +} + +TEST(span, AddConstToPointer) +{ + int a = 0; + std::vector<int *> vec = {&a}; + Span<int *> span = vec; + Span<const int *> const_span = span; + EXPECT_EQ(const_span.size(), 1); +} + +TEST(span, IsReferencing) +{ + int array[] = {3, 5, 8}; + MutableSpan<int> span(array, ARRAY_SIZE(array)); + EXPECT_EQ(span.size(), 3); + EXPECT_EQ(span[1], 5); + array[1] = 10; + EXPECT_EQ(span[1], 10); +} + +TEST(span, DropBack) +{ + Vector<int> a = {4, 5, 6, 7}; + auto slice = Span<int>(a).drop_back(2); + EXPECT_EQ(slice.size(), 2); + EXPECT_EQ(slice[0], 4); + EXPECT_EQ(slice[1], 5); +} + +TEST(span, DropBackAll) +{ + Vector<int> a = {4, 5, 6, 7}; + auto slice = Span<int>(a).drop_back(a.size()); + EXPECT_EQ(slice.size(), 0); +} + +TEST(span, DropFront) +{ + Vector<int> a = {4, 5, 6, 7}; + auto slice = Span<int>(a).drop_front(1); + EXPECT_EQ(slice.size(), 3); + EXPECT_EQ(slice[0], 5); + EXPECT_EQ(slice[1], 6); + EXPECT_EQ(slice[2], 7); +} + +TEST(span, DropFrontAll) +{ + Vector<int> a = {4, 5, 6, 7}; + auto slice = Span<int>(a).drop_front(a.size()); + EXPECT_EQ(slice.size(), 0); +} + +TEST(span, TakeFront) +{ + Vector<int> a = {4, 5, 6, 7}; + auto slice = Span<int>(a).take_front(2); + EXPECT_EQ(slice.size(), 2); + 
EXPECT_EQ(slice[0], 4); + EXPECT_EQ(slice[1], 5); +} + +TEST(span, TakeBack) +{ + Vector<int> a = {5, 6, 7, 8}; + auto slice = Span<int>(a).take_back(2); + EXPECT_EQ(slice.size(), 2); + EXPECT_EQ(slice[0], 7); + EXPECT_EQ(slice[1], 8); +} + +TEST(span, Slice) +{ + Vector<int> a = {4, 5, 6, 7}; + auto slice = Span<int>(a).slice(1, 2); + EXPECT_EQ(slice.size(), 2); + EXPECT_EQ(slice[0], 5); + EXPECT_EQ(slice[1], 6); +} + +TEST(span, SliceEmpty) +{ + Vector<int> a = {4, 5, 6, 7}; + auto slice = Span<int>(a).slice(2, 0); + EXPECT_EQ(slice.size(), 0); +} + +TEST(span, SliceRange) +{ + Vector<int> a = {1, 2, 3, 4, 5}; + auto slice = Span<int>(a).slice(IndexRange(2, 2)); + EXPECT_EQ(slice.size(), 2); + EXPECT_EQ(slice[0], 3); + EXPECT_EQ(slice[1], 4); +} + +TEST(span, Contains) +{ + Vector<int> a = {4, 5, 6, 7}; + Span<int> a_span = a; + EXPECT_TRUE(a_span.contains(4)); + EXPECT_TRUE(a_span.contains(5)); + EXPECT_TRUE(a_span.contains(6)); + EXPECT_TRUE(a_span.contains(7)); + EXPECT_FALSE(a_span.contains(3)); + EXPECT_FALSE(a_span.contains(8)); +} + +TEST(span, Count) +{ + Vector<int> a = {2, 3, 4, 3, 3, 2, 2, 2, 2}; + Span<int> a_span = a; + EXPECT_EQ(a_span.count(1), 0); + EXPECT_EQ(a_span.count(2), 5); + EXPECT_EQ(a_span.count(3), 3); + EXPECT_EQ(a_span.count(4), 1); + EXPECT_EQ(a_span.count(5), 0); +} + +static void test_ref_from_initializer_list(Span<int> span) +{ + EXPECT_EQ(span.size(), 4); + EXPECT_EQ(span[0], 3); + EXPECT_EQ(span[1], 6); + EXPECT_EQ(span[2], 8); + EXPECT_EQ(span[3], 9); +} + +TEST(span, FromInitializerList) +{ + test_ref_from_initializer_list({3, 6, 8, 9}); +} + +TEST(span, FromVector) +{ + std::vector<int> a = {1, 2, 3, 4}; + Span<int> a_span(a); + EXPECT_EQ(a_span.size(), 4); + EXPECT_EQ(a_span[0], 1); + EXPECT_EQ(a_span[1], 2); + EXPECT_EQ(a_span[2], 3); + EXPECT_EQ(a_span[3], 4); +} + +TEST(span, FromArray) +{ + std::array<int, 2> a = {5, 6}; + Span<int> a_span(a); + EXPECT_EQ(a_span.size(), 2); + EXPECT_EQ(a_span[0], 5); + 
EXPECT_EQ(a_span[1], 6); +} + +TEST(span, Fill) +{ + std::array<int, 5> a = {4, 5, 6, 7, 8}; + MutableSpan<int> a_span(a); + a_span.fill(1); + EXPECT_EQ(a[0], 1); + EXPECT_EQ(a[1], 1); + EXPECT_EQ(a[2], 1); + EXPECT_EQ(a[3], 1); + EXPECT_EQ(a[4], 1); +} + +TEST(span, FillIndices) +{ + std::array<int, 5> a = {0, 0, 0, 0, 0}; + MutableSpan<int> a_span(a); + a_span.fill_indices({0, 2, 3}, 1); + EXPECT_EQ(a[0], 1); + EXPECT_EQ(a[1], 0); + EXPECT_EQ(a[2], 1); + EXPECT_EQ(a[3], 1); + EXPECT_EQ(a[4], 0); +} + +TEST(span, SizeInBytes) +{ + std::array<int, 10> a; + Span<int> a_span(a); + EXPECT_EQ(a_span.size_in_bytes(), (int64_t)sizeof(a)); + EXPECT_EQ(a_span.size_in_bytes(), 40); +} + +TEST(span, FirstLast) +{ + std::array<int, 4> a = {6, 7, 8, 9}; + Span<int> a_span(a); + EXPECT_EQ(a_span.first(), 6); + EXPECT_EQ(a_span.last(), 9); +} + +TEST(span, FirstLast_OneElement) +{ + int a = 3; + Span<int> a_span(&a, 1); + EXPECT_EQ(a_span.first(), 3); + EXPECT_EQ(a_span.last(), 3); +} + +TEST(span, Get) +{ + std::array<int, 3> a = {5, 6, 7}; + Span<int> a_span(a); + EXPECT_EQ(a_span.get(0, 42), 5); + EXPECT_EQ(a_span.get(1, 42), 6); + EXPECT_EQ(a_span.get(2, 42), 7); + EXPECT_EQ(a_span.get(3, 42), 42); + EXPECT_EQ(a_span.get(4, 42), 42); +} + +TEST(span, ContainsPtr) +{ + std::array<int, 3> a = {5, 6, 7}; + int other = 10; + Span<int> a_span(a); + EXPECT_TRUE(a_span.contains_ptr(&a[0] + 0)); + EXPECT_TRUE(a_span.contains_ptr(&a[0] + 1)); + EXPECT_TRUE(a_span.contains_ptr(&a[0] + 2)); + EXPECT_FALSE(a_span.contains_ptr(&a[0] + 3)); + EXPECT_FALSE(a_span.contains_ptr(&a[0] - 1)); + EXPECT_FALSE(a_span.contains_ptr(&other)); +} + +TEST(span, FirstIndex) +{ + std::array<int, 5> a = {4, 5, 4, 2, 5}; + Span<int> a_span(a); + + EXPECT_EQ(a_span.first_index(4), 0); + EXPECT_EQ(a_span.first_index(5), 1); + EXPECT_EQ(a_span.first_index(2), 3); +} + +TEST(span, CastSameSize) +{ + int value = 0; + std::array<int *, 4> a = {&value, nullptr, nullptr, nullptr}; + Span<int *> a_span = a; + 
Span<float *> new_a_span = a_span.cast<float *>(); + + EXPECT_EQ(a_span.size(), 4); + EXPECT_EQ(new_a_span.size(), 4); + + EXPECT_EQ(a_span[0], &value); + EXPECT_EQ(new_a_span[0], (float *)&value); +} + +TEST(span, CastSmallerSize) +{ + std::array<uint32_t, 4> a = {3, 4, 5, 6}; + Span<uint32_t> a_span = a; + Span<uint16_t> new_a_span = a_span.cast<uint16_t>(); + + EXPECT_EQ(a_span.size(), 4); + EXPECT_EQ(new_a_span.size(), 8); +} + +TEST(span, CastLargerSize) +{ + std::array<uint16_t, 4> a = {4, 5, 6, 7}; + Span<uint16_t> a_span = a; + Span<uint32_t> new_a_span = a_span.cast<uint32_t>(); + + EXPECT_EQ(a_span.size(), 4); + EXPECT_EQ(new_a_span.size(), 2); +} + +TEST(span, VoidPointerSpan) +{ + int a; + float b; + double c; + + auto func1 = [](Span<void *> span) { EXPECT_EQ(span.size(), 3); }; + func1({&a, &b, &c}); +} + +TEST(span, CopyFrom) +{ + std::array<int, 4> src = {5, 6, 7, 8}; + std::array<int, 4> dst = {1, 2, 3, 4}; + + EXPECT_EQ(dst[2], 3); + MutableSpan(dst).copy_from(src); + EXPECT_EQ(dst[0], 5); + EXPECT_EQ(dst[1], 6); + EXPECT_EQ(dst[2], 7); + EXPECT_EQ(dst[3], 8); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_stack_cxx_test.cc b/source/blender/blenlib/tests/BLI_stack_cxx_test.cc new file mode 100644 index 00000000000..3572e751b88 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_stack_cxx_test.cc @@ -0,0 +1,188 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_stack.hh" +#include "BLI_strict_flags.h" +#include "BLI_vector.hh" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(stack, DefaultConstructor) +{ + Stack<int> stack; + EXPECT_EQ(stack.size(), 0); + EXPECT_TRUE(stack.is_empty()); +} + +TEST(stack, SpanConstructor) +{ + std::array<int, 3> array = {4, 7, 2}; + Stack<int> stack(array); + EXPECT_EQ(stack.size(), 3); + EXPECT_EQ(stack.pop(), 2); + EXPECT_EQ(stack.pop(), 7); + EXPECT_EQ(stack.pop(), 4); + EXPECT_TRUE(stack.is_empty()); +} + +TEST(stack, CopyConstructor) +{ + Stack<int> 
stack1 = {1, 2, 3, 4, 5, 6, 7}; + Stack<int> stack2 = stack1; + EXPECT_EQ(stack1.size(), 7); + EXPECT_EQ(stack2.size(), 7); + for (int i = 7; i >= 1; i--) { + EXPECT_FALSE(stack1.is_empty()); + EXPECT_FALSE(stack2.is_empty()); + EXPECT_EQ(stack1.pop(), i); + EXPECT_EQ(stack2.pop(), i); + } + EXPECT_TRUE(stack1.is_empty()); + EXPECT_TRUE(stack2.is_empty()); +} + +TEST(stack, MoveConstructor) +{ + Stack<int> stack1 = {1, 2, 3, 4, 5, 6, 7}; + Stack<int> stack2 = std::move(stack1); + EXPECT_EQ(stack1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(stack2.size(), 7); + for (int i = 7; i >= 1; i--) { + EXPECT_EQ(stack2.pop(), i); + } +} + +TEST(stack, CopyAssignment) +{ + Stack<int> stack1 = {1, 2, 3, 4, 5, 6, 7}; + Stack<int> stack2 = {2, 3, 4, 5, 6, 7}; + stack2 = stack1; + + EXPECT_EQ(stack1.size(), 7); + EXPECT_EQ(stack2.size(), 7); + for (int i = 7; i >= 1; i--) { + EXPECT_FALSE(stack1.is_empty()); + EXPECT_FALSE(stack2.is_empty()); + EXPECT_EQ(stack1.pop(), i); + EXPECT_EQ(stack2.pop(), i); + } + EXPECT_TRUE(stack1.is_empty()); + EXPECT_TRUE(stack2.is_empty()); +} + +TEST(stack, MoveAssignment) +{ + Stack<int> stack1 = {1, 2, 3, 4, 5, 6, 7}; + Stack<int> stack2 = {5, 3, 7, 2, 2}; + stack2 = std::move(stack1); + EXPECT_EQ(stack1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(stack2.size(), 7); + for (int i = 7; i >= 1; i--) { + EXPECT_EQ(stack2.pop(), i); + } +} + +TEST(stack, Push) +{ + Stack<int> stack; + EXPECT_EQ(stack.size(), 0); + stack.push(3); + EXPECT_EQ(stack.size(), 1); + stack.push(5); + EXPECT_EQ(stack.size(), 2); +} + +TEST(stack, PushMultiple) +{ + Stack<int> stack; + EXPECT_EQ(stack.size(), 0); + stack.push_multiple({1, 2, 3}); + EXPECT_EQ(stack.size(), 3); + EXPECT_EQ(stack.pop(), 3); + EXPECT_EQ(stack.pop(), 2); + EXPECT_EQ(stack.pop(), 1); +} + +TEST(stack, PushPopMany) +{ + Stack<int> stack; + for (int i = 0; i < 1000; i++) { + stack.push(i); + EXPECT_EQ(stack.size(), static_cast<unsigned int>(i + 1)); + } + for (int 
i = 999; i > 50; i--) { + EXPECT_EQ(stack.pop(), i); + EXPECT_EQ(stack.size(), static_cast<unsigned int>(i)); + } + for (int i = 51; i < 5000; i++) { + stack.push(i); + EXPECT_EQ(stack.size(), static_cast<unsigned int>(i + 1)); + } + for (int i = 4999; i >= 0; i--) { + EXPECT_EQ(stack.pop(), i); + EXPECT_EQ(stack.size(), static_cast<unsigned int>(i)); + } +} + +TEST(stack, PushMultipleAfterPop) +{ + Stack<int> stack; + for (int i = 0; i < 1000; i++) { + stack.push(i); + } + for (int i = 999; i >= 0; i--) { + EXPECT_EQ(stack.pop(), i); + } + + Vector<int> values; + for (int i = 0; i < 5000; i++) { + values.append(i); + } + stack.push_multiple(values); + EXPECT_EQ(stack.size(), 5000); + + for (int i = 4999; i >= 0; i--) { + EXPECT_EQ(stack.pop(), i); + } +} + +TEST(stack, Pop) +{ + Stack<int> stack; + stack.push(4); + stack.push(6); + EXPECT_EQ(stack.pop(), 6); + EXPECT_EQ(stack.pop(), 4); +} + +TEST(stack, Peek) +{ + Stack<int> stack; + stack.push(3); + stack.push(4); + EXPECT_EQ(stack.peek(), 4); + EXPECT_EQ(stack.peek(), 4); + stack.pop(); + EXPECT_EQ(stack.peek(), 3); +} + +TEST(stack, UniquePtrValues) +{ + Stack<std::unique_ptr<int>> stack; + stack.push(std::unique_ptr<int>(new int())); + stack.push(std::unique_ptr<int>(new int())); + std::unique_ptr<int> a = stack.pop(); + std::unique_ptr<int> &b = stack.peek(); + UNUSED_VARS(a, b); +} + +TEST(stack, OveralignedValues) +{ + Stack<AlignedBuffer<1, 512>, 2> stack; + for (int i = 0; i < 100; i++) { + stack.push({}); + EXPECT_EQ((uintptr_t)&stack.peek() % 512, 0); + } +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_string_ref_test.cc b/source/blender/blenlib/tests/BLI_string_ref_test.cc new file mode 100644 index 00000000000..d08c8a77455 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_string_ref_test.cc @@ -0,0 +1,277 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_strict_flags.h" +#include "BLI_string_ref.hh" +#include "BLI_vector.hh" +#include "testing/testing.h" + 
+namespace blender::tests { + +TEST(string_ref_null, DefaultConstructor) +{ + StringRefNull ref; + EXPECT_EQ(ref.size(), 0); + EXPECT_EQ(ref[0], '\0'); +} + +TEST(string_ref_null, CStringConstructor) +{ + const char *str = "Hello"; + StringRefNull ref(str); + EXPECT_EQ(ref.size(), 5); + EXPECT_EQ(ref.data(), str); +} + +TEST(string_ref_null, CStringLengthConstructor) +{ + const char *str = "Hello"; + StringRefNull ref(str, 5); + EXPECT_EQ(ref.size(), 5); + EXPECT_EQ(ref.data(), str); +} + +TEST(string_ref, DefaultConstructor) +{ + StringRef ref; + EXPECT_EQ(ref.size(), 0); +} + +TEST(string_ref, StartEndConstructor) +{ + const char *text = "hello world"; + StringRef ref(text, text + 5); + EXPECT_EQ(ref.size(), 5); + EXPECT_TRUE(ref == "hello"); + EXPECT_FALSE(ref == "hello "); +} + +TEST(string_ref, StartEndConstructorNullptr) +{ + StringRef ref(nullptr, nullptr); + EXPECT_EQ(ref.size(), 0); + EXPECT_TRUE(ref == ""); +} + +TEST(string_ref, StartEndConstructorSame) +{ + const char *text = "hello world"; + StringRef ref(text, text); + EXPECT_EQ(ref.size(), 0); + EXPECT_TRUE(ref == ""); +} + +TEST(string_ref, CStringConstructor) +{ + const char *str = "Test"; + StringRef ref(str); + EXPECT_EQ(ref.size(), 4); + EXPECT_EQ(ref.data(), str); +} + +TEST(string_ref, PointerWithLengthConstructor) +{ + const char *str = "Test"; + StringRef ref(str, 2); + EXPECT_EQ(ref.size(), 2); + EXPECT_EQ(ref.data(), str); +} + +TEST(string_ref, StdStringConstructor) +{ + std::string str = "Test"; + StringRef ref(str); + EXPECT_EQ(ref.size(), 4); + EXPECT_EQ(ref.data(), str.data()); +} + +TEST(string_ref, SubscriptOperator) +{ + StringRef ref("hello"); + EXPECT_EQ(ref.size(), 5); + EXPECT_EQ(ref[0], 'h'); + EXPECT_EQ(ref[1], 'e'); + EXPECT_EQ(ref[2], 'l'); + EXPECT_EQ(ref[3], 'l'); + EXPECT_EQ(ref[4], 'o'); +} + +TEST(string_ref, ToStdString) +{ + StringRef ref("test"); + std::string str = ref; + EXPECT_EQ(str.size(), 4); + EXPECT_EQ(str, "test"); +} + +TEST(string_ref, Print) +{ + 
StringRef ref("test"); + std::stringstream ss; + ss << ref; + ss << ref; + std::string str = ss.str(); + EXPECT_EQ(str.size(), 8); + EXPECT_EQ(str, "testtest"); +} + +TEST(string_ref, Add) +{ + StringRef a("qwe"); + StringRef b("asd"); + std::string result = a + b; + EXPECT_EQ(result, "qweasd"); +} + +TEST(string_ref, AddCharPtr1) +{ + StringRef ref("test"); + std::string result = ref + "qwe"; + EXPECT_EQ(result, "testqwe"); +} + +TEST(string_ref, AddCharPtr2) +{ + StringRef ref("test"); + std::string result = "qwe" + ref; + EXPECT_EQ(result, "qwetest"); +} + +TEST(string_ref, AddString1) +{ + StringRef ref("test"); + std::string result = ref + std::string("asd"); + EXPECT_EQ(result, "testasd"); +} + +TEST(string_ref, AddString2) +{ + StringRef ref("test"); + std::string result = std::string("asd") + ref; + EXPECT_EQ(result, "asdtest"); +} + +TEST(string_ref, CompareEqual) +{ + StringRef ref1("test"); + StringRef ref2("test"); + StringRef ref3("other"); + EXPECT_TRUE(ref1 == ref2); + EXPECT_FALSE(ref1 == ref3); + EXPECT_TRUE(ref1 != ref3); + EXPECT_FALSE(ref1 != ref2); +} + +TEST(string_ref, CompareEqualCharPtr1) +{ + StringRef ref("test"); + EXPECT_TRUE(ref == "test"); + EXPECT_FALSE(ref == "other"); + EXPECT_TRUE(ref != "other"); + EXPECT_FALSE(ref != "test"); +} + +TEST(string_ref, CompareEqualCharPtr2) +{ + StringRef ref("test"); + EXPECT_TRUE("test" == ref); + EXPECT_FALSE("other" == ref); + EXPECT_TRUE(ref != "other"); + EXPECT_FALSE(ref != "test"); +} + +TEST(string_ref, CompareEqualString1) +{ + StringRef ref("test"); + EXPECT_TRUE(ref == std::string("test")); + EXPECT_FALSE(ref == std::string("other")); + EXPECT_TRUE(ref != std::string("other")); + EXPECT_FALSE(ref != std::string("test")); +} + +TEST(string_ref, CompareEqualString2) +{ + StringRef ref("test"); + EXPECT_TRUE(std::string("test") == ref); + EXPECT_FALSE(std::string("other") == ref); + EXPECT_TRUE(std::string("other") != ref); + EXPECT_FALSE(std::string("test") != ref); +} + +TEST(string_ref, 
Iterate) +{ + StringRef ref("test"); + Vector<char> chars; + for (char c : ref) { + chars.append(c); + } + EXPECT_EQ(chars.size(), 4); + EXPECT_EQ(chars[0], 't'); + EXPECT_EQ(chars[1], 'e'); + EXPECT_EQ(chars[2], 's'); + EXPECT_EQ(chars[3], 't'); +} + +TEST(string_ref, StartsWith) +{ + StringRef ref("test"); + EXPECT_TRUE(ref.startswith("")); + EXPECT_TRUE(ref.startswith("t")); + EXPECT_TRUE(ref.startswith("te")); + EXPECT_TRUE(ref.startswith("tes")); + EXPECT_TRUE(ref.startswith("test")); + EXPECT_FALSE(ref.startswith("test ")); + EXPECT_FALSE(ref.startswith("a")); +} + +TEST(string_ref, EndsWith) +{ + StringRef ref("test"); + EXPECT_TRUE(ref.endswith("")); + EXPECT_TRUE(ref.endswith("t")); + EXPECT_TRUE(ref.endswith("st")); + EXPECT_TRUE(ref.endswith("est")); + EXPECT_TRUE(ref.endswith("test")); + EXPECT_FALSE(ref.endswith(" test")); + EXPECT_FALSE(ref.endswith("a")); +} + +TEST(string_ref, DropPrefixN) +{ + StringRef ref("test"); + StringRef ref2 = ref.drop_prefix(2); + StringRef ref3 = ref2.drop_prefix(2); + EXPECT_EQ(ref2.size(), 2); + EXPECT_EQ(ref3.size(), 0); + EXPECT_EQ(ref2, "st"); + EXPECT_EQ(ref3, ""); +} + +TEST(string_ref, DropPrefix) +{ + StringRef ref("test"); + StringRef ref2 = ref.drop_prefix("tes"); + EXPECT_EQ(ref2.size(), 1); + EXPECT_EQ(ref2, "t"); +} + +TEST(string_ref, Substr) +{ + StringRef ref("hello world"); + EXPECT_EQ(ref.substr(0, 5), "hello"); + EXPECT_EQ(ref.substr(4, 0), ""); + EXPECT_EQ(ref.substr(3, 4), "lo w"); + EXPECT_EQ(ref.substr(6, 5), "world"); +} + +TEST(string_ref, Copy) +{ + StringRef ref("hello"); + char dst[10]; + memset(dst, 0xFF, 10); + ref.copy(dst); + EXPECT_EQ(dst[5], '\0'); + EXPECT_EQ(dst[6], 0xFF); + EXPECT_EQ(ref, dst); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_vector_set_test.cc b/source/blender/blenlib/tests/BLI_vector_set_test.cc new file mode 100644 index 00000000000..8f3db8d8403 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_vector_set_test.cc @@ -0,0 +1,164 
@@ +/* Apache License, Version 2.0 */ + +#include "BLI_strict_flags.h" +#include "BLI_vector_set.hh" +#include "testing/testing.h" + +namespace blender::tests { + +TEST(vector_set, DefaultConstructor) +{ + VectorSet<int> set; + EXPECT_EQ(set.size(), 0); + EXPECT_TRUE(set.is_empty()); +} + +TEST(vector_set, InitializerListConstructor_WithoutDuplicates) +{ + VectorSet<int> set = {1, 4, 5}; + EXPECT_EQ(set.size(), 3); + EXPECT_EQ(set[0], 1); + EXPECT_EQ(set[1], 4); + EXPECT_EQ(set[2], 5); +} + +TEST(vector_set, InitializerListConstructor_WithDuplicates) +{ + VectorSet<int> set = {1, 3, 3, 2, 1, 5}; + EXPECT_EQ(set.size(), 4); + EXPECT_EQ(set[0], 1); + EXPECT_EQ(set[1], 3); + EXPECT_EQ(set[2], 2); + EXPECT_EQ(set[3], 5); +} + +TEST(vector_set, Copy) +{ + VectorSet<int> set1 = {1, 2, 3}; + VectorSet<int> set2 = set1; + EXPECT_EQ(set1.size(), 3); + EXPECT_EQ(set2.size(), 3); + EXPECT_EQ(set1.index_of(2), 1); + EXPECT_EQ(set2.index_of(2), 1); +} + +TEST(vector_set, CopyAssignment) +{ + VectorSet<int> set1 = {1, 2, 3}; + VectorSet<int> set2 = {}; + set2 = set1; + EXPECT_EQ(set1.size(), 3); + EXPECT_EQ(set2.size(), 3); + EXPECT_EQ(set1.index_of(2), 1); + EXPECT_EQ(set2.index_of(2), 1); +} + +TEST(vector_set, Move) +{ + VectorSet<int> set1 = {1, 2, 3}; + VectorSet<int> set2 = std::move(set1); + EXPECT_EQ(set1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(set2.size(), 3); +} + +TEST(vector_set, MoveAssignment) +{ + VectorSet<int> set1 = {1, 2, 3}; + VectorSet<int> set2 = {}; + set2 = std::move(set1); + EXPECT_EQ(set1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(set2.size(), 3); +} + +TEST(vector_set, AddNewIncreasesSize) +{ + VectorSet<int> set; + EXPECT_TRUE(set.is_empty()); + EXPECT_EQ(set.size(), 0); + set.add(5); + EXPECT_FALSE(set.is_empty()); + EXPECT_EQ(set.size(), 1); +} + +TEST(vector_set, AddExistingDoesNotIncreaseSize) +{ + VectorSet<int> set; + EXPECT_EQ(set.size(), 0); + EXPECT_TRUE(set.add(5)); + EXPECT_EQ(set.size(), 1); + 
EXPECT_FALSE(set.add(5)); + EXPECT_EQ(set.size(), 1); +} + +TEST(vector_set, Index) +{ + VectorSet<int> set = {3, 6, 4}; + EXPECT_EQ(set.index_of(6), 1); + EXPECT_EQ(set.index_of(3), 0); + EXPECT_EQ(set.index_of(4), 2); +} + +TEST(vector_set, IndexTry) +{ + VectorSet<int> set = {3, 6, 4}; + EXPECT_EQ(set.index_of_try(5), -1); + EXPECT_EQ(set.index_of_try(3), 0); + EXPECT_EQ(set.index_of_try(6), 1); + EXPECT_EQ(set.index_of_try(2), -1); +} + +TEST(vector_set, RemoveContained) +{ + VectorSet<int> set = {4, 5, 6, 7}; + EXPECT_EQ(set.size(), 4); + set.remove_contained(5); + EXPECT_EQ(set.size(), 3); + EXPECT_EQ(set[0], 4); + EXPECT_EQ(set[1], 7); + EXPECT_EQ(set[2], 6); + set.remove_contained(6); + EXPECT_EQ(set.size(), 2); + EXPECT_EQ(set[0], 4); + EXPECT_EQ(set[1], 7); + set.remove_contained(4); + EXPECT_EQ(set.size(), 1); + EXPECT_EQ(set[0], 7); + set.remove_contained(7); + EXPECT_EQ(set.size(), 0); +} + +TEST(vector_set, AddMultipleTimes) +{ + VectorSet<int> set; + for (int i = 0; i < 100; i++) { + EXPECT_FALSE(set.contains(i * 13)); + set.add(i * 12); + set.add(i * 13); + EXPECT_TRUE(set.contains(i * 13)); + } +} + +TEST(vector_set, UniquePtrValue) +{ + VectorSet<std::unique_ptr<int>> set; + set.add_new(std::unique_ptr<int>(new int())); + set.add(std::unique_ptr<int>(new int())); + set.index_of_try(std::unique_ptr<int>(new int())); + std::unique_ptr<int> value = set.pop(); + UNUSED_VARS(value); +} + +TEST(vector_set, Remove) +{ + VectorSet<int> set; + EXPECT_TRUE(set.add(5)); + EXPECT_TRUE(set.contains(5)); + EXPECT_FALSE(set.remove(6)); + EXPECT_TRUE(set.contains(5)); + EXPECT_TRUE(set.remove(5)); + EXPECT_FALSE(set.contains(5)); + EXPECT_FALSE(set.remove(5)); + EXPECT_FALSE(set.contains(5)); +} + +} // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_vector_test.cc b/source/blender/blenlib/tests/BLI_vector_test.cc new file mode 100644 index 00000000000..f72dfc5deb8 --- /dev/null +++ b/source/blender/blenlib/tests/BLI_vector_test.cc @@ -0,0 
+1,639 @@ +/* Apache License, Version 2.0 */ + +#include "BLI_strict_flags.h" +#include "BLI_vector.hh" +#include "testing/testing.h" +#include <forward_list> + +namespace blender::tests { + +TEST(vector, DefaultConstructor) +{ + Vector<int> vec; + EXPECT_EQ(vec.size(), 0); +} + +TEST(vector, SizeConstructor) +{ + Vector<int> vec(3); + EXPECT_EQ(vec.size(), 3); +} + +/** + * Tests that the trivially constructible types are not zero-initialized. We do not want that for + * performance reasons. + */ +TEST(vector, TrivialTypeSizeConstructor) +{ + Vector<char, 1> *vec = new Vector<char, 1>(1); + char *ptr = &(*vec)[0]; + vec->~Vector(); + + const char magic = 42; + *ptr = magic; + EXPECT_EQ(*ptr, magic); + + new (vec) Vector<char, 1>(1); + EXPECT_EQ((*vec)[0], magic); + EXPECT_EQ(*ptr, magic); + delete vec; +} + +TEST(vector, SizeValueConstructor) +{ + Vector<int> vec(4, 10); + EXPECT_EQ(vec.size(), 4); + EXPECT_EQ(vec[0], 10); + EXPECT_EQ(vec[1], 10); + EXPECT_EQ(vec[2], 10); + EXPECT_EQ(vec[3], 10); +} + +TEST(vector, InitializerListConstructor) +{ + Vector<int> vec = {1, 3, 4, 6}; + EXPECT_EQ(vec.size(), 4); + EXPECT_EQ(vec[0], 1); + EXPECT_EQ(vec[1], 3); + EXPECT_EQ(vec[2], 4); + EXPECT_EQ(vec[3], 6); +} + +TEST(vector, ConvertingConstructor) +{ + std::array<float, 5> values = {5.4f, 7.3f, -8.1f, 5.0f, 0.0f}; + Vector<int> vec = values; + EXPECT_EQ(vec.size(), 5); + EXPECT_EQ(vec[0], 5); + EXPECT_EQ(vec[1], 7); + EXPECT_EQ(vec[2], -8); + EXPECT_EQ(vec[3], 5); + EXPECT_EQ(vec[4], 0); +} + +struct TestListValue { + TestListValue *next, *prev; + int value; +}; + +TEST(vector, ListBaseConstructor) +{ + TestListValue *value1 = new TestListValue{0, 0, 4}; + TestListValue *value2 = new TestListValue{0, 0, 5}; + TestListValue *value3 = new TestListValue{0, 0, 6}; + + ListBase list = {NULL, NULL}; + BLI_addtail(&list, value1); + BLI_addtail(&list, value2); + BLI_addtail(&list, value3); + Vector<TestListValue *> vec(list); + + EXPECT_EQ(vec.size(), 3); + 
EXPECT_EQ(vec[0]->value, 4); + EXPECT_EQ(vec[1]->value, 5); + EXPECT_EQ(vec[2]->value, 6); + + delete value1; + delete value2; + delete value3; +} + +TEST(vector, ContainerConstructor) +{ + std::forward_list<int> list; + list.push_front(3); + list.push_front(1); + list.push_front(5); + + Vector<int> vec = Vector<int>::FromContainer(list); + EXPECT_EQ(vec.size(), 3); + EXPECT_EQ(vec[0], 5); + EXPECT_EQ(vec[1], 1); + EXPECT_EQ(vec[2], 3); +} + +TEST(vector, CopyConstructor) +{ + Vector<int> vec1 = {1, 2, 3}; + Vector<int> vec2(vec1); + EXPECT_EQ(vec2.size(), 3); + EXPECT_EQ(vec2[0], 1); + EXPECT_EQ(vec2[1], 2); + EXPECT_EQ(vec2[2], 3); + + vec1[1] = 5; + EXPECT_EQ(vec1[1], 5); + EXPECT_EQ(vec2[1], 2); +} + +TEST(vector, CopyConstructor2) +{ + Vector<int, 2> vec1 = {1, 2, 3, 4}; + Vector<int, 3> vec2(vec1); + + EXPECT_EQ(vec1.size(), 4); + EXPECT_EQ(vec2.size(), 4); + EXPECT_NE(vec1.data(), vec2.data()); + EXPECT_EQ(vec2[0], 1); + EXPECT_EQ(vec2[1], 2); + EXPECT_EQ(vec2[2], 3); + EXPECT_EQ(vec2[3], 4); +} + +TEST(vector, CopyConstructor3) +{ + Vector<int, 20> vec1 = {1, 2, 3, 4}; + Vector<int, 1> vec2(vec1); + + EXPECT_EQ(vec1.size(), 4); + EXPECT_EQ(vec2.size(), 4); + EXPECT_NE(vec1.data(), vec2.data()); + EXPECT_EQ(vec2[2], 3); +} + +TEST(vector, CopyConstructor4) +{ + Vector<int, 5> vec1 = {1, 2, 3, 4}; + Vector<int, 6> vec2(vec1); + + EXPECT_EQ(vec1.size(), 4); + EXPECT_EQ(vec2.size(), 4); + EXPECT_NE(vec1.data(), vec2.data()); + EXPECT_EQ(vec2[3], 4); +} + +TEST(vector, MoveConstructor) +{ + Vector<int> vec1 = {1, 2, 3, 4}; + Vector<int> vec2(std::move(vec1)); + + EXPECT_EQ(vec1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(vec2.size(), 4); + EXPECT_EQ(vec2[0], 1); + EXPECT_EQ(vec2[1], 2); + EXPECT_EQ(vec2[2], 3); + EXPECT_EQ(vec2[3], 4); +} + +TEST(vector, MoveConstructor2) +{ + Vector<int, 2> vec1 = {1, 2, 3, 4}; + Vector<int, 3> vec2(std::move(vec1)); + + EXPECT_EQ(vec1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(vec2.size(), 
4); + EXPECT_EQ(vec2[0], 1); + EXPECT_EQ(vec2[1], 2); + EXPECT_EQ(vec2[2], 3); + EXPECT_EQ(vec2[3], 4); +} + +TEST(vector, MoveConstructor3) +{ + Vector<int, 20> vec1 = {1, 2, 3, 4}; + Vector<int, 1> vec2(std::move(vec1)); + + EXPECT_EQ(vec1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(vec2.size(), 4); + EXPECT_EQ(vec2[2], 3); +} + +TEST(vector, MoveConstructor4) +{ + Vector<int, 5> vec1 = {1, 2, 3, 4}; + Vector<int, 6> vec2(std::move(vec1)); + + EXPECT_EQ(vec1.size(), 0); /* NOLINT: bugprone-use-after-move */ + EXPECT_EQ(vec2.size(), 4); + EXPECT_EQ(vec2[3], 4); +} + +TEST(vector, MoveAssignment) +{ + Vector<int> vec = {1, 2}; + EXPECT_EQ(vec.size(), 2); + EXPECT_EQ(vec[0], 1); + EXPECT_EQ(vec[1], 2); + + vec = Vector<int>({5}); + EXPECT_EQ(vec.size(), 1); + EXPECT_EQ(vec[0], 5); +} + +TEST(vector, CopyAssignment) +{ + Vector<int> vec1 = {1, 2, 3}; + Vector<int> vec2 = {4, 5}; + EXPECT_EQ(vec1.size(), 3); + EXPECT_EQ(vec2.size(), 2); + + vec2 = vec1; + EXPECT_EQ(vec2.size(), 3); + + vec1[0] = 7; + EXPECT_EQ(vec1[0], 7); + EXPECT_EQ(vec2[0], 1); +} + +TEST(vector, Append) +{ + Vector<int> vec; + vec.append(3); + vec.append(6); + vec.append(7); + EXPECT_EQ(vec.size(), 3); + EXPECT_EQ(vec[0], 3); + EXPECT_EQ(vec[1], 6); + EXPECT_EQ(vec[2], 7); +} + +TEST(vector, AppendAndGetIndex) +{ + Vector<int> vec; + EXPECT_EQ(vec.append_and_get_index(10), 0); + EXPECT_EQ(vec.append_and_get_index(10), 1); + EXPECT_EQ(vec.append_and_get_index(10), 2); + vec.append(10); + EXPECT_EQ(vec.append_and_get_index(10), 4); +} + +TEST(vector, AppendNonDuplicates) +{ + Vector<int> vec; + vec.append_non_duplicates(4); + EXPECT_EQ(vec.size(), 1); + vec.append_non_duplicates(5); + EXPECT_EQ(vec.size(), 2); + vec.append_non_duplicates(4); + EXPECT_EQ(vec.size(), 2); +} + +TEST(vector, ExtendNonDuplicates) +{ + Vector<int> vec; + vec.extend_non_duplicates({1, 2}); + EXPECT_EQ(vec.size(), 2); + vec.extend_non_duplicates({3, 4}); + EXPECT_EQ(vec.size(), 4); + 
vec.extend_non_duplicates({0, 1, 2, 3}); + EXPECT_EQ(vec.size(), 5); +} + +TEST(vector, Iterator) +{ + Vector<int> vec({1, 4, 9, 16}); + int i = 1; + for (int value : vec) { + EXPECT_EQ(value, i * i); + i++; + } +} + +TEST(vector, BecomeLarge) +{ + Vector<int, 4> vec; + for (int i = 0; i < 100; i++) { + vec.append(i * 5); + } + EXPECT_EQ(vec.size(), 100); + for (int i = 0; i < 100; i++) { + EXPECT_EQ(vec[i], static_cast<int>(i * 5)); + } +} + +static Vector<int> return_by_value_helper() +{ + return Vector<int>({3, 5, 1}); +} + +TEST(vector, ReturnByValue) +{ + Vector<int> vec = return_by_value_helper(); + EXPECT_EQ(vec.size(), 3); + EXPECT_EQ(vec[0], 3); + EXPECT_EQ(vec[1], 5); + EXPECT_EQ(vec[2], 1); +} + +TEST(vector, VectorOfVectors_Append) +{ + Vector<Vector<int>> vec; + EXPECT_EQ(vec.size(), 0); + + Vector<int> v({1, 2}); + vec.append(v); + vec.append({7, 8}); + EXPECT_EQ(vec.size(), 2); + EXPECT_EQ(vec[0][0], 1); + EXPECT_EQ(vec[0][1], 2); + EXPECT_EQ(vec[1][0], 7); + EXPECT_EQ(vec[1][1], 8); +} + +TEST(vector, RemoveLast) +{ + Vector<int> vec = {5, 6}; + EXPECT_EQ(vec.size(), 2); + vec.remove_last(); + EXPECT_EQ(vec.size(), 1); + vec.remove_last(); + EXPECT_EQ(vec.size(), 0); +} + +TEST(vector, IsEmpty) +{ + Vector<int> vec; + EXPECT_TRUE(vec.is_empty()); + vec.append(1); + EXPECT_FALSE(vec.is_empty()); + vec.remove_last(); + EXPECT_TRUE(vec.is_empty()); +} + +TEST(vector, RemoveReorder) +{ + Vector<int> vec = {4, 5, 6, 7}; + vec.remove_and_reorder(1); + EXPECT_EQ(vec[0], 4); + EXPECT_EQ(vec[1], 7); + EXPECT_EQ(vec[2], 6); + vec.remove_and_reorder(2); + EXPECT_EQ(vec[0], 4); + EXPECT_EQ(vec[1], 7); + vec.remove_and_reorder(0); + EXPECT_EQ(vec[0], 7); + vec.remove_and_reorder(0); + EXPECT_TRUE(vec.is_empty()); +} + +TEST(vector, RemoveFirstOccurrenceAndReorder) +{ + Vector<int> vec = {4, 5, 6, 7}; + vec.remove_first_occurrence_and_reorder(5); + EXPECT_EQ(vec[0], 4); + EXPECT_EQ(vec[1], 7); + EXPECT_EQ(vec[2], 6); + vec.remove_first_occurrence_and_reorder(6); 
+ EXPECT_EQ(vec[0], 4); + EXPECT_EQ(vec[1], 7); + vec.remove_first_occurrence_and_reorder(4); + EXPECT_EQ(vec[0], 7); + vec.remove_first_occurrence_and_reorder(7); + EXPECT_EQ(vec.size(), 0); +} + +TEST(vector, Remove) +{ + Vector<int> vec = {1, 2, 3, 4, 5, 6}; + vec.remove(3); + EXPECT_TRUE(std::equal(vec.begin(), vec.end(), Span<int>({1, 2, 3, 5, 6}).begin())); + vec.remove(0); + EXPECT_TRUE(std::equal(vec.begin(), vec.end(), Span<int>({2, 3, 5, 6}).begin())); + vec.remove(3); + EXPECT_TRUE(std::equal(vec.begin(), vec.end(), Span<int>({2, 3, 5}).begin())); + vec.remove(1); + EXPECT_TRUE(std::equal(vec.begin(), vec.end(), Span<int>({2, 5}).begin())); + vec.remove(1); + EXPECT_TRUE(std::equal(vec.begin(), vec.end(), Span<int>({2}).begin())); + vec.remove(0); + EXPECT_TRUE(std::equal(vec.begin(), vec.end(), Span<int>({}).begin())); +} + +TEST(vector, ExtendSmallVector) +{ + Vector<int> a = {2, 3, 4}; + Vector<int> b = {11, 12}; + b.extend(a); + EXPECT_EQ(b.size(), 5); + EXPECT_EQ(b[0], 11); + EXPECT_EQ(b[1], 12); + EXPECT_EQ(b[2], 2); + EXPECT_EQ(b[3], 3); + EXPECT_EQ(b[4], 4); +} + +TEST(vector, ExtendArray) +{ + int array[] = {3, 4, 5, 6}; + + Vector<int> a; + a.extend(array, 2); + + EXPECT_EQ(a.size(), 2); + EXPECT_EQ(a[0], 3); + EXPECT_EQ(a[1], 4); +} + +TEST(vector, Last) +{ + Vector<int> a{3, 5, 7}; + EXPECT_EQ(a.last(), 7); +} + +TEST(vector, AppendNTimes) +{ + Vector<int> a; + a.append_n_times(5, 3); + a.append_n_times(2, 2); + EXPECT_EQ(a.size(), 5); + EXPECT_EQ(a[0], 5); + EXPECT_EQ(a[1], 5); + EXPECT_EQ(a[2], 5); + EXPECT_EQ(a[3], 2); + EXPECT_EQ(a[4], 2); +} + +TEST(vector, UniquePtrValue) +{ + Vector<std::unique_ptr<int>> vec; + vec.append(std::unique_ptr<int>(new int())); + vec.append(std::unique_ptr<int>(new int())); + vec.append(std::unique_ptr<int>(new int())); + vec.append(std::unique_ptr<int>(new int())); + EXPECT_EQ(vec.size(), 4); + + std::unique_ptr<int> &a = vec.last(); + std::unique_ptr<int> b = vec.pop_last(); + vec.remove_and_reorder(0); + 
vec.remove(0); + EXPECT_EQ(vec.size(), 1); + + UNUSED_VARS(a, b); +} + +class TypeConstructMock { + public: + bool default_constructed = false; + bool copy_constructed = false; + bool move_constructed = false; + bool copy_assigned = false; + bool move_assigned = false; + + TypeConstructMock() : default_constructed(true) + { + } + + TypeConstructMock(const TypeConstructMock &UNUSED(other)) : copy_constructed(true) + { + } + + TypeConstructMock(TypeConstructMock &&UNUSED(other)) noexcept : move_constructed(true) + { + } + + TypeConstructMock &operator=(const TypeConstructMock &other) + { + if (this == &other) { + return *this; + } + + copy_assigned = true; + return *this; + } + + TypeConstructMock &operator=(TypeConstructMock &&other) noexcept + { + if (this == &other) { + return *this; + } + + move_assigned = true; + return *this; + } +}; + +TEST(vector, SizeConstructorCallsDefaultConstructor) +{ + Vector<TypeConstructMock> vec(3); + EXPECT_TRUE(vec[0].default_constructed); + EXPECT_TRUE(vec[1].default_constructed); + EXPECT_TRUE(vec[2].default_constructed); +} + +TEST(vector, SizeValueConstructorCallsCopyConstructor) +{ + Vector<TypeConstructMock> vec(3, TypeConstructMock()); + EXPECT_TRUE(vec[0].copy_constructed); + EXPECT_TRUE(vec[1].copy_constructed); + EXPECT_TRUE(vec[2].copy_constructed); +} + +TEST(vector, AppendCallsCopyConstructor) +{ + Vector<TypeConstructMock> vec; + TypeConstructMock value; + vec.append(value); + EXPECT_TRUE(vec[0].copy_constructed); +} + +TEST(vector, AppendCallsMoveConstructor) +{ + Vector<TypeConstructMock> vec; + vec.append(TypeConstructMock()); + EXPECT_TRUE(vec[0].move_constructed); +} + +TEST(vector, SmallVectorCopyCallsCopyConstructor) +{ + Vector<TypeConstructMock, 2> src(2); + Vector<TypeConstructMock, 2> dst(src); + EXPECT_TRUE(dst[0].copy_constructed); + EXPECT_TRUE(dst[1].copy_constructed); +} + +TEST(vector, LargeVectorCopyCallsCopyConstructor) +{ + Vector<TypeConstructMock, 2> src(5); + Vector<TypeConstructMock, 2> 
dst(src); + EXPECT_TRUE(dst[0].copy_constructed); + EXPECT_TRUE(dst[1].copy_constructed); +} + +TEST(vector, SmallVectorMoveCallsMoveConstructor) +{ + Vector<TypeConstructMock, 2> src(2); + Vector<TypeConstructMock, 2> dst(std::move(src)); + EXPECT_TRUE(dst[0].move_constructed); + EXPECT_TRUE(dst[1].move_constructed); +} + +TEST(vector, LargeVectorMoveCallsNoConstructor) +{ + Vector<TypeConstructMock, 2> src(5); + Vector<TypeConstructMock, 2> dst(std::move(src)); + + EXPECT_TRUE(dst[0].default_constructed); + EXPECT_FALSE(dst[0].move_constructed); + EXPECT_FALSE(dst[0].copy_constructed); +} + +TEST(vector, Resize) +{ + std::string long_string = "012345678901234567890123456789"; + Vector<std::string> vec; + EXPECT_EQ(vec.size(), 0); + vec.resize(2); + EXPECT_EQ(vec.size(), 2); + EXPECT_EQ(vec[0], ""); + EXPECT_EQ(vec[1], ""); + vec.resize(5, long_string); + EXPECT_EQ(vec.size(), 5); + EXPECT_EQ(vec[0], ""); + EXPECT_EQ(vec[1], ""); + EXPECT_EQ(vec[2], long_string); + EXPECT_EQ(vec[3], long_string); + EXPECT_EQ(vec[4], long_string); + vec.resize(1); + EXPECT_EQ(vec.size(), 1); + EXPECT_EQ(vec[0], ""); +} + +TEST(vector, FirstIndexOf) +{ + Vector<int> vec = {2, 3, 5, 7, 5, 9}; + EXPECT_EQ(vec.first_index_of(2), 0); + EXPECT_EQ(vec.first_index_of(5), 2); + EXPECT_EQ(vec.first_index_of(9), 5); +} + +TEST(vector, FirstIndexTryOf) +{ + Vector<int> vec = {2, 3, 5, 7, 5, 9}; + EXPECT_EQ(vec.first_index_of_try(2), 0); + EXPECT_EQ(vec.first_index_of_try(4), -1); + EXPECT_EQ(vec.first_index_of_try(5), 2); + EXPECT_EQ(vec.first_index_of_try(9), 5); + EXPECT_EQ(vec.first_index_of_try(1), -1); +} + +TEST(vector, OveralignedValues) +{ + Vector<AlignedBuffer<1, 512>, 2> vec; + for (int i = 0; i < 100; i++) { + vec.append({}); + EXPECT_EQ((uintptr_t)&vec.last() % 512, 0); + } +} + +TEST(vector, ConstructVoidPointerVector) +{ + int a; + float b; + double c; + Vector<void *> vec = {&a, &b, &c}; + EXPECT_EQ(vec.size(), 3); +} + +TEST(vector, Fill) +{ + Vector<int> vec(5); + 
vec.fill(3); + EXPECT_EQ(vec.size(), 5u); + EXPECT_EQ(vec[0], 3); + EXPECT_EQ(vec[1], 3); + EXPECT_EQ(vec[2], 3); + EXPECT_EQ(vec[3], 3); + EXPECT_EQ(vec[4], 3); +} + +} // namespace blender::tests |