diff options
Diffstat (limited to 'source')
241 files changed, 11189 insertions, 5699 deletions
diff --git a/source/blender/blendthumb/CMakeLists.txt b/source/blender/blendthumb/CMakeLists.txt index 330cefa247a..6160d225d45 100644 --- a/source/blender/blendthumb/CMakeLists.txt +++ b/source/blender/blendthumb/CMakeLists.txt @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-or-later # Copyright 2006 Blender Foundation. All rights reserved. -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- # Shared Thumbnail Extraction Logic include_directories( diff --git a/source/blender/blenkernel/BKE_DerivedMesh.h b/source/blender/blenkernel/BKE_DerivedMesh.h index 4274ca97fd1..da1e45ababd 100644 --- a/source/blender/blenkernel/BKE_DerivedMesh.h +++ b/source/blender/blenkernel/BKE_DerivedMesh.h @@ -141,14 +141,6 @@ struct DerivedMesh { void (*copyLoopArray)(DerivedMesh *dm, struct MLoop *r_loop); void (*copyPolyArray)(DerivedMesh *dm, struct MPoly *r_poly); - /** Return a copy of all verts/edges/faces from the derived mesh - * it is the caller's responsibility to free the returned pointer - */ - struct MVert *(*dupVertArray)(DerivedMesh *dm); - struct MEdge *(*dupEdgeArray)(DerivedMesh *dm); - struct MLoop *(*dupLoopArray)(DerivedMesh *dm); - struct MPoly *(*dupPolyArray)(DerivedMesh *dm); - /** Return a pointer to the entire array of vert/edge/face custom data * from the derived mesh (this gives a pointer to the actual data, not * a copy) @@ -254,11 +246,6 @@ void DM_copy_vert_data(struct DerivedMesh *source, int count); /** - * Sets up mpolys for a DM based on face iterators in source. - */ -void DM_DupPolys(DerivedMesh *source, DerivedMesh *target); - -/** * Ensure the array is large enough. * * \note This function must always be thread-protected by caller. 
diff --git a/source/blender/blenkernel/BKE_attribute.hh b/source/blender/blenkernel/BKE_attribute.hh index 4aa6c133e9e..fbdacee139c 100644 --- a/source/blender/blenkernel/BKE_attribute.hh +++ b/source/blender/blenkernel/BKE_attribute.hh @@ -553,6 +553,11 @@ class MutableAttributeAccessor : public AttributeAccessor { GAttributeWriter lookup_for_write(const AttributeIDRef &attribute_id); /** + * Same as above, but returns a type that makes it easier to work with the attribute as a span. + */ + GSpanAttributeWriter lookup_for_write_span(const AttributeIDRef &attribute_id); + + /** * Get a writable attribute or non if it does not exist. * Make sure to call #finish after changes are done. */ @@ -569,6 +574,19 @@ class MutableAttributeAccessor : public AttributeAccessor { } /** + * Same as above, but returns a type that makes it easier to work with the attribute as a span. + */ + template<typename T> + SpanAttributeWriter<T> lookup_for_write_span(const AttributeIDRef &attribute_id) + { + AttributeWriter<T> attribute = this->lookup_for_write<T>(attribute_id); + if (attribute) { + return SpanAttributeWriter<T>{std::move(attribute), true}; + } + return {}; + } + + /** * Create a new attribute. * \return True, when a new attribute has been created. False, when it's not possible to create * this attribute or there is already an attribute with that id. @@ -692,6 +710,19 @@ Vector<AttributeTransferData> retrieve_attributes_for_transfer( eAttrDomainMask domain_mask, const Set<std::string> &skip = {}); +/** + * Copy attributes for the domain based on the elementwise mask. + * + * \param mask_indices: Indexed elements to copy from the source data-block. + * \param domain: Attribute domain to transfer. + * \param skip: Named attributes to ignore/skip. 
+ */ +void copy_attribute_domain(AttributeAccessor src_attributes, + MutableAttributeAccessor dst_attributes, + IndexMask selection, + eAttrDomain domain, + const Set<std::string> &skip = {}); + bool allow_procedural_attribute_access(StringRef attribute_name); extern const char *no_procedural_access_message; diff --git a/source/blender/blenkernel/BKE_cdderivedmesh.h b/source/blender/blenkernel/BKE_cdderivedmesh.h index 3c929857c14..2d1aca7c3c8 100644 --- a/source/blender/blenkernel/BKE_cdderivedmesh.h +++ b/source/blender/blenkernel/BKE_cdderivedmesh.h @@ -25,10 +25,6 @@ struct Mesh; * data to not overwrite the original. */ struct DerivedMesh *CDDM_from_mesh(struct Mesh *mesh); -/* Copies the given DerivedMesh with verts, faces & edges stored as - * custom element data. */ -struct DerivedMesh *CDDM_copy(struct DerivedMesh *source); - #ifdef __cplusplus } #endif diff --git a/source/blender/blenkernel/BKE_compute_contexts.hh b/source/blender/blenkernel/BKE_compute_contexts.hh new file mode 100644 index 00000000000..a8f0022f49b --- /dev/null +++ b/source/blender/blenkernel/BKE_compute_contexts.hh @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** + * This file implements some specific compute contexts for concepts in Blender. + */ + +#include "BLI_compute_context.hh" + +namespace blender::bke { + +class ModifierComputeContext : public ComputeContext { + private: + static constexpr const char *s_static_type = "MODIFIER"; + + /** + * Use modifier name instead of something like `session_uuid` for now because: + * - It's more obvious that the name matches between the original and evaluated object. + * - We might want that the context hash is consistent between sessions in the future. 
+ */ + std::string modifier_name_; + + public: + ModifierComputeContext(const ComputeContext *parent, std::string modifier_name); + + private: + void print_current_in_line(std::ostream &stream) const override; +}; + +class NodeGroupComputeContext : public ComputeContext { + private: + static constexpr const char *s_static_type = "NODE_GROUP"; + + std::string node_name_; + + public: + NodeGroupComputeContext(const ComputeContext *parent, std::string node_name); + + StringRefNull node_name() const; + + private: + void print_current_in_line(std::ostream &stream) const override; +}; + +} // namespace blender::bke diff --git a/source/blender/blenkernel/BKE_cryptomatte.h b/source/blender/blenkernel/BKE_cryptomatte.h index 56049ecf405..b2024f09278 100644 --- a/source/blender/blenkernel/BKE_cryptomatte.h +++ b/source/blender/blenkernel/BKE_cryptomatte.h @@ -25,6 +25,8 @@ struct CryptomatteSession *BKE_cryptomatte_init(void); struct CryptomatteSession *BKE_cryptomatte_init_from_render_result( const struct RenderResult *render_result); struct CryptomatteSession *BKE_cryptomatte_init_from_scene(const struct Scene *scene); +struct CryptomatteSession *BKE_cryptomatte_init_from_view_layer( + const struct ViewLayer *view_layer); void BKE_cryptomatte_free(struct CryptomatteSession *session); void BKE_cryptomatte_add_layer(struct CryptomatteSession *session, const char *layer_name); diff --git a/source/blender/blenkernel/BKE_cryptomatte.hh b/source/blender/blenkernel/BKE_cryptomatte.hh index cd3f8dc9f58..dd08f7b5c4f 100644 --- a/source/blender/blenkernel/BKE_cryptomatte.hh +++ b/source/blender/blenkernel/BKE_cryptomatte.hh @@ -12,6 +12,7 @@ #include "BKE_cryptomatte.h" +#include "BLI_hash_mm3.h" #include "BLI_map.hh" #include "BLI_string_ref.hh" @@ -54,10 +55,14 @@ struct CryptomatteHash { uint32_t hash; CryptomatteHash(uint32_t hash); - CryptomatteHash(const char *name, int name_len); - static CryptomatteHash from_hex_encoded(blender::StringRef hex_encoded); + 
CryptomatteHash(const char *name, int name_len) + { + hash = BLI_hash_mm3((const unsigned char *)name, name_len, 0); + } + static CryptomatteHash from_hex_encoded(blender::StringRef hex_encoded); std::string hex_encoded() const; + /** * Convert a cryptomatte hash to a float. * @@ -70,7 +75,20 @@ struct CryptomatteHash { * * Note that this conversion assumes to be running on a L-endian system. */ - float float_encoded() const; + float float_encoded() const + { + uint32_t mantissa = hash & ((1 << 23) - 1); + uint32_t exponent = (hash >> 23) & ((1 << 8) - 1); + exponent = MAX2(exponent, (uint32_t)1); + exponent = MIN2(exponent, (uint32_t)254); + exponent = exponent << 23; + uint32_t sign = (hash >> 31); + sign = sign << 31; + uint32_t float_bits = sign | exponent | mantissa; + float f; + memcpy(&f, &float_bits, sizeof(uint32_t)); + return f; + } }; struct CryptomatteLayer { @@ -107,6 +125,8 @@ struct CryptomatteStampDataCallbackData { const blender::Vector<std::string> &BKE_cryptomatte_layer_names_get( const CryptomatteSession &session); +CryptomatteLayer *BKE_cryptomatte_layer_get(CryptomatteSession &session, + const StringRef layer_name); struct CryptomatteSessionDeleter { void operator()(CryptomatteSession *session) diff --git a/source/blender/blenkernel/BKE_curves.hh b/source/blender/blenkernel/BKE_curves.hh index 4b0fc293b54..9f150c13d6e 100644 --- a/source/blender/blenkernel/BKE_curves.hh +++ b/source/blender/blenkernel/BKE_curves.hh @@ -22,6 +22,7 @@ #include "BLI_virtual_array.hh" #include "BKE_attribute.hh" +#include "BKE_attribute_math.hh" namespace blender::bke { @@ -162,6 +163,11 @@ class CurvesGeometry : public ::CurvesGeometry { IndexRange curves_range() const; /** + * Number of control points in the indexed curve. + */ + int points_num_for_curve(const int index) const; + + /** * The index of the first point in every curve. The size of this span is one larger than the * number of curves. 
Consider using #points_for_curve rather than using the offsets directly. */ @@ -532,6 +538,16 @@ bool segment_is_vector(Span<int8_t> handle_types_left, int segment_index); /** + * True if the Bezier curve contains polygonal segments of HandleType::BEZIER_HANDLE_VECTOR. + * + * \param num_curve_points: Number of points in the curve. + * \param evaluated_size: Number of evaluated points in the curve. + * \param cyclic: If curve is cyclic. + * \param resolution: Curve resolution. + */ +bool has_vector_handles(int num_curve_points, int64_t evaluated_size, bool cyclic, int resolution); + +/** * Return true if the curve's last cyclic segment has a vector type. * This only makes a difference in the shape of cyclic curves. */ @@ -693,6 +709,36 @@ void interpolate_to_evaluated(const GSpan src, const Span<int> evaluated_offsets, GMutableSpan dst); +void calculate_basis(const float parameter, float r_weights[4]); + +/** + * Interpolate the control point values for the given parameter on the piecewise segment. + * \param a: Value associated with the first control point influencing the segment. + * \param d: Value associated with the fourth control point. + * \param parameter: Parameter in range [0, 1] to compute the interpolation for. + */ +template<typename T> +T interpolate(const T &a, const T &b, const T &c, const T &d, const float parameter) +{ + float n[4]; + calculate_basis(parameter, n); + /* TODO: Use DefaultMixer or other generic mixing in the basis evaluation function to simplify + * supporting more types. 
*/ + if constexpr (!is_same_any_v<T, float, float2, float3, float4, int8_t, int, int64_t>) { + T return_value; + attribute_math::DefaultMixer<T> mixer({&return_value, 1}); + mixer.mix_in(0, a, n[0] * 0.5f); + mixer.mix_in(0, b, n[1] * 0.5f); + mixer.mix_in(0, c, n[2] * 0.5f); + mixer.mix_in(0, d, n[3] * 0.5f); + mixer.finalize(); + return return_value; + } + else { + return 0.5f * (a * n[0] + b * n[1] + c * n[2] + d * n[3]); + } +} + } // namespace catmull_rom /** \} */ @@ -807,6 +853,16 @@ inline IndexRange CurvesGeometry::curves_range() const return IndexRange(this->curves_num()); } +inline int CurvesGeometry::points_num_for_curve(const int index) const +{ + BLI_assert(this->curve_num > 0); + BLI_assert(this->curve_num > index); + BLI_assert(this->curve_offsets != nullptr); + const int offset = this->curve_offsets[index]; + const int offset_next = this->curve_offsets[index + 1]; + return offset_next - offset; +} + inline bool CurvesGeometry::is_single_type(const CurveType type) const { return this->curve_type_counts()[type] == this->curves_num(); @@ -833,6 +889,7 @@ inline IndexRange CurvesGeometry::points_for_curve(const int index) const { /* Offsets are not allocated when there are no curves. 
*/ BLI_assert(this->curve_num > 0); + BLI_assert(this->curve_num > index); BLI_assert(this->curve_offsets != nullptr); const int offset = this->curve_offsets[index]; const int offset_next = this->curve_offsets[index + 1]; @@ -905,11 +962,13 @@ inline float CurvesGeometry::evaluated_length_total_for_curve(const int curve_in /** \} */ +namespace curves { + /* -------------------------------------------------------------------- */ /** \name Bezier Inline Methods * \{ */ -namespace curves::bezier { +namespace bezier { inline bool point_is_sharp(const Span<int8_t> handle_types_left, const Span<int8_t> handle_types_right, @@ -929,14 +988,24 @@ inline bool segment_is_vector(const int8_t left, const int8_t right) return segment_is_vector(HandleType(left), HandleType(right)); } +inline bool has_vector_handles(const int num_curve_points, + const int64_t evaluated_size, + const bool cyclic, + const int resolution) +{ + return evaluated_size - !cyclic != (int64_t)segments_num(num_curve_points, cyclic) * resolution; +} + inline float3 calculate_vector_handle(const float3 &point, const float3 &next_point) { return math::interpolate(point, next_point, 1.0f / 3.0f); } +} // namespace bezier + /** \} */ -} // namespace curves::bezier +} // namespace curves struct CurvesSurfaceTransforms { float4x4 curves_to_world; diff --git a/source/blender/blenkernel/BKE_curves_utils.hh b/source/blender/blenkernel/BKE_curves_utils.hh index 0fbd33002e1..5579ab5654a 100644 --- a/source/blender/blenkernel/BKE_curves_utils.hh +++ b/source/blender/blenkernel/BKE_curves_utils.hh @@ -11,9 +11,301 @@ #include "BLI_function_ref.hh" #include "BLI_generic_pointer.hh" +#include "BLI_index_range.hh" namespace blender::bke::curves { +/* -------------------------------------------------------------------- + * Utility structs. + */ + +/** + * Reference to a piecewise segment on a spline curve. + */ +struct CurveSegment { + /** + * Index of the previous control/evaluated point on the curve. 
First point on the segment. + */ + int index; + /** + * Index of the next control/evaluated point on the curve. Last point on the curve segment. + * Should be 0 for looped segments. + */ + int next_index; +}; + +/** + * Reference to a point on a piecewise curve (spline). + * + * Tracks indices of the neighbouring control/evaluated point pair associated with the segment + * in which the point resides. Referenced point within the segment is defined by a + * normalized parameter in the range [0, 1]. + */ +struct CurvePoint : public CurveSegment { + /** + * Normalized parameter in the range [0, 1] defining the point on the piecewise segment. + * Note that the curve point representation is not unique at segment endpoints. + */ + float parameter; + + /** + * True if the parameter is an integer and references a control/evaluated point. + */ + inline bool is_controlpoint() const; + + /* + * Compare if the points are equal. + */ + inline bool operator==(const CurvePoint &other) const; + inline bool operator!=(const CurvePoint &other) const; + + /** + * Compare if 'this' point comes before 'other'. Loop segment for cyclical curves counts + * as the first (least) segment. + */ + inline bool operator<(const CurvePoint &other) const; +}; + +/** + * Cyclical index range. Iterates the interval [start, end). + */ +class IndexRangeCyclic { + /* Index to the start and end of the iterated range. + */ + int64_t start_ = 0; + int64_t end_ = 0; + /* Index for the start and end of the entire iterable range which contains the iterated range + * (e.g. the point range for an indiviudal spline/curve within the entire Curves point domain). + */ + int64_t range_start_ = 0; + int64_t range_end_ = 0; + /* Number of times the range end is passed when the range is iterated. 
+ */ + int64_t cycles_ = 0; + + constexpr IndexRangeCyclic(int64_t begin, + int64_t end, + int64_t iterable_range_start, + int64_t iterable_range_end, + int64_t cycles) + : start_(begin), + end_(end), + range_start_(iterable_range_start), + range_end_(iterable_range_end), + cycles_(cycles) + { + } + + public: + constexpr IndexRangeCyclic() = default; + ~IndexRangeCyclic() = default; + + constexpr IndexRangeCyclic(int64_t start, int64_t end, IndexRange iterable_range, int64_t cycles) + : start_(start), + end_(end), + range_start_(iterable_range.first()), + range_end_(iterable_range.one_after_last()), + cycles_(cycles) + { + } + + /** + * Create an iterator over the cyclical interval [start_index, end_index). + */ + constexpr IndexRangeCyclic(int64_t start, int64_t end, IndexRange iterable_range) + : start_(start), + end_(end == iterable_range.one_after_last() ? iterable_range.first() : end), + range_start_(iterable_range.first()), + range_end_(iterable_range.one_after_last()), + cycles_(end < start) + { + } + + /** + * Increment the range by adding the given number of indices to the beginning of the range. + */ + constexpr IndexRangeCyclic push_forward(int n) + { + BLI_assert(n >= 0); + int64_t nstart = start_ - n; + int64_t cycles = cycles_; + if (nstart < range_start_) { + + cycles += (int64_t)(n / (range_end_ - range_start_)) + (end_ < nstart) - (end_ < start_); + } + return {nstart, end_, range_start_, range_end_, cycles}; + } + /** + * Increment the range by adding the given number of indices to the end of the range. + */ + constexpr IndexRangeCyclic push_backward(int n) + { + BLI_assert(n >= 0); + int64_t new_end = end_ + n; + int64_t cycles = cycles_; + if (range_end_ <= new_end) { + cycles += (int64_t)(n / (range_end_ - range_start_)) + (new_end < start_) - (end_ < start_); + } + return {start_, new_end, range_start_, range_end_, cycles}; + } + + /** + * Get the index range for the curve buffer. 
+ */ + constexpr IndexRange curve_range() const + { + return IndexRange(range_start_, total_size()); + } + + /** + * Range between the first element up to the end of the range. + */ + constexpr IndexRange range_before_loop() const + { + return IndexRange(start_, size_before_loop()); + } + + /** + * Range between the first element in the iterable range up to the last element in the range. + */ + constexpr IndexRange range_after_loop() const + { + return IndexRange(range_start_, size_after_loop()); + } + + /** + * Size of the entire iterable range. + */ + constexpr int64_t total_size() const + { + return range_end_ - range_start_; + } + + /** + * Number of elements between the first element in the range up to the last element in the curve. + */ + constexpr int64_t size_before_loop() const + { + return range_end_ - start_; + } + + /** + * Number of elements between the first element in the iterable range up to the last element in + * the range. + */ + constexpr int64_t size_after_loop() const + { + return end_ - range_start_; + } + + /** + * Get number of elements iterated by the cyclical index range. + */ + constexpr int64_t size() const + { + if (cycles_ > 0) { + return size_before_loop() + end_ + (cycles_ - 1) * (range_end_ - range_start_); + } + else { + return end_ - start_; + } + } + + /** + * Return the number of times the iterator will cycle before ending. 
+ */ + constexpr int64_t cycles() const + { + return cycles_; + } + + constexpr int64_t first() const + { + return start_; + } + + constexpr int64_t one_after_last() const + { + return end_; + } + + struct CyclicIterator; /* Forward declaration */ + + constexpr CyclicIterator begin() const + { + return CyclicIterator(range_start_, range_end_, start_, 0); + } + + constexpr CyclicIterator end() const + { + return CyclicIterator(range_start_, range_end_, end_, cycles_); + } + + struct CyclicIterator { + int64_t index_, begin_, end_, cycles_; + + constexpr CyclicIterator(int64_t range_begin, int64_t range_end, int64_t index, int64_t cycles) + : index_(index), begin_(range_begin), end_(range_end), cycles_(cycles) + { + BLI_assert(range_begin <= index && index <= range_end); + } + + constexpr CyclicIterator(const CyclicIterator ©) + : index_(copy.index_), begin_(copy.begin_), end_(copy.end_), cycles_(copy.cycles_) + { + } + ~CyclicIterator() = default; + + constexpr CyclicIterator &operator=(const CyclicIterator ©) + { + if (this == ©) { + return *this; + } + index_ = copy.index_; + begin_ = copy.begin_; + end_ = copy.end_; + cycles_ = copy.cycles_; + return *this; + } + constexpr CyclicIterator &operator++() + { + index_++; + if (index_ == end_) { + index_ = begin_; + cycles_++; + } + return *this; + } + + void increment(int64_t n) + { + for (int i = 0; i < n; i++) { + ++*this; + } + } + + constexpr const int64_t &operator*() const + { + return index_; + } + + constexpr bool operator==(const CyclicIterator &other) const + { + return index_ == other.index_ && cycles_ == other.cycles_; + } + constexpr bool operator!=(const CyclicIterator &other) const + { + return !this->operator==(other); + } + }; +}; + +/** \} */ + +/* -------------------------------------------------------------------- + * Utility functions. 
+ */ + /** * Copy the provided point attribute values between all curves in the #curve_ranges index * ranges, assuming that all curves have the same number of control points in #src_curves @@ -88,4 +380,40 @@ void foreach_curve_by_type(const VArray<int8_t> &types, FunctionRef<void(IndexMask)> bezier_fn, FunctionRef<void(IndexMask)> nurbs_fn); +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name #CurvePoint Inline Methods + * \{ */ + +inline bool CurvePoint::is_controlpoint() const +{ + return parameter == 0.0 || parameter == 1.0; +} + +inline bool CurvePoint::operator==(const CurvePoint &other) const +{ + return (parameter == other.parameter && index == other.index) || + (parameter == 1.0 && other.parameter == 0.0 && next_index == other.index) || + (parameter == 0.0 && other.parameter == 1.0 && index == other.next_index); +} +inline bool CurvePoint::operator!=(const CurvePoint &other) const +{ + return !this->operator==(other); +} + +inline bool CurvePoint::operator<(const CurvePoint &other) const +{ + if (index == other.index) { + return parameter < other.parameter; + } + else { + /* Use next index for cyclic comparison due to loop segment < first segment. */ + return next_index < other.next_index && + !(next_index == other.index && parameter == 1.0 && other.parameter == 0.0); + } +} + +/** \} */ + } // namespace blender::bke::curves diff --git a/source/blender/blenkernel/BKE_customdata.h b/source/blender/blenkernel/BKE_customdata.h index 09d37682b3c..24fa5f0e87a 100644 --- a/source/blender/blenkernel/BKE_customdata.h +++ b/source/blender/blenkernel/BKE_customdata.h @@ -178,13 +178,11 @@ bool CustomData_merge_mesh_to_bmesh(const struct CustomData *source, int totelem); /** - * Reallocate custom data to a new element count. - * Only affects on data layers which are owned by the CustomData itself, - * referenced data is kept unchanged, - * - * \note Take care of referenced layers by yourself! 
+ * Reallocate custom data to a new element count. If the new size is larger, the new values use + * the #CD_CONSTRUCT behavior, so trivial types must be initialized by the caller. After being + * resized, the #CustomData does not contain any referenced layers. */ -void CustomData_realloc(struct CustomData *data, int totelem); +void CustomData_realloc(struct CustomData *data, int old_size, int new_size); /** * BMesh version of CustomData_merge; merges the layouts of source and `dest`, diff --git a/source/blender/blenkernel/BKE_deform.h b/source/blender/blenkernel/BKE_deform.h index 677a1053826..4023d6829d4 100644 --- a/source/blender/blenkernel/BKE_deform.h +++ b/source/blender/blenkernel/BKE_deform.h @@ -50,19 +50,29 @@ void BKE_defgroup_copy_list(struct ListBase *outbase, const struct ListBase *inb struct bDeformGroup *BKE_defgroup_duplicate(const struct bDeformGroup *ingroup); struct bDeformGroup *BKE_object_defgroup_find_name(const struct Object *ob, const char *name); /** - * \note caller must free. + * Returns flip map for the vertex-groups of `ob`. + * + * \param use_default: How to handle cases where no symmetrical group is found. + * - false: sets these indices to -1, indicating the group should be ignored. + * - true: sets the index to its location in the array (making the group point to it's self). + * Enable this for symmetrical actions which apply weight operations on symmetrical vertices + * where the symmetrical group will be used (if found), otherwise the same group is used. + * + * \return An index array `r_flip_map_num` length, + * (aligned with the list result from `BKE_id_defgroup_list_get(ob)`). + * referencing the index of the symmetrical vertex-group of a fall-back value (see `use_default`). + * The caller is responsible for freeing the array. */ int *BKE_object_defgroup_flip_map(const struct Object *ob, bool use_default, int *r_flip_map_num); /** - * Returns flip map for only unlocked defgroups. - * \note caller must free. 
+ * A version of #BKE_object_defgroup_flip_map that ignores locked groups. */ int *BKE_object_defgroup_flip_map_unlocked(const struct Object *ob, bool use_default, int *r_flip_map_num); /** - * \note caller must free. + * A version of #BKE_object_defgroup_flip_map that only takes a single group into account. */ int *BKE_object_defgroup_flip_map_single(const struct Object *ob, bool use_default, diff --git a/source/blender/blenkernel/BKE_node_runtime.hh b/source/blender/blenkernel/BKE_node_runtime.hh index f2e551a9f32..194820aa4ba 100644 --- a/source/blender/blenkernel/BKE_node_runtime.hh +++ b/source/blender/blenkernel/BKE_node_runtime.hh @@ -21,6 +21,7 @@ struct bNodeType; namespace blender::nodes { struct FieldInferencingInterface; class NodeDeclaration; +struct GeometryNodesLazyFunctionGraphInfo; } // namespace blender::nodes namespace blender::bke { @@ -49,6 +50,15 @@ class bNodeTreeRuntime : NonCopyable, NonMovable { std::unique_ptr<nodes::FieldInferencingInterface> field_inferencing_interface; /** + * For geometry nodes, a lazy function graph with some additional info is cached. This is used to + * evaluate the node group. Caching it here allows us to reuse the preprocessed node tree in case + * its used multiple times. + */ + std::mutex geometry_nodes_lazy_function_graph_info_mutex; + std::unique_ptr<nodes::GeometryNodesLazyFunctionGraphInfo> + geometry_nodes_lazy_function_graph_info; + + /** * Protects access to all topology cache variables below. This is necessary so that the cache can * be updated on a const #bNodeTree. 
*/ @@ -70,6 +80,7 @@ class bNodeTreeRuntime : NonCopyable, NonMovable { MultiValueMap<const bNodeType *, bNode *> nodes_by_type; Vector<bNode *> toposort_left_to_right; Vector<bNode *> toposort_right_to_left; + Vector<bNode *> group_nodes; bool has_link_cycle = false; bool has_undefined_nodes_or_sockets = false; bNode *group_output_node = nullptr; @@ -148,6 +159,12 @@ class bNodeRuntime : NonCopyable, NonMovable { namespace node_tree_runtime { +/** + * Is executed when the depsgraph determines that something in the node group changed that will + * affect the output. + */ +void handle_node_tree_output_changed(bNodeTree &tree_cow); + class AllowUsingOutdatedInfo : NonCopyable, NonMovable { private: const bNodeTree &tree_; @@ -241,6 +258,18 @@ inline blender::Span<bNode *> bNodeTree::all_nodes() return this->runtime->nodes; } +inline blender::Span<const bNode *> bNodeTree::group_nodes() const +{ + BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this)); + return this->runtime->group_nodes; +} + +inline blender::Span<bNode *> bNodeTree::group_nodes() +{ + BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this)); + return this->runtime->group_nodes; +} + inline bool bNodeTree::has_link_cycle() const { BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this)); @@ -413,7 +442,6 @@ inline blender::Span<const bNodeLink *> bNode::internal_links_span() const inline const blender::nodes::NodeDeclaration *bNode::declaration() const { - BLI_assert(this->runtime->declaration != nullptr); return this->runtime->declaration; } diff --git a/source/blender/blenkernel/BKE_paint.h b/source/blender/blenkernel/BKE_paint.h index 9a067e761d7..eef91bacc2f 100644 --- a/source/blender/blenkernel/BKE_paint.h +++ b/source/blender/blenkernel/BKE_paint.h @@ -213,9 +213,7 @@ bool BKE_paint_always_hide_test(struct Object *ob); /** * Returns non-zero if any of the face's vertices are hidden, zero otherwise. 
*/ -bool paint_is_face_hidden(const struct MLoopTri *lt, - const bool *hide_vert, - const struct MLoop *mloop); +bool paint_is_face_hidden(const struct MLoopTri *lt, const bool *hide_poly); /** * Returns non-zero if any of the corners of the grid * face whose inner corner is at (x, y) are hidden, zero otherwise. @@ -689,7 +687,7 @@ void BKE_sculpt_update_object_for_edit(struct Depsgraph *depsgraph, bool need_pmap, bool need_mask, bool is_paint_tool); -void BKE_sculpt_update_object_before_eval(const struct Scene *scene, struct Object *ob_eval); +void BKE_sculpt_update_object_before_eval(struct Object *ob_eval); void BKE_sculpt_update_object_after_eval(struct Depsgraph *depsgraph, struct Object *ob_eval); /** @@ -698,6 +696,7 @@ void BKE_sculpt_update_object_after_eval(struct Depsgraph *depsgraph, struct Obj */ struct MultiresModifierData *BKE_sculpt_multires_active(const struct Scene *scene, struct Object *ob); +int *BKE_sculpt_face_sets_ensure(struct Mesh *mesh); int BKE_sculpt_mask_layers_ensure(struct Object *ob, struct MultiresModifierData *mmd); void BKE_sculpt_toolsettings_data_ensure(struct Scene *scene); @@ -719,18 +718,17 @@ void BKE_sculpt_sync_face_sets_visibility_to_grids(struct Mesh *mesh, struct SubdivCCG *subdiv_ccg); /** - * Ensures that a Face Set data-layers exists. If it does not, it creates one respecting the - * visibility stored in the vertices of the mesh. If it does, it copies the visibility from the - * mesh to the Face Sets. */ -void BKE_sculpt_face_sets_ensure_from_base_mesh_visibility(struct Mesh *mesh); + * If a face set layer exists, initialize its visibility (sign) from the mesh's hidden values. + */ +void BKE_sculpt_face_sets_update_from_base_mesh_visibility(struct Mesh *mesh); /** - * Ensures we do have expected mesh data in original mesh for the sculpt mode. + * Makes sculpt data consistent with other data on the mesh. 
* * \note IDs are expected to be original ones here, and calling code should ensure it updates its * depsgraph properly after calling this function if it needs up-to-date evaluated data. */ -void BKE_sculpt_ensure_orig_mesh_data(struct Scene *scene, struct Object *object); +void BKE_sculpt_ensure_orig_mesh_data(struct Object *object); /** * Test if PBVH can be used directly for drawing, which is faster than diff --git a/source/blender/blenkernel/BKE_pbvh.h b/source/blender/blenkernel/BKE_pbvh.h index 716ee7d2a21..6a194698bd8 100644 --- a/source/blender/blenkernel/BKE_pbvh.h +++ b/source/blender/blenkernel/BKE_pbvh.h @@ -491,6 +491,12 @@ void BKE_pbvh_grids_update(PBVH *pbvh, void BKE_pbvh_subdiv_cgg_set(PBVH *pbvh, struct SubdivCCG *subdiv_ccg); void BKE_pbvh_face_sets_set(PBVH *pbvh, int *face_sets); +/** + * If an operation causes the hide status stored in the mesh to change, this must be called + * to update the references to those attributes, since they are only added when necessary. 
+ */ +void BKE_pbvh_update_hide_attributes_from_mesh(PBVH *pbvh); + void BKE_pbvh_face_sets_color_set(PBVH *pbvh, int seed, int color_default); void BKE_pbvh_respect_hide_set(PBVH *pbvh, bool respect_hide); @@ -674,6 +680,8 @@ const float (*BKE_pbvh_get_vert_normals(const PBVH *pbvh))[3]; const bool *BKE_pbvh_get_vert_hide(const PBVH *pbvh); bool *BKE_pbvh_get_vert_hide_for_write(PBVH *pbvh); +const bool *BKE_pbvh_get_poly_hide(const PBVH *pbvh); + PBVHColorBufferNode *BKE_pbvh_node_color_buffer_get(PBVHNode *node); void BKE_pbvh_node_color_buffer_free(PBVH *pbvh); bool BKE_pbvh_get_color_layer(const struct Mesh *me, diff --git a/source/blender/blenkernel/CMakeLists.txt b/source/blender/blenkernel/CMakeLists.txt index b982c69a378..2f1e1897f8d 100644 --- a/source/blender/blenkernel/CMakeLists.txt +++ b/source/blender/blenkernel/CMakeLists.txt @@ -98,6 +98,7 @@ set(SRC intern/collision.c intern/colorband.c intern/colortools.c + intern/compute_contexts.cc intern/constraint.c intern/context.c intern/crazyspace.cc @@ -352,6 +353,7 @@ set(SRC BKE_collision.h BKE_colorband.h BKE_colortools.h + BKE_compute_contexts.hh BKE_constraint.h BKE_context.h BKE_crazyspace.h diff --git a/source/blender/blenkernel/intern/DerivedMesh.cc b/source/blender/blenkernel/intern/DerivedMesh.cc index 0036ed1cf61..d7db0ad765c 100644 --- a/source/blender/blenkernel/intern/DerivedMesh.cc +++ b/source/blender/blenkernel/intern/DerivedMesh.cc @@ -147,54 +147,6 @@ static MPoly *dm_getPolyArray(DerivedMesh *dm) return mpoly; } -static MVert *dm_dupVertArray(DerivedMesh *dm) -{ - MVert *tmp = (MVert *)MEM_malloc_arrayN( - dm->getNumVerts(dm), sizeof(*tmp), "dm_dupVertArray tmp"); - - if (tmp) { - dm->copyVertArray(dm, tmp); - } - - return tmp; -} - -static MEdge *dm_dupEdgeArray(DerivedMesh *dm) -{ - MEdge *tmp = (MEdge *)MEM_malloc_arrayN( - dm->getNumEdges(dm), sizeof(*tmp), "dm_dupEdgeArray tmp"); - - if (tmp) { - dm->copyEdgeArray(dm, tmp); - } - - return tmp; -} - -static MLoop 
*dm_dupLoopArray(DerivedMesh *dm) -{ - MLoop *tmp = (MLoop *)MEM_malloc_arrayN( - dm->getNumLoops(dm), sizeof(*tmp), "dm_dupLoopArray tmp"); - - if (tmp) { - dm->copyLoopArray(dm, tmp); - } - - return tmp; -} - -static MPoly *dm_dupPolyArray(DerivedMesh *dm) -{ - MPoly *tmp = (MPoly *)MEM_malloc_arrayN( - dm->getNumPolys(dm), sizeof(*tmp), "dm_dupPolyArray tmp"); - - if (tmp) { - dm->copyPolyArray(dm, tmp); - } - - return tmp; -} - static int dm_getNumLoopTri(DerivedMesh *dm) { const int numlooptris = poly_to_tri_count(dm->getNumPolys(dm), dm->getNumLoops(dm)); @@ -233,10 +185,6 @@ void DM_init_funcs(DerivedMesh *dm) dm->getEdgeArray = dm_getEdgeArray; dm->getLoopArray = dm_getLoopArray; dm->getPolyArray = dm_getPolyArray; - dm->dupVertArray = dm_dupVertArray; - dm->dupEdgeArray = dm_dupEdgeArray; - dm->dupLoopArray = dm_dupLoopArray; - dm->dupPolyArray = dm_dupPolyArray; dm->getLoopTriArray = dm_getLoopTriArray; @@ -331,36 +279,6 @@ bool DM_release(DerivedMesh *dm) return false; } -void DM_DupPolys(DerivedMesh *source, DerivedMesh *target) -{ - CustomData_free(&target->loopData, source->numLoopData); - CustomData_free(&target->polyData, source->numPolyData); - - CustomData_copy(&source->loopData, - &target->loopData, - CD_MASK_DERIVEDMESH.lmask, - CD_DUPLICATE, - source->numLoopData); - CustomData_copy(&source->polyData, - &target->polyData, - CD_MASK_DERIVEDMESH.pmask, - CD_DUPLICATE, - source->numPolyData); - - target->numLoopData = source->numLoopData; - target->numPolyData = source->numPolyData; - - if (!CustomData_has_layer(&target->polyData, CD_MPOLY)) { - MPoly *mpoly; - MLoop *mloop; - - mloop = source->dupLoopArray(source); - mpoly = source->dupPolyArray(source); - CustomData_add_layer(&target->loopData, CD_MLOOP, CD_ASSIGN, mloop, source->numLoopData); - CustomData_add_layer(&target->polyData, CD_MPOLY, CD_ASSIGN, mpoly, source->numPolyData); - } -} - void DM_ensure_looptri_data(DerivedMesh *dm) { const unsigned int totpoly = dm->numPolyData; @@ -1802,7 
+1720,7 @@ void makeDerivedMesh(struct Depsgraph *depsgraph, BKE_object_free_derived_caches(ob); if (DEG_is_active(depsgraph)) { - BKE_sculpt_update_object_before_eval(scene, ob); + BKE_sculpt_update_object_before_eval(ob); } /* NOTE: Access the `edit_mesh` after freeing the derived caches, so that `ob->data` is restored diff --git a/source/blender/blenkernel/intern/appdir.c b/source/blender/blenkernel/intern/appdir.c index 24e4305d916..96ac81fdb63 100644 --- a/source/blender/blenkernel/intern/appdir.c +++ b/source/blender/blenkernel/intern/appdir.c @@ -371,14 +371,16 @@ static bool get_path_local_ex(char *targetpath, relfolder[0] = '\0'; } - /* Try `{g_app.program_dirname}/2.xx/{folder_name}` the default directory + /* Try `{g_app.program_dirname}/3.xx/{folder_name}` the default directory * for a portable distribution. See `WITH_INSTALL_PORTABLE` build-option. */ const char *path_base = g_app.program_dirname; #if defined(__APPLE__) && !defined(WITH_PYTHON_MODULE) /* Due new code-sign situation in OSX > 10.9.5 - * we must move the blender_version dir with contents to Resources. */ - char osx_resourses[FILE_MAX]; - BLI_snprintf(osx_resourses, sizeof(osx_resourses), "%s../Resources", g_app.program_dirname); + * we must move the blender_version dir with contents to Resources. + * Add 4 + 9 for the temporary `/../` path & `Resources`. */ + char osx_resourses[FILE_MAX + 4 + 9]; + BLI_path_join( + osx_resourses, sizeof(osx_resourses), g_app.program_dirname, "..", "Resources", NULL); /* Remove the '/../' added above. 
*/ BLI_path_normalize(NULL, osx_resourses); path_base = osx_resourses; diff --git a/source/blender/blenkernel/intern/attribute_access.cc b/source/blender/blenkernel/intern/attribute_access.cc index 6ca3a286a5e..1e237da8119 100644 --- a/source/blender/blenkernel/intern/attribute_access.cc +++ b/source/blender/blenkernel/intern/attribute_access.cc @@ -14,6 +14,7 @@ #include "DNA_meshdata_types.h" #include "DNA_pointcloud_types.h" +#include "BLI_array_utils.hh" #include "BLI_color.hh" #include "BLI_math_vec_types.hh" #include "BLI_span.hh" @@ -726,8 +727,22 @@ bool CustomDataAttributes::remove(const AttributeIDRef &attribute_id) void CustomDataAttributes::reallocate(const int size) { + const int old_size = size_; size_ = size; - CustomData_realloc(&data, size); + CustomData_realloc(&data, old_size, size_); + if (size_ > old_size) { + /* Fill default new values. */ + const int new_elements_num = size_ - old_size; + this->foreach_attribute( + [&](const bke::AttributeIDRef &id, const bke::AttributeMetaData /*meta_data*/) { + GMutableSpan new_data = this->get_for_write(id)->take_back(new_elements_num); + const CPPType &type = new_data.type(); + type.fill_assign_n(type.default_value(), new_data.data(), new_data.size()); + return true; + }, + /* Dummy. 
*/ + ATTR_DOMAIN_POINT); + } } void CustomDataAttributes::clear() @@ -875,6 +890,16 @@ GAttributeWriter MutableAttributeAccessor::lookup_for_write(const AttributeIDRef return attribute; } +GSpanAttributeWriter MutableAttributeAccessor::lookup_for_write_span( + const AttributeIDRef &attribute_id) +{ + GAttributeWriter attribute = this->lookup_for_write(attribute_id); + if (attribute) { + return GSpanAttributeWriter{std::move(attribute), true}; + } + return {}; +} + GAttributeWriter MutableAttributeAccessor::lookup_or_add_for_write( const AttributeIDRef &attribute_id, const eAttrDomain domain, @@ -950,6 +975,37 @@ Vector<AttributeTransferData> retrieve_attributes_for_transfer( return attributes; } +void copy_attribute_domain(const AttributeAccessor src_attributes, + MutableAttributeAccessor dst_attributes, + const IndexMask selection, + const eAttrDomain domain, + const Set<std::string> &skip) +{ + src_attributes.for_all( + [&](const bke::AttributeIDRef &id, const bke::AttributeMetaData &meta_data) { + if (meta_data.domain != domain) { + return true; + } + if (id.is_named() && skip.contains(id.name())) { + return true; + } + if (!id.should_be_kept()) { + return true; + } + + const GVArray src = src_attributes.lookup(id, meta_data.domain); + BLI_assert(src); + + /* Copy attribute. 
*/ + GSpanAttributeWriter dst = dst_attributes.lookup_or_add_for_write_only_span( + id, domain, meta_data.data_type); + array_utils::copy(src, selection, dst.span); + dst.finish(); + + return true; + }); +} + } // namespace blender::bke /** \} */ diff --git a/source/blender/blenkernel/intern/cdderivedmesh.c b/source/blender/blenkernel/intern/cdderivedmesh.c index 93286751f92..0261b2d7674 100644 --- a/source/blender/blenkernel/intern/cdderivedmesh.c +++ b/source/blender/blenkernel/intern/cdderivedmesh.c @@ -244,43 +244,3 @@ DerivedMesh *CDDM_from_mesh(Mesh *mesh) { return cdDM_from_mesh_ex(mesh, CD_REFERENCE, &CD_MASK_MESH); } - -DerivedMesh *CDDM_copy(DerivedMesh *source) -{ - CDDerivedMesh *cddm = cdDM_create("CDDM_copy cddm"); - DerivedMesh *dm = &cddm->dm; - int numVerts = source->numVertData; - int numEdges = source->numEdgeData; - int numTessFaces = 0; - int numLoops = source->numLoopData; - int numPolys = source->numPolyData; - - /* NOTE: Don't copy tessellation faces if not requested explicitly. 
*/ - - /* ensure these are created if they are made on demand */ - source->getVertDataArray(source, CD_ORIGINDEX); - source->getEdgeDataArray(source, CD_ORIGINDEX); - source->getPolyDataArray(source, CD_ORIGINDEX); - - /* this initializes dm, and copies all non mvert/medge/mface layers */ - DM_from_template(dm, source, DM_TYPE_CDDM, numVerts, numEdges, numTessFaces, numLoops, numPolys); - dm->deformedOnly = source->deformedOnly; - dm->cd_flag = source->cd_flag; - - CustomData_copy_data(&source->vertData, &dm->vertData, 0, 0, numVerts); - CustomData_copy_data(&source->edgeData, &dm->edgeData, 0, 0, numEdges); - - /* now add mvert/medge/mface layers */ - cddm->mvert = source->dupVertArray(source); - cddm->medge = source->dupEdgeArray(source); - - CustomData_add_layer(&dm->vertData, CD_MVERT, CD_ASSIGN, cddm->mvert, numVerts); - CustomData_add_layer(&dm->edgeData, CD_MEDGE, CD_ASSIGN, cddm->medge, numEdges); - - DM_DupPolys(source, dm); - - cddm->mloop = CustomData_get_layer(&dm->loopData, CD_MLOOP); - cddm->mpoly = CustomData_get_layer(&dm->polyData, CD_MPOLY); - - return dm; -} diff --git a/source/blender/blenkernel/intern/compute_contexts.cc b/source/blender/blenkernel/intern/compute_contexts.cc new file mode 100644 index 00000000000..026706d363e --- /dev/null +++ b/source/blender/blenkernel/intern/compute_contexts.cc @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BKE_compute_contexts.hh" + +namespace blender::bke { + +ModifierComputeContext::ModifierComputeContext(const ComputeContext *parent, + std::string modifier_name) + : ComputeContext(s_static_type, parent), modifier_name_(std::move(modifier_name)) +{ + hash_.mix_in(s_static_type, strlen(s_static_type)); + hash_.mix_in(modifier_name_.data(), modifier_name_.size()); +} + +void ModifierComputeContext::print_current_in_line(std::ostream &stream) const +{ + stream << "Modifier: " << modifier_name_; +} + +NodeGroupComputeContext::NodeGroupComputeContext(const ComputeContext 
*parent, + std::string node_name) + : ComputeContext(s_static_type, parent), node_name_(std::move(node_name)) +{ + hash_.mix_in(s_static_type, strlen(s_static_type)); + hash_.mix_in(node_name_.data(), node_name_.size()); +} + +StringRefNull NodeGroupComputeContext::node_name() const +{ + return node_name_; +} + +void NodeGroupComputeContext::print_current_in_line(std::ostream &stream) const +{ + stream << "Node: " << node_name_; +} + +} // namespace blender::bke diff --git a/source/blender/blenkernel/intern/cryptomatte.cc b/source/blender/blenkernel/intern/cryptomatte.cc index 102bda0f2b6..72204f6624e 100644 --- a/source/blender/blenkernel/intern/cryptomatte.cc +++ b/source/blender/blenkernel/intern/cryptomatte.cc @@ -41,7 +41,9 @@ struct CryptomatteSession { CryptomatteSession() = default; CryptomatteSession(const Main *bmain); CryptomatteSession(StampData *stamp_data); + CryptomatteSession(const ViewLayer *view_layer); CryptomatteSession(const Scene *scene); + void init(const ViewLayer *view_layer); blender::bke::cryptomatte::CryptomatteLayer &add_layer(std::string layer_name); std::optional<std::string> operator[](float encoded_hash) const; @@ -54,13 +56,15 @@ struct CryptomatteSession { CryptomatteSession::CryptomatteSession(const Main *bmain) { if (!BLI_listbase_is_empty(&bmain->objects)) { - blender::bke::cryptomatte::CryptomatteLayer &objects = add_layer("CryptoObject"); + blender::bke::cryptomatte::CryptomatteLayer &objects = add_layer( + RE_PASSNAME_CRYPTOMATTE_OBJECT); LISTBASE_FOREACH (ID *, id, &bmain->objects) { objects.add_ID(*id); } } if (!BLI_listbase_is_empty(&bmain->materials)) { - blender::bke::cryptomatte::CryptomatteLayer &materials = add_layer("CryptoMaterial"); + blender::bke::cryptomatte::CryptomatteLayer &materials = add_layer( + RE_PASSNAME_CRYPTOMATTE_MATERIAL); LISTBASE_FOREACH (ID *, id, &bmain->materials) { materials.add_ID(*id); } @@ -83,24 +87,34 @@ CryptomatteSession::CryptomatteSession(StampData *stamp_data) false); } 
+CryptomatteSession::CryptomatteSession(const ViewLayer *view_layer) +{ + init(view_layer); +} + CryptomatteSession::CryptomatteSession(const Scene *scene) { - LISTBASE_FOREACH (ViewLayer *, view_layer, &scene->view_layers) { - eViewLayerCryptomatteFlags cryptoflags = static_cast<eViewLayerCryptomatteFlags>( - view_layer->cryptomatte_flag & VIEW_LAYER_CRYPTOMATTE_ALL); - if (cryptoflags == 0) { - cryptoflags = static_cast<eViewLayerCryptomatteFlags>(VIEW_LAYER_CRYPTOMATTE_ALL); - } + LISTBASE_FOREACH (const ViewLayer *, view_layer, &scene->view_layers) { + init(view_layer); + } +} - if (cryptoflags & VIEW_LAYER_CRYPTOMATTE_OBJECT) { - add_layer(blender::StringRefNull(view_layer->name) + ".CryptoObject"); - } - if (cryptoflags & VIEW_LAYER_CRYPTOMATTE_ASSET) { - add_layer(blender::StringRefNull(view_layer->name) + ".CryptoAsset"); - } - if (cryptoflags & VIEW_LAYER_CRYPTOMATTE_MATERIAL) { - add_layer(blender::StringRefNull(view_layer->name) + ".CryptoMaterial"); - } +void CryptomatteSession::init(const ViewLayer *view_layer) +{ + eViewLayerCryptomatteFlags cryptoflags = static_cast<eViewLayerCryptomatteFlags>( + view_layer->cryptomatte_flag & VIEW_LAYER_CRYPTOMATTE_ALL); + if (cryptoflags == 0) { + cryptoflags = static_cast<eViewLayerCryptomatteFlags>(VIEW_LAYER_CRYPTOMATTE_ALL); + } + + if (cryptoflags & VIEW_LAYER_CRYPTOMATTE_OBJECT) { + add_layer(blender::StringRefNull(view_layer->name) + "." + RE_PASSNAME_CRYPTOMATTE_OBJECT); + } + if (cryptoflags & VIEW_LAYER_CRYPTOMATTE_ASSET) { + add_layer(blender::StringRefNull(view_layer->name) + "." + RE_PASSNAME_CRYPTOMATTE_ASSET); + } + if (cryptoflags & VIEW_LAYER_CRYPTOMATTE_MATERIAL) { + add_layer(blender::StringRefNull(view_layer->name) + "." 
+ RE_PASSNAME_CRYPTOMATTE_MATERIAL); } } @@ -142,6 +156,12 @@ struct CryptomatteSession *BKE_cryptomatte_init_from_scene(const struct Scene *s return session; } +struct CryptomatteSession *BKE_cryptomatte_init_from_view_layer(const struct ViewLayer *view_layer) +{ + CryptomatteSession *session = new CryptomatteSession(view_layer); + return session; +} + void BKE_cryptomatte_add_layer(struct CryptomatteSession *session, const char *layer_name) { session->add_layer(layer_name); @@ -485,11 +505,6 @@ CryptomatteHash::CryptomatteHash(uint32_t hash) : hash(hash) { } -CryptomatteHash::CryptomatteHash(const char *name, const int name_len) -{ - hash = BLI_hash_mm3((const unsigned char *)name, name_len, 0); -} - CryptomatteHash CryptomatteHash::from_hex_encoded(blender::StringRef hex_encoded) { CryptomatteHash result(0); @@ -504,21 +519,6 @@ std::string CryptomatteHash::hex_encoded() const return encoded.str(); } -float CryptomatteHash::float_encoded() const -{ - uint32_t mantissa = hash & ((1 << 23) - 1); - uint32_t exponent = (hash >> 23) & ((1 << 8) - 1); - exponent = MAX2(exponent, (uint32_t)1); - exponent = MIN2(exponent, (uint32_t)254); - exponent = exponent << 23; - uint32_t sign = (hash >> 31); - sign = sign << 31; - uint32_t float_bits = sign | exponent | mantissa; - float f; - memcpy(&f, &float_bits, sizeof(uint32_t)); - return f; -} - std::unique_ptr<CryptomatteLayer> CryptomatteLayer::read_from_manifest( blender::StringRefNull manifest) { @@ -625,4 +625,9 @@ const blender::Vector<std::string> &BKE_cryptomatte_layer_names_get( return session.layer_names; } +CryptomatteLayer *BKE_cryptomatte_layer_get(CryptomatteSession &session, StringRef layer_name) +{ + return session.layers.lookup_ptr(layer_name); +} + } // namespace blender::bke::cryptomatte diff --git a/source/blender/blenkernel/intern/curve_catmull_rom.cc b/source/blender/blenkernel/intern/curve_catmull_rom.cc index 952d59edcf9..dac88948036 100644 --- a/source/blender/blenkernel/intern/curve_catmull_rom.cc 
+++ b/source/blender/blenkernel/intern/curve_catmull_rom.cc @@ -17,16 +17,14 @@ int calculate_evaluated_num(const int points_num, const bool cyclic, const int r } /* Adapted from Cycles #catmull_rom_basis_eval function. */ -template<typename T> -static T calculate_basis(const T &a, const T &b, const T &c, const T &d, const float parameter) +void calculate_basis(const float parameter, float r_weights[4]) { const float t = parameter; const float s = 1.0f - parameter; - const float n0 = -t * s * s; - const float n1 = 2.0f + t * t * (3.0f * t - 5.0f); - const float n2 = 2.0f + s * s * (3.0f * s - 5.0f); - const float n3 = -s * t * t; - return 0.5f * (a * n0 + b * n1 + c * n2 + d * n3); + r_weights[0] = -t * s * s; + r_weights[1] = 2.0f + t * t * (3.0f * t - 5.0f); + r_weights[2] = 2.0f + s * s * (3.0f * s - 5.0f); + r_weights[3] = -s * t * t; } template<typename T> @@ -35,7 +33,7 @@ static void evaluate_segment(const T &a, const T &b, const T &c, const T &d, Mut const float step = 1.0f / dst.size(); dst.first() = b; for (const int i : dst.index_range().drop_front(1)) { - dst[i] = calculate_basis<T>(a, b, c, d, i * step); + dst[i] = interpolate<T>(a, b, c, d, i * step); } } diff --git a/source/blender/blenkernel/intern/curves_geometry.cc b/source/blender/blenkernel/intern/curves_geometry.cc index 35b209179d3..06789e34ad4 100644 --- a/source/blender/blenkernel/intern/curves_geometry.cc +++ b/source/blender/blenkernel/intern/curves_geometry.cc @@ -963,11 +963,11 @@ void CurvesGeometry::ensure_can_interpolate_to_evaluated() const void CurvesGeometry::resize(const int points_num, const int curves_num) { if (points_num != this->point_num) { - CustomData_realloc(&this->point_data, points_num); + CustomData_realloc(&this->point_data, this->points_num(), points_num); this->point_num = points_num; } if (curves_num != this->curve_num) { - CustomData_realloc(&this->curve_data, curves_num); + CustomData_realloc(&this->curve_data, this->curves_num(), curves_num); this->curve_num = 
curves_num; this->curve_offsets = (int *)MEM_reallocN(this->curve_offsets, sizeof(int) * (curves_num + 1)); } @@ -1380,69 +1380,49 @@ static void reverse_swap_curve_point_data(const CurvesGeometry &curves, }); } -static bool layer_matches_name_and_type(const CustomDataLayer &layer, - const StringRef name, - const eCustomDataType type) -{ - if (layer.type != type) { - return false; - } - return layer.name == name; -} - void CurvesGeometry::reverse_curves(const IndexMask curves_to_reverse) { - CustomData_duplicate_referenced_layers(&this->point_data, this->points_num()); + Set<StringRef> bezier_handle_names{{ATTR_HANDLE_POSITION_LEFT, + ATTR_HANDLE_POSITION_RIGHT, + ATTR_HANDLE_TYPE_LEFT, + ATTR_HANDLE_TYPE_RIGHT}}; - /* Collect the Bezier handle attributes while iterating through the point custom data layers; - * they need special treatment later. */ - MutableSpan<float3> positions_left; - MutableSpan<float3> positions_right; - MutableSpan<int8_t> types_left; - MutableSpan<int8_t> types_right; + MutableAttributeAccessor attributes = this->attributes_for_write(); - for (const int layer_i : IndexRange(this->point_data.totlayer)) { - CustomDataLayer &layer = this->point_data.layers[layer_i]; - - if (positions_left.is_empty() && - layer_matches_name_and_type(layer, ATTR_HANDLE_POSITION_LEFT, CD_PROP_FLOAT3)) { - positions_left = {static_cast<float3 *>(layer.data), this->points_num()}; - continue; - } - if (positions_right.is_empty() && - layer_matches_name_and_type(layer, ATTR_HANDLE_POSITION_RIGHT, CD_PROP_FLOAT3)) { - positions_right = {static_cast<float3 *>(layer.data), this->points_num()}; - continue; - } - if (types_left.is_empty() && - layer_matches_name_and_type(layer, ATTR_HANDLE_TYPE_LEFT, CD_PROP_INT8)) { - types_left = {static_cast<int8_t *>(layer.data), this->points_num()}; - continue; + attributes.for_all([&](const AttributeIDRef &id, AttributeMetaData meta_data) { + if (meta_data.domain != ATTR_DOMAIN_POINT) { + return true; } - if (types_right.is_empty() 
&& - layer_matches_name_and_type(layer, ATTR_HANDLE_TYPE_RIGHT, CD_PROP_INT8)) { - types_right = {static_cast<int8_t *>(layer.data), this->points_num()}; - continue; + if (id.is_named() && bezier_handle_names.contains(id.name())) { + return true; } - const eCustomDataType data_type = static_cast<eCustomDataType>(layer.type); - attribute_math::convert_to_static_type(data_type, [&](auto dummy) { + GSpanAttributeWriter attribute = attributes.lookup_for_write_span(id); + attribute_math::convert_to_static_type(attribute.span.type(), [&](auto dummy) { using T = decltype(dummy); - reverse_curve_point_data<T>( - *this, curves_to_reverse, {static_cast<T *>(layer.data), this->points_num()}); + reverse_curve_point_data<T>(*this, curves_to_reverse, attribute.span.typed<T>()); }); - } + attribute.finish(); + return true; + }); /* In order to maintain the shape of Bezier curves, handle attributes must reverse, but also the * values for the left and right must swap. Use a utility to swap and reverse at the same time, * to avoid loading the attribute twice. Generally we can expect the right layer to exist when * the left does, but there's no need to count on it, so check for both attributes. 
*/ - if (!positions_left.is_empty() && !positions_right.is_empty()) { - reverse_swap_curve_point_data(*this, curves_to_reverse, positions_left, positions_right); + if (attributes.contains(ATTR_HANDLE_POSITION_LEFT) && + attributes.contains(ATTR_HANDLE_POSITION_RIGHT)) { + reverse_swap_curve_point_data(*this, + curves_to_reverse, + this->handle_positions_left_for_write(), + this->handle_positions_right_for_write()); } - if (!types_left.is_empty() && !types_right.is_empty()) { - reverse_swap_curve_point_data(*this, curves_to_reverse, types_left, types_right); + if (attributes.contains(ATTR_HANDLE_TYPE_LEFT) && attributes.contains(ATTR_HANDLE_TYPE_RIGHT)) { + reverse_swap_curve_point_data(*this, + curves_to_reverse, + this->handle_types_left_for_write(), + this->handle_types_right_for_write()); } this->tag_topology_changed(); @@ -1450,21 +1430,20 @@ void CurvesGeometry::reverse_curves(const IndexMask curves_to_reverse) void CurvesGeometry::remove_attributes_based_on_types() { - const int points_num = this->points_num(); - const int curves_num = this->curves_num(); + MutableAttributeAccessor attributes = this->attributes_for_write(); if (!this->has_curve_with_type(CURVE_TYPE_BEZIER)) { - CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_TYPE_LEFT.c_str(), points_num); - CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_TYPE_RIGHT.c_str(), points_num); - CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_POSITION_LEFT.c_str(), points_num); - CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_POSITION_RIGHT.c_str(), points_num); + attributes.remove(ATTR_HANDLE_TYPE_LEFT); + attributes.remove(ATTR_HANDLE_TYPE_RIGHT); + attributes.remove(ATTR_HANDLE_POSITION_LEFT); + attributes.remove(ATTR_HANDLE_POSITION_RIGHT); } if (!this->has_curve_with_type(CURVE_TYPE_NURBS)) { - CustomData_free_layer_named(&this->point_data, ATTR_NURBS_WEIGHT.c_str(), points_num); - CustomData_free_layer_named(&this->curve_data, ATTR_NURBS_ORDER.c_str(), curves_num); 
- CustomData_free_layer_named(&this->curve_data, ATTR_NURBS_KNOTS_MODE.c_str(), curves_num); + attributes.remove(ATTR_NURBS_WEIGHT); + attributes.remove(ATTR_NURBS_ORDER); + attributes.remove(ATTR_NURBS_KNOTS_MODE); } if (!this->has_curve_with_type({CURVE_TYPE_BEZIER, CURVE_TYPE_CATMULL_ROM, CURVE_TYPE_NURBS})) { - CustomData_free_layer_named(&this->curve_data, ATTR_RESOLUTION.c_str(), curves_num); + attributes.remove(ATTR_RESOLUTION); } } diff --git a/source/blender/blenkernel/intern/customdata.cc b/source/blender/blenkernel/intern/customdata.cc index 24373053896..82a1a2aa8f6 100644 --- a/source/blender/blenkernel/intern/customdata.cc +++ b/source/blender/blenkernel/intern/customdata.cc @@ -2408,19 +2408,37 @@ bool CustomData_merge_mesh_to_bmesh(const CustomData *source, return result; } -void CustomData_realloc(CustomData *data, const int totelem) +void CustomData_realloc(CustomData *data, const int old_size, const int new_size) { - BLI_assert(totelem >= 0); + BLI_assert(new_size >= 0); for (int i = 0; i < data->totlayer; i++) { CustomDataLayer *layer = &data->layers[i]; - const LayerTypeInfo *typeInfo; + const LayerTypeInfo *typeInfo = layerType_getInfo(layer->type); + + const int64_t old_size_in_bytes = int64_t(old_size) * typeInfo->size; + const int64_t new_size_in_bytes = int64_t(new_size) * typeInfo->size; if (layer->flag & CD_FLAG_NOFREE) { - continue; + const void *old_data = layer->data; + layer->data = MEM_malloc_arrayN(new_size, typeInfo->size, __func__); + if (typeInfo->copy) { + typeInfo->copy(old_data, layer->data, std::min(old_size, new_size)); + } + else { + std::memcpy(layer->data, old_data, std::min(old_size_in_bytes, new_size_in_bytes)); + } + layer->flag &= ~CD_FLAG_NOFREE; + } + else { + layer->data = MEM_reallocN(layer->data, new_size_in_bytes); + } + + if (new_size > old_size) { + /* Initialize new values for non-trivial types. 
*/ + if (typeInfo->construct) { + const int new_elements_num = new_size - old_size; + typeInfo->construct(POINTER_OFFSET(layer->data, old_size_in_bytes), new_elements_num); + } } - typeInfo = layerType_getInfo(layer->type); - /* Use calloc to avoid the need to manually initialize new data in layers. - * Useful for types like #MDeformVert which contain a pointer. */ - layer->data = MEM_recallocN(layer->data, (size_t)totelem * typeInfo->size); } } diff --git a/source/blender/blenkernel/intern/image_save.cc b/source/blender/blenkernel/intern/image_save.cc index e65a94d5301..6f62ee123cb 100644 --- a/source/blender/blenkernel/intern/image_save.cc +++ b/source/blender/blenkernel/intern/image_save.cc @@ -175,12 +175,12 @@ bool BKE_image_save_options_init(ImageSaveOptions *opts, BLI_strncpy(opts->filepath, G.ima, sizeof(opts->filepath)); } else { - BLI_snprintf(opts->filepath, sizeof(opts->filepath), "//%s", DATA_("untitled")); + BLI_path_join(opts->filepath, sizeof(opts->filepath), "//", DATA_("untitled"), nullptr); BLI_path_abs(opts->filepath, BKE_main_blendfile_path(bmain)); } } else { - BLI_snprintf(opts->filepath, sizeof(opts->filepath), "//%s", ima->id.name + 2); + BLI_path_join(opts->filepath, sizeof(opts->filepath), "//", ima->id.name + 2, nullptr); BLI_path_make_safe(opts->filepath); BLI_path_abs(opts->filepath, is_prev_save ? 
G.ima : BKE_main_blendfile_path(bmain)); } diff --git a/source/blender/blenkernel/intern/mesh.cc b/source/blender/blenkernel/intern/mesh.cc index a0548b7efd4..6bf25da5ae7 100644 --- a/source/blender/blenkernel/intern/mesh.cc +++ b/source/blender/blenkernel/intern/mesh.cc @@ -1416,19 +1416,15 @@ void BKE_mesh_material_remap(Mesh *me, const uint *remap, uint remap_len) } else { MutableAttributeAccessor attributes = me->attributes_for_write(); - AttributeWriter<int> material_indices = attributes.lookup_for_write<int>("material_index"); + SpanAttributeWriter<int> material_indices = attributes.lookup_or_add_for_write_span<int>( + "material_index", ATTR_DOMAIN_FACE); if (!material_indices) { return; } - if (material_indices.domain != ATTR_DOMAIN_FACE) { - BLI_assert_unreachable(); - return; - } - MutableVArraySpan<int> indices_span(material_indices.varray); - for (const int i : indices_span.index_range()) { - MAT_NR_REMAP(indices_span[i]); + for (const int i : material_indices.span.index_range()) { + MAT_NR_REMAP(material_indices.span[i]); } - indices_span.save(); + material_indices.span.save(); material_indices.finish(); } @@ -1616,14 +1612,14 @@ void BKE_mesh_do_versions_cd_flag_init(Mesh *mesh) const Span<MEdge> edges = mesh->edges(); for (const MVert &vert : verts) { - if (vert.bweight != 0) { + if (vert.bweight_legacy != 0) { mesh->cd_flag |= ME_CDFLAG_VERT_BWEIGHT; break; } } for (const MEdge &edge : edges) { - if (edge.bweight != 0) { + if (edge.bweight_legacy != 0) { mesh->cd_flag |= ME_CDFLAG_EDGE_BWEIGHT; if (mesh->cd_flag & ME_CDFLAG_EDGE_CREASE) { break; @@ -2095,11 +2091,11 @@ void BKE_mesh_split_faces(Mesh *mesh, bool free_loop_normals) const bool do_edges = (num_new_edges > 0); /* Reallocate all vert and edge related data. 
*/ + CustomData_realloc(&mesh->vdata, mesh->totvert, mesh->totvert + num_new_verts); mesh->totvert += num_new_verts; - CustomData_realloc(&mesh->vdata, mesh->totvert); if (do_edges) { + CustomData_realloc(&mesh->edata, mesh->totedge, mesh->totedge + num_new_edges); mesh->totedge += num_new_edges; - CustomData_realloc(&mesh->edata, mesh->totedge); } /* Update normals manually to avoid recalculation after this operation. */ diff --git a/source/blender/blenkernel/intern/mesh_convert.cc b/source/blender/blenkernel/intern/mesh_convert.cc index b7d8972aa7b..e56df0e3fe3 100644 --- a/source/blender/blenkernel/intern/mesh_convert.cc +++ b/source/blender/blenkernel/intern/mesh_convert.cc @@ -105,8 +105,9 @@ static void make_edges_mdata_extend(Mesh &mesh) #endif if (totedge_new) { - CustomData_realloc(&mesh.edata, totedge + totedge_new); - + /* The only layer should be edges, so no other layers need to be initialized. */ + BLI_assert(mesh.edata.totlayer == 1); + CustomData_realloc(&mesh.edata, totedge, totedge + totedge_new); mesh.totedge += totedge_new; MutableSpan<MEdge> edges = mesh.edges_for_write(); MEdge *medge = &edges[totedge]; @@ -634,9 +635,11 @@ void BKE_pointcloud_from_mesh(Mesh *me, PointCloud *pointcloud) using namespace blender; BLI_assert(me != nullptr); - + /* The pointcloud should only contain the position attribute, otherwise more attributes would + * need to be initialized below. */ + BLI_assert(pointcloud->attributes().all_ids().size() == 1); + CustomData_realloc(&pointcloud->pdata, pointcloud->totpoint, me->totvert); pointcloud->totpoint = me->totvert; - CustomData_realloc(&pointcloud->pdata, pointcloud->totpoint); /* Copy over all attributes. 
*/ CustomData_merge(&me->vdata, &pointcloud->pdata, CD_MASK_PROP_ALL, CD_DUPLICATE, me->totvert); diff --git a/source/blender/blenkernel/intern/mesh_legacy_convert.cc b/source/blender/blenkernel/intern/mesh_legacy_convert.cc index 10fc8ff3195..627c0057a28 100644 --- a/source/blender/blenkernel/intern/mesh_legacy_convert.cc +++ b/source/blender/blenkernel/intern/mesh_legacy_convert.cc @@ -929,13 +929,13 @@ void BKE_mesh_legacy_bevel_weight_from_layers(Mesh *mesh) CustomData_get_layer(&mesh->vdata, CD_BWEIGHT))) { mesh->cd_flag |= ME_CDFLAG_VERT_BWEIGHT; for (const int i : verts.index_range()) { - verts[i].bweight = std::clamp(weights[i], 0.0f, 1.0f) * 255.0f; + verts[i].bweight_legacy = std::clamp(weights[i], 0.0f, 1.0f) * 255.0f; } } else { mesh->cd_flag &= ~ME_CDFLAG_VERT_BWEIGHT; for (const int i : verts.index_range()) { - verts[i].bweight = 0; + verts[i].bweight_legacy = 0; } } MutableSpan<MEdge> edges = mesh->edges_for_write(); @@ -943,13 +943,13 @@ void BKE_mesh_legacy_bevel_weight_from_layers(Mesh *mesh) CustomData_get_layer(&mesh->edata, CD_BWEIGHT))) { mesh->cd_flag |= ME_CDFLAG_EDGE_BWEIGHT; for (const int i : edges.index_range()) { - edges[i].bweight = std::clamp(weights[i], 0.0f, 1.0f) * 255.0f; + edges[i].bweight_legacy = std::clamp(weights[i], 0.0f, 1.0f) * 255.0f; } } else { mesh->cd_flag &= ~ME_CDFLAG_EDGE_BWEIGHT; for (const int i : edges.index_range()) { - edges[i].bweight = 0; + edges[i].bweight_legacy = 0; } } } @@ -962,7 +962,7 @@ void BKE_mesh_legacy_bevel_weight_to_layers(Mesh *mesh) float *weights = static_cast<float *>( CustomData_add_layer(&mesh->vdata, CD_BWEIGHT, CD_CONSTRUCT, nullptr, verts.size())); for (const int i : verts.index_range()) { - weights[i] = verts[i].bweight / 255.0f; + weights[i] = verts[i].bweight_legacy / 255.0f; } } @@ -971,7 +971,7 @@ void BKE_mesh_legacy_bevel_weight_to_layers(Mesh *mesh) float *weights = static_cast<float *>( CustomData_add_layer(&mesh->edata, CD_BWEIGHT, CD_CONSTRUCT, nullptr, edges.size())); for 
(const int i : edges.index_range()) { - weights[i] = edges[i].bweight / 255.0f; + weights[i] = edges[i].bweight_legacy / 255.0f; } } } @@ -1077,7 +1077,7 @@ void BKE_mesh_legacy_convert_material_indices_to_mpoly(Mesh *mesh) "material_index", ATTR_DOMAIN_FACE, 0); threading::parallel_for(polys.index_range(), 4096, [&](IndexRange range) { for (const int i : range) { - polys[i].mat_nr = material_indices[i]; + polys[i].mat_nr_legacy = material_indices[i]; } }); } @@ -1089,12 +1089,12 @@ void BKE_mesh_legacy_convert_mpoly_to_material_indices(Mesh *mesh) MutableAttributeAccessor attributes = mesh->attributes_for_write(); const Span<MPoly> polys = mesh->polys(); if (std::any_of( - polys.begin(), polys.end(), [](const MPoly &poly) { return poly.mat_nr != 0; })) { + polys.begin(), polys.end(), [](const MPoly &poly) { return poly.mat_nr_legacy != 0; })) { SpanAttributeWriter<int> material_indices = attributes.lookup_or_add_for_write_only_span<int>( "material_index", ATTR_DOMAIN_FACE); threading::parallel_for(polys.index_range(), 4096, [&](IndexRange range) { for (const int i : range) { - material_indices.span[i] = polys[i].mat_nr; + material_indices.span[i] = polys[i].mat_nr_legacy; } }); material_indices.finish(); diff --git a/source/blender/blenkernel/intern/mesh_remesh_voxel.cc b/source/blender/blenkernel/intern/mesh_remesh_voxel.cc index eb14028f49a..a77879fb573 100644 --- a/source/blender/blenkernel/intern/mesh_remesh_voxel.cc +++ b/source/blender/blenkernel/intern/mesh_remesh_voxel.cc @@ -124,6 +124,7 @@ static Mesh *remesh_quadriflow(const Mesh *input_mesh, /* Construct the new output mesh */ Mesh *mesh = BKE_mesh_new_nomain(qrd.out_totverts, 0, 0, qrd.out_totfaces * 4, qrd.out_totfaces); + BKE_mesh_copy_parameters(mesh, input_mesh); MutableSpan<MVert> mesh_verts = mesh->verts_for_write(); MutableSpan<MPoly> polys = mesh->polys_for_write(); MutableSpan<MLoop> loops = mesh->loops_for_write(); @@ -273,7 +274,9 @@ Mesh *BKE_mesh_remesh_voxel(const Mesh *mesh, { #ifdef 
WITH_OPENVDB openvdb::FloatGrid::Ptr level_set = remesh_voxel_level_set_create(mesh, voxel_size); - return remesh_voxel_volume_to_mesh(level_set, isovalue, adaptivity, false); + Mesh *result = remesh_voxel_volume_to_mesh(level_set, isovalue, adaptivity, false); + BKE_mesh_copy_parameters(result, mesh); + return result; #else UNUSED_VARS(mesh, voxel_size, adaptivity, isovalue); return nullptr; diff --git a/source/blender/blenkernel/intern/nla.c b/source/blender/blenkernel/intern/nla.c index 9457c20eb7d..da508ff865c 100644 --- a/source/blender/blenkernel/intern/nla.c +++ b/source/blender/blenkernel/intern/nla.c @@ -1241,7 +1241,7 @@ static NlaStrip *nlastrip_find_active(ListBase /* NlaStrip */ *strips) float BKE_nlastrip_compute_frame_from_previous_strip(NlaStrip *strip) { - float limit_prev = MINFRAMEF; + float limit_prev = MINAFRAMEF; /* Find the previous end frame, with a special case if the previous strip was a transition : */ if (strip->prev) { diff --git a/source/blender/blenkernel/intern/node.cc b/source/blender/blenkernel/intern/node.cc index 2ae0b456b0d..b82cf30416a 100644 --- a/source/blender/blenkernel/intern/node.cc +++ b/source/blender/blenkernel/intern/node.cc @@ -71,6 +71,7 @@ #include "NOD_composite.h" #include "NOD_function.h" #include "NOD_geometry.h" +#include "NOD_geometry_nodes_lazy_function.hh" #include "NOD_node_declaration.hh" #include "NOD_shader.h" #include "NOD_socket.h" diff --git a/source/blender/blenkernel/intern/node_runtime.cc b/source/blender/blenkernel/intern/node_runtime.cc index a8281820a0b..00b78284791 100644 --- a/source/blender/blenkernel/intern/node_runtime.cc +++ b/source/blender/blenkernel/intern/node_runtime.cc @@ -10,8 +10,22 @@ #include "BLI_task.hh" #include "BLI_timeit.hh" +#include "NOD_geometry_nodes_lazy_function.hh" + namespace blender::bke::node_tree_runtime { +void handle_node_tree_output_changed(bNodeTree &tree_cow) +{ + if (tree_cow.type == NTREE_GEOMETRY) { + /* Rebuild geometry nodes lazy function graph. 
*/ + { + std::lock_guard lock{tree_cow.runtime->geometry_nodes_lazy_function_graph_info_mutex}; + tree_cow.runtime->geometry_nodes_lazy_function_graph_info.reset(); + } + blender::nodes::ensure_geometry_nodes_lazy_function_graph(tree_cow); + } +} + static void double_checked_lock(std::mutex &mutex, bool &data_is_dirty, FunctionRef<void()> fn) { if (!data_is_dirty) { @@ -36,11 +50,15 @@ static void update_node_vector(const bNodeTree &ntree) { bNodeTreeRuntime &tree_runtime = *ntree.runtime; tree_runtime.nodes.clear(); + tree_runtime.group_nodes.clear(); tree_runtime.has_undefined_nodes_or_sockets = false; LISTBASE_FOREACH (bNode *, node, &ntree.nodes) { node->runtime->index_in_tree = tree_runtime.nodes.append_and_get_index(node); node->runtime->owner_tree = const_cast<bNodeTree *>(&ntree); tree_runtime.has_undefined_nodes_or_sockets |= node->typeinfo == &NodeTypeUndefined; + if (node->is_group()) { + tree_runtime.group_nodes.append(node); + } } } diff --git a/source/blender/blenkernel/intern/packedFile.c b/source/blender/blenkernel/intern/packedFile.c index 7c96c463339..901b42ac0b2 100644 --- a/source/blender/blenkernel/intern/packedFile.c +++ b/source/blender/blenkernel/intern/packedFile.c @@ -526,21 +526,27 @@ static void unpack_generate_paths(const char *name, BLI_strncpy(tempdir, "//", sizeof(tempdir)); } - switch (id_type) { - case ID_VF: - BLI_snprintf(r_relpath, relpathlen, "//fonts/%s", tempname); - break; - case ID_SO: - BLI_snprintf(r_relpath, relpathlen, "//sounds/%s", tempname); - break; - case ID_IM: - BLI_snprintf(r_relpath, relpathlen, "//textures/%s", tempname); - break; - case ID_VO: - BLI_snprintf(r_relpath, relpathlen, "//volumes/%s", tempname); - break; - default: - break; + { + const char *dir_name = NULL; + switch (id_type) { + case ID_VF: + dir_name = "fonts"; + break; + case ID_SO: + dir_name = "sounds"; + break; + case ID_IM: + dir_name = "textures"; + break; + case ID_VO: + dir_name = "volumes"; + break; + default: + break; + } + if 
(dir_name) { + BLI_path_join(r_relpath, relpathlen, "//", dir_name, tempname, NULL); + } } { diff --git a/source/blender/blenkernel/intern/paint.cc b/source/blender/blenkernel/intern/paint.cc index d277bbcca63..1a1bf285847 100644 --- a/source/blender/blenkernel/intern/paint.cc +++ b/source/blender/blenkernel/intern/paint.cc @@ -1088,10 +1088,10 @@ bool BKE_paint_ensure(ToolSettings *ts, Paint **r_paint) Sculpt *data = MEM_cnew<Sculpt>(__func__); paint = &data->paint; - /* Turn on X plane mirror symmetry by default */ + /* Turn on X plane mirror symmetry by default. */ paint->symmetry_flags |= PAINT_SYMM_X; - /* Make sure at least dyntopo subdivision is enabled */ + /* Make sure at least dyntopo subdivision is enabled. */ data->flags |= SCULPT_DYNTOPO_SUBDIVIDE | SCULPT_DYNTOPO_COLLAPSE; } else if ((GpPaint **)r_paint == &ts->gp_paint) { @@ -1146,7 +1146,7 @@ void BKE_paint_init(Main *bmain, Scene *sce, ePaintMode mode, const uchar col[3] brush = BKE_brush_first_search(bmain, ob_mode); if (!brush) { brush = BKE_brush_add(bmain, "Brush", ob_mode); - id_us_min(&brush->id); /* fake user only */ + id_us_min(&brush->id); /* Fake user only. */ } BKE_paint_brush_set(paint, brush); } @@ -1247,18 +1247,17 @@ void BKE_paint_blend_read_lib(BlendLibReader *reader, Scene *sce, Paint *p) } } -bool paint_is_face_hidden(const MLoopTri *lt, const bool *hide_vert, const MLoop *mloop) +bool paint_is_face_hidden(const MLoopTri *lt, const bool *hide_poly) { - if (!hide_vert) { + if (!hide_poly) { return false; } - return ((hide_vert[mloop[lt->tri[0]].v]) || (hide_vert[mloop[lt->tri[1]].v]) || - (hide_vert[mloop[lt->tri[2]].v])); + return hide_poly[lt->poly]; } bool paint_is_grid_face_hidden(const uint *grid_hidden, int gridsize, int x, int y) { - /* skip face if any of its corners are hidden */ + /* Skip face if any of its corners are hidden. 
*/ return (BLI_BITMAP_TEST(grid_hidden, y * gridsize + x) || BLI_BITMAP_TEST(grid_hidden, y * gridsize + x + 1) || BLI_BITMAP_TEST(grid_hidden, (y + 1) * gridsize + x + 1) || @@ -1288,7 +1287,7 @@ float paint_grid_paint_mask(const GridPaintMask *gpm, uint level, uint x, uint y return gpm->data[(y * factor) * gridsize + (x * factor)]; } -/* threshold to move before updating the brush rotation */ +/* Threshold to move before updating the brush rotation. */ #define RAKE_THRESHHOLD 20 void paint_update_brush_rake_rotation(UnifiedPaintSettings *ups, Brush *brush, float rotation) @@ -1331,8 +1330,8 @@ bool paint_calculate_rake_rotation(UnifiedPaintSettings *ups, paint_update_brush_rake_rotation(ups, brush, rotation); ok = true; } - /* make sure we reset here to the last rotation to avoid accumulating - * values in case a random rotation is also added */ + /* Make sure we reset here to the last rotation to avoid accumulating + * values in case a random rotation is also added. */ else { paint_update_brush_rake_rotation(ups, brush, ups->last_rake_angle); ok = false; @@ -1528,20 +1527,29 @@ void BKE_sculptsession_free(Object *ob) } } -MultiresModifierData *BKE_sculpt_multires_active(const Scene *scene, Object *ob) +static MultiresModifierData *sculpt_multires_modifier_get(const Scene *scene, + Object *ob, + const bool auto_create_mdisps) { Mesh *me = (Mesh *)ob->data; ModifierData *md; VirtualModifierData virtualModifierData; if (ob->sculpt && ob->sculpt->bm) { - /* can't combine multires and dynamic topology */ + /* Can't combine multires and dynamic topology. */ return nullptr; } + bool need_mdisps = false; + if (!CustomData_get_layer(&me->ldata, CD_MDISPS)) { - /* multires can't work without displacement layer */ - return nullptr; + if (!auto_create_mdisps) { + /* Multires can't work without displacement layer. 
*/ + return nullptr; + } + else { + need_mdisps = true; + } } /* Weight paint operates on original vertices, and needs to treat multires as regular modifier @@ -1559,6 +1567,10 @@ MultiresModifierData *BKE_sculpt_multires_active(const Scene *scene, Object *ob) } if (mmd->sculptlvl > 0 && !(mmd->flags & eMultiresModifierFlag_UseSculptBaseMesh)) { + if (need_mdisps) { + CustomData_add_layer(&me->ldata, CD_MDISPS, CD_SET_DEFAULT, nullptr, me->totloop); + } + return mmd; } @@ -1569,6 +1581,11 @@ MultiresModifierData *BKE_sculpt_multires_active(const Scene *scene, Object *ob) return nullptr; } +MultiresModifierData *BKE_sculpt_multires_active(const Scene *scene, Object *ob) +{ + return sculpt_multires_modifier_get(scene, ob, false); +} + /* Checks if there are any supported deformation modifiers active */ static bool sculpt_modifiers_active(Scene *scene, Sculpt *sd, Object *ob) { @@ -1580,14 +1597,14 @@ static bool sculpt_modifiers_active(Scene *scene, Sculpt *sd, Object *ob) return false; } - /* non-locked shape keys could be handled in the same way as deformed mesh */ + /* Non-locked shape keys could be handled in the same way as deformed mesh. */ if ((ob->shapeflag & OB_SHAPE_LOCK) == 0 && me->key && ob->shapenr) { return true; } md = BKE_modifiers_get_virtual_modifierlist(ob, &virtualModifierData); - /* exception for shape keys because we can edit those */ + /* Exception for shape keys because we can edit those. */ for (; md; md = md->next) { const ModifierTypeInfo *mti = BKE_modifier_get_info(static_cast<ModifierType>(md->type)); if (!BKE_modifier_is_enabled(scene, md, eModifierMode_Realtime)) { @@ -1614,22 +1631,15 @@ static bool sculpt_modifiers_active(Scene *scene, Sculpt *sd, Object *ob) return false; } -/** - * \param need_mask: So that the evaluated mesh that is returned has mask data. 
- */ -static void sculpt_update_object(Depsgraph *depsgraph, - Object *ob, - Object *ob_eval, - bool need_pmap, - bool need_mask, - bool is_paint_tool) +static void sculpt_update_object( + Depsgraph *depsgraph, Object *ob, Object *ob_eval, bool need_pmap, bool is_paint_tool) { Scene *scene = DEG_get_input_scene(depsgraph); Sculpt *sd = scene->toolsettings->sculpt; SculptSession *ss = ob->sculpt; Mesh *me = BKE_object_get_original_mesh(ob); Mesh *me_eval = BKE_object_get_evaluated_mesh(ob_eval); - MultiresModifierData *mmd = BKE_sculpt_multires_active(scene, ob); + MultiresModifierData *mmd = sculpt_multires_modifier_get(scene, ob, true); const bool use_face_sets = (ob->mode & OB_MODE_SCULPT) != 0; BLI_assert(me_eval != nullptr); @@ -1644,15 +1654,6 @@ static void sculpt_update_object(Depsgraph *depsgraph, ss->scene = scene; - if (need_mask) { - if (mmd == nullptr) { - BLI_assert(CustomData_has_layer(&me->vdata, CD_PAINT_MASK)); - } - else { - BLI_assert(CustomData_has_layer(&me->ldata, CD_GRID_PAINT_MASK)); - } - } - ss->shapekey_active = (mmd == nullptr) ? BKE_keyblock_from_object(ob) : nullptr; /* NOTE: Weight pPaint require mesh info for loop lookup, but it never uses multires code path, @@ -1708,7 +1709,6 @@ static void sculpt_update_object(Depsgraph *depsgraph, /* Sculpt Face Sets. */ if (use_face_sets) { - BLI_assert(CustomData_has_layer(&me->pdata, CD_SCULPT_FACE_SETS)); ss->face_sets = static_cast<int *>(CustomData_get_layer(&me->pdata, CD_SCULPT_FACE_SETS)); } else { @@ -1841,24 +1841,7 @@ static void sculpt_update_object(Depsgraph *depsgraph, } } -static void sculpt_face_sets_ensure(Mesh *mesh) -{ - if (CustomData_has_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)) { - return; - } - - int *new_face_sets = static_cast<int *>(CustomData_add_layer( - &mesh->pdata, CD_SCULPT_FACE_SETS, CD_CONSTRUCT, nullptr, mesh->totpoly)); - - /* Initialize the new Face Set data-layer with a default valid visible ID and set the default - * color to render it white. 
*/ - for (int i = 0; i < mesh->totpoly; i++) { - new_face_sets[i] = 1; - } - mesh->face_sets_color_default = 1; -} - -void BKE_sculpt_update_object_before_eval(const Scene *scene, Object *ob_eval) +void BKE_sculpt_update_object_before_eval(Object *ob_eval) { /* Update before mesh evaluation in the dependency graph. */ SculptSession *ss = ob_eval->sculpt; @@ -1888,16 +1871,6 @@ void BKE_sculpt_update_object_before_eval(const Scene *scene, Object *ob_eval) MEM_freeN(nodes); } } - - if (ss) { - Object *ob_orig = DEG_get_original_object(ob_eval); - Mesh *mesh = BKE_object_get_original_mesh(ob_orig); - MultiresModifierData *mmd = BKE_sculpt_multires_active(scene, ob_orig); - - /* Ensure attribute layout is still correct. */ - sculpt_face_sets_ensure(mesh); - BKE_sculpt_mask_layers_ensure(ob_orig, mmd); - } } void BKE_sculpt_update_object_after_eval(Depsgraph *depsgraph, Object *ob_eval) @@ -1906,7 +1879,7 @@ void BKE_sculpt_update_object_after_eval(Depsgraph *depsgraph, Object *ob_eval) * other data when modifiers change the mesh. 
*/ Object *ob_orig = DEG_get_original_object(ob_eval); - sculpt_update_object(depsgraph, ob_orig, ob_eval, false, false, false); + sculpt_update_object(depsgraph, ob_orig, ob_eval, false, false); } void BKE_sculpt_color_layer_create_if_needed(Object *object) @@ -1944,13 +1917,53 @@ void BKE_sculpt_color_layer_create_if_needed(Object *object) } void BKE_sculpt_update_object_for_edit( - Depsgraph *depsgraph, Object *ob_orig, bool need_pmap, bool need_mask, bool is_paint_tool) + Depsgraph *depsgraph, Object *ob_orig, bool need_pmap, bool /*need_mask*/, bool is_paint_tool) { BLI_assert(ob_orig == DEG_get_original_object(ob_orig)); Object *ob_eval = DEG_get_evaluated_object(depsgraph, ob_orig); - sculpt_update_object(depsgraph, ob_orig, ob_eval, need_pmap, need_mask, is_paint_tool); + sculpt_update_object(depsgraph, ob_orig, ob_eval, need_pmap, is_paint_tool); +} + +int *BKE_sculpt_face_sets_ensure(Mesh *mesh) +{ + using namespace blender; + using namespace blender::bke; + if (CustomData_has_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)) { + return static_cast<int *>(CustomData_get_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)); + } + + const AttributeAccessor attributes = mesh->attributes_for_write(); + const VArray<bool> hide_poly = attributes.lookup_or_default<bool>( + ".hide_poly", ATTR_DOMAIN_FACE, false); + + MutableSpan<int> face_sets = { + static_cast<int *>(CustomData_add_layer( + &mesh->pdata, CD_SCULPT_FACE_SETS, CD_CONSTRUCT, nullptr, mesh->totpoly)), + mesh->totpoly}; + + /* Initialize the new face sets with a default valid visible ID and set the default + * color to render it white. */ + if (hide_poly.is_single() && !hide_poly.get_internal_single()) { + face_sets.fill(1); + } + else { + const int face_sets_default_visible_id = 1; + const int face_sets_default_hidden_id = -2; + + const VArraySpan<bool> hide_poly_span{hide_poly}; + for (const int i : face_sets.index_range()) { + /* Assign a new hidden ID to hidden faces. 
This way we get at initial split in two Face Sets + * between hidden and visible faces based on the previous mesh visibly from other mode that + * can be useful in some cases. */ + face_sets[i] = hide_poly_span[i] ? face_sets_default_hidden_id : + face_sets_default_visible_id; + } + } + + mesh->face_sets_color_default = 1; + return face_sets.data(); } int BKE_sculpt_mask_layers_ensure(Object *ob, MultiresModifierData *mmd) @@ -2014,7 +2027,7 @@ int BKE_sculpt_mask_layers_ensure(Object *ob, MultiresModifierData *mmd) ret |= SCULPT_MASK_LAYER_CALC_LOOP; } - /* create vertex paint mask layer if there isn't one already */ + /* Create vertex paint mask layer if there isn't one already. */ if (!paint_mask) { CustomData_add_layer(&me->vdata, CD_PAINT_MASK, CD_SET_DEFAULT, nullptr, me->totvert); ret |= SCULPT_MASK_LAYER_CALC_VERT; @@ -2038,7 +2051,7 @@ void BKE_sculpt_toolsettings_data_ensure(Scene *scene) sd->constant_detail = 3.0f; } - /* Set sane default tiling offsets */ + /* Set sane default tiling offsets. */ if (!sd->paint.tile_offset[0]) { sd->paint.tile_offset[0] = 1.0f; } @@ -2056,8 +2069,7 @@ static bool check_sculpt_object_deformed(Object *object, const bool for_construc /* Active modifiers means extra deformation, which can't be handled correct * on birth of PBVH and sculpt "layer" levels, so use PBVH only for internal brush - * stuff and show final evaluated mesh so user would see actual object shape. - */ + * stuff and show final evaluated mesh so user would see actual object shape. */ deformed |= object->sculpt->deform_modifiers_active; if (for_construction) { @@ -2066,75 +2078,55 @@ static bool check_sculpt_object_deformed(Object *object, const bool for_construc else { /* As in case with modifiers, we can't synchronize deformation made against * PBVH and non-locked keyblock, so also use PBVH only for brushes and - * final DM to give final result to user. - */ + * final DM to give final result to user. 
*/ deformed |= object->sculpt->shapekey_active && (object->shapeflag & OB_SHAPE_LOCK) == 0; } return deformed; } -void BKE_sculpt_face_sets_ensure_from_base_mesh_visibility(Mesh *mesh) +void BKE_sculpt_face_sets_update_from_base_mesh_visibility(Mesh *mesh) { - const int face_sets_default_visible_id = 1; - const int face_sets_default_hidden_id = -(face_sets_default_visible_id + 1); - - bool initialize_new_face_sets = false; - - if (CustomData_has_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)) { - /* Make everything visible. */ - int *current_face_sets = static_cast<int *>( - CustomData_get_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)); - for (int i = 0; i < mesh->totpoly; i++) { - current_face_sets[i] = abs(current_face_sets[i]); - } - } - else { - initialize_new_face_sets = true; - int *new_face_sets = static_cast<int *>(CustomData_add_layer( - &mesh->pdata, CD_SCULPT_FACE_SETS, CD_CONSTRUCT, nullptr, mesh->totpoly)); - - /* Initialize the new Face Set data-layer with a default valid visible ID and set the default - * color to render it white. 
*/ - for (int i = 0; i < mesh->totpoly; i++) { - new_face_sets[i] = face_sets_default_visible_id; - } - mesh->face_sets_color_default = face_sets_default_visible_id; + using namespace blender; + using namespace blender::bke; + if (!CustomData_has_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)) { + return; } - int *face_sets = static_cast<int *>(CustomData_get_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)); - const bool *hide_poly = (const bool *)CustomData_get_layer_named( - &mesh->pdata, CD_PROP_BOOL, ".hide_poly"); + const AttributeAccessor attributes = mesh->attributes(); + const VArray<bool> hide_poly = attributes.lookup_or_default<bool>( + ".hide_poly", ATTR_DOMAIN_FACE, false); + if (hide_poly.is_single() && !hide_poly.get_internal_single()) { + return; + } - for (int i = 0; i < mesh->totpoly; i++) { - if (!(hide_poly && hide_poly[i])) { - continue; - } + MutableSpan<int> face_sets{ + static_cast<int *>(CustomData_get_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)), mesh->totpoly}; - if (initialize_new_face_sets) { - /* When initializing a new Face Set data-layer, assign a new hidden Face Set ID to hidden - * vertices. This way, we get at initial split in two Face Sets between hidden and - * visible vertices based on the previous mesh visibly from other mode that can be - * useful in some cases. */ - face_sets[i] = face_sets_default_hidden_id; - } - else { - /* Otherwise, set the already existing Face Set ID to hidden. */ - face_sets[i] = -abs(face_sets[i]); - } + for (const int i : hide_poly.index_range()) { + face_sets[i] = hide_poly[i] ? 
-std::abs(face_sets[i]) : std::abs(face_sets[i]); } } -void BKE_sculpt_sync_face_sets_visibility_to_base_mesh(Mesh *mesh) +static void set_hide_poly_from_face_sets(Mesh &mesh) { + using namespace blender; using namespace blender::bke; - const int *face_sets = static_cast<const int *>( - CustomData_get_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)); - if (!face_sets) { + if (!CustomData_has_layer(&mesh.pdata, CD_SCULPT_FACE_SETS)) { + return; + } + + const Span<int> face_sets{ + static_cast<const int *>(CustomData_get_layer(&mesh.pdata, CD_SCULPT_FACE_SETS)), + mesh.totpoly}; + + MutableAttributeAccessor attributes = mesh.attributes_for_write(); + if (std::all_of( + face_sets.begin(), face_sets.end(), [&](const int value) { return value > 0; })) { + attributes.remove(".hide_poly"); return; } - MutableAttributeAccessor attributes = mesh->attributes_for_write(); SpanAttributeWriter<bool> hide_poly = attributes.lookup_or_add_for_write_only_span<bool>( ".hide_poly", ATTR_DOMAIN_FACE); if (!hide_poly) { @@ -2144,7 +2136,11 @@ void BKE_sculpt_sync_face_sets_visibility_to_base_mesh(Mesh *mesh) hide_poly.span[i] = face_sets[i] < 0; } hide_poly.finish(); +} +void BKE_sculpt_sync_face_sets_visibility_to_base_mesh(Mesh *mesh) +{ + set_hide_poly_from_face_sets(*mesh); BKE_mesh_flush_hidden_from_polys(mesh); } @@ -2182,41 +2178,29 @@ void BKE_sculpt_sync_face_sets_visibility_to_grids(Mesh *mesh, SubdivCCG *subdiv void BKE_sculpt_sync_face_set_visibility(Mesh *mesh, SubdivCCG *subdiv_ccg) { - BKE_sculpt_face_sets_ensure_from_base_mesh_visibility(mesh); + BKE_sculpt_face_sets_update_from_base_mesh_visibility(mesh); BKE_sculpt_sync_face_sets_visibility_to_base_mesh(mesh); BKE_sculpt_sync_face_sets_visibility_to_grids(mesh, subdiv_ccg); } -void BKE_sculpt_ensure_orig_mesh_data(Scene *scene, Object *object) +void BKE_sculpt_ensure_orig_mesh_data(Object *object) { Mesh *mesh = BKE_mesh_from_object(object); - MultiresModifierData *mmd = BKE_sculpt_multires_active(scene, object); - 
BLI_assert(object->mode == OB_MODE_SCULPT); /* Copy the current mesh visibility to the Face Sets. */ - BKE_sculpt_face_sets_ensure_from_base_mesh_visibility(mesh); - if (object->sculpt != nullptr) { - /* If a sculpt session is active, ensure we have its face-set data properly up-to-date. */ - object->sculpt->face_sets = static_cast<int *>( - CustomData_get_layer(&mesh->pdata, CD_SCULPT_FACE_SETS)); - - /* NOTE: In theory we could add that on the fly when required by sculpt code. - * But this then requires proper update of depsgraph etc. For now we play safe, optimization is - * always possible later if it's worth it. */ - BKE_sculpt_mask_layers_ensure(object, mmd); - } + BKE_sculpt_face_sets_update_from_base_mesh_visibility(mesh); /* Tessfaces aren't used and will become invalid. */ BKE_mesh_tessface_clear(mesh); /* We always need to flush updates from depsgraph here, since at the very least - * `BKE_sculpt_face_sets_ensure_from_base_mesh_visibility()` will have updated some data layer of + * `BKE_sculpt_face_sets_update_from_base_mesh_visibility()` will have updated some data layer of * the mesh. * * All known potential sources of updates: * - Addition of, or changes to, the `CD_SCULPT_FACE_SETS` data layer - * (`BKE_sculpt_face_sets_ensure_from_base_mesh_visibility`). + * (`BKE_sculpt_face_sets_update_from_base_mesh_visibility`). * - Addition of a `CD_PAINT_MASK` data layer (`BKE_sculpt_mask_layers_ensure`). * - Object has any active modifier (modifier stack can be different in Sculpt mode). 
* - Multires: diff --git a/source/blender/blenkernel/intern/pbvh.c b/source/blender/blenkernel/intern/pbvh.c index 6d761f56f13..2e273e076d5 100644 --- a/source/blender/blenkernel/intern/pbvh.c +++ b/source/blender/blenkernel/intern/pbvh.c @@ -285,7 +285,7 @@ static void build_mesh_leaf_node(PBVH *pbvh, PBVHNode *node) } if (has_visible == false) { - if (!paint_is_face_hidden(lt, pbvh->hide_vert, pbvh->mloop)) { + if (!paint_is_face_hidden(lt, pbvh->hide_poly)) { has_visible = true; } } @@ -552,6 +552,7 @@ void BKE_pbvh_build_mesh(PBVH *pbvh, pbvh->mesh = mesh; pbvh->header.type = PBVH_FACES; pbvh->mpoly = mpoly; + pbvh->hide_poly = (bool *)CustomData_get_layer_named(&mesh->pdata, CD_PROP_BOOL, ".hide_poly"); pbvh->material_indices = (const int *)CustomData_get_layer_named( &mesh->pdata, CD_PROP_INT32, "material_index"); pbvh->mloop = mloop; @@ -1313,11 +1314,7 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata, } case PBVH_FACES: node->draw_buffers = GPU_pbvh_mesh_buffers_build( - pbvh->mesh, - pbvh->looptri, - CustomData_get_layer(pbvh->pdata, CD_SCULPT_FACE_SETS), - node->prim_indices, - node->totprim); + pbvh->mesh, pbvh->looptri, node->prim_indices, node->totprim); break; case PBVH_BMESH: node->draw_buffers = GPU_pbvh_bmesh_buffers_build(pbvh->flags & @@ -2293,7 +2290,7 @@ static bool pbvh_faces_node_raycast(PBVH *pbvh, const MLoopTri *lt = &pbvh->looptri[faces[i]]; const int *face_verts = node->face_vert_indices[i]; - if (pbvh->respect_hide && paint_is_face_hidden(lt, pbvh->hide_vert, mloop)) { + if (pbvh->respect_hide && paint_is_face_hidden(lt, pbvh->hide_poly)) { continue; } @@ -2602,7 +2599,7 @@ static bool pbvh_faces_node_nearest_to_ray(PBVH *pbvh, const MLoopTri *lt = &pbvh->looptri[faces[i]]; const int *face_verts = node->face_vert_indices[i]; - if (pbvh->respect_hide && paint_is_face_hidden(lt, pbvh->hide_vert, mloop)) { + if (pbvh->respect_hide && paint_is_face_hidden(lt, pbvh->hide_poly)) { continue; } @@ -3219,6 +3216,12 @@ const 
bool *BKE_pbvh_get_vert_hide(const PBVH *pbvh) return pbvh->hide_vert; } +const bool *BKE_pbvh_get_poly_hide(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_FACES); + return pbvh->hide_poly; +} + bool *BKE_pbvh_get_vert_hide_for_write(PBVH *pbvh) { BLI_assert(pbvh->header.type == PBVH_FACES); @@ -3244,6 +3247,14 @@ void BKE_pbvh_face_sets_set(PBVH *pbvh, int *face_sets) pbvh->face_sets = face_sets; } +void BKE_pbvh_update_hide_attributes_from_mesh(PBVH *pbvh) +{ + if (pbvh->header.type == PBVH_FACES) { + pbvh->hide_vert = CustomData_get_layer_named(&pbvh->mesh->vdata, CD_PROP_BOOL, ".hide_vert"); + pbvh->hide_poly = CustomData_get_layer_named(&pbvh->mesh->pdata, CD_PROP_BOOL, ".hide_poly"); + } +} + void BKE_pbvh_respect_hide_set(PBVH *pbvh, bool respect_hide) { pbvh->respect_hide = respect_hide; diff --git a/source/blender/blenkernel/intern/pbvh_intern.h b/source/blender/blenkernel/intern/pbvh_intern.h index b848327b7a9..8ab56839f9c 100644 --- a/source/blender/blenkernel/intern/pbvh_intern.h +++ b/source/blender/blenkernel/intern/pbvh_intern.h @@ -156,6 +156,7 @@ struct PBVH { bool *hide_vert; struct MVert *verts; const struct MPoly *mpoly; + bool *hide_poly; /** Material indices. Only valid for polygon meshes. 
*/ const int *material_indices; const struct MLoop *mloop; diff --git a/source/blender/blenkernel/intern/pointcloud.cc b/source/blender/blenkernel/intern/pointcloud.cc index fe6353bc72d..b45e164b594 100644 --- a/source/blender/blenkernel/intern/pointcloud.cc +++ b/source/blender/blenkernel/intern/pointcloud.cc @@ -189,8 +189,9 @@ IDTypeInfo IDType_ID_PT = { static void pointcloud_random(PointCloud *pointcloud) { + BLI_assert(pointcloud->totpoint == 0); pointcloud->totpoint = 400; - CustomData_realloc(&pointcloud->pdata, pointcloud->totpoint); + CustomData_realloc(&pointcloud->pdata, 0, pointcloud->totpoint); RNG *rng = BLI_rng_new(0); @@ -238,9 +239,6 @@ PointCloud *BKE_pointcloud_new_nomain(const int totpoint) nullptr, ID_PT, BKE_idtype_idcode_to_name(ID_PT), LIB_ID_CREATE_LOCALIZE)); pointcloud_init_data(&pointcloud->id); - - pointcloud->totpoint = totpoint; - CustomData_add_layer_named(&pointcloud->pdata, CD_PROP_FLOAT, CD_SET_DEFAULT, @@ -248,8 +246,8 @@ PointCloud *BKE_pointcloud_new_nomain(const int totpoint) pointcloud->totpoint, POINTCLOUD_ATTR_RADIUS); + CustomData_realloc(&pointcloud->pdata, 0, totpoint); pointcloud->totpoint = totpoint; - CustomData_realloc(&pointcloud->pdata, pointcloud->totpoint); return pointcloud; } diff --git a/source/blender/blenlib/BLI_array_utils.hh b/source/blender/blenlib/BLI_array_utils.hh new file mode 100644 index 00000000000..dd65147a926 --- /dev/null +++ b/source/blender/blenlib/BLI_array_utils.hh @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_generic_span.hh" +#include "BLI_generic_virtual_array.hh" +#include "BLI_index_mask.hh" +#include "BLI_task.hh" + +namespace blender::array_utils { + +/** + * Fill the destination span by copying masked values from the src array. Threaded based on + * grainsize. 
+ */ +void copy(const GVArray &src, IndexMask selection, GMutableSpan dst, int64_t grain_size = 4096); + +/** + * Fill the destination span by copying values from the src array. Threaded based on + * grainsize. + */ +template<typename T> +inline void copy(const Span<T> src, + const IndexMask selection, + MutableSpan<T> dst, + const int64_t grain_size = 4096) +{ + threading::parallel_for(selection.index_range(), grain_size, [&](const IndexRange range) { + for (const int64_t index : selection.slice(range)) { + dst[index] = src[index]; + } + }); +} + +} // namespace blender::array_utils diff --git a/source/blender/blenlib/BLI_compute_context.hh b/source/blender/blenlib/BLI_compute_context.hh new file mode 100644 index 00000000000..7422467e400 --- /dev/null +++ b/source/blender/blenlib/BLI_compute_context.hh @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup bli + * + * When logging computed values, we generally want to know where the value was computed. For + * example, geometry nodes logs socket values so that they can be displayed in the ui. For that we + * can combine the logged value with a `ComputeContext`, which identifies the place where the value + * was computed. + * + * This is not a trivial problem because e.g. just storing storing a pointer to the socket a value + * belongs to is not enough. That's because the same socket may correspond to many different values + * when the socket is used in a node group that is used multiple times. In this case, not only does + * the socket have to be stored but also the entire nested node group path that led to the + * evaluation of the socket. + * + * Storing the entire "context path" for every logged value is not feasible, because that path can + * become quite long. So that would need much more memory, more compute overhead and makes it + * complicated to compare if two contexts are the same. 
If the identifier for a compute context + * would have a variable size, it would also be much harder to create a map from context to values. + * + * The solution implemented below uses the following key ideas: + * - Every compute context can be hashed to a unique fixed size value (`ComputeContextHash`). While + * technically there could be hash collisions, the hashing algorithm has to be chosen to make + * that practically impossible. This way an entire context path, possibly consisting of many + * nested contexts, is represented by a single value that can be stored easily. + * - A nested compute context is build as singly linked list, where every compute context has a + * pointer to the parent compute context. Note that a link in the other direction is not possible + * because the same parent compute context may be used by many different children which possibly + * run on different threads. + */ + +#include "BLI_array.hh" +#include "BLI_linear_allocator.hh" +#include "BLI_stack.hh" +#include "BLI_string_ref.hh" + +namespace blender { + +/** + * A hash that uniquely identifies a specific (non-fixed-size) compute context. The hash has to + * have enough bits to make collisions practically impossible. + */ +struct ComputeContextHash { + static constexpr int64_t HashSizeInBytes = 16; + uint64_t v1 = 0; + uint64_t v2 = 0; + + uint64_t hash() const + { + return v1; + } + + friend bool operator==(const ComputeContextHash &a, const ComputeContextHash &b) + { + return a.v1 == b.v1 && a.v2 == b.v2; + } + + void mix_in(const void *data, int64_t len); + + friend std::ostream &operator<<(std::ostream &stream, const ComputeContextHash &hash); +}; + +static_assert(sizeof(ComputeContextHash) == ComputeContextHash::HashSizeInBytes); + +/** + * Identifies the context in which a computation happens. This context can be used to identify + * values logged during the computation. For more details, see the comment at the top of the file. 
+ * + * This class should be subclassed to implement specific contexts. + */ +class ComputeContext { + private: + /** + * Only used for debugging currently. + */ + const char *static_type_; + /** + * Pointer to the context that this context is child of. That allows nesting compute contexts. + */ + const ComputeContext *parent_ = nullptr; + + protected: + /** + * The hash that uniquely identifies this context. It's a combined hash of this context as well + * as all the parent contexts. + */ + ComputeContextHash hash_; + + public: + ComputeContext(const char *static_type, const ComputeContext *parent) + : static_type_(static_type), parent_(parent) + { + if (parent != nullptr) { + hash_ = parent_->hash_; + } + } + virtual ~ComputeContext() = default; + + const ComputeContextHash &hash() const + { + return hash_; + } + + const char *static_type() const + { + return static_type_; + } + + const ComputeContext *parent() const + { + return parent_; + } + + /** + * Print the entire nested context stack. + */ + void print_stack(std::ostream &stream, StringRef name) const; + + /** + * Print information about this specific context. This has to be implemented by each subclass. + */ + virtual void print_current_in_line(std::ostream &stream) const = 0; + + friend std::ostream &operator<<(std::ostream &stream, const ComputeContext &compute_context); +}; + +/** + * Utility class to build a context stack in one place. This is typically used to get the hash that + * corresponds to a specific nested compute context, in order to look up corresponding logged + * values. 
+ */ +class ComputeContextBuilder { + private: + LinearAllocator<> allocator_; + Stack<destruct_ptr<ComputeContext>> contexts_; + + public: + bool is_empty() const + { + return contexts_.is_empty(); + } + + const ComputeContext *current() const + { + if (contexts_.is_empty()) { + return nullptr; + } + return contexts_.peek().get(); + } + + const ComputeContextHash hash() const + { + BLI_assert(!contexts_.is_empty()); + return this->current()->hash(); + } + + template<typename T, typename... Args> void push(Args &&...args) + { + const ComputeContext *current = this->current(); + destruct_ptr<T> context = allocator_.construct<T>(current, std::forward<Args>(args)...); + contexts_.push(std::move(context)); + } + + void pop() + { + contexts_.pop(); + } +}; + +} // namespace blender diff --git a/source/blender/blenlib/BLI_generic_span.hh b/source/blender/blenlib/BLI_generic_span.hh index 143ab235d2e..e7a08988c46 100644 --- a/source/blender/blenlib/BLI_generic_span.hh +++ b/source/blender/blenlib/BLI_generic_span.hh @@ -100,6 +100,34 @@ class GSpan { { return this->slice(range.start(), range.size()); } + + GSpan drop_front(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::max<int64_t>(0, size_ - n); + return GSpan(*type_, POINTER_OFFSET(data_, type_->size() * n), new_size); + } + + GSpan drop_back(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::max<int64_t>(0, size_ - n); + return GSpan(*type_, data_, new_size); + } + + GSpan take_front(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::min<int64_t>(size_, n); + return GSpan(*type_, data_, new_size); + } + + GSpan take_back(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::min<int64_t>(size_, n); + return GSpan(*type_, POINTER_OFFSET(data_, type_->size() * (size_ - new_size)), new_size); + } }; /** @@ -199,6 +227,35 @@ class GMutableSpan { return this->slice(range.start(), range.size()); } + 
GMutableSpan drop_front(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::max<int64_t>(0, size_ - n); + return GMutableSpan(*type_, POINTER_OFFSET(data_, type_->size() * n), new_size); + } + + GMutableSpan drop_back(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::max<int64_t>(0, size_ - n); + return GMutableSpan(*type_, data_, new_size); + } + + GMutableSpan take_front(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::min<int64_t>(size_, n); + return GMutableSpan(*type_, data_, new_size); + } + + GMutableSpan take_back(const int64_t n) const + { + BLI_assert(n >= 0); + const int64_t new_size = std::min<int64_t>(size_, n); + return GMutableSpan( + *type_, POINTER_OFFSET(data_, type_->size() * (size_ - new_size)), new_size); + } + /** * Copy all values from another span into this span. This invokes undefined behavior when the * destination contains uninitialized data and T is not trivially copy constructible. diff --git a/source/blender/blenlib/BLI_multi_value_map.hh b/source/blender/blenlib/BLI_multi_value_map.hh index 1fc5a797574..81b536e7d3c 100644 --- a/source/blender/blenlib/BLI_multi_value_map.hh +++ b/source/blender/blenlib/BLI_multi_value_map.hh @@ -115,6 +115,14 @@ template<typename Key, typename Value> class MultiValueMap { } /** + * Get the number of keys. + */ + int64_t size() const + { + return map_.size(); + } + + /** * NOTE: This signature will change when the implementation changes. */ typename MapType::ItemIterator items() const diff --git a/source/blender/blenlib/BLI_path_util.h b/source/blender/blenlib/BLI_path_util.h index 75002f52d94..136258e50f2 100644 --- a/source/blender/blenlib/BLI_path_util.h +++ b/source/blender/blenlib/BLI_path_util.h @@ -84,10 +84,18 @@ void BLI_join_dirfile(char *__restrict dst, * Join multiple strings into a path, ensuring only a single path separator between each, * and trailing slash is kept. 
* + * \param path: The first patch which has special treatment, + * allowing `//` prefix which is kept intact unlike double-slashes which are stripped + * from the bounds of all other paths passed in. + * Passing in the following paths all result in the same output (`//a/b/c`): + * - `"//", "a", "b", "c"`. + * - `"//", "/a/", "/b/", "/c"`. + * - `"//a", "b/c"`. + * * \note If you want a trailing slash, add `SEP_STR` as the last path argument, * duplicate slashes will be cleaned up. */ -size_t BLI_path_join(char *__restrict dst, size_t dst_len, const char *path_first, ...) +size_t BLI_path_join(char *__restrict dst, size_t dst_len, const char *path, ...) ATTR_NONNULL(1, 3) ATTR_SENTINEL(0); /** * Like Python's `os.path.basename()` diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt index d87c60e6099..470ffebcad4 100644 --- a/source/blender/blenlib/CMakeLists.txt +++ b/source/blender/blenlib/CMakeLists.txt @@ -23,7 +23,6 @@ set(INC_SYS ) set(SRC - intern/BLI_args.c intern/BLI_array.c intern/BLI_assert.c intern/BLI_color.cc @@ -48,11 +47,13 @@ set(SRC intern/array_store.c intern/array_store_utils.c intern/array_utils.c + intern/array_utils.cc intern/astar.c intern/bitmap.c intern/bitmap_draw_2d.c intern/boxpack_2d.c intern/buffer.c + intern/compute_context.cc intern/convexhull_2d.c intern/cpp_type.cc intern/delaunay_2d.cc @@ -159,12 +160,12 @@ set(SRC BLI_alloca.h BLI_allocator.hh BLI_any.hh - BLI_args.h BLI_array.h BLI_array.hh BLI_array_store.h BLI_array_store_utils.h BLI_array_utils.h + BLI_array_utils.hh BLI_asan.h BLI_assert.h BLI_astar.h @@ -180,6 +181,7 @@ set(SRC BLI_compiler_attrs.h BLI_compiler_compat.h BLI_compiler_typecheck.h + BLI_compute_context.hh BLI_console.h BLI_convexhull_2d.h BLI_cpp_type.hh @@ -353,6 +355,14 @@ set(LIB ${ZSTD_LIBRARIES} ) +if(NOT WITH_PYTHON_MODULE) + list(APPEND SRC + intern/BLI_args.c + + BLI_args.h + ) +endif() + if(WITH_MEM_VALGRIND) add_definitions(-DWITH_MEM_VALGRIND) endif() diff --git 
a/source/blender/blenlib/intern/array_utils.cc b/source/blender/blenlib/intern/array_utils.cc new file mode 100644 index 00000000000..d4266295944 --- /dev/null +++ b/source/blender/blenlib/intern/array_utils.cc @@ -0,0 +1,18 @@ +#include "BLI_array_utils.hh" +#include "BLI_task.hh" + +namespace blender::array_utils { + +void copy(const GVArray &src, + const IndexMask selection, + GMutableSpan dst, + const int64_t grain_size) +{ + BLI_assert(src.type() == dst.type()); + BLI_assert(src.size() == dst.size()); + threading::parallel_for(selection.index_range(), grain_size, [&](const IndexRange range) { + src.materialize_to_uninitialized(selection.slice(range), dst.data()); + }); +} + +} // namespace blender::array_utils diff --git a/source/blender/blenlib/intern/compute_context.cc b/source/blender/blenlib/intern/compute_context.cc new file mode 100644 index 00000000000..50a4a90a4a9 --- /dev/null +++ b/source/blender/blenlib/intern/compute_context.cc @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_compute_context.hh" +#include "BLI_hash_md5.h" + +namespace blender { + +void ComputeContextHash::mix_in(const void *data, int64_t len) +{ + DynamicStackBuffer<> buffer_owner(HashSizeInBytes + len, 8); + char *buffer = static_cast<char *>(buffer_owner.buffer()); + memcpy(buffer, this, HashSizeInBytes); + memcpy(buffer + HashSizeInBytes, data, len); + + BLI_hash_md5_buffer(buffer, HashSizeInBytes + len, this); +} + +std::ostream &operator<<(std::ostream &stream, const ComputeContextHash &hash) +{ + std::stringstream ss; + ss << "0x" << std::hex << hash.v1 << hash.v2; + stream << ss.str(); + return stream; +} + +void ComputeContext::print_stack(std::ostream &stream, StringRef name) const +{ + Stack<const ComputeContext *> stack; + for (const ComputeContext *current = this; current; current = current->parent_) { + stack.push(current); + } + stream << "Context Stack: " << name << "\n"; + while (!stack.is_empty()) { + const ComputeContext *current 
= stack.pop(); + stream << "-> "; + current->print_current_in_line(stream); + const ComputeContextHash &current_hash = current->hash_; + stream << " \t(hash: " << current_hash << ")\n"; + } +} + +std::ostream &operator<<(std::ostream &stream, const ComputeContext &compute_context) +{ + compute_context.print_stack(stream, ""); + return stream; +} + +} // namespace blender diff --git a/source/blender/blenlib/intern/cpp_type.cc b/source/blender/blenlib/intern/cpp_type.cc index d6a087cf175..38de32d3ec8 100644 --- a/source/blender/blenlib/intern/cpp_type.cc +++ b/source/blender/blenlib/intern/cpp_type.cc @@ -26,3 +26,4 @@ BLI_CPP_TYPE_MAKE(ColorGeometry4f, blender::ColorGeometry4f, CPPTypeFlags::Basic BLI_CPP_TYPE_MAKE(ColorGeometry4b, blender::ColorGeometry4b, CPPTypeFlags::BasicType) BLI_CPP_TYPE_MAKE(string, std::string, CPPTypeFlags::BasicType) +BLI_CPP_TYPE_MAKE(StringVector, blender::Vector<std::string>, CPPTypeFlags::None) diff --git a/source/blender/blenlib/intern/path_util.c b/source/blender/blenlib/intern/path_util.c index 1e95aa3b7b0..c053c3907db 100644 --- a/source/blender/blenlib/intern/path_util.c +++ b/source/blender/blenlib/intern/path_util.c @@ -1505,8 +1505,8 @@ size_t BLI_path_join(char *__restrict dst, const size_t dst_len, const char *pat return ofs; } - /* remove trailing slashes, unless there are _only_ trailing slashes - * (allow "//" as the first argument). */ + /* Remove trailing slashes, unless there are *only* trailing slashes + * (allow `//` or `//some_path` as the first argument).
*/ bool has_trailing_slash = false; if (ofs != 0) { size_t len = ofs; diff --git a/source/blender/blenlib/tests/BLI_path_util_test.cc b/source/blender/blenlib/tests/BLI_path_util_test.cc index 4f6f4a5c413..54afc3d975d 100644 --- a/source/blender/blenlib/tests/BLI_path_util_test.cc +++ b/source/blender/blenlib/tests/BLI_path_util_test.cc @@ -298,6 +298,13 @@ TEST(path_util, JoinComplex) JOIN("1/2/3/", 100, "1", "////////", "", "2", "3\\"); } +TEST(path_util, JoinRelativePrefix) +{ + JOIN("//a/b/c", 100, "//a", "b", "c"); + JOIN("//a/b/c", 100, "//", "//a//", "//b//", "//c"); + JOIN("//a/b/c", 100, "//", "//", "a", "//", "b", "//", "c"); +} + #undef JOIN /* BLI_path_frame */ diff --git a/source/blender/compositor/operations/COM_ViewerOperation.cc b/source/blender/compositor/operations/COM_ViewerOperation.cc index aeadf8f255d..3bd5fa4ad14 100644 --- a/source/blender/compositor/operations/COM_ViewerOperation.cc +++ b/source/blender/compositor/operations/COM_ViewerOperation.cc @@ -156,7 +156,7 @@ void ViewerOperation::init_image() ibuf->y = display_height_; /* zero size can happen if no image buffers exist to define a sensible resolution */ if (ibuf->x > 0 && ibuf->y > 0) { - imb_addrectfloatImBuf(ibuf); + imb_addrectfloatImBuf(ibuf, 4); } ibuf->userflags |= IB_DISPLAY_BUFFER_INVALID; diff --git a/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc b/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc index ca3e4543a23..dcefb5528b2 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc +++ b/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc @@ -1741,7 +1741,14 @@ void DepsgraphNodeBuilder::build_nodetree(bNodeTree *ntree) /* Animation, */ build_animdata(&ntree->id); /* Output update. 
*/ - add_operation_node(&ntree->id, NodeType::NTREE_OUTPUT, OperationCode::NTREE_OUTPUT); + ID *id_cow = get_cow_id(&ntree->id); + add_operation_node(&ntree->id, + NodeType::NTREE_OUTPUT, + OperationCode::NTREE_OUTPUT, + [id_cow](::Depsgraph * /*depsgraph*/) { + bNodeTree *ntree_cow = reinterpret_cast<bNodeTree *>(id_cow); + bke::node_tree_runtime::handle_node_tree_output_changed(*ntree_cow); + }); /* nodetree's nodes... */ LISTBASE_FOREACH (bNode *, bnode, &ntree->nodes) { build_idproperties(bnode->prop); diff --git a/source/blender/depsgraph/intern/eval/deg_eval_flush.cc b/source/blender/depsgraph/intern/eval/deg_eval_flush.cc index 30ee626f0f8..09981eb32c5 100644 --- a/source/blender/depsgraph/intern/eval/deg_eval_flush.cc +++ b/source/blender/depsgraph/intern/eval/deg_eval_flush.cc @@ -371,10 +371,6 @@ void deg_graph_flush_updates(Depsgraph *graph) while (op_node != nullptr) { /* Tag operation as required for update. */ op_node->flag |= DEPSOP_FLAG_NEEDS_UPDATE; - /* Tag depsgraph visibility update when visibility operation is tagged for an update. */ - if (op_node->opcode == OperationCode::VISIBILITY) { - graph->need_update_nodes_visibility = true; - } /* Inform corresponding ID and component nodes about the change. 
*/ ComponentNode *comp_node = op_node->owner; IDNode *id_node = comp_node->owner; diff --git a/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc b/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc index 515c9a197d7..a056ba1dfa7 100644 --- a/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc +++ b/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc @@ -34,13 +34,10 @@ void deg_evaluate_object_node_visibility(::Depsgraph *depsgraph, IDNode *id_node DEG_debug_print_eval(depsgraph, __func__, object->id.name, &object->id); - bool is_enabled; - if (graph->mode == DAG_EVAL_VIEWPORT) { - is_enabled = (object->base_flag & BASE_ENABLED_AND_MAYBE_VISIBLE_IN_VIEWPORT); - } - else { - is_enabled = (object->base_flag & BASE_ENABLED_RENDER); - }; + const int required_flags = (graph->mode == DAG_EVAL_VIEWPORT) ? BASE_ENABLED_VIEWPORT : + BASE_ENABLED_RENDER; + + const bool is_enabled = object->base_flag & required_flags; if (id_node->is_enabled_on_eval != is_enabled) { id_node->is_enabled_on_eval = is_enabled; diff --git a/source/blender/draw/CMakeLists.txt b/source/blender/draw/CMakeLists.txt index ac7f1c5ff68..e6b532ed25a 100644 --- a/source/blender/draw/CMakeLists.txt +++ b/source/blender/draw/CMakeLists.txt @@ -135,6 +135,7 @@ set(SRC engines/eevee/eevee_temporal_sampling.c engines/eevee/eevee_volumes.c engines/eevee_next/eevee_camera.cc + engines/eevee_next/eevee_cryptomatte.cc engines/eevee_next/eevee_depth_of_field.cc engines/eevee_next/eevee_engine.cc engines/eevee_next/eevee_film.cc @@ -387,6 +388,7 @@ set(GLSL_SRC engines/eevee/shaders/volumetric_frag.glsl engines/eevee/shaders/volumetric_geom.glsl engines/eevee/shaders/volumetric_vert.glsl + engines/eevee/shaders/volumetric_resolve_comp.glsl engines/eevee/shaders/volumetric_resolve_frag.glsl engines/eevee/shaders/volumetric_scatter_frag.glsl engines/eevee/shaders/volumetric_integration_frag.glsl @@ -395,6 +397,7 @@ set(GLSL_SRC engines/eevee_next/shaders/eevee_attributes_lib.glsl 
engines/eevee_next/shaders/eevee_camera_lib.glsl engines/eevee_next/shaders/eevee_colorspace_lib.glsl + engines/eevee_next/shaders/eevee_cryptomatte_lib.glsl engines/eevee_next/shaders/eevee_depth_of_field_accumulator_lib.glsl engines/eevee_next/shaders/eevee_depth_of_field_bokeh_lut_comp.glsl engines/eevee_next/shaders/eevee_depth_of_field_downsample_comp.glsl @@ -411,6 +414,7 @@ set(GLSL_SRC engines/eevee_next/shaders/eevee_depth_of_field_tiles_dilate_comp.glsl engines/eevee_next/shaders/eevee_depth_of_field_tiles_flatten_comp.glsl engines/eevee_next/shaders/eevee_film_comp.glsl + engines/eevee_next/shaders/eevee_film_cryptomatte_post_comp.glsl engines/eevee_next/shaders/eevee_film_frag.glsl engines/eevee_next/shaders/eevee_film_lib.glsl engines/eevee_next/shaders/eevee_geom_curves_vert.glsl diff --git a/source/blender/draw/engines/eevee/eevee_private.h b/source/blender/draw/engines/eevee/eevee_private.h index 8c6d96254ae..573c29b78a1 100644 --- a/source/blender/draw/engines/eevee/eevee_private.h +++ b/source/blender/draw/engines/eevee/eevee_private.h @@ -1261,6 +1261,7 @@ struct GPUShader *EEVEE_shaders_volumes_scatter_sh_get(void); struct GPUShader *EEVEE_shaders_volumes_scatter_with_lights_sh_get(void); struct GPUShader *EEVEE_shaders_volumes_integration_sh_get(void); struct GPUShader *EEVEE_shaders_volumes_resolve_sh_get(bool accum); +struct GPUShader *EEVEE_shaders_volumes_resolve_comp_sh_get(bool float_target); struct GPUShader *EEVEE_shaders_volumes_accum_sh_get(void); struct GPUShader *EEVEE_shaders_ggx_lut_sh_get(void); struct GPUShader *EEVEE_shaders_ggx_refraction_lut_sh_get(void); diff --git a/source/blender/draw/engines/eevee/eevee_shaders.c b/source/blender/draw/engines/eevee/eevee_shaders.c index 04d1168a30d..a7290b3894e 100644 --- a/source/blender/draw/engines/eevee/eevee_shaders.c +++ b/source/blender/draw/engines/eevee/eevee_shaders.c @@ -133,6 +133,7 @@ static struct { struct GPUShader *scatter_with_lights_sh; struct GPUShader 
*volumetric_integration_sh; struct GPUShader *volumetric_resolve_sh[2]; + struct GPUShader *volumetric_resolve_comp_sh[2]; struct GPUShader *volumetric_accum_sh; /* Shader strings */ @@ -261,6 +262,7 @@ extern char datatoc_volumetric_frag_glsl[]; extern char datatoc_volumetric_geom_glsl[]; extern char datatoc_volumetric_integration_frag_glsl[]; extern char datatoc_volumetric_lib_glsl[]; +extern char datatoc_volumetric_resolve_comp_glsl[]; extern char datatoc_volumetric_resolve_frag_glsl[]; extern char datatoc_volumetric_scatter_frag_glsl[]; extern char datatoc_volumetric_vert_glsl[]; @@ -903,6 +905,20 @@ struct GPUShader *EEVEE_shaders_volumes_resolve_sh_get(bool accum) return e_data.volumetric_resolve_sh[index]; } +struct GPUShader *EEVEE_shaders_volumes_resolve_comp_sh_get(bool float_target) +{ + const int index = (float_target ? 1 : 0); + if (e_data.volumetric_resolve_comp_sh[index] == NULL) { + e_data.volumetric_resolve_comp_sh[index] = DRW_shader_create_compute_with_shaderlib( + datatoc_volumetric_resolve_comp_glsl, + e_data.lib, + float_target ? "#define TARGET_IMG_FLOAT\n" SHADER_DEFINES : SHADER_DEFINES, + __func__); + } + + return e_data.volumetric_resolve_comp_sh[index]; +} + struct GPUShader *EEVEE_shaders_volumes_accum_sh_get() { if (e_data.volumetric_accum_sh == NULL) { diff --git a/source/blender/draw/engines/eevee/eevee_volumes.c b/source/blender/draw/engines/eevee/eevee_volumes.c index 2d96cffb4ba..b2e5a0abe94 100644 --- a/source/blender/draw/engines/eevee/eevee_volumes.c +++ b/source/blender/draw/engines/eevee/eevee_volumes.c @@ -396,18 +396,37 @@ void EEVEE_volumes_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata) grp, NULL, USE_VOLUME_OPTI ? 
1 : common_data->vol_tex_size[2]); DRW_PASS_CREATE(psl->volumetric_resolve_ps, DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM); - grp = DRW_shgroup_create(EEVEE_shaders_volumes_resolve_sh_get(false), - psl->volumetric_resolve_ps); - DRW_shgroup_uniform_texture_ref(grp, "inScattering", &txl->volume_scatter); - DRW_shgroup_uniform_texture_ref(grp, "inTransmittance", &txl->volume_transmit); - DRW_shgroup_uniform_texture_ref(grp, "inSceneDepth", &e_data.depth_src); - DRW_shgroup_uniform_block(grp, "light_block", sldata->light_ubo); - DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo); - DRW_shgroup_uniform_block(grp, "probe_block", sldata->probe_ubo); - DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined); - DRW_shgroup_uniform_block(grp, "shadow_block", sldata->shadow_ubo); + if (GPU_compute_shader_support() && GPU_shader_image_load_store_support()) { + const bool use_float_target = DRW_state_is_image_render(); + grp = DRW_shgroup_create(EEVEE_shaders_volumes_resolve_comp_sh_get(use_float_target), + psl->volumetric_resolve_ps); + DRW_shgroup_uniform_texture_ref(grp, "inScattering", &txl->volume_scatter); + DRW_shgroup_uniform_texture_ref(grp, "inTransmittance", &txl->volume_transmit); + DRW_shgroup_uniform_texture_ref(grp, "inSceneDepth", &e_data.depth_src); + DRW_shgroup_uniform_block(grp, "light_block", sldata->light_ubo); + DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo); + DRW_shgroup_uniform_block(grp, "probe_block", sldata->probe_ubo); + DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined); + DRW_shgroup_uniform_block(grp, "shadow_block", sldata->shadow_ubo); + DRW_shgroup_uniform_image_ref(grp, "target_img", &txl->color); - DRW_shgroup_call_procedural_triangles(grp, NULL, 1); + const float *size = DRW_viewport_size_get(); + DRW_shgroup_call_compute(grp, size[0], size[1], 1); + } + else { + grp = DRW_shgroup_create(EEVEE_shaders_volumes_resolve_sh_get(false), + 
psl->volumetric_resolve_ps); + DRW_shgroup_uniform_texture_ref(grp, "inScattering", &txl->volume_scatter); + DRW_shgroup_uniform_texture_ref(grp, "inTransmittance", &txl->volume_transmit); + DRW_shgroup_uniform_texture_ref(grp, "inSceneDepth", &e_data.depth_src); + DRW_shgroup_uniform_block(grp, "light_block", sldata->light_ubo); + DRW_shgroup_uniform_block(grp, "common_block", sldata->common_ubo); + DRW_shgroup_uniform_block(grp, "probe_block", sldata->probe_ubo); + DRW_shgroup_uniform_block(grp, "renderpass_block", sldata->renderpass_ubo.combined); + DRW_shgroup_uniform_block(grp, "shadow_block", sldata->shadow_ubo); + + DRW_shgroup_call_procedural_triangles(grp, NULL, 1); + } } } @@ -546,11 +565,16 @@ void EEVEE_volumes_resolve(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *veda } /* Apply for opaque geometry. */ - GPU_framebuffer_bind(fbl->main_color_fb); - DRW_draw_pass(psl->volumetric_resolve_ps); + if (GPU_compute_shader_support() && GPU_shader_image_load_store_support()) { + DRW_draw_pass(psl->volumetric_resolve_ps); + } + else { + GPU_framebuffer_bind(fbl->main_color_fb); + DRW_draw_pass(psl->volumetric_resolve_ps); - /* Restore. */ - GPU_framebuffer_bind(fbl->main_fb); + /* Restore. */ + GPU_framebuffer_bind(fbl->main_fb); + } } } diff --git a/source/blender/draw/engines/eevee/shaders/volumetric_resolve_comp.glsl b/source/blender/draw/engines/eevee/shaders/volumetric_resolve_comp.glsl new file mode 100644 index 00000000000..2b0139ff923 --- /dev/null +++ b/source/blender/draw/engines/eevee/shaders/volumetric_resolve_comp.glsl @@ -0,0 +1,38 @@ + +#pragma BLENDER_REQUIRE(volumetric_lib.glsl) + +/* Based on Frosbite Unified Volumetric. + * https://www.ea.com/frostbite/news/physically-based-unified-volumetric-rendering-in-frostbite */ + +/* Step 4 : Apply final integration on top of the scene color. 
*/ + +uniform sampler2D inSceneDepth; + +layout(local_size_x = 1, local_size_y = 1) in; + +#ifdef TARGET_IMG_FLOAT +layout(binding = 0, rgba32f) uniform image2D target_img; +#else +layout(binding = 0, rgba16f) uniform image2D target_img; +#endif + +void main() +{ + ivec2 co = ivec2(gl_GlobalInvocationID.xy); + vec2 uvs = co / vec2(textureSize(inSceneDepth, 0)); + float scene_depth = texture(inSceneDepth, uvs).r; + + vec3 transmittance, scattering; + volumetric_resolve(uvs, scene_depth, transmittance, scattering); + + /* Approximate volume alpha by using a monochromatic transmittance + * and adding it to the scene alpha. */ + float alpha = dot(transmittance, vec3(1.0 / 3.0)); + + vec4 color0 = vec4(scattering, 1.0 - alpha); + vec4 color1 = vec4(transmittance, alpha); + + vec4 color_in = imageLoad(target_img, co); + vec4 color_out = color0 + color1 * color_in; + imageStore(target_img, co, color_out); +} diff --git a/source/blender/draw/engines/eevee_next/eevee_cryptomatte.cc b/source/blender/draw/engines/eevee_next/eevee_cryptomatte.cc new file mode 100644 index 00000000000..340a587b1c1 --- /dev/null +++ b/source/blender/draw/engines/eevee_next/eevee_cryptomatte.cc @@ -0,0 +1,130 @@ +#include "BKE_cryptomatte.hh" + +#include "GPU_material.h" + +#include "eevee_cryptomatte.hh" +#include "eevee_instance.hh" +#include "eevee_renderbuffers.hh" + +namespace blender::eevee { + +void Cryptomatte::begin_sync() +{ + const eViewLayerEEVEEPassType enabled_passes = static_cast<eViewLayerEEVEEPassType>( + inst_.film.enabled_passes_get() & + (EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT | EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET | + EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET)); + + session_.reset(); + object_layer_ = nullptr; + asset_layer_ = nullptr; + material_layer_ = nullptr; + + if (enabled_passes && !inst_.is_viewport()) { + session_.reset(BKE_cryptomatte_init_from_view_layer(inst_.view_layer)); + + for (const std::string &layer_name : + 
bke::cryptomatte::BKE_cryptomatte_layer_names_get(*session_)) { + StringRef layer_name_ref = layer_name; + bke::cryptomatte::CryptomatteLayer *layer = bke::cryptomatte::BKE_cryptomatte_layer_get( + *session_, layer_name); + if (layer_name_ref.endswith(RE_PASSNAME_CRYPTOMATTE_OBJECT)) { + object_layer_ = layer; + } + else if (layer_name_ref.endswith(RE_PASSNAME_CRYPTOMATTE_ASSET)) { + asset_layer_ = layer; + } + else if (layer_name_ref.endswith(RE_PASSNAME_CRYPTOMATTE_MATERIAL)) { + material_layer_ = layer; + } + } + } + + if (!(enabled_passes & + (EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT | EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET))) { + cryptomatte_object_buf.resize(16); + } +} + +void Cryptomatte::sync_object(Object *ob, ResourceHandle res_handle) +{ + const eViewLayerEEVEEPassType enabled_passes = inst_.film.enabled_passes_get(); + if (!(enabled_passes & + (EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT | EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET))) { + return; + } + + uint32_t resource_id = res_handle.resource_index(); + float2 object_hashes(0.0f, 0.0f); + + if (enabled_passes & EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT) { + object_hashes[0] = register_id(EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT, ob->id); + } + + if (enabled_passes & EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET) { + Object *asset = ob; + while (asset->parent) { + asset = asset->parent; + } + object_hashes[1] = register_id(EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET, asset->id); + } + cryptomatte_object_buf.get_or_resize(resource_id) = object_hashes; +} + +void Cryptomatte::sync_material(const ::Material *material) +{ + /* Material crypto hashes are generated during shader codegen stage. We only need to register + * them to store inside the metadata. 
*/ + if (material_layer_ && material) { + material_layer_->add_ID(material->id); + } +} + +void Cryptomatte::end_sync() +{ + cryptomatte_object_buf.push_update(); + + object_layer_ = nullptr; + asset_layer_ = nullptr; + material_layer_ = nullptr; +} + +float Cryptomatte::register_id(const eViewLayerEEVEEPassType layer, const ID &id) const +{ + BLI_assert(ELEM(layer, + EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT, + EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET, + EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL)); + + uint32_t cryptomatte_hash = 0; + if (session_) { + if (layer == EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT) { + BLI_assert(object_layer_); + cryptomatte_hash = object_layer_->add_ID(id); + } + else if (layer == EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET) { + BLI_assert(asset_layer_); + cryptomatte_hash = asset_layer_->add_ID(id); + } + else if (layer == EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL) { + BLI_assert(material_layer_); + cryptomatte_hash = material_layer_->add_ID(id); + } + } + else { + const char *name = &id.name[2]; + const int name_len = BLI_strnlen(name, MAX_NAME - 2); + cryptomatte_hash = BKE_cryptomatte_hash(name, name_len); + } + + return BKE_cryptomatte_hash_to_float(cryptomatte_hash); +} + +void Cryptomatte::store_metadata(RenderResult *render_result) +{ + if (session_) { + BKE_cryptomatte_store_metadata(&*session_, render_result, inst_.view_layer); + } +} + +} // namespace blender::eevee
\ No newline at end of file diff --git a/source/blender/draw/engines/eevee_next/eevee_cryptomatte.hh b/source/blender/draw/engines/eevee_next/eevee_cryptomatte.hh new file mode 100644 index 00000000000..37f5edf4c6d --- /dev/null +++ b/source/blender/draw/engines/eevee_next/eevee_cryptomatte.hh @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright 2021 Blender Foundation. + */ + +/** \file + * \ingroup eevee + * + * Cryptomatte. + * + * During rasterization, cryptomatte hashes are stored into a single array texture. + * The film pass then resamples this texture using pixel filter weighting. + * Each cryptomatte layer can hold N samples. These are stored in sequential layers + * of the array texture. The samples are sorted and merged only for final rendering. + */ + +#pragma once + +#include "eevee_shader_shared.hh" + +#include "BKE_cryptomatte.hh" + +extern "C" { +struct Material; +struct CryptomatteSession; +} + +namespace blender::eevee { + +class Instance; + +/* -------------------------------------------------------------------- */ +/** \name Cryptomatte + * \{ */ + +class Cryptomatte { + private: + class Instance &inst_; + + bke::cryptomatte::CryptomatteSessionPtr session_; + + /* Cached pointer to the cryptomatte layer instances. */ + bke::cryptomatte::CryptomatteLayer *object_layer_ = nullptr; + bke::cryptomatte::CryptomatteLayer *asset_layer_ = nullptr; + bke::cryptomatte::CryptomatteLayer *material_layer_ = nullptr; + + /** Contains per object hashes (object and asset hash). Indexed by resource ID. 
*/ + CryptomatteObjectBuf cryptomatte_object_buf; + + public: + Cryptomatte(Instance &inst) : inst_(inst){}; + + void begin_sync(); + void sync_object(Object *ob, ResourceHandle res_handle); + void sync_material(const ::Material *material); + void end_sync(); + + template<typename T> void bind_resources(draw::detail::PassBase<T> *pass) + { + pass->bind_ssbo(CRYPTOMATTE_BUF_SLOT, &cryptomatte_object_buf); + } + + /* Register ID to use inside cryptomatte layer and returns associated hash as float. */ + float register_id(const eViewLayerEEVEEPassType layer, const ID &id) const; + void store_metadata(RenderResult *render_result); +}; + +/** \} */ + +} // namespace blender::eevee diff --git a/source/blender/draw/engines/eevee_next/eevee_defines.hh b/source/blender/draw/engines/eevee_next/eevee_defines.hh index 2f338e707c0..248dfae6df9 100644 --- a/source/blender/draw/engines/eevee_next/eevee_defines.hh +++ b/source/blender/draw/engines/eevee_next/eevee_defines.hh @@ -82,6 +82,7 @@ #define RBUFS_EMISSION_SLOT 4 #define RBUFS_AOV_COLOR_SLOT 5 #define RBUFS_AOV_VALUE_SLOT 6 +#define RBUFS_CRYPTOMATTE_SLOT 7 /* Uniform Buffers. */ /* Only during prepass. */ @@ -96,6 +97,8 @@ #define LIGHT_TILE_BUF_SLOT 3 #define RBUFS_AOV_BUF_SLOT 5 #define SAMPLING_BUF_SLOT 6 +#define CRYPTOMATTE_BUF_SLOT 7 + /* Only during pre-pass. 
*/ #define VELOCITY_OBJ_PREV_BUF_SLOT 0 #define VELOCITY_OBJ_NEXT_BUF_SLOT 1 diff --git a/source/blender/draw/engines/eevee_next/eevee_engine.cc b/source/blender/draw/engines/eevee_next/eevee_engine.cc index 2e476b7d891..5ef198838c9 100644 --- a/source/blender/draw/engines/eevee_next/eevee_engine.cc +++ b/source/blender/draw/engines/eevee_next/eevee_engine.cc @@ -140,7 +140,7 @@ static void eevee_instance_free(void *instance) delete reinterpret_cast<eevee::Instance *>(instance); } -static void eevee_render_to_image(void *UNUSED(vedata), +static void eevee_render_to_image(void *vedata, struct RenderEngine *engine, struct RenderLayer *layer, const struct rcti *UNUSED(rect)) @@ -164,59 +164,31 @@ static void eevee_render_to_image(void *UNUSED(vedata), instance->init(size, &rect, engine, depsgraph, nullptr, camera_original_ob, layer); instance->render_frame(layer, viewname); - delete instance; + EEVEE_Data *ved = static_cast<EEVEE_Data *>(vedata); + if (ved->instance) { + delete ved->instance; + } + ved->instance = instance; } -static void eevee_render_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer) +static void eevee_store_metadata(void *vedata, struct RenderResult *render_result) { if (!GPU_shader_storage_buffer_objects_support()) { return; } + EEVEE_Data *ved = static_cast<EEVEE_Data *>(vedata); + eevee::Instance *instance = ved->instance; + instance->store_metadata(render_result); + delete instance; + ved->instance = nullptr; +} - RE_engine_register_pass(engine, scene, view_layer, RE_PASSNAME_COMBINED, 4, "RGBA", SOCK_RGBA); - -#define CHECK_PASS_LEGACY(name, type, channels, chanid) \ - if (view_layer->passflag & (SCE_PASS_##name)) { \ - RE_engine_register_pass( \ - engine, scene, view_layer, RE_PASSNAME_##name, channels, chanid, type); \ - } \ - ((void)0) -#define CHECK_PASS_EEVEE(name, type, channels, chanid) \ - if (view_layer->eevee.render_passes & (EEVEE_RENDER_PASS_##name)) { \ - RE_engine_register_pass( \ - engine, scene, view_layer, 
RE_PASSNAME_##name, channels, chanid, type); \ - } \ - ((void)0) - - CHECK_PASS_LEGACY(Z, SOCK_FLOAT, 1, "Z"); - CHECK_PASS_LEGACY(MIST, SOCK_FLOAT, 1, "Z"); - CHECK_PASS_LEGACY(NORMAL, SOCK_VECTOR, 3, "XYZ"); - CHECK_PASS_LEGACY(DIFFUSE_DIRECT, SOCK_RGBA, 3, "RGB"); - CHECK_PASS_LEGACY(DIFFUSE_COLOR, SOCK_RGBA, 3, "RGB"); - CHECK_PASS_LEGACY(GLOSSY_DIRECT, SOCK_RGBA, 3, "RGB"); - CHECK_PASS_LEGACY(GLOSSY_COLOR, SOCK_RGBA, 3, "RGB"); - CHECK_PASS_EEVEE(VOLUME_LIGHT, SOCK_RGBA, 3, "RGB"); - CHECK_PASS_LEGACY(EMIT, SOCK_RGBA, 3, "RGB"); - CHECK_PASS_LEGACY(ENVIRONMENT, SOCK_RGBA, 3, "RGB"); - /* TODO: CHECK_PASS_LEGACY(SHADOW, SOCK_RGBA, 3, "RGB"); - * CHECK_PASS_LEGACY(AO, SOCK_RGBA, 3, "RGB"); - * When available they should be converted from Value textures to RGB. */ - - LISTBASE_FOREACH (ViewLayerAOV *, aov, &view_layer->aovs) { - if ((aov->flag & AOV_CONFLICT) != 0) { - continue; - } - switch (aov->type) { - case AOV_TYPE_COLOR: - RE_engine_register_pass(engine, scene, view_layer, aov->name, 4, "RGBA", SOCK_RGBA); - break; - case AOV_TYPE_VALUE: - RE_engine_register_pass(engine, scene, view_layer, aov->name, 1, "X", SOCK_FLOAT); - break; - default: - break; - } +static void eevee_render_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer) +{ + if (!GPU_shader_storage_buffer_objects_support()) { + return; } + eevee::Instance::update_passes(engine, scene, view_layer); } static const DrawEngineDataSize eevee_data_size = DRW_VIEWPORT_DATA_SIZE(EEVEE_Data); @@ -238,7 +210,7 @@ DrawEngineType draw_engine_eevee_next_type = { nullptr, nullptr, &eevee_render_to_image, - nullptr, + &eevee_store_metadata, }; RenderEngineType DRW_engine_viewport_eevee_next_type = { diff --git a/source/blender/draw/engines/eevee_next/eevee_film.cc b/source/blender/draw/engines/eevee_next/eevee_film.cc index 4679889e59a..b89746d99e2 100644 --- a/source/blender/draw/engines/eevee_next/eevee_film.cc +++ b/source/blender/draw/engines/eevee_next/eevee_film.cc @@ -162,6 +162,45 
@@ inline bool operator!=(const FilmData &a, const FilmData &b) /** \name Film * \{ */ +static eViewLayerEEVEEPassType enabled_passes(const ViewLayer *view_layer) +{ + eViewLayerEEVEEPassType result = eViewLayerEEVEEPassType(view_layer->eevee.render_passes); + +#define ENABLE_FROM_LEGACY(name_legacy, name_eevee) \ + SET_FLAG_FROM_TEST(result, \ + (view_layer->passflag & SCE_PASS_##name_legacy) != 0, \ + EEVEE_RENDER_PASS_##name_eevee); + + ENABLE_FROM_LEGACY(COMBINED, COMBINED) + ENABLE_FROM_LEGACY(Z, Z) + ENABLE_FROM_LEGACY(MIST, MIST) + ENABLE_FROM_LEGACY(NORMAL, NORMAL) + ENABLE_FROM_LEGACY(SHADOW, SHADOW) + ENABLE_FROM_LEGACY(AO, AO) + ENABLE_FROM_LEGACY(EMIT, EMIT) + ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT) + ENABLE_FROM_LEGACY(DIFFUSE_COLOR, DIFFUSE_COLOR) + ENABLE_FROM_LEGACY(GLOSSY_COLOR, SPECULAR_COLOR) + ENABLE_FROM_LEGACY(DIFFUSE_DIRECT, DIFFUSE_LIGHT) + ENABLE_FROM_LEGACY(GLOSSY_DIRECT, SPECULAR_LIGHT) + ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT) + ENABLE_FROM_LEGACY(VECTOR, VECTOR) + +#undef ENABLE_FROM_LEGACY + + SET_FLAG_FROM_TEST(result, + view_layer->cryptomatte_flag & VIEW_LAYER_CRYPTOMATTE_OBJECT, + EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT); + SET_FLAG_FROM_TEST(result, + view_layer->cryptomatte_flag & VIEW_LAYER_CRYPTOMATTE_ASSET, + EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET); + SET_FLAG_FROM_TEST(result, + view_layer->cryptomatte_flag & VIEW_LAYER_CRYPTOMATTE_MATERIAL, + EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL); + + return result; +} + void Film::init(const int2 &extent, const rcti *output_rect) { Sampling &sampling = inst_.sampling; @@ -186,29 +225,7 @@ void Film::init(const int2 &extent, const rcti *output_rect) } else { /* Render Case. 
*/ - render_passes = eViewLayerEEVEEPassType(inst_.view_layer->eevee.render_passes); - -#define ENABLE_FROM_LEGACY(name_legacy, name_eevee) \ - SET_FLAG_FROM_TEST(render_passes, \ - (inst_.view_layer->passflag & SCE_PASS_##name_legacy) != 0, \ - EEVEE_RENDER_PASS_##name_eevee); - - ENABLE_FROM_LEGACY(COMBINED, COMBINED) - ENABLE_FROM_LEGACY(Z, Z) - ENABLE_FROM_LEGACY(MIST, MIST) - ENABLE_FROM_LEGACY(NORMAL, NORMAL) - ENABLE_FROM_LEGACY(SHADOW, SHADOW) - ENABLE_FROM_LEGACY(AO, AO) - ENABLE_FROM_LEGACY(EMIT, EMIT) - ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT) - ENABLE_FROM_LEGACY(DIFFUSE_COLOR, DIFFUSE_COLOR) - ENABLE_FROM_LEGACY(GLOSSY_COLOR, SPECULAR_COLOR) - ENABLE_FROM_LEGACY(DIFFUSE_DIRECT, DIFFUSE_LIGHT) - ENABLE_FROM_LEGACY(GLOSSY_DIRECT, SPECULAR_LIGHT) - ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT) - ENABLE_FROM_LEGACY(VECTOR, VECTOR) - -#undef ENABLE_FROM_LEGACY + render_passes = enabled_passes(inst_.view_layer); } /* Filter obsolete passes. */ @@ -241,6 +258,7 @@ void Film::init(const int2 &extent, const rcti *output_rect) /* TODO(fclem): parameter hidden in experimental. * We need to figure out LOD bias first in order to preserve texture crispiness. */ data.scaling_factor = 1; + data.cryptomatte_samples_len = inst_.view_layer->cryptomatte_levels; data.background_opacity = (scene.r.alphamode == R_ALPHAPREMUL) ? 0.0f : 1.0f; if (inst_.is_viewport() && false /* TODO(fclem): StudioLight */) { @@ -273,7 +291,8 @@ void Film::init(const int2 &extent, const rcti *output_rect) /* Set pass offsets. */ data_.display_id = aovs_info.display_id; - data_.display_is_value = aovs_info.display_is_value; + data_.display_storage_type = aovs_info.display_is_value ? PASS_STORAGE_VALUE : + PASS_STORAGE_COLOR; /* Combined is in a separate buffer. */ data_.combined_id = (enabled_passes_ & EEVEE_RENDER_PASS_COMBINED) ? 
0 : -1; @@ -284,13 +303,13 @@ void Film::init(const int2 &extent, const rcti *output_rect) data_.value_len = 0; auto pass_index_get = [&](eViewLayerEEVEEPassType pass_type) { - bool is_value = pass_is_value(pass_type); + ePassStorageType storage_type = pass_storage_type(pass_type); int index = (enabled_passes_ & pass_type) ? - (is_value ? data_.value_len : data_.color_len)++ : + (storage_type == PASS_STORAGE_VALUE ? data_.value_len : data_.color_len)++ : -1; if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) { data_.display_id = index; - data_.display_is_value = is_value; + data_.display_storage_type = storage_type; } return index; }; @@ -316,6 +335,24 @@ void Film::init(const int2 &extent, const rcti *output_rect) data_.color_len += data_.aov_color_len; data_.value_len += data_.aov_value_len; + + int cryptomatte_id = 0; + auto cryptomatte_index_get = [&](eViewLayerEEVEEPassType pass_type) { + int index = -1; + if (enabled_passes_ & pass_type) { + index = cryptomatte_id; + cryptomatte_id += data_.cryptomatte_samples_len / 2; + + if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) { + data_.display_id = index; + data_.display_storage_type = PASS_STORAGE_CRYPTOMATTE; + } + } + return index; + }; + data_.cryptomatte_object_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT); + data_.cryptomatte_asset_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET); + data_.cryptomatte_material_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL); } { /* TODO(@fclem): Over-scans. 
*/ @@ -327,6 +364,7 @@ void Film::init(const int2 &extent, const rcti *output_rect) eGPUTextureFormat float_format = GPU_R16F; eGPUTextureFormat weight_format = GPU_R32F; eGPUTextureFormat depth_format = GPU_R32F; + eGPUTextureFormat cryptomatte_format = GPU_RGBA32F; int reset = 0; reset += depth_tx_.ensure_2d(depth_format, data_.extent); @@ -341,6 +379,12 @@ void Film::init(const int2 &extent, const rcti *output_rect) reset += value_accum_tx_.ensure_2d_array(float_format, (data_.value_len > 0) ? data_.extent : int2(1), (data_.value_len > 0) ? data_.value_len : 1); + /* Divided by two as two cryptomatte samples fit in pixel (RG, BA). */ + int cryptomatte_array_len = cryptomatte_layer_len_get() * data_.cryptomatte_samples_len / 2; + reset += cryptomatte_tx_.ensure_2d_array(cryptomatte_format, + (cryptomatte_array_len > 0) ? data_.extent : int2(1), + (cryptomatte_array_len > 0) ? cryptomatte_array_len : + 1); if (reset > 0) { sampling.reset(); @@ -353,6 +397,7 @@ void Film::init(const int2 &extent, const rcti *output_rect) combined_tx_.current().clear(float4(0.0f)); weight_tx_.current().clear(float4(0.0f)); depth_tx_.clear(float4(0.0f)); + cryptomatte_tx_.clear(float4(0.0f)); } } @@ -398,6 +443,7 @@ void Film::sync() accumulate_ps_.bind_texture("ambient_occlusion_tx", &rbuffers.ambient_occlusion_tx); accumulate_ps_.bind_texture("aov_color_tx", &rbuffers.aov_color_tx); accumulate_ps_.bind_texture("aov_value_tx", &rbuffers.aov_value_tx); + accumulate_ps_.bind_texture("cryptomatte_tx", &rbuffers.cryptomatte_tx); /* NOTE(@fclem): 16 is the max number of sampled texture in many implementations. * If we need more, we need to pack more of the similar passes in the same textures as arrays or * use image binding instead. 
*/ @@ -408,6 +454,7 @@ void Film::sync() accumulate_ps_.bind_image("depth_img", &depth_tx_); accumulate_ps_.bind_image("color_accum_img", &color_accum_tx_); accumulate_ps_.bind_image("value_accum_img", &value_accum_tx_); + accumulate_ps_.bind_image("cryptomatte_img", &cryptomatte_tx_); /* Sync with rendering passes. */ accumulate_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS); if (use_compute) { @@ -416,6 +463,22 @@ void Film::sync() else { accumulate_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3); } + + const int cryptomatte_layer_count = cryptomatte_layer_len_get(); + const bool is_cryptomatte_pass_enabled = cryptomatte_layer_count > 0; + const bool do_cryptomatte_sorting = inst_.is_viewport() == false; + cryptomatte_post_ps_.init(); + if (is_cryptomatte_pass_enabled && do_cryptomatte_sorting) { + cryptomatte_post_ps_.state_set(DRW_STATE_NO_DRAW); + cryptomatte_post_ps_.shader_set(inst_.shaders.static_shader_get(FILM_CRYPTOMATTE_POST)); + cryptomatte_post_ps_.bind_image("cryptomatte_img", &cryptomatte_tx_); + cryptomatte_post_ps_.bind_image("weight_img", &weight_tx_.current()); + cryptomatte_post_ps_.push_constant("cryptomatte_layer_len", cryptomatte_layer_count); + cryptomatte_post_ps_.push_constant("cryptomatte_samples_per_layer", + inst_.view_layer->cryptomatte_levels); + int2 dispatch_size = math::divide_ceil(int2(cryptomatte_tx_.size()), int2(FILM_GROUP_SIZE)); + cryptomatte_post_ps_.dispatch(int3(UNPACK2(dispatch_size), 1)); + } } void Film::end_sync() @@ -463,6 +526,29 @@ eViewLayerEEVEEPassType Film::enabled_passes_get() const return enabled_passes_; } +int Film::cryptomatte_layer_len_get() const +{ + int result = 0; + result += data_.cryptomatte_object_id == -1 ? 0 : 1; + result += data_.cryptomatte_asset_id == -1 ? 0 : 1; + result += data_.cryptomatte_material_id == -1 ? 
0 : 1; + return result; +} + +int Film::cryptomatte_layer_max_get() const +{ + if (data_.cryptomatte_material_id != -1) { + return 3; + } + if (data_.cryptomatte_asset_id != -1) { + return 2; + } + if (data_.cryptomatte_object_id != -1) { + return 1; + } + return 0; +} + void Film::update_sample_table() { data_.subpixel_offset = pixel_jitter_get(); @@ -599,20 +685,28 @@ void Film::display() /* IMPORTANT: Do not swap! No accumulation has happened. */ } -float *Film::read_pass(eViewLayerEEVEEPassType pass_type) +void Film::cryptomatte_sort() { + DRW_manager_get()->submit(cryptomatte_post_ps_); +} + +float *Film::read_pass(eViewLayerEEVEEPassType pass_type, int layer_offset) +{ + ePassStorageType storage_type = pass_storage_type(pass_type); + const bool is_value = storage_type == PASS_STORAGE_VALUE; + const bool is_cryptomatte = storage_type == PASS_STORAGE_CRYPTOMATTE; - bool is_value = pass_is_value(pass_type); Texture &accum_tx = (pass_type == EEVEE_RENDER_PASS_COMBINED) ? combined_tx_.current() : (pass_type == EEVEE_RENDER_PASS_Z) ? depth_tx_ : - (is_value ? value_accum_tx_ : color_accum_tx_); + (is_cryptomatte ? cryptomatte_tx_ : + (is_value ? value_accum_tx_ : color_accum_tx_)); accum_tx.ensure_layer_views(); int index = pass_id_get(pass_type); - GPUTexture *pass_tx = accum_tx.layer_view(index); + GPUTexture *pass_tx = accum_tx.layer_view(index + layer_offset); GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE); diff --git a/source/blender/draw/engines/eevee_next/eevee_film.hh b/source/blender/draw/engines/eevee_next/eevee_film.hh index 796fcb24808..5478c20aff2 100644 --- a/source/blender/draw/engines/eevee_next/eevee_film.hh +++ b/source/blender/draw/engines/eevee_next/eevee_film.hh @@ -43,11 +43,16 @@ class Film { /** Incoming combined buffer with post FX applied (motion blur + depth of field). */ GPUTexture *combined_final_tx_ = nullptr; - /** Main accumulation textures containing every render-pass except depth and combined. 
*/ + /** + * Main accumulation textures containing every render-pass except depth, cryptomatte and + * combined. + */ Texture color_accum_tx_; Texture value_accum_tx_; /** Depth accumulation texture. Separated because using a different format. */ Texture depth_tx_; + /** Cryptomatte texture. Separated because it requires full floats. */ + Texture cryptomatte_tx_; /** Combined "Color" buffer. Double buffered to allow re-projection. */ SwapChain<Texture, 2> combined_tx_; /** Weight buffers. Double buffered to allow updating it during accumulation. */ @@ -56,6 +61,7 @@ class Film { bool force_disable_reprojection_ = false; PassSimple accumulate_ps_ = {"Film.Accumulate"}; + PassSimple cryptomatte_post_ps_ = {"Film.Cryptomatte.Post"}; FilmDataBuf data_; @@ -73,10 +79,13 @@ class Film { /** Accumulate the newly rendered sample contained in #RenderBuffers and blit to display. */ void accumulate(const DRWView *view, GPUTexture *combined_final_tx); + /** Sort and normalize cryptomatte samples. */ + void cryptomatte_sort(); + /** Blit to display. No rendered sample needed. */ void display(); - float *read_pass(eViewLayerEEVEEPassType pass_type); + float *read_pass(eViewLayerEEVEEPassType pass_type, int layer_offset); float *read_aov(ViewLayerAOV *aov); /** Returns shading views internal resolution. 
*/ @@ -93,17 +102,23 @@ class Film { } eViewLayerEEVEEPassType enabled_passes_get() const; + int cryptomatte_layer_max_get() const; + int cryptomatte_layer_len_get() const; - static bool pass_is_value(eViewLayerEEVEEPassType pass_type) + static ePassStorageType pass_storage_type(eViewLayerEEVEEPassType pass_type) { switch (pass_type) { case EEVEE_RENDER_PASS_Z: case EEVEE_RENDER_PASS_MIST: case EEVEE_RENDER_PASS_SHADOW: case EEVEE_RENDER_PASS_AO: - return true; + return PASS_STORAGE_VALUE; + case EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT: + case EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET: + case EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL: + return PASS_STORAGE_CRYPTOMATTE; default: - return false; + return PASS_STORAGE_COLOR; } } @@ -154,8 +169,12 @@ class Film { return data_.shadow_id; case EEVEE_RENDER_PASS_AO: return data_.ambient_occlusion_id; - case EEVEE_RENDER_PASS_CRYPTOMATTE: - return -1; /* TODO */ + case EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT: + return data_.cryptomatte_object_id; + case EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET: + return data_.cryptomatte_asset_id; + case EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL: + return data_.cryptomatte_material_id; case EEVEE_RENDER_PASS_VECTOR: return data_.vector_id; default: @@ -163,44 +182,80 @@ class Film { } } - static const char *pass_to_render_pass_name(eViewLayerEEVEEPassType pass_type) + static const Vector<std::string> pass_to_render_pass_names(eViewLayerEEVEEPassType pass_type, + const ViewLayer *view_layer) { + Vector<std::string> result; + + auto build_cryptomatte_passes = [&](const char *pass_name) { + const int num_cryptomatte_passes = (view_layer->cryptomatte_levels + 1) / 2; + for (int pass = 0; pass < num_cryptomatte_passes; pass++) { + std::stringstream ss; + ss.fill('0'); + ss << pass_name; + ss.width(2); + ss << pass; + result.append(ss.str()); + } + }; + switch (pass_type) { case EEVEE_RENDER_PASS_COMBINED: - return RE_PASSNAME_COMBINED; + result.append(RE_PASSNAME_COMBINED); + break; case EEVEE_RENDER_PASS_Z: - return 
RE_PASSNAME_Z; + result.append(RE_PASSNAME_Z); + break; case EEVEE_RENDER_PASS_MIST: - return RE_PASSNAME_MIST; + result.append(RE_PASSNAME_MIST); + break; case EEVEE_RENDER_PASS_NORMAL: - return RE_PASSNAME_NORMAL; + result.append(RE_PASSNAME_NORMAL); + break; case EEVEE_RENDER_PASS_DIFFUSE_LIGHT: - return RE_PASSNAME_DIFFUSE_DIRECT; + result.append(RE_PASSNAME_DIFFUSE_DIRECT); + break; case EEVEE_RENDER_PASS_DIFFUSE_COLOR: - return RE_PASSNAME_DIFFUSE_COLOR; + result.append(RE_PASSNAME_DIFFUSE_COLOR); + break; case EEVEE_RENDER_PASS_SPECULAR_LIGHT: - return RE_PASSNAME_GLOSSY_DIRECT; + result.append(RE_PASSNAME_GLOSSY_DIRECT); + break; case EEVEE_RENDER_PASS_SPECULAR_COLOR: - return RE_PASSNAME_GLOSSY_COLOR; + result.append(RE_PASSNAME_GLOSSY_COLOR); + break; case EEVEE_RENDER_PASS_VOLUME_LIGHT: - return RE_PASSNAME_VOLUME_LIGHT; + result.append(RE_PASSNAME_VOLUME_LIGHT); + break; case EEVEE_RENDER_PASS_EMIT: - return RE_PASSNAME_EMIT; + result.append(RE_PASSNAME_EMIT); + break; case EEVEE_RENDER_PASS_ENVIRONMENT: - return RE_PASSNAME_ENVIRONMENT; + result.append(RE_PASSNAME_ENVIRONMENT); + break; case EEVEE_RENDER_PASS_SHADOW: - return RE_PASSNAME_SHADOW; + result.append(RE_PASSNAME_SHADOW); + break; case EEVEE_RENDER_PASS_AO: - return RE_PASSNAME_AO; - case EEVEE_RENDER_PASS_CRYPTOMATTE: - BLI_assert_msg(0, "Cryptomatte is not implemented yet."); - return ""; /* TODO */ + result.append(RE_PASSNAME_AO); + break; + case EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT: + build_cryptomatte_passes(RE_PASSNAME_CRYPTOMATTE_OBJECT); + break; + case EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET: + build_cryptomatte_passes(RE_PASSNAME_CRYPTOMATTE_ASSET); + break; + case EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL: + build_cryptomatte_passes(RE_PASSNAME_CRYPTOMATTE_MATERIAL); + break; case EEVEE_RENDER_PASS_VECTOR: - return RE_PASSNAME_VECTOR; + result.append(RE_PASSNAME_VECTOR); + break; default: BLI_assert(0); - return ""; + break; } + return result; } private: diff --git 
a/source/blender/draw/engines/eevee_next/eevee_instance.cc b/source/blender/draw/engines/eevee_next/eevee_instance.cc index 6150f32f150..9cba3749d52 100644 --- a/source/blender/draw/engines/eevee_next/eevee_instance.cc +++ b/source/blender/draw/engines/eevee_next/eevee_instance.cc @@ -102,6 +102,7 @@ void Instance::begin_sync() materials.begin_sync(); velocity.begin_sync(); /* NOTE: Also syncs camera. */ lights.begin_sync(); + cryptomatte.begin_sync(); gpencil_engine_enabled = false; @@ -182,6 +183,7 @@ void Instance::end_sync() lights.end_sync(); sampling.end_sync(); film.end_sync(); + cryptomatte.end_sync(); } void Instance::render_sync() @@ -236,10 +238,15 @@ void Instance::render_read_result(RenderLayer *render_layer, const char *view_na continue; } - const char *pass_name = Film::pass_to_render_pass_name(pass_type); - RenderPass *rp = RE_pass_find_by_name(render_layer, pass_name, view_name); - if (rp) { - float *result = film.read_pass(pass_type); + Vector<std::string> pass_names = Film::pass_to_render_pass_names(pass_type, view_layer); + for (int64_t pass_offset : IndexRange(pass_names.size())) { + RenderPass *rp = RE_pass_find_by_name( + render_layer, pass_names[pass_offset].c_str(), view_name); + if (!rp) { + continue; + } + float *result = film.read_pass(pass_type, pass_offset); + if (result) { BLI_mutex_lock(&render->update_render_passes_mutex); /* WORKAROUND: We use texture read to avoid using a framebuffer to get the render result. @@ -255,10 +262,13 @@ void Instance::render_read_result(RenderLayer *render_layer, const char *view_na /* The vector pass is initialized to weird values. Set it to neutral value if not rendered. 
*/ if ((pass_bits & EEVEE_RENDER_PASS_VECTOR) == 0) { - const char *vector_pass_name = Film::pass_to_render_pass_name(EEVEE_RENDER_PASS_VECTOR); - RenderPass *vector_rp = RE_pass_find_by_name(render_layer, vector_pass_name, view_name); - if (vector_rp) { - memset(vector_rp->rect, 0, sizeof(float) * 4 * vector_rp->rectx * vector_rp->recty); + for (std::string vector_pass_name : + Film::pass_to_render_pass_names(EEVEE_RENDER_PASS_VECTOR, view_layer)) { + RenderPass *vector_rp = RE_pass_find_by_name( + render_layer, vector_pass_name.c_str(), view_name); + if (vector_rp) { + memset(vector_rp->rect, 0, sizeof(float) * 4 * vector_rp->rectx * vector_rp->recty); + } } } } @@ -290,6 +300,8 @@ void Instance::render_frame(RenderLayer *render_layer, const char *view_name) #endif } + this->film.cryptomatte_sort(); + this->render_read_result(render_layer, view_name); } @@ -313,6 +325,76 @@ void Instance::draw_viewport(DefaultFramebufferList *dfbl) } } +void Instance::store_metadata(RenderResult *render_result) +{ + cryptomatte.store_metadata(render_result); +} + +void Instance::update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer) +{ + RE_engine_register_pass(engine, scene, view_layer, RE_PASSNAME_COMBINED, 4, "RGBA", SOCK_RGBA); + +#define CHECK_PASS_LEGACY(name, type, channels, chanid) \ + if (view_layer->passflag & (SCE_PASS_##name)) { \ + RE_engine_register_pass( \ + engine, scene, view_layer, RE_PASSNAME_##name, channels, chanid, type); \ + } \ + ((void)0) +#define CHECK_PASS_EEVEE(name, type, channels, chanid) \ + if (view_layer->eevee.render_passes & (EEVEE_RENDER_PASS_##name)) { \ + RE_engine_register_pass( \ + engine, scene, view_layer, RE_PASSNAME_##name, channels, chanid, type); \ + } \ + ((void)0) + + CHECK_PASS_LEGACY(Z, SOCK_FLOAT, 1, "Z"); + CHECK_PASS_LEGACY(MIST, SOCK_FLOAT, 1, "Z"); + CHECK_PASS_LEGACY(NORMAL, SOCK_VECTOR, 3, "XYZ"); + CHECK_PASS_LEGACY(DIFFUSE_DIRECT, SOCK_RGBA, 3, "RGB"); + CHECK_PASS_LEGACY(DIFFUSE_COLOR, SOCK_RGBA, 3, 
"RGB"); + CHECK_PASS_LEGACY(GLOSSY_DIRECT, SOCK_RGBA, 3, "RGB"); + CHECK_PASS_LEGACY(GLOSSY_COLOR, SOCK_RGBA, 3, "RGB"); + CHECK_PASS_EEVEE(VOLUME_LIGHT, SOCK_RGBA, 3, "RGB"); + CHECK_PASS_LEGACY(EMIT, SOCK_RGBA, 3, "RGB"); + CHECK_PASS_LEGACY(ENVIRONMENT, SOCK_RGBA, 3, "RGB"); + /* TODO: CHECK_PASS_LEGACY(SHADOW, SOCK_RGBA, 3, "RGB"); + * CHECK_PASS_LEGACY(AO, SOCK_RGBA, 3, "RGB"); + * When available they should be converted from Value textures to RGB. */ + + LISTBASE_FOREACH (ViewLayerAOV *, aov, &view_layer->aovs) { + if ((aov->flag & AOV_CONFLICT) != 0) { + continue; + } + switch (aov->type) { + case AOV_TYPE_COLOR: + RE_engine_register_pass(engine, scene, view_layer, aov->name, 4, "RGBA", SOCK_RGBA); + break; + case AOV_TYPE_VALUE: + RE_engine_register_pass(engine, scene, view_layer, aov->name, 1, "X", SOCK_FLOAT); + break; + default: + break; + } + } + + /* NOTE: Name channels lowercase rgba so that compression rules check in OpenEXR DWA code uses + * loseless compression. Reportedly this naming is the only one which works good from the + * interoperability point of view. Using xyzw naming is not portable. 
*/ + auto register_cryptomatte_passes = [&](eViewLayerCryptomatteFlags cryptomatte_layer, + eViewLayerEEVEEPassType eevee_pass) { + if (view_layer->cryptomatte_flag & cryptomatte_layer) { + for (std::string pass_name : Film::pass_to_render_pass_names(eevee_pass, view_layer)) { + RE_engine_register_pass( + engine, scene, view_layer, pass_name.c_str(), 4, "rgba", SOCK_RGBA); + } + } + }; + register_cryptomatte_passes(VIEW_LAYER_CRYPTOMATTE_OBJECT, EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT); + register_cryptomatte_passes(VIEW_LAYER_CRYPTOMATTE_ASSET, EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET); + register_cryptomatte_passes(VIEW_LAYER_CRYPTOMATTE_MATERIAL, + EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL); +} + /** \} */ } // namespace blender::eevee diff --git a/source/blender/draw/engines/eevee_next/eevee_instance.hh b/source/blender/draw/engines/eevee_next/eevee_instance.hh index 4ab20d540bf..c8eecbd812d 100644 --- a/source/blender/draw/engines/eevee_next/eevee_instance.hh +++ b/source/blender/draw/engines/eevee_next/eevee_instance.hh @@ -16,6 +16,7 @@ #include "DRW_render.h" #include "eevee_camera.hh" +#include "eevee_cryptomatte.hh" #include "eevee_depth_of_field.hh" #include "eevee_film.hh" #include "eevee_hizbuffer.hh" @@ -49,6 +50,7 @@ class Instance { VelocityModule velocity; MotionBlurModule motion_blur; DepthOfField depth_of_field; + Cryptomatte cryptomatte; HiZBuffer hiz_buffer; Sampling sampling; Camera camera; @@ -91,6 +93,7 @@ class Instance { velocity(*this), motion_blur(*this), depth_of_field(*this), + cryptomatte(*this), hiz_buffer(*this), sampling(*this), camera(*this), @@ -117,9 +120,12 @@ class Instance { void render_sync(); void render_frame(RenderLayer *render_layer, const char *view_name); + void store_metadata(RenderResult *render_result); void draw_viewport(DefaultFramebufferList *dfbl); + static void update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer); + bool is_viewport() const { return render == nullptr; diff --git 
a/source/blender/draw/engines/eevee_next/eevee_pipeline.cc b/source/blender/draw/engines/eevee_next/eevee_pipeline.cc index 16bdfb04d14..33978518ffc 100644 --- a/source/blender/draw/engines/eevee_next/eevee_pipeline.cc +++ b/source/blender/draw/engines/eevee_next/eevee_pipeline.cc @@ -44,6 +44,7 @@ void WorldPipeline::sync(GPUMaterial *gpumat) world_ps_.bind_image("rp_diffuse_color_img", &rbufs.diffuse_color_tx); world_ps_.bind_image("rp_specular_color_img", &rbufs.specular_color_tx); world_ps_.bind_image("rp_emission_img", &rbufs.emission_tx); + world_ps_.bind_image("rp_cryptomatte_img", &rbufs.cryptomatte_tx); world_ps_.draw(DRW_cache_fullscreen_quad_get(), handle); /* To allow opaque pass rendering over it. */ @@ -110,6 +111,8 @@ void ForwardPipeline::sync() /* AOVs. */ opaque_ps_.bind_image(RBUFS_AOV_COLOR_SLOT, &inst_.render_buffers.aov_color_tx); opaque_ps_.bind_image(RBUFS_AOV_VALUE_SLOT, &inst_.render_buffers.aov_value_tx); + /* Cryptomatte. */ + opaque_ps_.bind_image(RBUFS_CRYPTOMATTE_SLOT, &inst_.render_buffers.cryptomatte_tx); /* Storage Buf. */ opaque_ps_.bind_ssbo(RBUFS_AOV_BUF_SLOT, &inst_.film.aovs_info); /* Textures. */ @@ -117,6 +120,7 @@ void ForwardPipeline::sync() inst_.lights.bind_resources(&opaque_ps_); inst_.sampling.bind_resources(&opaque_ps_); + inst_.cryptomatte.bind_resources(&opaque_ps_); } opaque_single_sided_ps_ = &opaque_ps_.sub("SingleSided"); diff --git a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc index c18c913d797..8e36e1d071c 100644 --- a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc +++ b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc @@ -72,6 +72,20 @@ void RenderBuffers::acquire(int2 extent) color_format, (aovs.color_len > 0) ? extent : int2(1), max_ii(1, aovs.color_len)); aov_value_tx.ensure_2d_array( float_format, (aovs.value_len > 0) ? 
extent : int2(1), max_ii(1, aovs.value_len)); + + eGPUTextureFormat cryptomatte_format = GPU_R32F; + const int cryptomatte_layer_len = inst_.film.cryptomatte_layer_max_get(); + if (cryptomatte_layer_len == 2) { + cryptomatte_format = GPU_RG32F; + } + else if (cryptomatte_layer_len == 3) { + cryptomatte_format = GPU_RGBA32F; + } + cryptomatte_tx.acquire( + pass_extent(static_cast<eViewLayerEEVEEPassType>(EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT | + EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET | + EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL)), + cryptomatte_format); } void RenderBuffers::release() @@ -88,6 +102,7 @@ void RenderBuffers::release() environment_tx.release(); shadow_tx.release(); ambient_occlusion_tx.release(); + cryptomatte_tx.release(); } } // namespace blender::eevee diff --git a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh index 0b761d618cc..ae5d7fbae5c 100644 --- a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh +++ b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh @@ -35,7 +35,7 @@ class RenderBuffers { TextureFromPool environment_tx; TextureFromPool shadow_tx; TextureFromPool ambient_occlusion_tx; - // TextureFromPool cryptomatte_tx; /* TODO */ + TextureFromPool cryptomatte_tx; /* TODO(fclem): Use texture from pool once they support texture array. 
*/ Texture light_tx; Texture aov_color_tx; diff --git a/source/blender/draw/engines/eevee_next/eevee_shader.cc b/source/blender/draw/engines/eevee_next/eevee_shader.cc index 7ff343d14a8..64b1d4891a9 100644 --- a/source/blender/draw/engines/eevee_next/eevee_shader.cc +++ b/source/blender/draw/engines/eevee_next/eevee_shader.cc @@ -84,6 +84,8 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_ return "eevee_film_frag"; case FILM_COMP: return "eevee_film_comp"; + case FILM_CRYPTOMATTE_POST: + return "eevee_film_cryptomatte_post"; case HIZ_DEBUG: return "eevee_hiz_debug"; case HIZ_UPDATE: diff --git a/source/blender/draw/engines/eevee_next/eevee_shader.hh b/source/blender/draw/engines/eevee_next/eevee_shader.hh index 9ef42c84373..88538557c07 100644 --- a/source/blender/draw/engines/eevee_next/eevee_shader.hh +++ b/source/blender/draw/engines/eevee_next/eevee_shader.hh @@ -28,6 +28,7 @@ namespace blender::eevee { enum eShaderType { FILM_FRAG = 0, FILM_COMP, + FILM_CRYPTOMATTE_POST, DOF_BOKEH_LUT, DOF_DOWNSAMPLE, diff --git a/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh b/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh index bcdb42c0093..8e96445d6b9 100644 --- a/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh +++ b/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh @@ -199,6 +199,12 @@ enum eFilmWeightLayerIndex : uint32_t { FILM_WEIGHT_LAYER_DISTANCE = 1u, }; +enum ePassStorageType : uint32_t { + PASS_STORAGE_COLOR = 0u, + PASS_STORAGE_VALUE = 1u, + PASS_STORAGE_CRYPTOMATTE = 2u, +}; + struct FilmSample { int2 texel; float weight; @@ -255,13 +261,19 @@ struct FilmData { int combined_id; /** Id of the render-pass to be displayed. -1 for combined. */ int display_id; - /** True if the render-pass to be displayed is from the value accum buffer. */ - bool1 display_is_value; + /** Storage type of the render-pass to be displayed. 
*/ + ePassStorageType display_storage_type; /** True if we bypass the accumulation and directly output the accumulation buffer. */ bool1 display_only; /** Start of AOVs and number of aov. */ int aov_color_id, aov_color_len; int aov_value_id, aov_value_len; + /** Start of cryptomatte per layer (-1 if pass is not enabled). */ + int cryptomatte_object_id; + int cryptomatte_asset_id; + int cryptomatte_material_id; + /** Max number of samples stored per layer (is even number). */ + int cryptomatte_samples_len; /** Settings to render mist pass */ float mist_scale, mist_bias, mist_exponent; /** Scene exposure used for better noise reduction. */ @@ -750,6 +762,7 @@ using SamplingDataBuf = draw::StorageBuffer<SamplingData>; using VelocityGeometryBuf = draw::StorageArrayBuffer<float4, 16, true>; using VelocityIndexBuf = draw::StorageArrayBuffer<VelocityIndex, 16>; using VelocityObjectBuf = draw::StorageArrayBuffer<float4x4, 16>; +using CryptomatteObjectBuf = draw::StorageArrayBuffer<float2, 16>; } // namespace blender::eevee #endif diff --git a/source/blender/draw/engines/eevee_next/eevee_sync.cc b/source/blender/draw/engines/eevee_next/eevee_sync.cc index 5f8b87c24b9..09ea7c9ec3d 100644 --- a/source/blender/draw/engines/eevee_next/eevee_sync.cc +++ b/source/blender/draw/engines/eevee_next/eevee_sync.cc @@ -120,10 +120,14 @@ void SyncModule::sync_mesh(Object *ob, is_shadow_caster = is_shadow_caster || material->shadow.sub_pass != nullptr; is_alpha_blend = is_alpha_blend || material->is_alpha_blend_transparent; + + GPUMaterial *gpu_material = material_array.gpu_materials[i]; + ::Material *mat = GPU_material_get_material(gpu_material); + inst_.cryptomatte.sync_material(mat); } inst_.manager->extract_object_attributes(res_handle, ob_ref, material_array.gpu_materials); - + inst_.cryptomatte.sync_object(ob, res_handle); // shadows.sync_object(ob, ob_handle, is_shadow_caster, is_alpha_blend); } @@ -320,6 +324,12 @@ void SyncModule::sync_curves(Object *ob, 
shgroup_curves_call(material.prepass, ob, part_sys, modifier_data); shgroup_curves_call(material.shadow, ob, part_sys, modifier_data); + inst_.cryptomatte.sync_object(ob, res_handle); + GPUMaterial *gpu_material = + inst_.materials.material_array_get(ob, has_motion).gpu_materials[mat_nr - 1]; + ::Material *mat = GPU_material_get_material(gpu_material); + inst_.cryptomatte.sync_material(mat); + /* TODO(fclem) Hair velocity. */ // shading_passes.velocity.gpencil_add(ob, ob_handle); diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_cryptomatte_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_cryptomatte_lib.glsl new file mode 100644 index 00000000000..e874a6b56ea --- /dev/null +++ b/source/blender/draw/engines/eevee_next/shaders/eevee_cryptomatte_lib.glsl @@ -0,0 +1,70 @@ +/** Storing/merging and sorting cryptomatte samples. */ + +bool cryptomatte_can_merge_sample(vec2 dst, vec2 src) +{ + if (dst == vec2(0.0, 0.0)) { + return true; + } + if (dst.x == src.x) { + return true; + } + return false; +} + +vec2 cryptomatte_merge_sample(vec2 dst, vec2 src) +{ + return vec2(src.x, dst.y + src.y); +} + +vec4 cryptomatte_false_color(float hash) +{ + uint m3hash = floatBitsToUint(hash); + return vec4(hash, + float(m3hash << 8) / float(0xFFFFFFFFu), + float(m3hash << 16) / float(0xFFFFFFFFu), + 1.0); +} + +void cryptomatte_clear_samples(FilmSample dst) +{ + int layer_len = imageSize(cryptomatte_img).z; + for (int i = 0; i < layer_len; i++) { + imageStore(cryptomatte_img, ivec3(dst.texel, i), vec4(0.0)); + } +} + +void cryptomatte_store_film_sample(FilmSample dst, + int cryptomatte_layer_id, + vec2 crypto_sample, + out vec4 out_color) +{ + if (crypto_sample.y == 0.0) { + return; + } + for (int i = 0; i < film_buf.cryptomatte_samples_len / 2; i++) { + ivec3 img_co = ivec3(dst.texel, cryptomatte_layer_id + i); + vec4 sample_pair = imageLoad(cryptomatte_img, img_co); + if (cryptomatte_can_merge_sample(sample_pair.xy, crypto_sample)) { + sample_pair.xy = 
cryptomatte_merge_sample(sample_pair.xy, crypto_sample); + /* In viewport only one layer is active. */ + /* TODO(jbakker): we are displaying the first sample, but we should display the highest + * weighted one. */ + if (cryptomatte_layer_id + i == 0) { + out_color = cryptomatte_false_color(sample_pair.x); + } + } + else if (cryptomatte_can_merge_sample(sample_pair.zw, crypto_sample)) { + sample_pair.zw = cryptomatte_merge_sample(sample_pair.zw, crypto_sample); + } + else if (i == film_buf.cryptomatte_samples_len / 2 - 1) { + /* TODO(jbakker): New hash detected, but there is no space left to store it. Currently we + * will ignore this sample, but ideally we could replace a sample with a lowest weight. */ + continue; + } + else { + continue; + } + imageStore(cryptomatte_img, img_co, sample_pair); + break; + } +} diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_cryptomatte_post_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_cryptomatte_post_comp.glsl new file mode 100644 index 00000000000..120edd9c35e --- /dev/null +++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_cryptomatte_post_comp.glsl @@ -0,0 +1,77 @@ +#pragma BLENDER_REQUIRE(common_math_lib.glsl) + +#define CRYPTOMATTE_LEVELS_MAX 16 + +void cryptomatte_load_samples(ivec2 texel, int layer, out vec2 samples[CRYPTOMATTE_LEVELS_MAX]) +{ + int pass_len = divide_ceil(cryptomatte_samples_per_layer, 2); + int layer_id = layer * pass_len; + + /* Read all samples from the cryptomatte layer. */ + for (int p = 0; p < pass_len; p++) { + vec4 pass_sample = imageLoad(cryptomatte_img, ivec3(texel, p + layer_id)); + samples[p * 2] = pass_sample.xy; + samples[p * 2 + 1] = pass_sample.zw; + } + for (int i = pass_len * 2; i < CRYPTOMATTE_LEVELS_MAX; i++) { + samples[i] = vec2(0.0); + } +} + +void cryptomatte_sort_samples(inout vec2 samples[CRYPTOMATTE_LEVELS_MAX]) +{ + /* Sort samples. Lame implementation, can be replaced with a more efficient algorithm. 
*/ + for (int i = 0; i < cryptomatte_samples_per_layer - 1 && samples[i].y != 0.0; i++) { + int highest_index = i; + float highest_weight = samples[i].y; + for (int j = i + 1; j < cryptomatte_samples_per_layer && samples[j].y != 0.0; j++) { + if (samples[j].y > highest_weight) { + highest_index = j; + highest_weight = samples[j].y; + } + }; + + if (highest_index != i) { + vec2 tmp = samples[i]; + samples[i] = samples[highest_index]; + samples[highest_index] = tmp; + } + } +} +void cryptomatte_normalize_weight(float total_weight, inout vec2 samples[CRYPTOMATTE_LEVELS_MAX]) +{ + for (int i = 0; i < CRYPTOMATTE_LEVELS_MAX; i++) { + samples[i].y /= total_weight; + } +} + +void cryptomatte_store_samples(ivec2 texel, int layer, in vec2 samples[CRYPTOMATTE_LEVELS_MAX]) +{ + int pass_len = divide_ceil(cryptomatte_samples_per_layer, 2); + int layer_id = layer * pass_len; + + /* Store samples back to the cryptomatte layer. */ + for (int p = 0; p < pass_len; p++) { + vec4 pass_sample; + pass_sample.xy = samples[p * 2]; + pass_sample.zw = samples[p * 2 + 1]; + imageStore(cryptomatte_img, ivec3(texel, p + layer_id), pass_sample); + } +} + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + for (int layer = 0; layer < cryptomatte_layer_len; layer++) { + vec2 samples[CRYPTOMATTE_LEVELS_MAX]; + cryptomatte_load_samples(texel, layer, samples); + cryptomatte_sort_samples(samples); + /* Repeat texture coordinates as the weight can be optimized to a small portion of the film. 
*/ + float weight = imageLoad( + weight_img, + ivec3(texel % imageSize(weight_img).xy, FILM_WEIGHT_LAYER_ACCUMULATION)) + .x; + cryptomatte_normalize_weight(weight, samples); + cryptomatte_store_samples(texel, layer, samples); + } +} diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl index 26040234fd0..e2aaf9128a5 100644 --- a/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl +++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl @@ -13,13 +13,17 @@ void main() if (film_buf.display_id == -1) { out_color = texelFetch(in_combined_tx, texel_film, 0); } - else if (film_buf.display_is_value) { + else if (film_buf.display_storage_type == PASS_STORAGE_VALUE) { out_color.rgb = imageLoad(value_accum_img, ivec3(texel_film, film_buf.display_id)).rrr; out_color.a = 1.0; } - else { + else if (film_buf.display_storage_type == PASS_STORAGE_COLOR) { out_color = imageLoad(color_accum_img, ivec3(texel_film, film_buf.display_id)); } + else /* PASS_STORAGE_CRYPTOMATTE */ { + out_color = cryptomatte_false_color( + imageLoad(cryptomatte_img, ivec3(texel_film, film_buf.display_id)).r); + } } else { film_process_data(texel_film, out_color, out_depth); diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl index 087efa9100d..21b9a83abb9 100644 --- a/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl +++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl @@ -8,6 +8,7 @@ #pragma BLENDER_REQUIRE(eevee_camera_lib.glsl) #pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl) #pragma BLENDER_REQUIRE(eevee_colorspace_lib.glsl) +#pragma BLENDER_REQUIRE(eevee_cryptomatte_lib.glsl) /* Return scene linear Z depth from the camera or radial depth for panoramic cameras. 
*/ float film_depth_convert_to_scene(float depth) @@ -158,6 +159,45 @@ void film_sample_accum_combined(FilmSample samp, inout vec4 accum, inout float w weight_accum += weight; } +void film_sample_cryptomatte_accum(FilmSample samp, + int layer, + sampler2D tex, + inout vec2 crypto_samples[4]) +{ + float hash = texelFetch(tex, samp.texel, 0)[layer]; + /* Find existing entry. */ + for (int i = 0; i < 4; i++) { + if (crypto_samples[i].x == hash) { + crypto_samples[i].y += samp.weight; + return; + } + } + /* Overwrite entry with less weight. */ + for (int i = 0; i < 4; i++) { + if (crypto_samples[i].y < samp.weight) { + crypto_samples[i] = vec2(hash, samp.weight); + return; + } + } +} + +void film_cryptomatte_layer_accum_and_store( + FilmSample dst, ivec2 texel_film, int pass_id, int layer_component, inout vec4 out_color) +{ + if (pass_id == -1) { + return; + } + /* x = hash, y = accumed weight. Only keep track of 4 highest weighted samples. */ + vec2 crypto_samples[4] = vec2[4](vec2(0.0), vec2(0.0), vec2(0.0), vec2(0.0)); + for (int i = 0; i < film_buf.samples_len; i++) { + FilmSample src = film_sample_get(i, texel_film); + film_sample_cryptomatte_accum(src, layer_component, cryptomatte_tx, crypto_samples); + } + for (int i = 0; i < 4; i++) { + cryptomatte_store_film_sample(dst, pass_id, crypto_samples[i], out_color); + } +} + /** \} */ /* -------------------------------------------------------------------- */ @@ -698,4 +738,18 @@ void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth } film_store_value(dst, film_buf.aov_value_id + aov, aov_accum, out_color); } + + if (film_buf.cryptomatte_samples_len != 0) { + /* Cryptomatte passes cannot be cleared by a weighted store like other passes. 
*/ + if (!film_buf.use_history || film_buf.use_reprojection) { + cryptomatte_clear_samples(dst); + } + + film_cryptomatte_layer_accum_and_store( + dst, texel_film, film_buf.cryptomatte_object_id, 0, out_color); + film_cryptomatte_layer_accum_and_store( + dst, texel_film, film_buf.cryptomatte_asset_id, 1, out_color); + film_cryptomatte_layer_accum_and_store( + dst, texel_film, film_buf.cryptomatte_material_id, 2, out_color); + } } diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl index 39758c0dfc1..ab29067763d 100644 --- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl +++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl @@ -107,6 +107,9 @@ void main() imageStore(rp_diffuse_color_img, out_texel, vec4(g_diffuse_data.color, 1.0)); imageStore(rp_specular_color_img, out_texel, vec4(specular_color, 1.0)); imageStore(rp_emission_img, out_texel, vec4(g_emission, 1.0)); + imageStore(rp_cryptomatte_img, + out_texel, + vec4(cryptomatte_object_buf[resource_id], node_tree.crypto_hash, 0.0)); #endif out_radiance.rgb *= 1.0 - g_holdout; diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl index 1ef1c1f84b8..442c2579c84 100644 --- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl +++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl @@ -33,6 +33,7 @@ void main() imageStore(rp_diffuse_color_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0)); imageStore(rp_specular_color_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0)); imageStore(rp_emission_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0)); + imageStore(rp_cryptomatte_img, out_texel, vec4(0.0)); out_background.rgb = safe_color(g_emission) * (1.0 - g_holdout); out_background.a = saturate(avg(g_transmittance)) * g_holdout; 
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh index db82a3265d7..4541f14d96c 100644 --- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh +++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh @@ -21,7 +21,7 @@ GPU_SHADER_CREATE_INFO(eevee_film) .sampler(13, ImageType::FLOAT_2D_ARRAY, "aov_value_tx") /* Color History for TAA needs to be sampler to leverage bilinear sampling. */ .sampler(14, ImageType::FLOAT_2D, "in_combined_tx") - // .sampler(15, ImageType::FLOAT_2D, "cryptomatte_tx") /* TODO */ + .sampler(15, ImageType::FLOAT_2D, "cryptomatte_tx") .image(0, GPU_R32F, Qualifier::READ, ImageType::FLOAT_2D_ARRAY, "in_weight_img") .image(1, GPU_R32F, Qualifier::WRITE, ImageType::FLOAT_2D_ARRAY, "out_weight_img") /* Color History for TAA needs to be sampler to leverage bilinear sampling. */ @@ -30,6 +30,7 @@ GPU_SHADER_CREATE_INFO(eevee_film) .image(4, GPU_R32F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "depth_img") .image(5, GPU_RGBA16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "color_accum_img") .image(6, GPU_R16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "value_accum_img") + .image(7, GPU_RGBA32F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "cryptomatte_img") .additional_info("eevee_shared") .additional_info("eevee_velocity_camera") .additional_info("draw_view"); @@ -45,3 +46,13 @@ GPU_SHADER_CREATE_INFO(eevee_film_comp) .local_group_size(FILM_GROUP_SIZE, FILM_GROUP_SIZE) .compute_source("eevee_film_comp.glsl") .additional_info("eevee_film"); + +GPU_SHADER_CREATE_INFO(eevee_film_cryptomatte_post) + .do_static_compilation(true) + .image(0, GPU_RGBA32F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "cryptomatte_img") + .image(1, GPU_R32F, Qualifier::READ, ImageType::FLOAT_2D_ARRAY, "weight_img") + .push_constant(Type::INT, "cryptomatte_layer_len") + .push_constant(Type::INT, 
"cryptomatte_samples_per_layer") + .local_group_size(FILM_GROUP_SIZE, FILM_GROUP_SIZE) + .compute_source("eevee_film_cryptomatte_post_comp.glsl") + .additional_info("eevee_shared"); diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh index 9abdd1f8adf..78d52d4b90e 100644 --- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh +++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh @@ -92,6 +92,10 @@ GPU_SHADER_CREATE_INFO(eevee_render_pass_out) .image_out(RBUFS_SPEC_COLOR_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img") .image_out(RBUFS_EMISSION_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img"); +GPU_SHADER_CREATE_INFO(eevee_cryptomatte_out) + .storage_buf(7, Qualifier::READ, "vec2", "cryptomatte_object_buf[]", Frequency::PASS) + .image_out(7, Qualifier::WRITE, GPU_RGBA32F, "rp_cryptomatte_img"); + GPU_SHADER_CREATE_INFO(eevee_surf_deferred) .vertex_out(eevee_surf_iface) /* NOTE: This removes the possibility of using gl_FragDepth. */ @@ -121,7 +125,10 @@ GPU_SHADER_CREATE_INFO(eevee_surf_forward) .fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0) .fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1) .fragment_source("eevee_surf_forward_frag.glsl") - .additional_info("eevee_light_data", "eevee_utility_texture", "eevee_sampling_data" + .additional_info("eevee_cryptomatte_out", + "eevee_light_data", + "eevee_utility_texture", + "eevee_sampling_data" // "eevee_lightprobe_data", // "eevee_shadow_data" /* Optionally added depending on the material. 
*/ @@ -141,7 +148,10 @@ GPU_SHADER_CREATE_INFO(eevee_surf_world) .push_constant(Type::FLOAT, "world_opacity_fade") .fragment_out(0, Type::VEC4, "out_background") .fragment_source("eevee_surf_world_frag.glsl") - .additional_info("eevee_aov_out", "eevee_render_pass_out", "eevee_utility_texture"); + .additional_info("eevee_aov_out", + "eevee_cryptomatte_out", + "eevee_render_pass_out", + "eevee_utility_texture"); #undef image_out #undef image_array_out diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h index 7b80ffd2b88..b49203d85f6 100644 --- a/source/blender/draw/intern/DRW_render.h +++ b/source/blender/draw/intern/DRW_render.h @@ -207,6 +207,10 @@ struct GPUShader *DRW_shader_create_with_lib_ex(const char *vert, const char *lib, const char *defines, const char *name); +struct GPUShader *DRW_shader_create_compute_with_shaderlib(const char *comp, + const DRWShaderLibrary *lib, + const char *defines, + const char *name); struct GPUShader *DRW_shader_create_with_shaderlib_ex(const char *vert, const char *geom, const char *frag, diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.cc b/source/blender/draw/intern/draw_cache_impl_mesh.cc index e60689f0237..c22382b3e09 100644 --- a/source/blender/draw/intern/draw_cache_impl_mesh.cc +++ b/source/blender/draw/intern/draw_cache_impl_mesh.cc @@ -556,8 +556,7 @@ static bool mesh_batch_cache_valid(Object *object, Mesh *me) } if (object->sculpt && object->sculpt->pbvh) { - if (cache->pbvh_is_drawing != BKE_pbvh_is_drawing(object->sculpt->pbvh) || - BKE_pbvh_draw_cache_invalid(object->sculpt->pbvh)) { + if (cache->pbvh_is_drawing != BKE_pbvh_is_drawing(object->sculpt->pbvh)) { return false; } diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c index 4bc3898c5e7..1ada99093c6 100644 --- a/source/blender/draw/intern/draw_manager_shader.c +++ b/source/blender/draw/intern/draw_manager_shader.c @@ -297,6 +297,18 @@ GPUShader 
*DRW_shader_create_with_lib_ex(const char *vert, return sh; } +GPUShader *DRW_shader_create_compute_with_shaderlib(const char *comp, + const DRWShaderLibrary *lib, + const char *defines, + const char *name) +{ + char *comp_with_lib = DRW_shader_library_create_shader_string(lib, comp); + GPUShader *sh = GPU_shader_create_compute(comp_with_lib, NULL, defines, name); + MEM_SAFE_FREE(comp_with_lib); + + return sh; +} + GPUShader *DRW_shader_create_with_shaderlib_ex(const char *vert, const char *geom, const char *frag, diff --git a/source/blender/draw/tests/shaders_test.cc b/source/blender/draw/tests/shaders_test.cc index e7baac63aae..892fd999fb5 100644 --- a/source/blender/draw/tests/shaders_test.cc +++ b/source/blender/draw/tests/shaders_test.cc @@ -360,6 +360,8 @@ static void test_eevee_glsl_shaders_static() EXPECT_NE(EEVEE_shaders_volumes_integration_sh_get(), nullptr); EXPECT_NE(EEVEE_shaders_volumes_resolve_sh_get(false), nullptr); EXPECT_NE(EEVEE_shaders_volumes_resolve_sh_get(true), nullptr); + EXPECT_NE(EEVEE_shaders_volumes_resolve_comp_sh_get(false), nullptr); + EXPECT_NE(EEVEE_shaders_volumes_resolve_comp_sh_get(true), nullptr); EXPECT_NE(EEVEE_shaders_volumes_accum_sh_get(), nullptr); EXPECT_NE(EEVEE_shaders_studiolight_probe_sh_get(), nullptr); EXPECT_NE(EEVEE_shaders_studiolight_background_sh_get(), nullptr); diff --git a/source/blender/editors/gpencil/gpencil_edit.c b/source/blender/editors/gpencil/gpencil_edit.c index 89ab9eda7f0..d53c0af2c54 100644 --- a/source/blender/editors/gpencil/gpencil_edit.c +++ b/source/blender/editors/gpencil/gpencil_edit.c @@ -67,6 +67,7 @@ #include "ED_armature.h" #include "ED_gpencil.h" +#include "ED_keyframing.h" #include "ED_object.h" #include "ED_outliner.h" #include "ED_screen.h" @@ -1713,12 +1714,17 @@ static int gpencil_strokes_paste_exec(bContext *C, wmOperator *op) } } - /* Ensure we have a frame to draw into + /* Ensure we have a frame to draw into. 
* NOTE: Since this is an op which creates strokes, - * we are obliged to add a new frame if one - * doesn't exist already + * we reuse active frame or add a new frame if one + * doesn't exist already depending on REC button status. */ - gpf = BKE_gpencil_layer_frame_get(gpl, scene->r.cfra, GP_GETFRAME_ADD_NEW); + if (IS_AUTOKEY_ON(scene) || (gpl->actframe == NULL)) { + gpf = BKE_gpencil_layer_frame_get(gpl, scene->r.cfra, GP_GETFRAME_ADD_NEW); + } + else { + gpf = BKE_gpencil_layer_frame_get(gpl, scene->r.cfra, GP_GETFRAME_USE_PREV); + } if (gpf) { /* Create new stroke */ bGPDstroke *new_stroke = BKE_gpencil_stroke_duplicate(gps, true, true); diff --git a/source/blender/editors/include/ED_mesh.h b/source/blender/editors/include/ED_mesh.h index b6a652bd3ab..26743a2bd08 100644 --- a/source/blender/editors/include/ED_mesh.h +++ b/source/blender/editors/include/ED_mesh.h @@ -139,6 +139,7 @@ struct UvElementMap *BM_uv_element_map_create(struct BMesh *bm, const struct Scene *scene, bool uv_selected, bool use_winding, + bool use_seams, bool do_islands); void BM_uv_element_map_free(struct UvElementMap *element_map); struct UvElement *BM_uv_element_get(const struct UvElementMap *map, diff --git a/source/blender/editors/include/UI_interface.hh b/source/blender/editors/include/UI_interface.hh index 82bfdd7e212..6c756984203 100644 --- a/source/blender/editors/include/UI_interface.hh +++ b/source/blender/editors/include/UI_interface.hh @@ -13,7 +13,7 @@ #include "UI_resources.h" -namespace blender::nodes::geometry_nodes_eval_log { +namespace blender::nodes::geo_eval_log { struct GeometryAttributeInfo; } @@ -44,12 +44,11 @@ void context_path_add_generic(Vector<ContextPathItem> &path, void template_breadcrumbs(uiLayout &layout, Span<ContextPathItem> context_path); -void attribute_search_add_items( - StringRefNull str, - bool can_create_attribute, - Span<const nodes::geometry_nodes_eval_log::GeometryAttributeInfo *> infos, - uiSearchItems *items, - bool is_first); +void 
attribute_search_add_items(StringRefNull str, + bool can_create_attribute, + Span<const nodes::geo_eval_log::GeometryAttributeInfo *> infos, + uiSearchItems *items, + bool is_first); } // namespace blender::ui diff --git a/source/blender/editors/interface/interface_template_attribute_search.cc b/source/blender/editors/interface/interface_template_attribute_search.cc index 0a684903f0f..55ca945671f 100644 --- a/source/blender/editors/interface/interface_template_attribute_search.cc +++ b/source/blender/editors/interface/interface_template_attribute_search.cc @@ -14,13 +14,15 @@ #include "BLT_translation.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "BKE_attribute.hh" + +#include "NOD_geometry_nodes_log.hh" #include "UI_interface.h" #include "UI_interface.hh" #include "UI_resources.h" -using blender::nodes::geometry_nodes_eval_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryAttributeInfo; namespace blender::ui { diff --git a/source/blender/editors/io/io_obj.c b/source/blender/editors/io/io_obj.c index 66e95c019f6..cb8eafeb52d 100644 --- a/source/blender/editors/io/io_obj.c +++ b/source/blender/editors/io/io_obj.c @@ -93,6 +93,7 @@ static int wm_obj_export_exec(bContext *C, wmOperator *op) export_params.path_mode = RNA_enum_get(op->ptr, "path_mode"); export_params.export_triangulated_mesh = RNA_boolean_get(op->ptr, "export_triangulated_mesh"); export_params.export_curves_as_nurbs = RNA_boolean_get(op->ptr, "export_curves_as_nurbs"); + export_params.export_pbr_extensions = RNA_boolean_get(op->ptr, "export_pbr_extensions"); export_params.export_object_groups = RNA_boolean_get(op->ptr, "export_object_groups"); export_params.export_material_groups = RNA_boolean_get(op->ptr, "export_material_groups"); @@ -118,14 +119,13 @@ static void ui_obj_export_settings(uiLayout *layout, PointerRNA *imfptr) /* Object Transform options. 
*/ box = uiLayoutBox(layout); - uiItemL(box, IFACE_("Object Properties"), ICON_OBJECT_DATA); col = uiLayoutColumn(box, false); sub = uiLayoutColumnWithHeading(col, false, IFACE_("Limit to")); uiItemR(sub, imfptr, "export_selected_objects", 0, IFACE_("Selected Only"), ICON_NONE); uiItemR(sub, imfptr, "scaling_factor", 0, NULL, ICON_NONE); row = uiLayoutRow(box, false); - uiItemR(row, imfptr, "forward_axis", UI_ITEM_R_EXPAND, IFACE_("Foward Axis"), ICON_NONE); + uiItemR(row, imfptr, "forward_axis", UI_ITEM_R_EXPAND, IFACE_("Forward Axis"), ICON_NONE); row = uiLayoutRow(box, false); uiItemR(row, imfptr, "up_axis", UI_ITEM_R_EXPAND, IFACE_("Up Axis"), ICON_NONE); @@ -134,27 +134,31 @@ static void ui_obj_export_settings(uiLayout *layout, PointerRNA *imfptr) sub = uiLayoutColumnWithHeading(col, false, IFACE_("Objects")); uiItemR(sub, imfptr, "apply_modifiers", 0, IFACE_("Apply Modifiers"), ICON_NONE); uiItemR(sub, imfptr, "export_eval_mode", 0, IFACE_("Properties"), ICON_NONE); - sub = uiLayoutColumn(sub, false); - uiLayoutSetEnabled(sub, export_materials); - uiItemR(sub, imfptr, "path_mode", 0, IFACE_("Path Mode"), ICON_NONE); - /* Options for what to write. */ + /* Geometry options. */ box = uiLayoutBox(layout); - uiItemL(box, IFACE_("Geometry"), ICON_EXPORT); col = uiLayoutColumn(box, false); - sub = uiLayoutColumnWithHeading(col, false, IFACE_("Export")); + sub = uiLayoutColumnWithHeading(col, false, IFACE_("Geometry")); uiItemR(sub, imfptr, "export_uv", 0, IFACE_("UV Coordinates"), ICON_NONE); uiItemR(sub, imfptr, "export_normals", 0, IFACE_("Normals"), ICON_NONE); uiItemR(sub, imfptr, "export_colors", 0, IFACE_("Colors"), ICON_NONE); - uiItemR(sub, imfptr, "export_materials", 0, IFACE_("Materials"), ICON_NONE); uiItemR(sub, imfptr, "export_triangulated_mesh", 0, IFACE_("Triangulated Mesh"), ICON_NONE); uiItemR(sub, imfptr, "export_curves_as_nurbs", 0, IFACE_("Curves as NURBS"), ICON_NONE); + /* Material options. 
*/ + box = uiLayoutBox(layout); + col = uiLayoutColumn(box, false); + sub = uiLayoutColumnWithHeading(col, false, IFACE_("Materials")); + uiItemR(sub, imfptr, "export_materials", 0, IFACE_("Export"), ICON_NONE); + sub = uiLayoutColumn(sub, false); + uiLayoutSetEnabled(sub, export_materials); + uiItemR(sub, imfptr, "export_pbr_extensions", 0, IFACE_("PBR Extensions"), ICON_NONE); + uiItemR(sub, imfptr, "path_mode", 0, IFACE_("Path Mode"), ICON_NONE); + /* Grouping options. */ box = uiLayoutBox(layout); - uiItemL(box, IFACE_("Grouping"), ICON_GROUP); col = uiLayoutColumn(box, false); - sub = uiLayoutColumnWithHeading(col, false, IFACE_("Export")); + sub = uiLayoutColumnWithHeading(col, false, IFACE_("Grouping")); uiItemR(sub, imfptr, "export_object_groups", 0, IFACE_("Object Groups"), ICON_NONE); uiItemR(sub, imfptr, "export_material_groups", 0, IFACE_("Material Groups"), ICON_NONE); uiItemR(sub, imfptr, "export_vertex_groups", 0, IFACE_("Vertex Groups"), ICON_NONE); @@ -165,14 +169,13 @@ static void ui_obj_export_settings(uiLayout *layout, PointerRNA *imfptr) /* Animation options. */ box = uiLayoutBox(layout); - uiItemL(box, IFACE_("Animation"), ICON_ANIM); col = uiLayoutColumn(box, false); - sub = uiLayoutColumn(col, false); - uiItemR(sub, imfptr, "export_animation", 0, NULL, ICON_NONE); + sub = uiLayoutColumnWithHeading(col, false, IFACE_("Animation")); + uiItemR(sub, imfptr, "export_animation", 0, IFACE_("Export"), ICON_NONE); sub = uiLayoutColumn(sub, true); + uiLayoutSetEnabled(sub, export_animation); uiItemR(sub, imfptr, "start_frame", 0, IFACE_("Frame Start"), ICON_NONE); uiItemR(sub, imfptr, "end_frame", 0, IFACE_("End"), ICON_NONE); - uiLayoutSetEnabled(sub, export_animation); } static void wm_obj_export_draw(bContext *UNUSED(C), wmOperator *op) @@ -336,6 +339,12 @@ void WM_OT_obj_export(struct wmOperatorType *ot) "Export Materials", "Export MTL library. 
There must be a Principled-BSDF node for image textures to " "be exported to the MTL file"); + RNA_def_boolean(ot->srna, + "export_pbr_extensions", + false, + "Export Materials with PBR Extensions", + "Export MTL library using PBR extensions (roughness, metallic, sheen, " + "clearcoat, anisotropy, transmission)"); RNA_def_enum(ot->srna, "path_mode", io_obj_path_mode, diff --git a/source/blender/editors/io/io_usd.c b/source/blender/editors/io/io_usd.c index ba118a5e289..eb80cabcd7f 100644 --- a/source/blender/editors/io/io_usd.c +++ b/source/blender/editors/io/io_usd.c @@ -191,6 +191,19 @@ static void wm_usd_export_draw(bContext *UNUSED(C), wmOperator *op) uiItemR(box, ptr, "use_instancing", 0, NULL, ICON_NONE); } +static void free_operator_customdata(wmOperator *op) +{ + if (op->customdata) { + MEM_freeN(op->customdata); + op->customdata = NULL; + } +} + +static void wm_usd_export_cancel(bContext *UNUSED(C), wmOperator *op) +{ + free_operator_customdata(op); +} + static bool wm_usd_export_check(bContext *UNUSED(C), wmOperator *op) { char filepath[FILE_MAX]; @@ -215,6 +228,7 @@ void WM_OT_usd_export(struct wmOperatorType *ot) ot->exec = wm_usd_export_exec; ot->poll = WM_operator_winactive; ot->ui = wm_usd_export_draw; + ot->cancel = wm_usd_export_cancel; ot->check = wm_usd_export_check; ot->flag = OPTYPE_REGISTER | OPTYPE_PRESET; /* No UNDO possible. 
*/ @@ -360,7 +374,7 @@ static int wm_usd_import_exec(bContext *C, wmOperator *op) const bool create_collection = RNA_boolean_get(op->ptr, "create_collection"); - char *prim_path_mask = malloc(1024); + char prim_path_mask[1024]; RNA_string_get(op->ptr, "prim_path_mask", prim_path_mask); const bool import_guide = RNA_boolean_get(op->ptr, "import_guide"); @@ -402,7 +416,6 @@ static int wm_usd_import_exec(bContext *C, wmOperator *op) .import_materials = import_materials, .import_meshes = import_meshes, .import_volumes = import_volumes, - .prim_path_mask = prim_path_mask, .import_subdiv = import_subdiv, .import_instance_proxies = import_instance_proxies, .create_collection = create_collection, @@ -416,11 +429,18 @@ static int wm_usd_import_exec(bContext *C, wmOperator *op) .light_intensity_scale = light_intensity_scale, .mtl_name_collision_mode = mtl_name_collision_mode}; + STRNCPY(params.prim_path_mask, prim_path_mask); + const bool ok = USD_import(C, filename, ¶ms, as_background_job); return as_background_job || ok ? 
OPERATOR_FINISHED : OPERATOR_CANCELLED; } +static void wm_usd_import_cancel(bContext *UNUSED(C), wmOperator *op) +{ + free_operator_customdata(op); +} + static void wm_usd_import_draw(bContext *UNUSED(C), wmOperator *op) { uiLayout *layout = op->layout; @@ -476,6 +496,7 @@ void WM_OT_usd_import(struct wmOperatorType *ot) ot->invoke = wm_usd_import_invoke; ot->exec = wm_usd_import_exec; + ot->cancel = wm_usd_import_cancel; ot->poll = WM_operator_winactive; ot->ui = wm_usd_import_draw; diff --git a/source/blender/editors/mesh/editmesh_utils.c b/source/blender/editors/mesh/editmesh_utils.c index 494e70ec9da..5c8ff930eb8 100644 --- a/source/blender/editors/mesh/editmesh_utils.c +++ b/source/blender/editors/mesh/editmesh_utils.c @@ -851,10 +851,99 @@ static void bm_uv_build_islands(UvElementMap *element_map, MEM_SAFE_FREE(map); } +/* return true if `loop` has UV co-ordinates which match `luv_a` and `luv_b` */ +static bool loop_uv_match(BMLoop *loop, MLoopUV *luv_a, MLoopUV *luv_b, int cd_loop_uv_offset) +{ + MLoopUV *luv_c = BM_ELEM_CD_GET_VOID_P(loop, cd_loop_uv_offset); + MLoopUV *luv_d = BM_ELEM_CD_GET_VOID_P(loop->next, cd_loop_uv_offset); + return compare_v2v2(luv_a->uv, luv_c->uv, STD_UV_CONNECT_LIMIT) && + compare_v2v2(luv_b->uv, luv_d->uv, STD_UV_CONNECT_LIMIT); +} + +/* Given `anchor` and `edge`, return true if there are edges that fan between them that are + * seam-free. */ +static bool seam_connected_recursive(BMVert *anchor, + BMEdge *edge, + MLoopUV *luv_anchor, + MLoopUV *luv_fan, + BMLoop *needle, + GSet *visited, + int cd_loop_uv_offset) +{ + BLI_assert(edge->v1 == anchor || edge->v2 == anchor); + BLI_assert(needle->v == anchor || needle->next->v == anchor); + + if (BM_elem_flag_test(edge, BM_ELEM_SEAM)) { + return false; /* Edge is a seam, don't traverse. */ + } + + if (!BLI_gset_add(visited, edge)) { + return false; /* Already visited. 
*/ + } + + BMLoop *loop; + BMIter liter; + BM_ITER_ELEM (loop, &liter, edge, BM_LOOPS_OF_EDGE) { + if (loop->v == anchor) { + if (!loop_uv_match(loop, luv_anchor, luv_fan, cd_loop_uv_offset)) { + continue; /* `loop` is disjoint in UV space. */ + } + + if (loop->prev == needle) { + return true; /* Success. */ + } + + MLoopUV *luv_far = BM_ELEM_CD_GET_VOID_P(loop->prev, cd_loop_uv_offset); + if (seam_connected_recursive( + anchor, loop->prev->e, luv_anchor, luv_far, needle, visited, cd_loop_uv_offset)) { + return true; + } + } + else { + BLI_assert(loop->next->v == anchor); + if (!loop_uv_match(loop, luv_fan, luv_anchor, cd_loop_uv_offset)) { + continue; /* `loop` is disjoint in UV space. */ + } + + if (loop->next == needle) { + return true; /* Success. */ + } + + MLoopUV *luv_far = BM_ELEM_CD_GET_VOID_P(loop->next->next, cd_loop_uv_offset); + if (seam_connected_recursive( + anchor, loop->next->e, luv_anchor, luv_far, needle, visited, cd_loop_uv_offset)) { + return true; + } + } + } + + return false; +} + +/* Given `loop_a` and `loop_b` originate from the same vertex and share a UV, + * return true if there are edges that fan between them that are seam-free. + * return false otherwise. + */ +static bool seam_connected(BMLoop *loop_a, BMLoop *loop_b, GSet *visited, int cd_loop_uv_offset) +{ + BLI_assert(loop_a && loop_b); + BLI_assert(loop_a != loop_b); + BLI_assert(loop_a->v == loop_b->v); + + BLI_gset_clear(visited, NULL); + + MLoopUV *luv_anchor = BM_ELEM_CD_GET_VOID_P(loop_a, cd_loop_uv_offset); + MLoopUV *luv_fan = BM_ELEM_CD_GET_VOID_P(loop_a->next, cd_loop_uv_offset); + const bool result = seam_connected_recursive( + loop_a->v, loop_a->e, luv_anchor, luv_fan, loop_b, visited, cd_loop_uv_offset); + return result; +} + UvElementMap *BM_uv_element_map_create(BMesh *bm, const Scene *scene, const bool uv_selected, const bool use_winding, + const bool use_seams, const bool do_islands) { /* In uv sync selection, all UVs are visible. 
*/ @@ -956,6 +1045,8 @@ UvElementMap *BM_uv_element_map_create(BMesh *bm, } BLI_buffer_free(&tf_uv_buf); + GSet *seam_visited_gset = use_seams ? BLI_gset_ptr_new(__func__) : NULL; + /* For each BMVert, sort associated linked list into unique uvs. */ int ev_index; BM_ITER_MESH_INDEX (ev, &iter, bm, BM_VERTS_OF_MESH, ev_index) { @@ -1001,6 +1092,10 @@ UvElementMap *BM_uv_element_map_create(BMesh *bm, winding[BM_elem_index_get(v->l->f)]; } + if (connected && use_seams) { + connected = seam_connected(iterv->l, v->l, seam_visited_gset, cd_loop_uv_offset); + } + if (connected) { if (lastv) { lastv->next = next; @@ -1026,6 +1121,10 @@ UvElementMap *BM_uv_element_map_create(BMesh *bm, element_map->vertex[ev_index] = newvlist; } + if (seam_visited_gset) { + BLI_gset_free(seam_visited_gset, NULL); + seam_visited_gset = NULL; + } MEM_SAFE_FREE(winding); /* at this point, every UvElement in vert points to a UvElement sharing the same vertex. diff --git a/source/blender/editors/object/object_modifier.cc b/source/blender/editors/object/object_modifier.cc index 61caa56cf7c..b5820ac55da 100644 --- a/source/blender/editors/object/object_modifier.cc +++ b/source/blender/editors/object/object_modifier.cc @@ -3400,6 +3400,7 @@ static int geometry_node_tree_copy_assign_exec(bContext *C, wmOperator *UNUSED(o nmd->node_group = new_tree; id_us_min(&tree->id); + DEG_id_tag_update(&ob->id, ID_RECALC_GEOMETRY); DEG_relations_tag_update(bmain); WM_event_add_notifier(C, NC_OBJECT | ND_MODIFIER, ob); return OPERATOR_FINISHED; diff --git a/source/blender/editors/object/object_remesh.cc b/source/blender/editors/object/object_remesh.cc index a6b51048209..aa8dc4debd9 100644 --- a/source/blender/editors/object/object_remesh.cc +++ b/source/blender/editors/object/object_remesh.cc @@ -186,7 +186,7 @@ static int voxel_remesh_exec(bContext *C, wmOperator *op) } if (ob->mode == OB_MODE_SCULPT) { - BKE_sculpt_ensure_orig_mesh_data(CTX_data_scene(C), ob); + BKE_sculpt_ensure_orig_mesh_data(ob); 
ED_sculpt_undo_geometry_end(ob); } @@ -912,7 +912,7 @@ static void quadriflow_start_job(void *customdata, short *stop, short *do_update } if (ob->mode == OB_MODE_SCULPT) { - BKE_sculpt_ensure_orig_mesh_data(qj->scene, ob); + BKE_sculpt_ensure_orig_mesh_data(ob); ED_sculpt_undo_geometry_end(ob); } diff --git a/source/blender/editors/sculpt_paint/paint_hide.c b/source/blender/editors/sculpt_paint/paint_hide.c index c1289364fb2..2b80c62a0ba 100644 --- a/source/blender/editors/sculpt_paint/paint_hide.c +++ b/source/blender/editors/sculpt_paint/paint_hide.c @@ -383,6 +383,7 @@ static int hide_show_exec(bContext *C, wmOperator *op) * sculpt but it looks wrong when entering editmode otherwise). */ if (pbvh_type == PBVH_FACES) { BKE_mesh_flush_hidden_from_verts(me); + BKE_pbvh_update_hide_attributes_from_mesh(pbvh); } SCULPT_visibility_sync_all_vertex_to_face_sets(ob->sculpt); diff --git a/source/blender/editors/sculpt_paint/paint_mask.c b/source/blender/editors/sculpt_paint/paint_mask.c index 0ea45f83336..437ff7506ba 100644 --- a/source/blender/editors/sculpt_paint/paint_mask.c +++ b/source/blender/editors/sculpt_paint/paint_mask.c @@ -134,6 +134,7 @@ static void mask_flood_fill_task_cb(void *__restrict userdata, static int mask_flood_fill_exec(bContext *C, wmOperator *op) { + const Scene *scene = CTX_data_scene(C); Object *ob = CTX_data_active_object(C); Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C); PaintMaskFloodMode mode; @@ -146,6 +147,9 @@ static int mask_flood_fill_exec(bContext *C, wmOperator *op) mode = RNA_enum_get(op->ptr, "mode"); value = RNA_float_get(op->ptr, "value"); + MultiresModifierData *mmd = BKE_sculpt_multires_active(scene, ob); + BKE_sculpt_mask_layers_ensure(ob, mmd); + BKE_sculpt_update_object_for_edit(depsgraph, ob, false, true, false); pbvh = ob->sculpt->pbvh; multires = (BKE_pbvh_type(pbvh) == PBVH_GRIDS); @@ -774,6 +778,8 @@ static void sculpt_gesture_init_face_set_properties(SculptGestureContext *sgcont struct Mesh *mesh = 
BKE_mesh_from_object(sgcontext->vc.obact); sgcontext->operation = MEM_callocN(sizeof(SculptGestureFaceSetOperation), "Face Set Operation"); + sgcontext->ss->face_sets = BKE_sculpt_face_sets_ensure(mesh); + SculptGestureFaceSetOperation *face_set_operation = (SculptGestureFaceSetOperation *) sgcontext->operation; @@ -817,7 +823,7 @@ static void mask_gesture_apply_task_cb(void *__restrict userdata, BKE_pbvh_vertex_iter_begin (sgcontext->ss->pbvh, node, vd, PBVH_ITER_UNIQUE) { if (sculpt_gesture_is_vertex_effected(sgcontext, &vd)) { - float prevmask = *vd.mask; + float prevmask = vd.mask ? *vd.mask : 0.0f; if (!any_masked) { any_masked = true; @@ -863,6 +869,10 @@ static void sculpt_gesture_init_mask_properties(SculptGestureContext *sgcontext, SculptGestureMaskOperation *mask_operation = (SculptGestureMaskOperation *)sgcontext->operation; + Object *object = sgcontext->vc.obact; + MultiresModifierData *mmd = BKE_sculpt_multires_active(sgcontext->vc.scene, object); + BKE_sculpt_mask_layers_ensure(sgcontext->vc.obact, mmd); + mask_operation->op.sculpt_gesture_begin = sculpt_gesture_mask_begin; mask_operation->op.sculpt_gesture_apply_for_symmetry_pass = sculpt_gesture_mask_apply_for_symmetry_pass; diff --git a/source/blender/editors/sculpt_paint/sculpt.c b/source/blender/editors/sculpt_paint/sculpt.c index 51ff064c58d..089a8a4cb54 100644 --- a/source/blender/editors/sculpt_paint/sculpt.c +++ b/source/blender/editors/sculpt_paint/sculpt.c @@ -253,11 +253,11 @@ float SCULPT_vertex_mask_get(SculptSession *ss, PBVHVertRef vertex) float *mask; switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: - return ss->vmask[vertex.i]; + return ss->vmask ? ss->vmask[vertex.i] : 0.0f; case PBVH_BMESH: v = (BMVert *)vertex.i; mask = BM_ELEM_CD_GET_VOID_P(v, CustomData_get_offset(&ss->bm->vdata, CD_PAINT_MASK)); - return *mask; + return mask ? 
*mask : 0.0f; case PBVH_GRIDS: { const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh); const int grid_index = vertex.i / key->grid_area; @@ -329,8 +329,14 @@ int SCULPT_active_face_set_get(SculptSession *ss) { switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: + if (!ss->face_sets) { + return SCULPT_FACE_SET_NONE; + } return ss->face_sets[ss->active_face_index]; case PBVH_GRIDS: { + if (!ss->face_sets) { + return SCULPT_FACE_SET_NONE; + } const int face_index = BKE_subdiv_ccg_grid_to_face_index(ss->subdiv_ccg, ss->active_grid_index); return ss->face_sets[face_index]; @@ -383,6 +389,7 @@ bool SCULPT_vertex_visible_get(SculptSession *ss, PBVHVertRef vertex) void SCULPT_face_set_visibility_set(SculptSession *ss, int face_set, bool visible) { + BLI_assert(ss->face_sets != NULL); switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: case PBVH_GRIDS: @@ -405,6 +412,7 @@ void SCULPT_face_set_visibility_set(SculptSession *ss, int face_set, bool visibl void SCULPT_face_sets_visibility_invert(SculptSession *ss) { + BLI_assert(ss->face_sets != NULL); switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: case PBVH_GRIDS: @@ -422,6 +430,9 @@ void SCULPT_face_sets_visibility_all_set(SculptSession *ss, bool visible) switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: case PBVH_GRIDS: + if (!ss->face_sets) { + return; + } for (int i = 0; i < ss->totfaces; i++) { /* This can run on geometry without a face set assigned, so its ID sign can't be changed to @@ -446,11 +457,15 @@ void SCULPT_face_sets_visibility_all_set(SculptSession *ss, bool visible) bool SCULPT_vertex_any_face_set_visible_get(SculptSession *ss, PBVHVertRef vertex) { + const bool *hide_poly = BKE_pbvh_get_poly_hide(ss->pbvh); + if (!hide_poly) { + return true; + } switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: { MeshElemMap *vert_map = &ss->pmap[vertex.i]; for (int j = 0; j < ss->pmap[vertex.i].count; j++) { - if (ss->face_sets[vert_map->indices[j]] > 0) { + if (!hide_poly[vert_map->indices[j]]) { return true; } } @@ 
-466,11 +481,15 @@ bool SCULPT_vertex_any_face_set_visible_get(SculptSession *ss, PBVHVertRef verte bool SCULPT_vertex_all_face_sets_visible_get(const SculptSession *ss, PBVHVertRef vertex) { + const bool *hide_poly = BKE_pbvh_get_poly_hide(ss->pbvh); + if (!hide_poly) { + return true; + } switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: { MeshElemMap *vert_map = &ss->pmap[vertex.i]; for (int j = 0; j < ss->pmap[vertex.i].count; j++) { - if (ss->face_sets[vert_map->indices[j]] < 0) { + if (hide_poly[vert_map->indices[j]]) { return false; } } @@ -482,7 +501,7 @@ bool SCULPT_vertex_all_face_sets_visible_get(const SculptSession *ss, PBVHVertRe const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh); const int grid_index = vertex.i / key->grid_area; const int face_index = BKE_subdiv_ccg_grid_to_face_index(ss->subdiv_ccg, grid_index); - return ss->face_sets[face_index] > 0; + return !hide_poly[face_index]; } } return true; @@ -492,6 +511,7 @@ void SCULPT_vertex_face_set_set(SculptSession *ss, PBVHVertRef vertex, int face_ { switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: { + BLI_assert(ss->face_sets != NULL); MeshElemMap *vert_map = &ss->pmap[vertex.i]; for (int j = 0; j < ss->pmap[vertex.i].count; j++) { if (ss->face_sets[vert_map->indices[j]] > 0) { @@ -502,6 +522,7 @@ void SCULPT_vertex_face_set_set(SculptSession *ss, PBVHVertRef vertex, int face_ case PBVH_BMESH: break; case PBVH_GRIDS: { + BLI_assert(ss->face_sets != NULL); const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh); const int grid_index = vertex.i / key->grid_area; const int face_index = BKE_subdiv_ccg_grid_to_face_index(ss->subdiv_ccg, grid_index); @@ -517,6 +538,9 @@ int SCULPT_vertex_face_set_get(SculptSession *ss, PBVHVertRef vertex) { switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: { + if (!ss->face_sets) { + return SCULPT_FACE_SET_NONE; + } MeshElemMap *vert_map = &ss->pmap[vertex.i]; int face_set = 0; for (int i = 0; i < ss->pmap[vertex.i].count; i++) { @@ -529,6 +553,9 @@ int 
SCULPT_vertex_face_set_get(SculptSession *ss, PBVHVertRef vertex) case PBVH_BMESH: return 0; case PBVH_GRIDS: { + if (!ss->face_sets) { + return SCULPT_FACE_SET_NONE; + } const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh); const int grid_index = vertex.i / key->grid_area; const int face_index = BKE_subdiv_ccg_grid_to_face_index(ss->subdiv_ccg, grid_index); @@ -542,6 +569,9 @@ bool SCULPT_vertex_has_face_set(SculptSession *ss, PBVHVertRef vertex, int face_ { switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: { + if (!ss->face_sets) { + return face_set == SCULPT_FACE_SET_NONE; + } MeshElemMap *vert_map = &ss->pmap[vertex.i]; for (int i = 0; i < ss->pmap[vertex.i].count; i++) { if (ss->face_sets[vert_map->indices[i]] == face_set) { @@ -553,6 +583,9 @@ bool SCULPT_vertex_has_face_set(SculptSession *ss, PBVHVertRef vertex, int face_ case PBVH_BMESH: return true; case PBVH_GRIDS: { + if (!ss->face_sets) { + return face_set == SCULPT_FACE_SET_NONE; + } const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh); const int grid_index = vertex.i / key->grid_area; const int face_index = BKE_subdiv_ccg_grid_to_face_index(ss->subdiv_ccg, grid_index); @@ -569,6 +602,7 @@ void SCULPT_visibility_sync_all_face_sets_to_verts(Object *ob) switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: { BKE_sculpt_sync_face_sets_visibility_to_base_mesh(mesh); + BKE_pbvh_update_hide_attributes_from_mesh(ss->pbvh); break; } case PBVH_GRIDS: { @@ -599,6 +633,9 @@ static void UNUSED_FUNCTION(sculpt_visibility_sync_vertex_to_face_sets)(SculptSe void SCULPT_visibility_sync_all_vertex_to_face_sets(SculptSession *ss) { if (BKE_pbvh_type(ss->pbvh) == PBVH_FACES) { + if (ss->face_sets == NULL) { + return; + } for (int i = 0; i < ss->totfaces; i++) { const MPoly *poly = &ss->mpoly[i]; bool poly_visible = true; @@ -620,6 +657,9 @@ void SCULPT_visibility_sync_all_vertex_to_face_sets(SculptSession *ss) static bool sculpt_check_unique_face_set_in_base_mesh(SculptSession *ss, int index) { + if (!ss->face_sets) { + 
return true; + } MeshElemMap *vert_map = &ss->pmap[index]; int face_set = -1; for (int i = 0; i < ss->pmap[index].count; i++) { @@ -676,6 +716,9 @@ bool SCULPT_vertex_has_unique_face_set(SculptSession *ss, PBVHVertRef vertex) case PBVH_BMESH: return true; case PBVH_GRIDS: { + if (!ss->face_sets) { + return true; + } const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh); const int grid_index = vertex.i / key->grid_area; const int vertex_index = vertex.i - grid_index * key->grid_area; @@ -703,6 +746,9 @@ int SCULPT_face_set_next_available_get(SculptSession *ss) switch (BKE_pbvh_type(ss->pbvh)) { case PBVH_FACES: case PBVH_GRIDS: { + if (!ss->face_sets) { + return 0; + } int next_face_set = 0; for (int i = 0; i < ss->totfaces; i++) { if (abs(ss->face_sets[i]) > next_face_set) { @@ -792,9 +838,10 @@ static void sculpt_vertex_neighbors_get_faces(SculptSession *ss, iter->capacity = SCULPT_VERTEX_NEIGHBOR_FIXED_CAPACITY; iter->neighbors = iter->neighbors_fixed; iter->neighbor_indices = iter->neighbor_indices_fixed; + const bool *hide_poly = BKE_pbvh_get_vert_hide(ss->pbvh); for (int i = 0; i < ss->pmap[vertex.i].count; i++) { - if (ss->face_sets[vert_map->indices[i]] < 0) { + if (hide_poly && hide_poly[vert_map->indices[i]]) { /* Skip connectivity from hidden faces. 
*/ continue; } @@ -3302,6 +3349,15 @@ static void do_brush_action(Sculpt *sd, BKE_pbvh_ensure_node_loops(ss->pbvh); } + if (SCULPT_tool_is_mask(brush->sculpt_tool)) { + MultiresModifierData *mmd = BKE_sculpt_multires_active(ss->scene, ob); + BKE_sculpt_mask_layers_ensure(ob, mmd); + } + if (SCULPT_tool_is_face_sets(brush->sculpt_tool)) { + Mesh *mesh = BKE_object_get_original_mesh(ob); + ss->face_sets = BKE_sculpt_face_sets_ensure(mesh); + } + /* Build a list of all nodes that are potentially within the brush's area of influence */ if (SCULPT_tool_needs_all_pbvh_nodes(brush)) { diff --git a/source/blender/editors/sculpt_paint/sculpt_dyntopo.c b/source/blender/editors/sculpt_paint/sculpt_dyntopo.c index ad8a1cde9dc..46674c5d239 100644 --- a/source/blender/editors/sculpt_paint/sculpt_dyntopo.c +++ b/source/blender/editors/sculpt_paint/sculpt_dyntopo.c @@ -215,13 +215,7 @@ static void SCULPT_dynamic_topology_disable_ex( BKE_sculptsession_bm_to_me(ob, true); /* Reset Face Sets as they are no longer valid. */ - if (!CustomData_has_layer(&me->pdata, CD_SCULPT_FACE_SETS)) { - CustomData_add_layer(&me->pdata, CD_SCULPT_FACE_SETS, CD_SET_DEFAULT, NULL, me->totpoly); - } - ss->face_sets = CustomData_get_layer(&me->pdata, CD_SCULPT_FACE_SETS); - for (int i = 0; i < me->totpoly; i++) { - ss->face_sets[i] = 1; - } + CustomData_free_layers(&me->pdata, CD_SCULPT_FACE_SETS, me->totpoly); me->face_sets_color_default = 1; /* Sync the visibility to vertices manually as the pmap is still not initialized. 
*/ diff --git a/source/blender/editors/sculpt_paint/sculpt_expand.c b/source/blender/editors/sculpt_paint/sculpt_expand.c index 4aafeacfbff..414a855ab2f 100644 --- a/source/blender/editors/sculpt_paint/sculpt_expand.c +++ b/source/blender/editors/sculpt_paint/sculpt_expand.c @@ -17,6 +17,7 @@ #include "DNA_brush_types.h" #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" +#include "DNA_modifier_types.h" #include "DNA_object_types.h" #include "BKE_brush.h" @@ -1390,9 +1391,15 @@ static void sculpt_expand_original_state_store(Object *ob, ExpandCache *expand_c /* Face Sets are always stored as they are needed for snapping. */ expand_cache->initial_face_sets = MEM_malloc_arrayN(totface, sizeof(int), "initial face set"); expand_cache->original_face_sets = MEM_malloc_arrayN(totface, sizeof(int), "original face set"); - for (int i = 0; i < totface; i++) { - expand_cache->initial_face_sets[i] = ss->face_sets[i]; - expand_cache->original_face_sets[i] = ss->face_sets[i]; + if (ss->face_sets) { + for (int i = 0; i < totface; i++) { + expand_cache->initial_face_sets[i] = ss->face_sets[i]; + expand_cache->original_face_sets[i] = ss->face_sets[i]; + } + } + else { + memset(expand_cache->initial_face_sets, SCULPT_FACE_SET_NONE, sizeof(int) * totface); + memset(expand_cache->original_face_sets, SCULPT_FACE_SET_NONE, sizeof(int) * totface); } if (expand_cache->target == SCULPT_EXPAND_TARGET_MASK) { @@ -2118,6 +2125,16 @@ static int sculpt_expand_invoke(bContext *C, wmOperator *op, const wmEvent *even return OPERATOR_CANCELLED; } + if (ss->expand_cache->target == SCULPT_EXPAND_TARGET_FACE_SETS) { + Mesh *mesh = ob->data; + ss->face_sets = BKE_sculpt_face_sets_ensure(mesh); + } + + if (ss->expand_cache->target == SCULPT_EXPAND_TARGET_MASK) { + MultiresModifierData *mmd = BKE_sculpt_multires_active(ss->scene, ob); + BKE_sculpt_mask_layers_ensure(ob, mmd); + } + /* Face Set operations are not supported in dyntopo. 
*/ if (ss->expand_cache->target == SCULPT_EXPAND_TARGET_FACE_SETS && BKE_pbvh_type(ss->pbvh) == PBVH_BMESH) { diff --git a/source/blender/editors/sculpt_paint/sculpt_face_set.c b/source/blender/editors/sculpt_paint/sculpt_face_set.c index 64bc6188bbc..8aa645c6af5 100644 --- a/source/blender/editors/sculpt_paint/sculpt_face_set.c +++ b/source/blender/editors/sculpt_paint/sculpt_face_set.c @@ -303,6 +303,9 @@ static int sculpt_face_set_create_exec(bContext *C, wmOperator *op) return OPERATOR_CANCELLED; } + Mesh *mesh = ob->data; + ss->face_sets = BKE_sculpt_face_sets_ensure(mesh); + BKE_sculpt_update_object_for_edit(depsgraph, ob, true, mode == SCULPT_FACE_SET_MASKED, false); const int tot_vert = SCULPT_vertex_count_get(ss); @@ -349,7 +352,6 @@ static int sculpt_face_set_create_exec(bContext *C, wmOperator *op) } if (all_visible) { - Mesh *mesh = ob->data; mesh->face_sets_color_default = next_face_set; BKE_pbvh_face_sets_color_set( ss->pbvh, mesh->face_sets_color_seed, mesh->face_sets_color_default); @@ -373,7 +375,6 @@ static int sculpt_face_set_create_exec(bContext *C, wmOperator *op) } if (mode == SCULPT_FACE_SET_SELECTION) { - Mesh *mesh = ob->data; BMesh *bm; const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(mesh); bm = BM_mesh_create(&allocsize, @@ -712,6 +713,9 @@ static int sculpt_face_set_init_exec(bContext *C, wmOperator *op) const float threshold = RNA_float_get(op->ptr, "threshold"); + Mesh *mesh = ob->data; + ss->face_sets = BKE_sculpt_face_sets_ensure(mesh); + switch (mode) { case SCULPT_FACE_SETS_FROM_LOOSE_PARTS: sculpt_face_sets_init_flood_fill(ob, sculpt_face_sets_init_loose_parts_test, threshold); @@ -850,6 +854,10 @@ static int sculpt_face_sets_change_visibility_exec(bContext *C, wmOperator *op) return OPERATOR_CANCELLED; } + if (!pbvh_has_face_sets(ss->pbvh)) { + return OPERATOR_CANCELLED; + } + BKE_sculpt_update_object_for_edit(depsgraph, ob, true, true, false); const int tot_vert = SCULPT_vertex_count_get(ss); @@ -1000,6 +1008,10 @@ 
static int sculpt_face_sets_randomize_colors_exec(bContext *C, wmOperator *UNUSE return OPERATOR_CANCELLED; } + if (!pbvh_has_face_sets(ss->pbvh)) { + return OPERATOR_CANCELLED; + } + PBVH *pbvh = ob->sculpt->pbvh; PBVHNode **nodes; int totnode; @@ -1154,7 +1166,9 @@ static void sculpt_face_set_shrink(Object *ob, static bool check_single_face_set(SculptSession *ss, int *face_sets, const bool check_visible_only) { - + if (face_sets == NULL) { + return true; + } int first_face_set = SCULPT_FACE_SET_NONE; if (check_visible_only) { for (int f = 0; f < ss->totfaces; f++) { diff --git a/source/blender/editors/sculpt_paint/sculpt_filter_mask.c b/source/blender/editors/sculpt_paint/sculpt_filter_mask.c index cba1d3dcdc1..bb27e4f1e9e 100644 --- a/source/blender/editors/sculpt_paint/sculpt_filter_mask.c +++ b/source/blender/editors/sculpt_paint/sculpt_filter_mask.c @@ -14,6 +14,7 @@ #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" +#include "DNA_modifier_types.h" #include "BKE_brush.h" #include "BKE_context.h" @@ -174,11 +175,15 @@ static int sculpt_mask_filter_exec(bContext *C, wmOperator *op) { Object *ob = CTX_data_active_object(C); Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C); + const Scene *scene = CTX_data_scene(C); PBVHNode **nodes; Sculpt *sd = CTX_data_tool_settings(C)->sculpt; int totnode; int filter_type = RNA_enum_get(op->ptr, "filter_type"); + MultiresModifierData *mmd = BKE_sculpt_multires_active(scene, ob); + BKE_sculpt_mask_layers_ensure(ob, mmd); + BKE_sculpt_update_object_for_edit(depsgraph, ob, true, true, false); SculptSession *ss = ob->sculpt; diff --git a/source/blender/editors/sculpt_paint/sculpt_geodesic.c b/source/blender/editors/sculpt_paint/sculpt_geodesic.c index a5885092ee3..c0856ab21d2 100644 --- a/source/blender/editors/sculpt_paint/sculpt_geodesic.c +++ b/source/blender/editors/sculpt_paint/sculpt_geodesic.c @@ -170,6 +170,8 @@ static float *SCULPT_geodesic_mesh_create(Object *ob, } } + const bool *hide_poly = 
BKE_pbvh_get_poly_hide(ss->pbvh); + /* Add edges adjacent to an initial vertex to the queue. */ for (int i = 0; i < totedge; i++) { const int v1 = edges[i].v1; @@ -199,7 +201,7 @@ static float *SCULPT_geodesic_mesh_create(Object *ob, if (ss->epmap[e].count != 0) { for (int poly_map_index = 0; poly_map_index < ss->epmap[e].count; poly_map_index++) { const int poly = ss->epmap[e].indices[poly_map_index]; - if (ss->face_sets[poly] <= 0) { + if (hide_poly && hide_poly[poly]) { continue; } const MPoly *mpoly = &polys[poly]; diff --git a/source/blender/editors/sculpt_paint/sculpt_intern.h b/source/blender/editors/sculpt_paint/sculpt_intern.h index 4bc06d68a02..7a72e5cc84b 100644 --- a/source/blender/editors/sculpt_paint/sculpt_intern.h +++ b/source/blender/editors/sculpt_paint/sculpt_intern.h @@ -1837,6 +1837,16 @@ BLI_INLINE bool SCULPT_tool_is_paint(int tool) return ELEM(tool, SCULPT_TOOL_PAINT, SCULPT_TOOL_SMEAR); } +BLI_INLINE bool SCULPT_tool_is_mask(int tool) +{ + return ELEM(tool, SCULPT_TOOL_MASK); +} + +BLI_INLINE bool SCULPT_tool_is_face_sets(int tool) +{ + return ELEM(tool, SCULPT_TOOL_DRAW_FACE_SETS); +} + #ifdef __cplusplus } #endif diff --git a/source/blender/editors/sculpt_paint/sculpt_mask_expand.c b/source/blender/editors/sculpt_paint/sculpt_mask_expand.c index 9556d24f12c..ec246cd3788 100644 --- a/source/blender/editors/sculpt_paint/sculpt_mask_expand.c +++ b/source/blender/editors/sculpt_paint/sculpt_mask_expand.c @@ -391,7 +391,7 @@ static int sculpt_mask_expand_invoke(bContext *C, wmOperator *op, const wmEvent if (create_face_set) { ss->filter_cache->prev_face_set = MEM_callocN(sizeof(float) * ss->totfaces, "prev face mask"); for (int i = 0; i < ss->totfaces; i++) { - ss->filter_cache->prev_face_set[i] = ss->face_sets[i]; + ss->filter_cache->prev_face_set[i] = ss->face_sets ? 
ss->face_sets[i] : 0; } ss->filter_cache->new_face_set = SCULPT_face_set_next_available_get(ss); } diff --git a/source/blender/editors/sculpt_paint/sculpt_ops.c b/source/blender/editors/sculpt_paint/sculpt_ops.c index 10a2ece73de..055e02a5703 100644 --- a/source/blender/editors/sculpt_paint/sculpt_ops.c +++ b/source/blender/editors/sculpt_paint/sculpt_ops.c @@ -300,28 +300,30 @@ static void sculpt_init_session(Main *bmain, Depsgraph *depsgraph, Scene *scene, ob->sculpt = MEM_callocN(sizeof(SculptSession), "sculpt session"); ob->sculpt->mode_type = OB_MODE_SCULPT; - BKE_sculpt_ensure_orig_mesh_data(scene, ob); + BKE_sculpt_ensure_orig_mesh_data(ob); BKE_scene_graph_evaluated_ensure(depsgraph, bmain); /* This function expects a fully evaluated depsgraph. */ BKE_sculpt_update_object_for_edit(depsgraph, ob, false, false, false); - /* Here we can detect geometry that was just added to Sculpt Mode as it has the - * SCULPT_FACE_SET_NONE assigned, so we can create a new Face Set for it. */ - /* In sculpt mode all geometry that is assigned to SCULPT_FACE_SET_NONE is considered as not - * initialized, which is used is some operators that modify the mesh topology to perform certain - * actions in the new polys. After these operations are finished, all polys should have a valid - * face set ID assigned (different from SCULPT_FACE_SET_NONE) to manage their visibility - * correctly. */ - /* TODO(pablodp606): Based on this we can improve the UX in future tools for creating new - * objects, like moving the transform pivot position to the new area or masking existing - * geometry. 
*/ SculptSession *ss = ob->sculpt; - const int new_face_set = SCULPT_face_set_next_available_get(ss); - for (int i = 0; i < ss->totfaces; i++) { - if (ss->face_sets[i] == SCULPT_FACE_SET_NONE) { - ss->face_sets[i] = new_face_set; + if (ss->face_sets) { + /* Here we can detect geometry that was just added to Sculpt Mode as it has the + * SCULPT_FACE_SET_NONE assigned, so we can create a new Face Set for it. */ + /* In sculpt mode all geometry that is assigned to SCULPT_FACE_SET_NONE is considered as not + * initialized, which is used is some operators that modify the mesh topology to perform + * certain actions in the new polys. After these operations are finished, all polys should have + * a valid face set ID assigned (different from SCULPT_FACE_SET_NONE) to manage their + * visibility correctly. */ + /* TODO(pablodp606): Based on this we can improve the UX in future tools for creating new + * objects, like moving the transform pivot position to the new area or masking existing + * geometry. 
*/ + const int new_face_set = SCULPT_face_set_next_available_get(ss); + for (int i = 0; i < ss->totfaces; i++) { + if (ss->face_sets[i] == SCULPT_FACE_SET_NONE) { + ss->face_sets[i] = new_face_set; + } } } } diff --git a/source/blender/editors/sculpt_paint/sculpt_undo.c b/source/blender/editors/sculpt_paint/sculpt_undo.c index a31be07d8af..af94cad88f3 100644 --- a/source/blender/editors/sculpt_paint/sculpt_undo.c +++ b/source/blender/editors/sculpt_paint/sculpt_undo.c @@ -476,9 +476,10 @@ static bool sculpt_undo_restore_face_sets(bContext *C, SculptUndoNode *unode) ViewLayer *view_layer = CTX_data_view_layer(C); Object *ob = BKE_view_layer_active_object_get(view_layer); Mesh *me = BKE_object_get_original_mesh(ob); - int *face_sets = CustomData_get_layer(&me->pdata, CD_SCULPT_FACE_SETS); + int *face_sets = CustomData_add_layer( + &me->pdata, CD_SCULPT_FACE_SETS, CD_CONSTRUCT, NULL, me->totpoly); for (int i = 0; i < me->totpoly; i++) { - face_sets[i] = unode->face_sets[i]; + SWAP(int, face_sets[i], unode->face_sets[i]); } return false; } @@ -1354,8 +1355,13 @@ static SculptUndoNode *sculpt_undo_face_sets_push(Object *ob, SculptUndoType typ unode->face_sets = MEM_callocN(me->totpoly * sizeof(int), "sculpt face sets"); const int *face_sets = CustomData_get_layer(&me->pdata, CD_SCULPT_FACE_SETS); - for (int i = 0; i < me->totpoly; i++) { - unode->face_sets[i] = face_sets[i]; + if (face_sets) { + for (int i = 0; i < me->totpoly; i++) { + unode->face_sets[i] = face_sets[i]; + } + } + else { + memset(unode->face_sets, SCULPT_FACE_SET_NONE, sizeof(int) * me->totpoly); } BLI_addtail(&usculpt->nodes, unode); @@ -1513,7 +1519,9 @@ SculptUndoNode *SCULPT_undo_push_node(Object *ob, PBVHNode *node, SculptUndoType sculpt_undo_store_hidden(ob, unode); break; case SCULPT_UNDO_MASK: - sculpt_undo_store_mask(ob, unode); + if (pbvh_has_mask(ss->pbvh)) { + sculpt_undo_store_mask(ob, unode); + } break; case SCULPT_UNDO_COLOR: sculpt_undo_store_color(ob, unode); diff --git 
a/source/blender/editors/sculpt_paint/sculpt_uv.c b/source/blender/editors/sculpt_paint/sculpt_uv.c index 8b9776cf94d..4739fa52674 100644 --- a/source/blender/editors/sculpt_paint/sculpt_uv.c +++ b/source/blender/editors/sculpt_paint/sculpt_uv.c @@ -686,9 +686,10 @@ static UvSculptData *uv_sculpt_stroke_init(bContext *C, wmOperator *op, const wm /* Winding was added to island detection in 5197aa04c6bd * However the sculpt tools can flip faces, potentially creating orphaned islands. * See T100132 */ - bool use_winding = false; + const bool use_winding = false; + const bool use_seams = true; data->elementMap = BM_uv_element_map_create( - bm, scene, false, use_winding, do_island_optimization); + bm, scene, false, use_winding, use_seams, do_island_optimization); if (!data->elementMap) { uv_sculpt_stroke_exit(C, op); diff --git a/source/blender/editors/space_action/action_edit.c b/source/blender/editors/space_action/action_edit.c index 23c92cbdaa0..6d880f338f6 100644 --- a/source/blender/editors/space_action/action_edit.c +++ b/source/blender/editors/space_action/action_edit.c @@ -1364,7 +1364,7 @@ static int actkeys_ipo_exec(bContext *C, wmOperator *op) /* set handle type */ ANIM_animdata_keyframe_callback(&ac, - (ANIMFILTER_DATA_VISIBLE | ANIMFILTER_CURVE_VISIBLE | + (ANIMFILTER_DATA_VISIBLE | ANIMFILTER_LIST_VISIBLE | ANIMFILTER_FOREDIT | ANIMFILTER_NODUPLIS | ANIMFILTER_FCURVESONLY), ANIM_editkeyframes_ipo(mode)); @@ -1414,7 +1414,7 @@ static int actkeys_easing_exec(bContext *C, wmOperator *op) /* set handle type */ ANIM_animdata_keyframe_callback(&ac, - (ANIMFILTER_DATA_VISIBLE | ANIMFILTER_CURVE_VISIBLE | + (ANIMFILTER_DATA_VISIBLE | ANIMFILTER_LIST_VISIBLE | ANIMFILTER_FOREDIT | ANIMFILTER_NODUPLIS | ANIMFILTER_FCURVESONLY), ANIM_editkeyframes_easing(mode)); diff --git a/source/blender/editors/space_image/image_undo.cc b/source/blender/editors/space_image/image_undo.cc index 065641c4051..8f144264824 100644 --- a/source/blender/editors/space_image/image_undo.cc 
+++ b/source/blender/editors/space_image/image_undo.cc @@ -522,7 +522,7 @@ static void ubuf_ensure_compat_ibuf(const UndoImageBuf *ubuf, ImBuf *ibuf) IMB_rect_size_set(ibuf, ubuf->image_dims); if (ubuf->image_state.use_float) { - imb_addrectfloatImBuf(ibuf); + imb_addrectfloatImBuf(ibuf, 4); } else { imb_addrectImBuf(ibuf); diff --git a/source/blender/editors/space_nla/nla_buttons.c b/source/blender/editors/space_nla/nla_buttons.c index 9652819404e..72b2eb20f8f 100644 --- a/source/blender/editors/space_nla/nla_buttons.c +++ b/source/blender/editors/space_nla/nla_buttons.c @@ -213,7 +213,8 @@ static bool nla_panel_poll(const bContext *C, PanelType *pt) static bool nla_animdata_panel_poll(const bContext *C, PanelType *UNUSED(pt)) { PointerRNA ptr; - return (nla_panel_context(C, &ptr, NULL, NULL) && (ptr.data != NULL)); + PointerRNA strip_ptr; + return (nla_panel_context(C, &ptr, NULL, &strip_ptr) && (ptr.data != NULL) && (ptr.owner_id != strip_ptr.owner_id)); } static bool nla_strip_panel_poll(const bContext *C, PanelType *UNUSED(pt)) @@ -265,13 +266,18 @@ static bool nla_strip_eval_panel_poll(const bContext *C, PanelType *UNUSED(pt)) static void nla_panel_animdata(const bContext *C, Panel *panel) { PointerRNA adt_ptr; + PointerRNA strip_ptr; /* AnimData *adt; */ uiLayout *layout = panel->layout; uiLayout *row; uiBlock *block; /* check context and also validity of pointer */ - if (!nla_panel_context(C, &adt_ptr, NULL, NULL)) { + if (!nla_panel_context(C, &adt_ptr, NULL, &strip_ptr)) { + return; + } + + if(adt_ptr.owner_id == strip_ptr.owner_id){ return; } diff --git a/source/blender/editors/space_nla/nla_edit.c b/source/blender/editors/space_nla/nla_edit.c index 801d032a861..bcdbbb00d1c 100644 --- a/source/blender/editors/space_nla/nla_edit.c +++ b/source/blender/editors/space_nla/nla_edit.c @@ -606,6 +606,36 @@ void NLA_OT_view_frame(wmOperatorType *ot) * (or the active block if no space in the track). 
* \{ */ +/* Get a list of the editable tracks being shown in the NLA. */ +static int nlaedit_get_editable_tracks(bAnimContext *ac, ListBase *anim_data) +{ + const int filter = (ANIMFILTER_DATA_VISIBLE | ANIMFILTER_ACTIVE | ANIMFILTER_FOREDIT | + ANIMFILTER_FCURVESONLY); + return ANIM_animdata_filter(ac, anim_data, filter, ac->data, ac->datatype); +} + +static int nlaedit_add_actionclip_invoke(bContext *C, wmOperator *op, const wmEvent *event) +{ + /* Get editor data. */ + bAnimContext ac; + if (ANIM_animdata_get_context(C, &ac) == 0) { + return OPERATOR_CANCELLED; + } + + ListBase anim_data = {NULL, NULL}; + const size_t items = nlaedit_get_editable_tracks(&ac, &anim_data); + + if (items == 0) { + BKE_report(op->reports, + RPT_ERROR, + "No active track(s) to add strip to, select an existing track or add one before " + "trying again"); + return OPERATOR_CANCELLED; + } + + return WM_enum_search_invoke(C, op, event); +} + /* add the specified action as new strip */ static int nlaedit_add_actionclip_exec(bContext *C, wmOperator *op) { @@ -615,8 +645,6 @@ static int nlaedit_add_actionclip_exec(bContext *C, wmOperator *op) ListBase anim_data = {NULL, NULL}; bAnimListElem *ale; - size_t items; - int filter; bAction *act; @@ -654,20 +682,7 @@ static int nlaedit_add_actionclip_exec(bContext *C, wmOperator *op) */ nlaedit_add_tracks_empty(&ac); - /* get a list of the editable tracks being shown in the NLA - * - this is limited to active ones for now, but could be expanded to - */ - filter = (ANIMFILTER_DATA_VISIBLE | ANIMFILTER_ACTIVE | ANIMFILTER_FOREDIT | - ANIMFILTER_FCURVESONLY); - items = ANIM_animdata_filter(&ac, &anim_data, filter, ac.data, ac.datatype); - - if (items == 0) { - BKE_report(op->reports, - RPT_ERROR, - "No active track(s) to add strip to, select an existing track or add one before " - "trying again"); - return OPERATOR_CANCELLED; - } + nlaedit_get_editable_tracks(&ac, &anim_data); /* for every active track, * try to add strip to free space in track or to 
the top of the stack if no space */ @@ -736,7 +751,7 @@ void NLA_OT_actionclip_add(wmOperatorType *ot) "Add an Action-Clip strip (i.e. an NLA Strip referencing an Action) to the active track"; /* api callbacks */ - ot->invoke = WM_enum_search_invoke; + ot->invoke = nlaedit_add_actionclip_invoke; ot->exec = nlaedit_add_actionclip_exec; ot->poll = nlaop_poll_tweakmode_off; diff --git a/source/blender/editors/space_node/link_drag_search.cc b/source/blender/editors/space_node/link_drag_search.cc index a4be0a65230..f1387da97b5 100644 --- a/source/blender/editors/space_node/link_drag_search.cc +++ b/source/blender/editors/space_node/link_drag_search.cc @@ -227,12 +227,10 @@ static void link_drag_search_exec_fn(bContext *C, void *arg1, void *arg2) ED_node_tree_propagate_change(C, &bmain, snode.edittree); /* Start translation operator with the new node. */ - wmOperatorType *ot = WM_operatortype_find("NODE_OT_translate_attach", true); + wmOperatorType *ot = WM_operatortype_find("NODE_OT_translate_attach_remove_on_cancel", true); BLI_assert(ot); PointerRNA ptr; WM_operator_properties_create_ptr(&ptr, ot); - RNA_boolean_set(&ptr, "view2d_edge_pan", true); - RNA_boolean_set(&ptr, "remove_on_cancel", true); WM_operator_name_call_ptr(C, ot, WM_OP_INVOKE_DEFAULT, &ptr, nullptr); WM_operator_properties_free(&ptr); } diff --git a/source/blender/editors/space_node/node_draw.cc b/source/blender/editors/space_node/node_draw.cc index 3da799d0fd5..3a8e5d0aed6 100644 --- a/source/blender/editors/space_node/node_draw.cc +++ b/source/blender/editors/space_node/node_draw.cc @@ -13,6 +13,7 @@ #include "DNA_light_types.h" #include "DNA_linestyle_types.h" #include "DNA_material_types.h" +#include "DNA_modifier_types.h" #include "DNA_node_types.h" #include "DNA_screen_types.h" #include "DNA_space_types.h" @@ -29,11 +30,13 @@ #include "BLT_translation.h" +#include "BKE_compute_contexts.hh" #include "BKE_context.h" #include "BKE_idtype.h" #include "BKE_lib_id.h" #include "BKE_main.h" #include 
"BKE_node.h" +#include "BKE_node_runtime.hh" #include "BKE_node_tree_update.h" #include "BKE_object.h" @@ -65,7 +68,8 @@ #include "RNA_access.h" #include "RNA_prototypes.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_exec.hh" +#include "NOD_geometry_nodes_log.hh" #include "NOD_node_declaration.hh" #include "NOD_socket_declarations_geometry.hh" @@ -74,10 +78,11 @@ #include "node_intern.hh" /* own include */ +namespace geo_log = blender::nodes::geo_eval_log; + using blender::GPointer; +using blender::Vector; using blender::fn::GField; -namespace geo_log = blender::nodes::geometry_nodes_eval_log; -using geo_log::eNamedAttrUsage; extern "C" { /* XXX interface.h */ @@ -85,6 +90,17 @@ extern void ui_draw_dropshadow( const rctf *rct, float radius, float aspect, float alpha, int select); } +/** + * This is passed to many functions which draw the node editor. + */ +struct TreeDrawContext { + /** + * Geometry nodes logs various data during execution. The logged data that corresponds to the + * currently drawn node tree can be retrieved from the log below. + */ + geo_log::GeoTreeLog *geo_tree_log = nullptr; +}; + float ED_node_grid_size() { return U.widget_unit; @@ -157,6 +173,12 @@ void ED_node_tag_update_id(ID *id) namespace blender::ed::space_node { +static void node_socket_add_tooltip_in_node_editor(TreeDrawContext * /*tree_draw_ctx*/, + const bNodeTree *ntree, + const bNode *node, + const bNodeSocket *sock, + uiLayout *layout); + static bool compare_nodes(const bNode *a, const bNode *b) { /* These tell if either the node or any of the parent nodes is selected. @@ -313,7 +335,11 @@ float2 node_from_view(const bNode &node, const float2 &co) /** * Based on settings and sockets in node, set drawing rect info. 
*/ -static void node_update_basis(const bContext &C, bNodeTree &ntree, bNode &node, uiBlock &block) +static void node_update_basis(const bContext &C, + TreeDrawContext &tree_draw_ctx, + bNodeTree &ntree, + bNode &node, + uiBlock &block) { PointerRNA nodeptr; RNA_pointer_create(&ntree.id, &RNA_Node, &node, &nodeptr); @@ -374,7 +400,7 @@ static void node_update_basis(const bContext &C, bNodeTree &ntree, bNode &node, const char *socket_label = nodeSocketLabel(socket); socket->typeinfo->draw((bContext *)&C, row, &sockptr, &nodeptr, IFACE_(socket_label)); - node_socket_add_tooltip(ntree, node, *socket, *row); + node_socket_add_tooltip_in_node_editor(&tree_draw_ctx, &ntree, &node, socket, row); UI_block_align_end(&block); UI_block_layout_resolve(&block, nullptr, &buty); @@ -506,7 +532,7 @@ static void node_update_basis(const bContext &C, bNodeTree &ntree, bNode &node, const char *socket_label = nodeSocketLabel(socket); socket->typeinfo->draw((bContext *)&C, row, &sockptr, &nodeptr, IFACE_(socket_label)); - node_socket_add_tooltip(ntree, node, *socket, *row); + node_socket_add_tooltip_in_node_editor(&tree_draw_ctx, &ntree, &node, socket, row); UI_block_align_end(&block); UI_block_layout_resolve(&block, nullptr, &buty); @@ -823,25 +849,16 @@ static void create_inspection_string_for_generic_value(const GPointer value, std } } -static void create_inspection_string_for_gfield(const geo_log::GFieldValueLog &value_log, - std::stringstream &ss) +static void create_inspection_string_for_field_info(const geo_log::FieldInfoLog &value_log, + std::stringstream &ss) { - const CPPType &type = value_log.type(); - const GField &field = value_log.field(); - const Span<std::string> input_tooltips = value_log.input_tooltips(); + const CPPType &type = value_log.type; + const Span<std::string> input_tooltips = value_log.input_tooltips; if (input_tooltips.is_empty()) { - if (field) { - BUFFER_FOR_CPP_TYPE_VALUE(type, buffer); - blender::fn::evaluate_constant_field(field, buffer); - 
create_inspection_string_for_generic_value({type, buffer}, ss); - type.destruct(buffer); - } - else { - /* Constant values should always be logged. */ - BLI_assert_unreachable(); - ss << "Value has not been logged"; - } + /* Should have been logged as constant value. */ + BLI_assert_unreachable(); + ss << "Value has not been logged"; } else { if (type.is<int>()) { @@ -874,11 +891,11 @@ static void create_inspection_string_for_gfield(const geo_log::GFieldValueLog &v } } -static void create_inspection_string_for_geometry(const geo_log::GeometryValueLog &value_log, - std::stringstream &ss, - const nodes::decl::Geometry *geometry) +static void create_inspection_string_for_geometry_info(const geo_log::GeometryInfoLog &value_log, + std::stringstream &ss, + const nodes::decl::Geometry *socket_decl) { - Span<GeometryComponentType> component_types = value_log.component_types(); + Span<GeometryComponentType> component_types = value_log.component_types; if (component_types.is_empty()) { ss << TIP_("Empty Geometry"); return; @@ -895,7 +912,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo const char *line_end = (type == component_types.last()) ? 
"" : ".\n"; switch (type) { case GEO_COMPONENT_TYPE_MESH: { - const geo_log::GeometryValueLog::MeshInfo &mesh_info = *value_log.mesh_info; + const geo_log::GeometryInfoLog::MeshInfo &mesh_info = *value_log.mesh_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -907,7 +924,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo break; } case GEO_COMPONENT_TYPE_POINT_CLOUD: { - const geo_log::GeometryValueLog::PointCloudInfo &pointcloud_info = + const geo_log::GeometryInfoLog::PointCloudInfo &pointcloud_info = *value_log.pointcloud_info; char line[256]; BLI_snprintf(line, @@ -918,7 +935,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo break; } case GEO_COMPONENT_TYPE_CURVE: { - const geo_log::GeometryValueLog::CurveInfo &curve_info = *value_log.curve_info; + const geo_log::GeometryInfoLog::CurveInfo &curve_info = *value_log.curve_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -928,7 +945,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo break; } case GEO_COMPONENT_TYPE_INSTANCES: { - const geo_log::GeometryValueLog::InstancesInfo &instances_info = *value_log.instances_info; + const geo_log::GeometryInfoLog::InstancesInfo &instances_info = *value_log.instances_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -943,7 +960,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo } case GEO_COMPONENT_TYPE_EDIT: { if (value_log.edit_data_info.has_value()) { - const geo_log::GeometryValueLog::EditDataInfo &edit_info = *value_log.edit_data_info; + const geo_log::GeometryInfoLog::EditDataInfo &edit_info = *value_log.edit_data_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -959,11 +976,11 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo /* If the geometry declaration is null, as is the case for input to group output, * or it is an output socket don't show supported types. 
*/ - if (geometry == nullptr || geometry->in_out() == SOCK_OUT) { + if (socket_decl == nullptr || socket_decl->in_out() == SOCK_OUT) { return; } - Span<GeometryComponentType> supported_types = geometry->supported_types(); + Span<GeometryComponentType> supported_types = socket_decl->supported_types(); if (supported_types.is_empty()) { ss << ".\n\n" << TIP_("Supported: All Types"); return; @@ -1000,43 +1017,37 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo } } -static std::optional<std::string> create_socket_inspection_string(const bContext &C, - const bNode &node, +static std::optional<std::string> create_socket_inspection_string(TreeDrawContext &tree_draw_ctx, const bNodeSocket &socket) { - const SpaceNode *snode = CTX_wm_space_node(&C); - if (snode == nullptr) { - return {}; - }; - - const geo_log::SocketLog *socket_log = geo_log::ModifierLog::find_socket_by_node_editor_context( - *snode, node, socket); - if (socket_log == nullptr) { - return {}; - } - const geo_log::ValueLog *value_log = socket_log->value(); + using namespace blender::nodes::geo_eval_log; + tree_draw_ctx.geo_tree_log->ensure_socket_values(); + ValueLog *value_log = tree_draw_ctx.geo_tree_log->find_socket_value_log(socket); if (value_log == nullptr) { - return {}; + return std::nullopt; } - std::stringstream ss; if (const geo_log::GenericValueLog *generic_value_log = dynamic_cast<const geo_log::GenericValueLog *>(value_log)) { - create_inspection_string_for_generic_value(generic_value_log->value(), ss); + create_inspection_string_for_generic_value(generic_value_log->value, ss); } - if (const geo_log::GFieldValueLog *gfield_value_log = - dynamic_cast<const geo_log::GFieldValueLog *>(value_log)) { - create_inspection_string_for_gfield(*gfield_value_log, ss); + else if (const geo_log::FieldInfoLog *gfield_value_log = + dynamic_cast<const geo_log::FieldInfoLog *>(value_log)) { + create_inspection_string_for_field_info(*gfield_value_log, ss); } - else if (const 
geo_log::GeometryValueLog *geo_value_log = - dynamic_cast<const geo_log::GeometryValueLog *>(value_log)) { - create_inspection_string_for_geometry( + else if (const geo_log::GeometryInfoLog *geo_value_log = + dynamic_cast<const geo_log::GeometryInfoLog *>(value_log)) { + create_inspection_string_for_geometry_info( *geo_value_log, ss, dynamic_cast<const nodes::decl::Geometry *>(socket.runtime->declaration)); } - return ss.str(); + std::string str = ss.str(); + if (str.empty()) { + return std::nullopt; + } + return str; } static bool node_socket_has_tooltip(const bNodeTree &ntree, const bNodeSocket &socket) @@ -1046,34 +1057,42 @@ static bool node_socket_has_tooltip(const bNodeTree &ntree, const bNodeSocket &s } if (socket.runtime->declaration != nullptr) { - const blender::nodes::SocketDeclaration &socket_decl = *socket.runtime->declaration; + const nodes::SocketDeclaration &socket_decl = *socket.runtime->declaration; return !socket_decl.description().is_empty(); } return false; } -static char *node_socket_get_tooltip(const bContext &C, - const bNodeTree &ntree, - const bNode &node, - const bNodeSocket &socket) +static char *node_socket_get_tooltip(const bContext *C, + const bNodeTree *ntree, + const bNode *UNUSED(node), + const bNodeSocket *socket) { + SpaceNode *snode = CTX_wm_space_node(C); + TreeDrawContext tree_draw_ctx; + if (snode != nullptr) { + if (ntree->type == NTREE_GEOMETRY) { + tree_draw_ctx.geo_tree_log = geo_log::GeoModifierLog::get_tree_log_for_node_editor(*snode); + } + } + std::stringstream output; - if (socket.runtime->declaration != nullptr) { - const blender::nodes::SocketDeclaration &socket_decl = *socket.runtime->declaration; + if (socket->runtime->declaration != nullptr) { + const blender::nodes::SocketDeclaration &socket_decl = *socket->runtime->declaration; blender::StringRef description = socket_decl.description(); if (!description.is_empty()) { output << TIP_(description.data()); } } - if (ntree.type == NTREE_GEOMETRY) { + if 
(ntree->type == NTREE_GEOMETRY && tree_draw_ctx.geo_tree_log != nullptr) { if (!output.str().empty()) { output << ".\n\n"; } std::optional<std::string> socket_inspection_str = create_socket_inspection_string( - C, node, socket); + tree_draw_ctx, *socket); if (socket_inspection_str.has_value()) { output << *socket_inspection_str; } @@ -1083,37 +1102,46 @@ static char *node_socket_get_tooltip(const bContext &C, } if (output.str().empty()) { - output << nodeSocketLabel(&socket); + output << nodeSocketLabel(socket); } return BLI_strdup(output.str().c_str()); } -void node_socket_add_tooltip(const bNodeTree &ntree, - const bNode &node, - const bNodeSocket &sock, - uiLayout &layout) +static void node_socket_add_tooltip_in_node_editor(TreeDrawContext *UNUSED(tree_draw_ctx), + const bNodeTree *ntree, + const bNode *node, + const bNodeSocket *sock, + uiLayout *layout) { - if (!node_socket_has_tooltip(ntree, sock)) { + if (!node_socket_has_tooltip(*ntree, *sock)) { return; } - SocketTooltipData *data = MEM_new<SocketTooltipData>(__func__); - data->ntree = &ntree; - data->node = &node; - data->socket = &sock; + SocketTooltipData *data = MEM_cnew<SocketTooltipData>(__func__); + data->ntree = ntree; + data->node = node; + data->socket = sock; uiLayoutSetTooltipFunc( - &layout, + layout, [](bContext *C, void *argN, const char *UNUSED(tip)) { - const SocketTooltipData *data = static_cast<SocketTooltipData *>(argN); - return node_socket_get_tooltip(*C, *data->ntree, *data->node, *data->socket); + SocketTooltipData *data = static_cast<SocketTooltipData *>(argN); + return node_socket_get_tooltip(C, data->ntree, data->node, data->socket); }, data, MEM_dupallocN, MEM_freeN); } +void node_socket_add_tooltip(const bNodeTree &ntree, + const bNode &node, + const bNodeSocket &sock, + uiLayout &layout) +{ + node_socket_add_tooltip_in_node_editor(nullptr, &ntree, &node, &sock, &layout); +} + static void node_socket_draw_nested(const bContext &C, bNodeTree &ntree, PointerRNA &node_ptr, @@ 
-1178,7 +1206,7 @@ static void node_socket_draw_nested(const bContext &C, but, [](bContext *C, void *argN, const char *UNUSED(tip)) { SocketTooltipData *data = (SocketTooltipData *)argN; - return node_socket_get_tooltip(*C, *data->ntree, *data->node, *data->socket); + return node_socket_get_tooltip(C, data->ntree, data->node, data->socket); }, data, MEM_freeN); @@ -1607,27 +1635,26 @@ static char *node_errors_tooltip_fn(bContext *UNUSED(C), void *argN, const char #define NODE_HEADER_ICON_SIZE (0.8f * U.widget_unit) -static void node_add_error_message_button( - const bContext &C, bNode &node, uiBlock &block, const rctf &rect, float &icon_offset) +static void node_add_error_message_button(TreeDrawContext &tree_draw_ctx, + bNode &node, + uiBlock &block, + const rctf &rect, + float &icon_offset) { - SpaceNode *snode = CTX_wm_space_node(&C); - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context(*snode, - node); - if (node_log == nullptr) { - return; + Span<geo_log::NodeWarning> warnings; + if (tree_draw_ctx.geo_tree_log) { + geo_log::GeoNodeLog *node_log = tree_draw_ctx.geo_tree_log->nodes.lookup_ptr(node.name); + if (node_log != nullptr) { + warnings = node_log->warnings; + } } - - Span<geo_log::NodeWarning> warnings = node_log->warnings(); - if (warnings.is_empty()) { return; } - NodeErrorsTooltipData *tooltip_data = (NodeErrorsTooltipData *)MEM_mallocN( - sizeof(NodeErrorsTooltipData), __func__); - tooltip_data->warnings = warnings; - const geo_log::NodeWarningType display_type = node_error_highest_priority(warnings); + NodeErrorsTooltipData *tooltip_data = MEM_new<NodeErrorsTooltipData>(__func__); + tooltip_data->warnings = warnings; icon_offset -= NODE_HEADER_ICON_SIZE; UI_block_emboss_set(&block, UI_EMBOSS_NONE); @@ -1645,90 +1672,70 @@ static void node_add_error_message_button( 0, 0, nullptr); - UI_but_func_tooltip_set(but, node_errors_tooltip_fn, tooltip_data, MEM_freeN); + UI_but_func_tooltip_set(but, 
node_errors_tooltip_fn, tooltip_data, [](void *arg) { + MEM_delete(static_cast<NodeErrorsTooltipData *>(arg)); + }); UI_block_emboss_set(&block, UI_EMBOSS); } -static void get_exec_time_other_nodes(const bNode &node, - const SpaceNode &snode, - std::chrono::microseconds &exec_time, - int &node_count) +static std::optional<std::chrono::nanoseconds> node_get_execution_time( + TreeDrawContext &tree_draw_ctx, const bNodeTree &ntree, const bNode &node) { - if (node.type == NODE_GROUP) { - const geo_log::TreeLog *root_tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - snode); - if (root_tree_log == nullptr) { - return; - } - const geo_log::TreeLog *tree_log = root_tree_log->lookup_child_log(node.name); - if (tree_log == nullptr) { - return; - } - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - exec_time += node_log.execution_time(); - node_count++; - }); + const geo_log::GeoTreeLog *tree_log = tree_draw_ctx.geo_tree_log; + if (tree_log == nullptr) { + return std::nullopt; } - else { - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context( - snode, node); - if (node_log) { - exec_time += node_log->execution_time(); - node_count++; - } - } -} - -static std::chrono::microseconds node_get_execution_time(const bNodeTree &ntree, - const bNode &node, - const SpaceNode &snode, - int &node_count) -{ - std::chrono::microseconds exec_time = std::chrono::microseconds::zero(); if (node.type == NODE_GROUP_OUTPUT) { - const geo_log::TreeLog *tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - snode); - - if (tree_log == nullptr) { - return exec_time; - } - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - exec_time += node_log.execution_time(); - node_count++; - }); + return tree_log->run_time_sum; } - else if (node.type == NODE_FRAME) { + if (node.type == NODE_FRAME) { /* Could be cached in the future if this recursive code turns out to be slow. 
*/ + std::chrono::nanoseconds run_time{0}; + bool found_node = false; LISTBASE_FOREACH (bNode *, tnode, &ntree.nodes) { if (tnode->parent != &node) { continue; } if (tnode->type == NODE_FRAME) { - exec_time += node_get_execution_time(ntree, *tnode, snode, node_count); + std::optional<std::chrono::nanoseconds> sub_frame_run_time = node_get_execution_time( + tree_draw_ctx, ntree, *tnode); + if (sub_frame_run_time.has_value()) { + run_time += *sub_frame_run_time; + found_node = true; + } } else { - get_exec_time_other_nodes(*tnode, snode, exec_time, node_count); + if (const geo_log::GeoNodeLog *node_log = tree_log->nodes.lookup_ptr_as(tnode->name)) { + found_node = true; + run_time += node_log->run_time; + } } } + if (found_node) { + return run_time; + } + return std::nullopt; } - else { - get_exec_time_other_nodes(node, snode, exec_time, node_count); + if (const geo_log::GeoNodeLog *node_log = tree_log->nodes.lookup_ptr(node.name)) { + return node_log->run_time; } - return exec_time; + return std::nullopt; } -static std::string node_get_execution_time_label(const SpaceNode &snode, const bNode &node) +static std::string node_get_execution_time_label(TreeDrawContext &tree_draw_ctx, + const SpaceNode &snode, + const bNode &node) { - int node_count = 0; - std::chrono::microseconds exec_time = node_get_execution_time( - *snode.edittree, node, snode, node_count); + const std::optional<std::chrono::nanoseconds> exec_time = node_get_execution_time( + tree_draw_ctx, *snode.edittree, node); - if (node_count == 0) { + if (!exec_time.has_value()) { return std::string(""); } - uint64_t exec_time_us = exec_time.count(); + const uint64_t exec_time_us = + std::chrono::duration_cast<std::chrono::microseconds>(*exec_time).count(); /* Don't show time if execution time is 0 microseconds. 
*/ if (exec_time_us == 0) { @@ -1763,7 +1770,7 @@ struct NodeExtraInfoRow { }; struct NamedAttributeTooltipArg { - Map<std::string, eNamedAttrUsage> usage_by_attribute; + Map<std::string, geo_log::NamedAttributeUsage> usage_by_attribute; }; static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char *UNUSED(tip)) @@ -1775,7 +1782,7 @@ static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char struct NameWithUsage { StringRefNull name; - eNamedAttrUsage usage; + geo_log::NamedAttributeUsage usage; }; Vector<NameWithUsage> sorted_used_attribute; @@ -1790,16 +1797,16 @@ static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char for (const NameWithUsage &attribute : sorted_used_attribute) { const StringRefNull name = attribute.name; - const eNamedAttrUsage usage = attribute.usage; + const geo_log::NamedAttributeUsage usage = attribute.usage; ss << " \u2022 \"" << name << "\": "; Vector<std::string> usages; - if ((usage & eNamedAttrUsage::Read) != eNamedAttrUsage::None) { + if ((usage & geo_log::NamedAttributeUsage::Read) != geo_log::NamedAttributeUsage::None) { usages.append(TIP_("read")); } - if ((usage & eNamedAttrUsage::Write) != eNamedAttrUsage::None) { + if ((usage & geo_log::NamedAttributeUsage::Write) != geo_log::NamedAttributeUsage::None) { usages.append(TIP_("write")); } - if ((usage & eNamedAttrUsage::Remove) != eNamedAttrUsage::None) { + if ((usage & geo_log::NamedAttributeUsage::Remove) != geo_log::NamedAttributeUsage::None) { usages.append(TIP_("remove")); } for (const int i : usages.index_range()) { @@ -1817,7 +1824,7 @@ static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char } static NodeExtraInfoRow row_from_used_named_attribute( - const Map<std::string, eNamedAttrUsage> &usage_by_attribute_name) + const Map<std::string, geo_log::NamedAttributeUsage> &usage_by_attribute_name) { const int attributes_num = usage_by_attribute_name.size(); @@ -1831,32 +1838,11 @@ 
static NodeExtraInfoRow row_from_used_named_attribute( return row; } -static std::optional<NodeExtraInfoRow> node_get_accessed_attributes_row(const SpaceNode &snode, - const bNode &node) +static std::optional<NodeExtraInfoRow> node_get_accessed_attributes_row( + TreeDrawContext &tree_draw_ctx, const bNode &node) { - if (node.type == NODE_GROUP) { - const geo_log::TreeLog *root_tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - snode); - if (root_tree_log == nullptr) { - return std::nullopt; - } - const geo_log::TreeLog *tree_log = root_tree_log->lookup_child_log(node.name); - if (tree_log == nullptr) { - return std::nullopt; - } - - Map<std::string, eNamedAttrUsage> usage_by_attribute; - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::UsedNamedAttribute &used_attribute : node_log.used_named_attributes()) { - usage_by_attribute.lookup_or_add_as(used_attribute.name, - used_attribute.usage) |= used_attribute.usage; - } - }); - if (usage_by_attribute.is_empty()) { - return std::nullopt; - } - - return row_from_used_named_attribute(usage_by_attribute); + if (tree_draw_ctx.geo_tree_log == nullptr) { + return std::nullopt; } if (ELEM(node.type, GEO_NODE_STORE_NAMED_ATTRIBUTE, @@ -1865,31 +1851,26 @@ static std::optional<NodeExtraInfoRow> node_get_accessed_attributes_row(const Sp /* Only show the overlay when the name is passed in from somewhere else. 
*/ LISTBASE_FOREACH (bNodeSocket *, socket, &node.inputs) { if (STREQ(socket->name, "Name")) { - if ((socket->flag & SOCK_IN_USE) == 0) { + if (!socket->is_directly_linked()) { return std::nullopt; } } } - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context( - snode, node.name); - if (node_log == nullptr) { - return std::nullopt; - } - Map<std::string, eNamedAttrUsage> usage_by_attribute; - for (const geo_log::UsedNamedAttribute &used_attribute : node_log->used_named_attributes()) { - usage_by_attribute.lookup_or_add_as(used_attribute.name, - used_attribute.usage) |= used_attribute.usage; - } - if (usage_by_attribute.is_empty()) { - return std::nullopt; - } - return row_from_used_named_attribute(usage_by_attribute); } - - return std::nullopt; + tree_draw_ctx.geo_tree_log->ensure_used_named_attributes(); + geo_log::GeoNodeLog *node_log = tree_draw_ctx.geo_tree_log->nodes.lookup_ptr(node.name); + if (node_log == nullptr) { + return std::nullopt; + } + if (node_log->used_named_attributes.is_empty()) { + return std::nullopt; + } + return row_from_used_named_attribute(node_log->used_named_attributes); } -static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode &snode, const bNode &node) +static Vector<NodeExtraInfoRow> node_get_extra_info(TreeDrawContext &tree_draw_ctx, + const SpaceNode &snode, + const bNode &node) { Vector<NodeExtraInfoRow> rows; if (!(snode.overlay.flag & SN_OVERLAY_SHOW_OVERLAYS)) { @@ -1898,7 +1879,8 @@ static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode &snode, cons if (snode.overlay.flag & SN_OVERLAY_SHOW_NAMED_ATTRIBUTES && snode.edittree->type == NTREE_GEOMETRY) { - if (std::optional<NodeExtraInfoRow> row = node_get_accessed_attributes_row(snode, node)) { + if (std::optional<NodeExtraInfoRow> row = node_get_accessed_attributes_row(tree_draw_ctx, + node)) { rows.append(std::move(*row)); } } @@ -1907,7 +1889,7 @@ static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode 
&snode, cons (ELEM(node.typeinfo->nclass, NODE_CLASS_GEOMETRY, NODE_CLASS_GROUP, NODE_CLASS_ATTRIBUTE) || ELEM(node.type, NODE_FRAME, NODE_GROUP_OUTPUT))) { NodeExtraInfoRow row; - row.text = node_get_execution_time_label(snode, node); + row.text = node_get_execution_time_label(tree_draw_ctx, snode, node); if (!row.text.empty()) { row.tooltip = TIP_( "The execution time from the node tree's latest evaluation. For frame and group nodes, " @@ -1916,14 +1898,17 @@ static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode &snode, cons rows.append(std::move(row)); } } - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context(snode, - node); - if (node_log != nullptr) { - for (const std::string &message : node_log->debug_messages()) { - NodeExtraInfoRow row; - row.text = message; - row.icon = ICON_INFO; - rows.append(std::move(row)); + + if (snode.edittree->type == NTREE_GEOMETRY && tree_draw_ctx.geo_tree_log != nullptr) { + tree_draw_ctx.geo_tree_log->ensure_debug_messages(); + const geo_log::GeoNodeLog *node_log = tree_draw_ctx.geo_tree_log->nodes.lookup_ptr(node.name); + if (node_log != nullptr) { + for (const StringRef message : node_log->debug_messages) { + NodeExtraInfoRow row; + row.text = message; + row.icon = ICON_INFO; + rows.append(std::move(row)); + } } } @@ -1988,9 +1973,12 @@ static void node_draw_extra_info_row(const bNode &node, } } -static void node_draw_extra_info_panel(const SpaceNode &snode, const bNode &node, uiBlock &block) +static void node_draw_extra_info_panel(TreeDrawContext &tree_draw_ctx, + const SpaceNode &snode, + const bNode &node, + uiBlock &block) { - Vector<NodeExtraInfoRow> extra_info_rows = node_get_extra_info(snode, node); + Vector<NodeExtraInfoRow> extra_info_rows = node_get_extra_info(tree_draw_ctx, snode, node); if (extra_info_rows.size() == 0) { return; @@ -2046,6 +2034,7 @@ static void node_draw_extra_info_panel(const SpaceNode &snode, const bNode &node } static void 
node_draw_basis(const bContext &C, + TreeDrawContext &tree_draw_ctx, const View2D &v2d, const SpaceNode &snode, bNodeTree &ntree, @@ -2070,7 +2059,7 @@ static void node_draw_basis(const bContext &C, GPU_line_width(1.0f); - node_draw_extra_info_panel(snode, node, block); + node_draw_extra_info_panel(tree_draw_ctx, snode, node, block); /* Header. */ { @@ -2165,7 +2154,7 @@ static void node_draw_basis(const bContext &C, UI_block_emboss_set(&block, UI_EMBOSS); } - node_add_error_message_button(C, node, block, rct, iconofs); + node_add_error_message_button(tree_draw_ctx, node, block, rct, iconofs); /* Title. */ if (node.flag & SELECT) { @@ -2338,6 +2327,7 @@ static void node_draw_basis(const bContext &C, } static void node_draw_hidden(const bContext &C, + TreeDrawContext &tree_draw_ctx, const View2D &v2d, const SpaceNode &snode, bNodeTree &ntree, @@ -2353,7 +2343,7 @@ static void node_draw_hidden(const bContext &C, const int color_id = node_get_colorid(node); - node_draw_extra_info_panel(snode, node, block); + node_draw_extra_info_panel(tree_draw_ctx, snode, node, block); /* Shadow. 
*/ node_draw_shadow(snode, node, hiddenrad, 1.0f); @@ -2668,6 +2658,7 @@ static void reroute_node_prepare_for_draw(bNode &node) } static void node_update_nodetree(const bContext &C, + TreeDrawContext &tree_draw_ctx, bNodeTree &ntree, Span<bNode *> nodes, Span<uiBlock *> blocks) @@ -2694,7 +2685,7 @@ static void node_update_nodetree(const bContext &C, node_update_hidden(node, block); } else { - node_update_basis(C, ntree, node, block); + node_update_basis(C, tree_draw_ctx, ntree, node, block); } } } @@ -2795,6 +2786,7 @@ static void frame_node_draw_label(const bNodeTree &ntree, } static void frame_node_draw(const bContext &C, + TreeDrawContext &tree_draw_ctx, const ARegion ®ion, const SpaceNode &snode, bNodeTree &ntree, @@ -2841,7 +2833,7 @@ static void frame_node_draw(const bContext &C, /* label and text */ frame_node_draw_label(ntree, node, snode); - node_draw_extra_info_panel(snode, node, block); + node_draw_extra_info_panel(tree_draw_ctx, snode, node, block); UI_block_end(&C, &block); UI_block_draw(&C, &block); @@ -2895,6 +2887,7 @@ static void reroute_node_draw( } static void node_draw(const bContext &C, + TreeDrawContext &tree_draw_ctx, ARegion ®ion, const SpaceNode &snode, bNodeTree &ntree, @@ -2903,7 +2896,7 @@ static void node_draw(const bContext &C, bNodeInstanceKey key) { if (node.type == NODE_FRAME) { - frame_node_draw(C, region, snode, ntree, node, block); + frame_node_draw(C, tree_draw_ctx, region, snode, ntree, node, block); } else if (node.type == NODE_REROUTE) { reroute_node_draw(C, region, ntree, node, block); @@ -2911,10 +2904,10 @@ static void node_draw(const bContext &C, else { const View2D &v2d = region.v2d; if (node.flag & NODE_HIDDEN) { - node_draw_hidden(C, v2d, snode, ntree, node, block); + node_draw_hidden(C, tree_draw_ctx, v2d, snode, ntree, node, block); } else { - node_draw_basis(C, v2d, snode, ntree, node, block, key); + node_draw_basis(C, tree_draw_ctx, v2d, snode, ntree, node, block, key); } } } @@ -2922,6 +2915,7 @@ static void 
node_draw(const bContext &C, #define USE_DRAW_TOT_UPDATE static void node_draw_nodetree(const bContext &C, + TreeDrawContext &tree_draw_ctx, ARegion ®ion, SpaceNode &snode, bNodeTree &ntree, @@ -2946,7 +2940,7 @@ static void node_draw_nodetree(const bContext &C, } bNodeInstanceKey key = BKE_node_instance_key(parent_key, &ntree, nodes[i]); - node_draw(C, region, snode, ntree, *nodes[i], *blocks[i], key); + node_draw(C, tree_draw_ctx, region, snode, ntree, *nodes[i], *blocks[i], key); } /* Node lines. */ @@ -2976,7 +2970,7 @@ static void node_draw_nodetree(const bContext &C, } bNodeInstanceKey key = BKE_node_instance_key(parent_key, &ntree, nodes[i]); - node_draw(C, region, snode, ntree, *nodes[i], *blocks[i], key); + node_draw(C, tree_draw_ctx, region, snode, ntree, *nodes[i], *blocks[i], key); } } @@ -3035,8 +3029,17 @@ static void draw_nodetree(const bContext &C, Array<uiBlock *> blocks = node_uiblocks_init(C, nodes); - node_update_nodetree(C, ntree, nodes, blocks); - node_draw_nodetree(C, region, *snode, ntree, nodes, blocks, parent_key); + TreeDrawContext tree_draw_ctx; + if (ntree.type == NTREE_GEOMETRY) { + tree_draw_ctx.geo_tree_log = geo_log::GeoModifierLog::get_tree_log_for_node_editor(*snode); + if (tree_draw_ctx.geo_tree_log != nullptr) { + tree_draw_ctx.geo_tree_log->ensure_node_warnings(); + tree_draw_ctx.geo_tree_log->ensure_node_run_time(); + } + } + + node_update_nodetree(C, tree_draw_ctx, ntree, nodes, blocks); + node_draw_nodetree(C, tree_draw_ctx, region, *snode, ntree, nodes, blocks, parent_key); } /** diff --git a/source/blender/editors/space_node/node_geometry_attribute_search.cc b/source/blender/editors/space_node/node_geometry_attribute_search.cc index e328a86b0fd..809c4b2fe59 100644 --- a/source/blender/editors/space_node/node_geometry_attribute_search.cc +++ b/source/blender/editors/space_node/node_geometry_attribute_search.cc @@ -14,6 +14,7 @@ #include "DNA_space_types.h" #include "BKE_context.h" +#include "BKE_node_runtime.hh" #include 
"BKE_node_tree_update.h" #include "BKE_object.h" @@ -30,12 +31,11 @@ #include "UI_interface.hh" #include "UI_resources.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_log.hh" #include "node_intern.hh" -namespace geo_log = blender::nodes::geometry_nodes_eval_log; -using geo_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryAttributeInfo; namespace blender::ed::space_node { @@ -50,6 +50,8 @@ BLI_STATIC_ASSERT(std::is_trivially_destructible_v<AttributeSearchData>, ""); static Vector<const GeometryAttributeInfo *> get_attribute_info_from_context( const bContext &C, AttributeSearchData &data) { + using namespace nodes::geo_eval_log; + SpaceNode *snode = CTX_wm_space_node(&C); if (!snode) { BLI_assert_unreachable(); @@ -65,41 +67,48 @@ static Vector<const GeometryAttributeInfo *> get_attribute_info_from_context( BLI_assert_unreachable(); return {}; } + GeoTreeLog *tree_log = GeoModifierLog::get_tree_log_for_node_editor(*snode); + if (tree_log == nullptr) { + return {}; + } + tree_log->ensure_socket_values(); /* For the attribute input node, collect attribute information from all nodes in the group. 
*/ if (node->type == GEO_NODE_INPUT_NAMED_ATTRIBUTE) { - const geo_log::TreeLog *tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - *snode); - if (tree_log == nullptr) { - return {}; - } - + tree_log->ensure_existing_attributes(); Vector<const GeometryAttributeInfo *> attributes; - Set<StringRef> names; - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::SocketLog &socket_log : node_log.input_logs()) { - const geo_log::ValueLog *value_log = socket_log.value(); - if (const geo_log::GeometryValueLog *geo_value_log = - dynamic_cast<const geo_log::GeometryValueLog *>(value_log)) { - for (const GeometryAttributeInfo &attribute : geo_value_log->attributes()) { - if (bke::allow_procedural_attribute_access(attribute.name)) { - if (names.add(attribute.name)) { - attributes.append(&attribute); - } - } - } - } + for (const GeometryAttributeInfo *attribute : tree_log->existing_attributes) { + if (bke::allow_procedural_attribute_access(attribute->name)) { + attributes.append(attribute); } - }); + } return attributes; } - - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context( - *snode, data.node_name); + GeoNodeLog *node_log = tree_log->nodes.lookup_ptr(node->name); if (node_log == nullptr) { return {}; } - return node_log->lookup_available_attributes(); + Set<StringRef> names; + Vector<const GeometryAttributeInfo *> attributes; + for (const bNodeSocket *input_socket : node->input_sockets()) { + if (input_socket->type != SOCK_GEOMETRY) { + continue; + } + const ValueLog *value_log = tree_log->find_socket_value_log(*input_socket); + if (value_log == nullptr) { + continue; + } + if (const GeometryInfoLog *geo_log = dynamic_cast<const GeometryInfoLog *>(value_log)) { + for (const GeometryAttributeInfo &attribute : geo_log->attributes) { + if (bke::allow_procedural_attribute_access(attribute.name)) { + if (names.add(attribute.name)) { + attributes.append(&attribute); + } + } + } + } + } + 
return attributes; } static void attribute_search_update_fn( diff --git a/source/blender/editors/space_node/node_ops.cc b/source/blender/editors/space_node/node_ops.cc index 3b3189983e2..a208370a6f9 100644 --- a/source/blender/editors/space_node/node_ops.cc +++ b/source/blender/editors/space_node/node_ops.cc @@ -144,7 +144,7 @@ void ED_operatormacros_node() WM_operatortype_macro_define(ot, "NODE_OT_attach"); WM_operatortype_macro_define(ot, "NODE_OT_insert_offset"); - /* NODE_OT_translate_attach with remove_on_canel set to true */ + /* NODE_OT_translate_attach with remove_on_cancel set to true. */ ot = WM_operatortype_append_macro("NODE_OT_translate_attach_remove_on_cancel", "Move and Attach", "Move nodes and attach to frame", diff --git a/source/blender/editors/space_node/node_relationships.cc b/source/blender/editors/space_node/node_relationships.cc index 7dbaa8ccd6d..929fb64bd70 100644 --- a/source/blender/editors/space_node/node_relationships.cc +++ b/source/blender/editors/space_node/node_relationships.cc @@ -1727,32 +1727,34 @@ static int node_attach_invoke(bContext *C, wmOperator *UNUSED(op), const wmEvent SpaceNode &snode = *CTX_wm_space_node(C); bNodeTree &ntree = *snode.edittree; bNode *frame = node_find_frame_to_attach(region, ntree, event->mval); + if (frame == nullptr) { + /* Return "finished" so that auto offset operator macros can work. 
*/ + return OPERATOR_FINISHED; + } - if (frame) { - LISTBASE_FOREACH_BACKWARD (bNode *, node, &ntree.nodes) { - if (node->flag & NODE_SELECT) { - if (node->parent == nullptr) { - /* disallow moving a parent into its child */ - if (nodeAttachNodeCheck(frame, node) == false) { - /* attach all unparented nodes */ - nodeAttachNode(node, frame); - } + LISTBASE_FOREACH_BACKWARD (bNode *, node, &ntree.nodes) { + if (node->flag & NODE_SELECT) { + if (node->parent == nullptr) { + /* disallow moving a parent into its child */ + if (nodeAttachNodeCheck(frame, node) == false) { + /* attach all unparented nodes */ + nodeAttachNode(node, frame); } - else { - /* attach nodes which share parent with the frame */ - bNode *parent; - for (parent = frame->parent; parent; parent = parent->parent) { - if (parent == node->parent) { - break; - } + } + else { + /* attach nodes which share parent with the frame */ + bNode *parent; + for (parent = frame->parent; parent; parent = parent->parent) { + if (parent == node->parent) { + break; } + } - if (parent) { - /* disallow moving a parent into its child */ - if (nodeAttachNodeCheck(frame, node) == false) { - nodeDetachNode(node); - nodeAttachNode(node, frame); - } + if (parent) { + /* disallow moving a parent into its child */ + if (nodeAttachNodeCheck(frame, node) == false) { + nodeDetachNode(node); + nodeAttachNode(node, frame); } } } diff --git a/source/blender/editors/space_node/node_select.cc b/source/blender/editors/space_node/node_select.cc index 1f1ce9c0c2b..d93b205b1b7 100644 --- a/source/blender/editors/space_node/node_select.cc +++ b/source/blender/editors/space_node/node_select.cc @@ -644,28 +644,29 @@ static bool node_mouse_select(bContext *C, } } - /* update node order */ - if (changed || found) { - bool active_texture_changed = false; - bool viewer_node_changed = false; - if ((node != nullptr) && (node_was_selected == false || params->select_passthrough == false)) { - viewer_node_changed = (node->flag & NODE_DO_OUTPUT) == 0 && 
node->type == GEO_NODE_VIEWER; - ED_node_set_active(&bmain, &snode, snode.edittree, node, &active_texture_changed); - } - else if (node != nullptr && node->type == GEO_NODE_VIEWER) { - ED_spreadsheet_context_paths_set_geometry_node(&bmain, &snode, node); - } - ED_node_set_active_viewer_key(&snode); - node_sort(*snode.edittree); - if ((active_texture_changed && has_workbench_in_texture_color(wm, scene, ob)) || - viewer_node_changed) { - DEG_id_tag_update(&snode.edittree->id, ID_RECALC_COPY_ON_WRITE); - } + if (!(changed || found)) { + return false; + } - WM_event_add_notifier(C, NC_NODE | NA_SELECTED, nullptr); + bool active_texture_changed = false; + bool viewer_node_changed = false; + if ((node != nullptr) && (node_was_selected == false || params->select_passthrough == false)) { + viewer_node_changed = (node->flag & NODE_DO_OUTPUT) == 0 && node->type == GEO_NODE_VIEWER; + ED_node_set_active(&bmain, &snode, snode.edittree, node, &active_texture_changed); } + else if (node != nullptr && node->type == GEO_NODE_VIEWER) { + ED_spreadsheet_context_paths_set_geometry_node(&bmain, &snode, node); + } + ED_node_set_active_viewer_key(&snode); + node_sort(*snode.edittree); + if ((active_texture_changed && has_workbench_in_texture_color(wm, scene, ob)) || + viewer_node_changed) { + DEG_id_tag_update(&snode.edittree->id, ID_RECALC_COPY_ON_WRITE); + } + + WM_event_add_notifier(C, NC_NODE | NA_SELECTED, nullptr); - return changed || found; + return true; } static int node_select_exec(bContext *C, wmOperator *op) diff --git a/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc b/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc index 3290c0ddd87..4703eacdcb9 100644 --- a/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc +++ b/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc @@ -4,6 +4,7 @@ #include "BLI_virtual_array.hh" #include "BKE_attribute.hh" +#include 
"BKE_compute_contexts.hh" #include "BKE_context.h" #include "BKE_curves.hh" #include "BKE_editmesh.h" @@ -26,7 +27,8 @@ #include "ED_curves_sculpt.h" #include "ED_spreadsheet.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_lazy_function.hh" +#include "NOD_geometry_nodes_log.hh" #include "BLT_translation.h" @@ -40,8 +42,8 @@ #include "spreadsheet_data_source_geometry.hh" #include "spreadsheet_intern.hh" -namespace geo_log = blender::nodes::geometry_nodes_eval_log; using blender::fn::GField; +using blender::nodes::geo_eval_log::ViewerNodeLog; namespace blender::ed::spreadsheet { @@ -465,19 +467,10 @@ GeometrySet spreadsheet_get_display_geometry_set(const SpaceSpreadsheet *sspread } } else { - const geo_log::NodeLog *node_log = - geo_log::ModifierLog::find_node_by_spreadsheet_editor_context(*sspreadsheet); - if (node_log != nullptr) { - for (const geo_log::SocketLog &input_log : node_log->input_logs()) { - if (const geo_log::GeometryValueLog *geo_value_log = - dynamic_cast<const geo_log::GeometryValueLog *>(input_log.value())) { - const GeometrySet *full_geometry = geo_value_log->full_geometry(); - if (full_geometry != nullptr) { - geometry_set = *full_geometry; - break; - } - } - } + if (const ViewerNodeLog *viewer_log = + nodes::geo_eval_log::GeoModifierLog::find_viewer_node_log_for_spreadsheet( + *sspreadsheet)) { + geometry_set = viewer_log->geometry; } } } @@ -495,27 +488,11 @@ static void find_fields_to_evaluate(const SpaceSpreadsheet *sspreadsheet, /* No viewer is currently referenced by the context path. 
*/ return; } - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_spreadsheet_editor_context( - *sspreadsheet); - if (node_log == nullptr) { - return; - } - for (const geo_log::SocketLog &socket_log : node_log->input_logs()) { - const geo_log::ValueLog *value_log = socket_log.value(); - if (value_log == nullptr) { - continue; - } - if (const geo_log::GFieldValueLog *field_value_log = - dynamic_cast<const geo_log::GFieldValueLog *>(value_log)) { - const GField &field = field_value_log->field(); - if (field) { - r_fields.add("Viewer", std::move(field)); - } - } - if (const geo_log::GenericValueLog *generic_value_log = - dynamic_cast<const geo_log::GenericValueLog *>(value_log)) { - GPointer value = generic_value_log->value(); - r_fields.add("Viewer", fn::make_constant_field(*value.type(), value.get())); + if (const ViewerNodeLog *viewer_log = + nodes::geo_eval_log::GeoModifierLog::find_viewer_node_log_for_spreadsheet( + *sspreadsheet)) { + if (viewer_log->field) { + r_fields.add("Viewer", viewer_log->field); } } } diff --git a/source/blender/editors/transform/transform_convert_action.c b/source/blender/editors/transform/transform_convert_action.c index 252f150995e..83e47d9acc0 100644 --- a/source/blender/editors/transform/transform_convert_action.c +++ b/source/blender/editors/transform/transform_convert_action.c @@ -902,18 +902,18 @@ static void special_aftertrans_update__actedit(bContext *C, TransInfo *t) if (ELEM(t->frame_side, 'L', 'R')) { /* TFM_TIME_EXTEND */ /* same as below */ ED_markers_post_apply_transform( - ED_context_get_markers(C), t->scene, t->mode, t->values[0], t->frame_side); + ED_context_get_markers(C), t->scene, t->mode, t->values_final[0], t->frame_side); } else /* TFM_TIME_TRANSLATE */ #endif { ED_markers_post_apply_transform( - ED_context_get_markers(C), t->scene, t->mode, t->values[0], t->frame_side); + ED_context_get_markers(C), t->scene, t->mode, t->values_final[0], t->frame_side); } } else if (t->mode == TFM_TIME_SCALE) { 
ED_markers_post_apply_transform( - ED_context_get_markers(C), t->scene, t->mode, t->values[0], t->frame_side); + ED_context_get_markers(C), t->scene, t->mode, t->values_final[0], t->frame_side); } } diff --git a/source/blender/editors/transform/transform_convert_mesh_uv.c b/source/blender/editors/transform/transform_convert_mesh_uv.c index f3bef2c283b..27f12137e3a 100644 --- a/source/blender/editors/transform/transform_convert_mesh_uv.c +++ b/source/blender/editors/transform/transform_convert_mesh_uv.c @@ -265,7 +265,7 @@ static void createTransUVs(bContext *C, TransInfo *t) /* count */ if (is_island_center) { /* create element map with island information */ - elementmap = BM_uv_element_map_create(em->bm, scene, true, false, true); + elementmap = BM_uv_element_map_create(em->bm, scene, true, false, true, true); if (elementmap == NULL) { continue; } diff --git a/source/blender/editors/transform/transform_convert_sequencer.c b/source/blender/editors/transform/transform_convert_sequencer.c index eefc9d0cc2a..ddc99caeef5 100644 --- a/source/blender/editors/transform/transform_convert_sequencer.c +++ b/source/blender/editors/transform/transform_convert_sequencer.c @@ -708,12 +708,12 @@ static void special_aftertrans_update__sequencer(bContext *UNUSED(C), TransInfo if (t->mode == TFM_SEQ_SLIDE) { if (t->frame_side == 'B') { ED_markers_post_apply_transform( - &t->scene->markers, t->scene, TFM_TIME_TRANSLATE, t->values[0], t->frame_side); + &t->scene->markers, t->scene, TFM_TIME_TRANSLATE, t->values_final[0], t->frame_side); } } else if (ELEM(t->frame_side, 'L', 'R')) { ED_markers_post_apply_transform( - &t->scene->markers, t->scene, TFM_TIME_EXTEND, t->values[0], t->frame_side); + &t->scene->markers, t->scene, TFM_TIME_EXTEND, t->values_final[0], t->frame_side); } } } diff --git a/source/blender/editors/util/ed_util.c b/source/blender/editors/util/ed_util.c index 2f268d4ae23..e70851aedd6 100644 --- a/source/blender/editors/util/ed_util.c +++ 
b/source/blender/editors/util/ed_util.c @@ -377,7 +377,7 @@ void unpack_menu(bContext *C, char local_name[FILE_MAXDIR + FILE_MAX], fi[FILE_MAX]; BLI_split_file_part(abs_name, fi, sizeof(fi)); - BLI_snprintf(local_name, sizeof(local_name), "//%s/%s", folder, fi); + BLI_path_join(local_name, sizeof(local_name), "//", folder, fi, NULL); if (!STREQ(abs_name, local_name)) { switch (BKE_packedfile_compare_to_file(blendfile_path, local_name, pf)) { case PF_CMP_NOFILE: diff --git a/source/blender/editors/uvedit/uvedit_ops.c b/source/blender/editors/uvedit/uvedit_ops.c index 795e212fb0c..5e2d9097abd 100644 --- a/source/blender/editors/uvedit/uvedit_ops.c +++ b/source/blender/editors/uvedit/uvedit_ops.c @@ -535,7 +535,7 @@ static bool uvedit_uv_straighten(Scene *scene, BMesh *bm, eUVWeldAlign tool) return false; } - UvElementMap *element_map = BM_uv_element_map_create(bm, scene, true, false, true); + UvElementMap *element_map = BM_uv_element_map_create(bm, scene, true, false, true, true); if (element_map == NULL) { return false; } diff --git a/source/blender/editors/uvedit/uvedit_select.c b/source/blender/editors/uvedit/uvedit_select.c index 939930bd4c2..6c8fb9360bd 100644 --- a/source/blender/editors/uvedit/uvedit_select.c +++ b/source/blender/editors/uvedit/uvedit_select.c @@ -5386,7 +5386,7 @@ static void uv_isolate_selected_islands(const Scene *scene, BLI_assert((scene->toolsettings->uv_flag & UV_SYNC_SELECTION) == 0); BMFace *efa; BMIter iter, liter; - UvElementMap *elementmap = BM_uv_element_map_create(em->bm, scene, false, false, true); + UvElementMap *elementmap = BM_uv_element_map_create(em->bm, scene, false, false, true, true); if (elementmap == NULL) { return; } diff --git a/source/blender/editors/uvedit/uvedit_smart_stitch.c b/source/blender/editors/uvedit/uvedit_smart_stitch.c index f56c63f47b5..05b98ab9627 100644 --- a/source/blender/editors/uvedit/uvedit_smart_stitch.c +++ b/source/blender/editors/uvedit/uvedit_smart_stitch.c @@ -1855,7 +1855,7 @@ static 
StitchState *stitch_init(bContext *C, * for stitch this isn't useful behavior, see T86924. */ const int selectmode_orig = scene->toolsettings->selectmode; scene->toolsettings->selectmode = SCE_SELECT_VERTEX; - state->element_map = BM_uv_element_map_create(state->em->bm, scene, false, true, true); + state->element_map = BM_uv_element_map_create(state->em->bm, scene, false, true, true, true); scene->toolsettings->selectmode = selectmode_orig; if (!state->element_map) { diff --git a/source/blender/functions/CMakeLists.txt b/source/blender/functions/CMakeLists.txt index f1298a7f5b7..3d153813425 100644 --- a/source/blender/functions/CMakeLists.txt +++ b/source/blender/functions/CMakeLists.txt @@ -13,6 +13,10 @@ set(INC_SYS set(SRC intern/cpp_types.cc intern/field.cc + intern/lazy_function.cc + intern/lazy_function_execute.cc + intern/lazy_function_graph.cc + intern/lazy_function_graph_executor.cc intern/multi_function.cc intern/multi_function_builder.cc intern/multi_function_params.cc @@ -23,6 +27,10 @@ set(SRC FN_field.hh FN_field_cpp_type.hh + FN_lazy_function.hh + FN_lazy_function_execute.hh + FN_lazy_function_graph.hh + FN_lazy_function_graph_executor.hh FN_multi_function.hh FN_multi_function_builder.hh FN_multi_function_context.hh @@ -61,6 +69,7 @@ blender_add_lib(bf_functions "${SRC}" "${INC}" "${INC_SYS}" "${LIB}") if(WITH_GTESTS) set(TEST_SRC tests/FN_field_test.cc + tests/FN_lazy_function_test.cc tests/FN_multi_function_procedure_test.cc tests/FN_multi_function_test.cc diff --git a/source/blender/functions/FN_field.hh b/source/blender/functions/FN_field.hh index bc42cab8db5..ca12f407e49 100644 --- a/source/blender/functions/FN_field.hh +++ b/source/blender/functions/FN_field.hh @@ -565,6 +565,17 @@ template<typename T> struct ValueOrField { } return this->value; } + + friend std::ostream &operator<<(std::ostream &stream, const ValueOrField<T> &value_or_field) + { + if (value_or_field.field) { + stream << "ValueOrField<T>"; + } + else { + stream << 
value_or_field.value; + } + return stream; + } }; /** \} */ diff --git a/source/blender/functions/FN_field_cpp_type.hh b/source/blender/functions/FN_field_cpp_type.hh index 63a648f3202..6900a093dc6 100644 --- a/source/blender/functions/FN_field_cpp_type.hh +++ b/source/blender/functions/FN_field_cpp_type.hh @@ -59,7 +59,7 @@ class ValueOrFieldCPPType : public CPPType { public: template<typename T> ValueOrFieldCPPType(FieldCPPTypeParam<ValueOrField<T>> /* unused */, StringRef debug_name) - : CPPType(CPPTypeParam<ValueOrField<T>, CPPTypeFlags::None>(), debug_name), + : CPPType(CPPTypeParam<ValueOrField<T>, CPPTypeFlags::Printable>(), debug_name), base_type_(CPPType::get<T>()) { construct_from_value_ = [](void *dst, const void *value_or_field) { diff --git a/source/blender/functions/FN_lazy_function.hh b/source/blender/functions/FN_lazy_function.hh new file mode 100644 index 00000000000..59a3a90b0b0 --- /dev/null +++ b/source/blender/functions/FN_lazy_function.hh @@ -0,0 +1,384 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup fn + * + * A `LazyFunction` encapsulates a computation which has inputs, outputs and potentially side + * effects. Most importantly, a `LazyFunction` supports laziness in its inputs and outputs: + * - Only outputs that are actually used have to be computed. + * - Inputs can be requested lazily based on which outputs are used or what side effects the + * function has. + * + * A lazy-function that uses laziness may be executed more than once. The most common example is + * the geometry nodes switch node. Depending on a condition input, it decides which one of the + * other inputs is actually used. From the perspective of the switch node, its execution works as + * follows: + * 1. The switch node is first executed. It sees that the output is used. Now it requests the + * condition input from the caller and exits. + * 2. 
Once the caller is able to provide the condition input the switch node is executed again. + * This time it retrieves the condition and requests one of the other inputs. Then the node + * exits again, giving back control to the caller. + * 3. When the caller computed the second requested input the switch node executes a last time. + * This time it retrieves the new input and forwards it to the output. + * + * In some sense, a lazy-function can be thought of like a state machine. Every time it is + * executed, it advances its state until all required outputs are ready. + * + * The lazy-function interface is designed to support composition of many such functions into a new + * lazy-functions, all while keeping the laziness working. For example, in geometry nodes a switch + * node in a node group should still be able to decide whether a node in the parent group will be + * executed or not. This is essential to avoid doing unnecessary work. + * + * The lazy-function system consists of multiple core components: + * - The interface of a lazy-function itself including its calling convention. + * - A graph data structure that allows composing many lazy-functions by connecting their inputs + * and outputs. + * - An executor that allows multi-threaded execution or such a graph. + */ + +#include "BLI_cpp_type.hh" +#include "BLI_generic_pointer.hh" +#include "BLI_linear_allocator.hh" +#include "BLI_vector.hh" + +namespace blender::fn::lazy_function { + +enum class ValueUsage { + /** + * The value is definitely used and therefore has to be computed. + */ + Used, + /** + * It's unknown whether this value will be used or not. Computing it is ok but the result may be + * discarded. + */ + Maybe, + /** + * The value will definitely not be used. It can still be computed but the result will be + * discarded in all cases. + */ + Unused, +}; + +class LazyFunction; + +/** + * This allows passing arbitrary data into a lazy-function during execution. 
For that, #UserData + * has to be subclassed. This mainly exists because it's more type safe than passing a `void *` + * with no type information attached. + * + * Some lazy-functions may expect to find a certain type of user data when executed. + */ +class UserData { + public: + virtual ~UserData() = default; +}; + +/** + * Passed to the lazy-function when it is executed. + */ +struct Context { + /** + * If the lazy-function has some state (which only makes sense when it is executed more than once + * to finish its job), the state is stored here. This points to memory returned from + * #LazyFunction::init_storage. + */ + void *storage; + /** + * Custom user data that can be used in the function. + */ + UserData *user_data; +}; + +/** + * Defines the calling convention for a lazy-function. During execution, a lazy-function retrieves + * its inputs and sets the outputs through #Params. + */ +class Params { + public: + /** + * The lazy-function this #Params has been prepared for. + */ + const LazyFunction &fn_; + + public: + Params(const LazyFunction &fn); + + /** + * Get a pointer to an input value if the value is available already. Otherwise null is returned. + * + * The #LazyFunction must leave returned object in an initialized state, but can move from it. + */ + void *try_get_input_data_ptr(int index) const; + + /** + * Same as #try_get_input_data_ptr, but if the data is not yet available, request it. This makes + * sure that the data will be available in a future execution of the #LazyFunction. + */ + void *try_get_input_data_ptr_or_request(int index); + + /** + * Get a pointer to where the output value should be stored. + * The value at the pointer is in an uninitialized state at first. + * The #LazyFunction is responsible for initializing the value. + * After the output has been initialized to its final value, #output_set has to be called. + */ + void *get_output_data_ptr(int index); + + /** + * Call this after the output value is initialized. 
After this is called, the value must not be + * touched anymore. It may be moved or destructed immediately. + */ + void output_set(int index); + + /** + * Allows the #LazyFunction to check whether an output was computed already without keeping + * track of it itself. + */ + bool output_was_set(int index) const; + + /** + * Can be used to detect which outputs have to be computed. + */ + ValueUsage get_output_usage(int index) const; + + /** + * Tell the caller of the #LazyFunction that a specific input will definitely not be used. + * Only an input that was not #ValueUsage::Used can become unused. + */ + void set_input_unused(int index); + + /** + * Typed utility methods that wrap the methods above. + */ + template<typename T> T extract_input(int index); + template<typename T> const T &get_input(int index); + template<typename T> T *try_get_input_data_ptr_or_request(int index); + template<typename T> void set_output(int index, T &&value); + + /** + * Utility to initialize all outputs that haven't been set yet. + */ + void set_default_remaining_outputs(); + + private: + /** + * Methods that need to be implemented by subclasses. Those are separate from the non-virtual + * methods above to make it easy to insert additional debugging logic on top of the + * implementations. + */ + virtual void *try_get_input_data_ptr_impl(int index) const = 0; + virtual void *try_get_input_data_ptr_or_request_impl(int index) = 0; + virtual void *get_output_data_ptr_impl(int index) = 0; + virtual void output_set_impl(int index) = 0; + virtual bool output_was_set_impl(int index) const = 0; + virtual ValueUsage get_output_usage_impl(int index) const = 0; + virtual void set_input_unused_impl(int index) = 0; +}; + +/** + * Describes an input of a #LazyFunction. + */ +struct Input { + /** + * Name used for debugging purposes. The string has to be static or has to be owned by something + * else. + */ + const char *debug_name; + /** + * Data type of this input. 
+ */ + const CPPType *type; + /** + * Can be used to indicate a caller or this function if this input is used statically before + * executing it the first time. This is technically not needed but can improve efficiency because + * a round-trip through the `execute` method can be avoided. + * + * When this is #ValueUsage::Used, the caller has to ensure that the input is definitely + * available when the #execute method is first called. The #execute method does not have to check + * whether the value is actually available. + */ + ValueUsage usage; + + Input(const char *debug_name, const CPPType &type, const ValueUsage usage = ValueUsage::Used) + : debug_name(debug_name), type(&type), usage(usage) + { + } +}; + +struct Output { + /** + * Name used for debugging purposes. The string has to be static or has to be owned by something + * else. + */ + const char *debug_name; + /** + * Data type of this output. + */ + const CPPType *type = nullptr; + + Output(const char *debug_name, const CPPType &type) : debug_name(debug_name), type(&type) + { + } +}; + +/** + * A function that can compute outputs and request inputs lazily. For more details see the comment + * at the top of the file. + */ +class LazyFunction { + protected: + const char *debug_name_ = "<unknown>"; + Vector<Input> inputs_; + Vector<Output> outputs_; + + public: + virtual ~LazyFunction() = default; + + /** + * Get a name of the function or an input or output. This is mainly used for debugging. + * These are virtual functions because the names are often not used outside of debugging + * workflows. This way the names are only generated when they are actually needed. + */ + virtual std::string name() const; + virtual std::string input_name(int index) const; + virtual std::string output_name(int index) const; + + /** + * Allocates storage for this function. The storage will be passed to every call to #execute. + * If the function does not keep track of any state, this does not have to be implemented. 
+ */ + virtual void *init_storage(LinearAllocator<> &allocator) const; + + /** + * Destruct the storage created in #init_storage. + */ + virtual void destruct_storage(void *storage) const; + + /** + * Inputs of the function. + */ + Span<Input> inputs() const; + /** + * Outputs of the function. + */ + Span<Output> outputs() const; + + /** + * During execution the function retrieves inputs and sets outputs in #params. For some + * functions, this method is called more than once. After execution, the function either has + * computed all required outputs or is waiting for more inputs. + */ + void execute(Params ¶ms, const Context &context) const; + + /** + * Utility to check that the guarantee by #Input::usage is followed. + */ + bool always_used_inputs_available(const Params ¶ms) const; + + private: + /** + * Needs to be implemented by subclasses. This is separate from #execute so that additional + * debugging logic can be implemented in #execute. + */ + virtual void execute_impl(Params ¶ms, const Context &context) const = 0; +}; + +/* -------------------------------------------------------------------- */ +/** \name #LazyFunction Inline Methods + * \{ */ + +inline Span<Input> LazyFunction::inputs() const +{ + return inputs_; +} + +inline Span<Output> LazyFunction::outputs() const +{ + return outputs_; +} + +inline void LazyFunction::execute(Params ¶ms, const Context &context) const +{ + BLI_assert(this->always_used_inputs_available(params)); + this->execute_impl(params, context); +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name #Params Inline Methods + * \{ */ + +inline Params::Params(const LazyFunction &fn) : fn_(fn) +{ +} + +inline void *Params::try_get_input_data_ptr(const int index) const +{ + return this->try_get_input_data_ptr_impl(index); +} + +inline void *Params::try_get_input_data_ptr_or_request(const int index) +{ + return this->try_get_input_data_ptr_or_request_impl(index); +} + +inline void 
*Params::get_output_data_ptr(const int index) +{ + return this->get_output_data_ptr_impl(index); +} + +inline void Params::output_set(const int index) +{ + this->output_set_impl(index); +} + +inline bool Params::output_was_set(const int index) const +{ + return this->output_was_set_impl(index); +} + +inline ValueUsage Params::get_output_usage(const int index) const +{ + return this->get_output_usage_impl(index); +} + +inline void Params::set_input_unused(const int index) +{ + this->set_input_unused_impl(index); +} + +template<typename T> inline T Params::extract_input(const int index) +{ + void *data = this->try_get_input_data_ptr(index); + BLI_assert(data != nullptr); + T return_value = std::move(*static_cast<T *>(data)); + return return_value; +} + +template<typename T> inline const T &Params::get_input(const int index) +{ + const void *data = this->try_get_input_data_ptr(index); + BLI_assert(data != nullptr); + return *static_cast<const T *>(data); +} + +template<typename T> inline T *Params::try_get_input_data_ptr_or_request(const int index) +{ + return static_cast<T *>(this->try_get_input_data_ptr_or_request(index)); +} + +template<typename T> inline void Params::set_output(const int index, T &&value) +{ + using DecayT = std::decay_t<T>; + void *data = this->get_output_data_ptr(index); + new (data) DecayT(std::forward<T>(value)); + this->output_set(index); +} + +/** \} */ + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/FN_lazy_function_execute.hh b/source/blender/functions/FN_lazy_function_execute.hh new file mode 100644 index 00000000000..a59d363a9d5 --- /dev/null +++ b/source/blender/functions/FN_lazy_function_execute.hh @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup fn + * + * This file contains common utilities for actually executing a lazy-function. 
+ */ + +#include "BLI_parameter_pack_utils.hh" + +#include "FN_lazy_function.hh" + +namespace blender::fn::lazy_function { + +/** + * Most basic implementation of #Params. It does not actually implement any logic for how to + * retrieve inputs or set outputs. Instead, code using #BasicParams has to implement that. + */ +class BasicParams : public Params { + private: + const Span<GMutablePointer> inputs_; + const Span<GMutablePointer> outputs_; + MutableSpan<std::optional<ValueUsage>> input_usages_; + Span<ValueUsage> output_usages_; + MutableSpan<bool> set_outputs_; + + public: + BasicParams(const LazyFunction &fn, + const Span<GMutablePointer> inputs, + const Span<GMutablePointer> outputs, + MutableSpan<std::optional<ValueUsage>> input_usages, + Span<ValueUsage> output_usages, + MutableSpan<bool> set_outputs); + + void *try_get_input_data_ptr_impl(const int index) const override; + void *try_get_input_data_ptr_or_request_impl(const int index) override; + void *get_output_data_ptr_impl(const int index) override; + void output_set_impl(const int index) override; + bool output_was_set_impl(const int index) const override; + ValueUsage get_output_usage_impl(const int index) const override; + void set_input_unused_impl(const int index) override; +}; + +namespace detail { + +/** + * Utility to implement #execute_lazy_function_eagerly. + */ +template<typename... Inputs, typename... Outputs, size_t... InIndices, size_t... 
OutIndices> +inline void execute_lazy_function_eagerly_impl( + const LazyFunction &fn, + UserData *user_data, + std::tuple<Inputs...> &inputs, + std::tuple<Outputs *...> &outputs, + std::index_sequence<InIndices...> /* in_indices */, + std::index_sequence<OutIndices...> /* out_indices */) +{ + constexpr size_t InputsNum = sizeof...(Inputs); + constexpr size_t OutputsNum = sizeof...(Outputs); + std::array<GMutablePointer, InputsNum> input_pointers; + std::array<GMutablePointer, OutputsNum> output_pointers; + std::array<std::optional<ValueUsage>, InputsNum> input_usages; + std::array<ValueUsage, OutputsNum> output_usages; + std::array<bool, OutputsNum> set_outputs; + ( + [&]() { + constexpr size_t I = InIndices; + using T = Inputs; + const CPPType &type = CPPType::get<T>(); + input_pointers[I] = {type, &std::get<I>(inputs)}; + }(), + ...); + ( + [&]() { + constexpr size_t I = OutIndices; + using T = Outputs; + const CPPType &type = CPPType::get<T>(); + output_pointers[I] = {type, std::get<I>(outputs)}; + }(), + ...); + output_usages.fill(ValueUsage::Used); + set_outputs.fill(false); + LinearAllocator<> allocator; + Context context; + context.user_data = user_data; + context.storage = fn.init_storage(allocator); + BasicParams params{ + fn, input_pointers, output_pointers, input_usages, output_usages, set_outputs}; + fn.execute(params, context); + fn.destruct_storage(context.storage); +} + +} // namespace detail + +/** + * In some cases (mainly for tests), the set of inputs and outputs for a lazy-function is known at + * compile time and one just wants to compute the outputs based on the inputs, without any + * laziness. + * + * This function does exactly that. It takes all inputs in a tuple and writes the outputs to points + * provided in a second tuple. Since all inputs have to be provided, the lazy-function has to + * compute all outputs. + */ +template<typename... Inputs, typename... 
/* SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

/** \file
 * \ingroup fn
 *
 * This file contains a graph data structure that allows composing multiple lazy-functions into a
 * combined lazy-function.
 *
 * There are two types of nodes in the graph:
 * - #FunctionNode: Corresponds to a #LazyFunction. The inputs and outputs of the function become
 *   input and output sockets of the node.
 * - #DummyNode: Is used to indicate inputs and outputs of the entire graph. It can have an
 *   arbitrary number of sockets.
 */

#include "BLI_linear_allocator.hh"

#include "FN_lazy_function.hh"

namespace blender::fn::lazy_function {

class Socket;
class InputSocket;
class OutputSocket;
class Node;
class Graph;

/**
 * A #Socket is the interface of a #Node. Every #Socket is either an #InputSocket or #OutputSocket.
 * Links can be created from output sockets to input sockets.
 */
class Socket : NonCopyable, NonMovable {
 protected:
  /**
   * The node the socket belongs to.
   */
  Node *node_;
  /**
   * Data type of the socket. Only sockets with the same type can be linked.
   */
  const CPPType *type_;
  /**
   * Indicates whether this is an #InputSocket or #OutputSocket.
   */
  bool is_input_;
  /**
   * Index of the socket. E.g. 0 for the first input and the first output socket.
   */
  int index_in_node_;

  /* Sockets are constructed and wired up by #Graph only. */
  friend Graph;

 public:
  bool is_input() const;
  bool is_output() const;

  /** Index of this socket within the inputs (or outputs) of its node. */
  int index() const;

  /* Checked down-casts; the direction is asserted in debug builds. */
  InputSocket &as_input();
  OutputSocket &as_output();
  const InputSocket &as_input() const;
  const OutputSocket &as_output() const;

  const Node &node() const;
  Node &node();

  const CPPType &type() const;

  std::string name() const;
};

class InputSocket : public Socket {
 private:
  /**
   * An input can have at most one link connected to it. The linked socket is the "origin" because
   * it's where the data is coming from. The type of the origin must be the same as the type of
   * this socket.
   */
  OutputSocket *origin_;
  /**
   * Can be null or a non-owning pointer to a value of the type of the socket. This value will be
   * used when the input is used but not linked.
   *
   * This is technically not needed, because one could just create a separate node that just
   * outputs the value, but that would have more overhead. Especially because it's commonly the
   * case that most inputs are unlinked.
   */
  const void *default_value_ = nullptr;

  friend Graph;

 public:
  OutputSocket *origin();
  const OutputSocket *origin() const;

  const void *default_value() const;
  void set_default_value(const void *value);
};

class OutputSocket : public Socket {
 private:
  /**
   * An output can be linked to an arbitrary number of inputs of the same type.
   */
  Vector<InputSocket *> targets_;

  friend Graph;

 public:
  Span<InputSocket *> targets();
  Span<const InputSocket *> targets() const;
};

/**
 * A #Node has input and output sockets. Every node is either a #FunctionNode or a #DummyNode.
 */
class Node : NonCopyable, NonMovable {
 protected:
  /**
   * The function this node corresponds to. If this is null, the node is a #DummyNode.
   * The function is not owned by this #Node nor by the #Graph.
   */
  const LazyFunction *fn_ = nullptr;
  /**
   * Input sockets of the node.
   */
  Span<InputSocket *> inputs_;
  /**
   * Output sockets of the node.
   */
  Span<OutputSocket *> outputs_;
  /**
   * An index that is set when calling #Graph::update_node_indices. This can be used to create
   * efficient mappings from nodes to other data using just an array instead of a hash map.
   *
   * This is technically not necessary but has better performance than always using hash maps.
   */
  int index_in_graph_ = -1;

  friend Graph;

 public:
  bool is_dummy() const;
  bool is_function() const;
  int index_in_graph() const;

  Span<const InputSocket *> inputs() const;
  Span<const OutputSocket *> outputs() const;
  Span<InputSocket *> inputs();
  Span<OutputSocket *> outputs();

  const InputSocket &input(int index) const;
  const OutputSocket &output(int index) const;
  InputSocket &input(int index);
  OutputSocket &output(int index);

  std::string name() const;
};

/**
 * A #Node that corresponds to a specific #LazyFunction.
 */
class FunctionNode : public Node {
 public:
  const LazyFunction &function() const;
};

/**
 * A #Node that does *not* correspond to a #LazyFunction. Instead it can be used to indicate inputs
 * and outputs of the entire graph. It can have an arbitrary number of inputs and outputs.
 */
class DummyNode : public Node {
 private:
  std::string name_;

  /* #Node::name reads #name_ for dummy nodes. */
  friend Node;
};

/**
 * A container for an arbitrary number of nodes and links between their sockets.
 */
class Graph : NonCopyable, NonMovable {
 private:
  /**
   * Used to allocate nodes and sockets in the graph.
   */
  LinearAllocator<> allocator_;
  /**
   * Contains all nodes in the graph so that it is efficient to iterate over them.
   */
  Vector<Node *> nodes_;

 public:
  ~Graph();

  /**
   * Get all nodes in the graph. The index in the span corresponds to #Node::index_in_graph.
   */
  Span<const Node *> nodes() const;

  /**
   * Add a new function node with sockets that match the passed in #LazyFunction.
   */
  FunctionNode &add_function(const LazyFunction &fn);

  /**
   * Add a new dummy node with the given socket types.
   */
  DummyNode &add_dummy(Span<const CPPType *> input_types, Span<const CPPType *> output_types);

  /**
   * Add a link between the two given sockets.
   * This has undefined behavior when the input is linked to something else already.
   */
  void add_link(OutputSocket &from, InputSocket &to);

  /**
   * Make sure that #Node::index_in_graph is up to date.
   */
  void update_node_indices();

  /**
   * Can be used to assert that #update_node_indices has been called.
   */
  bool node_indices_are_valid() const;

  /**
   * Utility to generate a dot graph string for the graph. This can be used for debugging.
   */
  std::string to_dot() const;
};

/* -------------------------------------------------------------------- */
/** \name #Socket Inline Methods
 * \{ */

inline bool Socket::is_input() const
{
  return is_input_;
}

inline bool Socket::is_output() const
{
  return !is_input_;
}

inline int Socket::index() const
{
  return index_in_node_;
}

inline InputSocket &Socket::as_input()
{
  BLI_assert(this->is_input());
  return *static_cast<InputSocket *>(this);
}

inline OutputSocket &Socket::as_output()
{
  BLI_assert(this->is_output());
  return *static_cast<OutputSocket *>(this);
}

inline const InputSocket &Socket::as_input() const
{
  BLI_assert(this->is_input());
  return *static_cast<const InputSocket *>(this);
}

inline const OutputSocket &Socket::as_output() const
{
  BLI_assert(this->is_output());
  return *static_cast<const OutputSocket *>(this);
}

inline const Node &Socket::node() const
{
  return *node_;
}

inline Node &Socket::node()
{
  return *node_;
}

inline const CPPType &Socket::type() const
{
  return *type_;
}

/** \} */
/* -------------------------------------------------------------------- */
/** \name #InputSocket Inline Methods
 * \{ */

inline const OutputSocket *InputSocket::origin() const
{
  return origin_;
}

inline OutputSocket *InputSocket::origin()
{
  return origin_;
}

inline const void *InputSocket::default_value() const
{
  return default_value_;
}

inline void InputSocket::set_default_value(const void *value)
{
  default_value_ = value;
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name #OutputSocket Inline Methods
 * \{ */

inline Span<const InputSocket *> OutputSocket::targets() const
{
  return targets_;
}

inline Span<InputSocket *> OutputSocket::targets()
{
  return targets_;
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name #Node Inline Methods
 * \{ */

inline bool Node::is_dummy() const
{
  return fn_ == nullptr;
}

inline bool Node::is_function() const
{
  return fn_ != nullptr;
}

inline int Node::index_in_graph() const
{
  return index_in_graph_;
}

inline Span<const InputSocket *> Node::inputs() const
{
  return inputs_;
}

inline Span<const OutputSocket *> Node::outputs() const
{
  return outputs_;
}

inline Span<InputSocket *> Node::inputs()
{
  return inputs_;
}

inline Span<OutputSocket *> Node::outputs()
{
  return outputs_;
}

inline const InputSocket &Node::input(const int index) const
{
  return *inputs_[index];
}

inline const OutputSocket &Node::output(const int index) const
{
  return *outputs_[index];
}

inline InputSocket &Node::input(const int index)
{
  return *inputs_[index];
}

inline OutputSocket &Node::output(const int index)
{
  return *outputs_[index];
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name #FunctionNode Inline Methods
 * \{ */

inline const LazyFunction &FunctionNode::function() const
{
  /* Only valid on function nodes; dummy nodes have a null function. */
  BLI_assert(fn_ != nullptr);
  return *fn_;
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name #Graph Inline Methods
 * \{ */

inline Span<const Node *> Graph::nodes() const
{
  return nodes_;
}

/** \} */

}  // namespace blender::fn::lazy_function

/* SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

/** \file
 * \ingroup fn
 *
 * This file provides means to create a #LazyFunction from #Graph (which could then e.g. be used in
 * another #Graph again).
 */

#include "BLI_vector.hh"
#include "BLI_vector_set.hh"

#include "FN_lazy_function_graph.hh"

namespace blender::fn::lazy_function {

/**
 * Can be implemented to log values produced during graph evaluation.
 * The default implementations of these hooks are provided elsewhere; subclasses override only what
 * they need.
 */
class GraphExecutorLogger {
 public:
  virtual ~GraphExecutorLogger() = default;

  virtual void log_socket_value(const Socket &socket,
                                GPointer value,
                                const Context &context) const;

  virtual void log_before_node_execute(const FunctionNode &node,
                                       const Params &params,
                                       const Context &context) const;

  virtual void log_after_node_execute(const FunctionNode &node,
                                      const Params &params,
                                      const Context &context) const;

  virtual void dump_when_outputs_are_missing(const FunctionNode &node,
                                             Span<const OutputSocket *> missing_sockets,
                                             const Context &context) const;
  virtual void dump_when_input_is_set_twice(const InputSocket &target_socket,
                                            const OutputSocket &from_socket,
                                            const Context &context) const;
};

/**
 * Has to be implemented when some of the nodes in the graph may have side effects. The
 * #GraphExecutor has to know about that to make sure that these nodes will be executed even though
 * their outputs are not needed.
 */
class GraphExecutorSideEffectProvider {
 public:
  virtual ~GraphExecutorSideEffectProvider() = default;
  virtual Vector<const FunctionNode *> get_nodes_with_side_effects(const Context &context) const;
};

class GraphExecutor : public LazyFunction {
 public:
  using Logger = GraphExecutorLogger;
  using SideEffectProvider = GraphExecutorSideEffectProvider;

 private:
  /**
   * The graph that is evaluated.
   */
  const Graph &graph_;
  /**
   * Input and output sockets of the entire graph.
   */
  VectorSet<const OutputSocket *> graph_inputs_;
  VectorSet<const InputSocket *> graph_outputs_;
  /**
   * Optional logger for events that happen during execution.
   */
  const Logger *logger_;
  /**
   * Optional side effect provider. It knows which nodes have side effects based on the context
   * during evaluation.
   */
  const SideEffectProvider *side_effect_provider_;

  /* The implementation class in the .cc file accesses the private members above. */
  friend class Executor;

 public:
  GraphExecutor(const Graph &graph,
                Span<const OutputSocket *> graph_inputs,
                Span<const InputSocket *> graph_outputs,
                const Logger *logger,
                const SideEffectProvider *side_effect_provider);

  void *init_storage(LinearAllocator<> &allocator) const override;
  void destruct_storage(void *storage) const override;

 private:
  void execute_impl(Params &params, const Context &context) const override;
};

}  // namespace blender::fn::lazy_function
/* SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup fn
 */

#include "BLI_array.hh"

#include "FN_lazy_function.hh"

namespace blender::fn::lazy_function {

std::string LazyFunction::name() const
{
  return debug_name_;
}

std::string LazyFunction::input_name(int index) const
{
  return inputs_[index].debug_name;
}

std::string LazyFunction::output_name(int index) const
{
  return outputs_[index].debug_name;
}

/* By default, a lazy-function has no custom storage. */
void *LazyFunction::init_storage(LinearAllocator<> &UNUSED(allocator)) const
{
  return nullptr;
}

/* The default implementation only has to verify that the default #init_storage was used. */
void LazyFunction::destruct_storage(void *storage) const
{
  BLI_assert(storage == nullptr);
  UNUSED_VARS_NDEBUG(storage);
}

/* True when every input declared as #ValueUsage::Used has already been provided in #params. */
bool LazyFunction::always_used_inputs_available(const Params &params) const
{
  for (const int i : inputs_.index_range()) {
    const Input &fn_input = inputs_[i];
    if (fn_input.usage == ValueUsage::Used) {
      if (params.try_get_input_data_ptr(i) == nullptr) {
        return false;
      }
    }
  }
  return true;
}

/* Value-initialize every output that has not been computed yet and mark it as set. */
void Params::set_default_remaining_outputs()
{
  for (const int i : fn_.outputs().index_range()) {
    if (this->output_was_set(i)) {
      continue;
    }
    const Output &fn_output = fn_.outputs()[i];
    const CPPType &type = *fn_output.type;
    void *data_ptr = this->get_output_data_ptr(i);
    type.value_initialize(data_ptr);
    this->output_set(i);
  }
}

}  // namespace blender::fn::lazy_function

/* SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup fn
 */

#include "FN_lazy_function_execute.hh"

namespace blender::fn::lazy_function {

/* All spans are borrowed; they must outlive the #BasicParams instance. */
BasicParams::BasicParams(const LazyFunction &fn,
                         const Span<GMutablePointer> inputs,
                         const Span<GMutablePointer> outputs,
                         MutableSpan<std::optional<ValueUsage>> input_usages,
                         Span<ValueUsage> output_usages,
                         MutableSpan<bool> set_outputs)
    : Params(fn),
      inputs_(inputs),
      outputs_(outputs),
      input_usages_(input_usages),
      output_usages_(output_usages),
      set_outputs_(set_outputs)
{
}

void *BasicParams::try_get_input_data_ptr_impl(const int index) const
{
  return inputs_[index].get();
}

/* Requesting an input that is missing records it as used so the caller can provide it later. */
void *BasicParams::try_get_input_data_ptr_or_request_impl(const int index)
{
  void *value = inputs_[index].get();
  if (value == nullptr) {
    input_usages_[index] = ValueUsage::Used;
  }
  return value;
}

void *BasicParams::get_output_data_ptr_impl(const int index)
{
  return outputs_[index].get();
}

void BasicParams::output_set_impl(const int index)
{
  set_outputs_[index] = true;
}

bool BasicParams::output_was_set_impl(const int index) const
{
  return set_outputs_[index];
}

ValueUsage BasicParams::get_output_usage_impl(const int index) const
{
  return output_usages_[index];
}

void BasicParams::set_input_unused_impl(const int index)
{
  input_usages_[index] = ValueUsage::Unused;
}

}  // namespace blender::fn::lazy_function
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "BLI_dot_export.hh"

#include "FN_lazy_function_graph.hh"

namespace blender::fn::lazy_function {

Graph::~Graph()
{
  /* Nodes and sockets live in #allocator_, so only their destructors have to be called here;
   * the memory itself is released by the allocator. */
  for (Node *node : nodes_) {
    for (InputSocket *socket : node->inputs_) {
      std::destroy_at(socket);
    }
    for (OutputSocket *socket : node->outputs_) {
      std::destroy_at(socket);
    }
    std::destroy_at(node);
  }
}

FunctionNode &Graph::add_function(const LazyFunction &fn)
{
  const Span<Input> inputs = fn.inputs();
  const Span<Output> outputs = fn.outputs();

  FunctionNode &node = *allocator_.construct<FunctionNode>().release();
  node.fn_ = &fn;
  node.inputs_ = allocator_.construct_elements_and_pointer_array<InputSocket>(inputs.size());
  node.outputs_ = allocator_.construct_elements_and_pointer_array<OutputSocket>(outputs.size());

  /* Mirror the function's inputs/outputs as sockets on the node. */
  for (const int i : inputs.index_range()) {
    InputSocket &socket = *node.inputs_[i];
    socket.index_in_node_ = i;
    socket.is_input_ = true;
    socket.node_ = &node;
    socket.type_ = inputs[i].type;
  }
  for (const int i : outputs.index_range()) {
    OutputSocket &socket = *node.outputs_[i];
    socket.index_in_node_ = i;
    socket.is_input_ = false;
    socket.node_ = &node;
    socket.type_ = outputs[i].type;
  }

  nodes_.append(&node);
  return node;
}

DummyNode &Graph::add_dummy(Span<const CPPType *> input_types, Span<const CPPType *> output_types)
{
  DummyNode &node = *allocator_.construct<DummyNode>().release();
  node.fn_ = nullptr;
  node.inputs_ = allocator_.construct_elements_and_pointer_array<InputSocket>(input_types.size());
  node.outputs_ = allocator_.construct_elements_and_pointer_array<OutputSocket>(
      output_types.size());

  for (const int i : input_types.index_range()) {
    InputSocket &socket = *node.inputs_[i];
    socket.index_in_node_ = i;
    socket.is_input_ = true;
    socket.node_ = &node;
    socket.type_ = input_types[i];
  }
  for (const int i : output_types.index_range()) {
    OutputSocket &socket = *node.outputs_[i];
    socket.index_in_node_ = i;
    socket.is_input_ = false;
    socket.node_ = &node;
    socket.type_ = output_types[i];
  }

  nodes_.append(&node);
  return node;
}

void Graph::add_link(OutputSocket &from, InputSocket &to)
{
  /* An input may only have one origin, and only sockets of the same type can be linked. */
  BLI_assert(to.origin_ == nullptr);
  BLI_assert(from.type_ == to.type_);
  to.origin_ = &from;
  from.targets_.append(&to);
}

void Graph::update_node_indices()
{
  for (const int i : nodes_.index_range()) {
    nodes_[i]->index_in_graph_ = i;
  }
}

bool Graph::node_indices_are_valid() const
{
  for (const int i : nodes_.index_range()) {
    if (nodes_[i]->index_in_graph_ != i) {
      return false;
    }
  }
  return true;
}

std::string Socket::name() const
{
  /* Function nodes take socket names from their #LazyFunction; dummy nodes have no names. */
  if (node_->is_function()) {
    const FunctionNode &fn_node = static_cast<const FunctionNode &>(*node_);
    const LazyFunction &fn = fn_node.function();
    if (is_input_) {
      return fn.input_name(index_in_node_);
    }
    return fn.output_name(index_in_node_);
  }
  return "Unnamed";
}

std::string Node::name() const
{
  if (fn_ == nullptr) {
    return static_cast<const DummyNode *>(this)->name_;
  }
  return fn_->name();
}

std::string Graph::to_dot() const
{
  dot::DirectedGraph digraph;
  digraph.set_rankdir(dot::Attr_rankdir::LeftToRight);

  Map<const Node *, dot::NodeWithSocketsRef> dot_nodes;

  /* First pass: create one dot node per graph node, with a row per socket. */
  for (const Node *node : nodes_) {
    dot::Node &dot_node = digraph.new_node("");
    if (node->is_dummy()) {
      dot_node.set_background_color("lightblue");
    }
    else {
      dot_node.set_background_color("white");
    }

    Vector<std::string> input_names;
    Vector<std::string> output_names;
    for (const InputSocket *socket : node->inputs()) {
      input_names.append(socket->name());
    }
    for (const OutputSocket *socket : node->outputs()) {
      output_names.append(socket->name());
    }

    dot_nodes.add_new(node,
                      dot::NodeWithSocketsRef(dot_node, node->name(), input_names, output_names));
  }

  /* Second pass: add an edge per link; unlinked inputs with a default value get an extra
   * ellipse node showing that value (or its type when it's not printable). */
  for (const Node *node : nodes_) {
    for (const InputSocket *socket : node->inputs()) {
      const dot::NodeWithSocketsRef &to_dot_node = dot_nodes.lookup(&socket->node());
      const dot::NodePort to_dot_port = to_dot_node.input(socket->index());

      if (const OutputSocket *origin = socket->origin()) {
        dot::NodeWithSocketsRef &from_dot_node = dot_nodes.lookup(&origin->node());
        digraph.new_edge(from_dot_node.output(origin->index()), to_dot_port);
      }
      else if (const void *default_value = socket->default_value()) {
        const CPPType &type = socket->type();
        std::string value_string;
        if (type.is_printable()) {
          value_string = type.to_string(default_value);
        }
        else {
          value_string = "<" + type.name() + ">";
        }
        dot::Node &default_value_dot_node = digraph.new_node(value_string);
        default_value_dot_node.set_shape(dot::Attr_shape::Ellipse);
        digraph.new_edge(default_value_dot_node, to_dot_port);
      }
    }
  }

  return digraph.to_dot_string();
}

}  // namespace blender::fn::lazy_function
Those assumptions make the first and second objective less important + * respectively. + * + * The design implemented in this executor requires *no* main thread that coordinates everything. + * Instead, one thread will trigger some initial work and then many threads coordinate themselves + * in a distributed fashion. In an ideal situation, every thread ends up processing a separate part + * of the graph which results in less communication overhead. The way TBB schedules tasks helps + * with that: a thread will next process the task that it added to a task pool just before. + * + * Communication between threads is synchronized by using a mutex in every node. When a thread + * wants to access the state of a node, its mutex has to be locked first (with some documented + * exceptions). The assumption here is that most nodes are only ever touched by a single thread and + * therefore the lock contention is reduced the more nodes there are. + * + * Similar to how a #LazyFunction can be thought of as a state machine (see `FN_lazy_function.hh`), + * each node can also be thought of as a state machine. The state of a node contains the evaluation + * state of its inputs and outputs. Every time a node is executed, it has to advance its state in + * some way (e.g. it requests a new input or computes a new output). + * + * At the core of the executor is a task pool. Every task in that pool represents a node execution. + * When a node is executed it may send notifications to other nodes which may in turn add those + * nodes to the task pool. For example, the current node has computed one of its outputs, then the + * computed value is forwarded to all linked inputs, changing their node states in the process. If + * this input was the last missing required input, the node will be added to the task pool so that + * it is executed next. 
#include <mutex>

#include "BLI_compute_context.hh"
#include "BLI_enumerable_thread_specific.hh"
#include "BLI_function_ref.hh"
#include "BLI_task.h"
#include "BLI_task.hh"
#include "BLI_timeit.hh"

#include "FN_lazy_function_graph_executor.hh"

namespace blender::fn::lazy_function {

enum class NodeScheduleState {
  /**
   * Default state of every node.
   */
  NotScheduled,
  /**
   * The node has been added to the task pool or is otherwise scheduled to be executed in the
   * future.
   */
  Scheduled,
  /**
   * The node is currently running.
   */
  Running,
  /**
   * The node is running and has been rescheduled while running. In this case the node runs again.
   * This state exists, because we don't want to add the node to the task pool twice, because then
   * the node might run twice at the same time, which is not allowed. Instead, once the node is
   * done running, it will reschedule itself.
   */
  RunningAndRescheduled,
};

struct InputState {
  /**
   * Value of this input socket. By default, the value is empty. When other nodes are done
   * computing their outputs, the computed values will be forwarded to linked input sockets. The
   * value will then live here until it is found that it is not needed anymore.
   *
   * If #was_ready_for_execution is true, access does not require holding the node lock.
   */
  void *value = nullptr;
  /**
   * How the node intends to use this input. By default, all inputs may be used. Based on which
   * outputs are used, a node can decide that an input will definitely be used or is never used.
   * This allows freeing values early and avoids unnecessary computations.
   */
  ValueUsage usage = ValueUsage::Maybe;
  /**
   * Set to true once #value is set and will stay true afterwards. Access during execution of a
   * node does not require holding the node lock.
   */
  bool was_ready_for_execution = false;
};

struct OutputState {
  /**
   * Keeps track of how the output value is used. If a connected input becomes used, this output
   * has to become used as well. The output becomes unused when it is used by no input socket
   * anymore and it's not an output of the graph.
   */
  ValueUsage usage = ValueUsage::Maybe;
  /**
   * This is a copy of #usage that is done right before node execution starts. This is done so that
   * the node gets a consistent view of what outputs are used, even when this changes while the
   * node is running (the node might be reevaluated in that case). Access during execution of a
   * node does not require holding the node lock.
   */
  ValueUsage usage_for_execution = ValueUsage::Maybe;
  /**
   * Number of linked sockets that might still use the value of this output.
   */
  int potential_target_sockets = 0;
  /**
   * Is set to true once the output has been computed and then stays true. Access does not require
   * holding the node lock.
   */
  bool has_been_computed = false;
  /**
   * Holds the output value for a short period of time while the node is initializing it and before
   * it's forwarded to input sockets. Access does not require holding the node lock.
   */
  void *value = nullptr;
};

struct NodeState {
  /**
   * Needs to be locked when any data in this state is accessed that is not explicitly marked as
   * not needing the lock.
   */
  mutable std::mutex mutex;
  /**
   * States of the individual input and output sockets. One can index into these arrays without
   * locking. However, to access data inside, a lock is needed unless noted otherwise.
   */
  MutableSpan<InputState> inputs;
  MutableSpan<OutputState> outputs;
  /**
   * Counts the number of inputs that still have to be provided to this node, until it should run
   * again. This is used as an optimization so that nodes are not scheduled unnecessarily in many
   * cases.
   */
  int missing_required_inputs = 0;
  /**
   * Is set to true once the node is done with its work, i.e. when all outputs that may be used
   * have been computed.
   */
  bool node_has_finished = false;
  /**
   * Set to true once the node is done running for the first time.
   */
  bool had_initialization = true;
  /**
   * Nodes with side effects should always be executed when their required inputs have been
   * computed.
   */
  bool has_side_effects = false;
  /**
   * A node is always in one specific schedule state. This helps to ensure that the same node does
   * not run twice at the same time accidentally.
   */
  NodeScheduleState schedule_state = NodeScheduleState::NotScheduled;
  /**
   * Custom storage of the node.
   */
  void *storage = nullptr;
};
+ */ + Vector<const OutputSocket *> delayed_required_outputs; + Vector<const OutputSocket *> delayed_unused_outputs; + Vector<const FunctionNode *> delayed_scheduled_nodes; + + LockedNode(const Node &node, NodeState &node_state) : node(node), node_state(node_state) + { + } +}; + +struct CurrentTask { + /** + * The node that should be run on the same thread after the current node is done. This avoids + * some overhead by skipping a round trip through the task pool. + */ + std::atomic<const FunctionNode *> next_node = nullptr; + /** + * Indicates that some node has been added to the task pool. + */ + std::atomic<bool> added_node_to_pool = false; +}; + +class GraphExecutorLFParams; + +class Executor { + private: + const GraphExecutor &self_; + /** + * Remembers which inputs have been loaded from the caller already, to avoid loading them twice. + * Atomics are used to make sure that every input is only retrieved once. + */ + Array<std::atomic<uint8_t>> loaded_inputs_; + /** + * State of every node, indexed by #Node::index_in_graph. + */ + Array<NodeState *> node_states_; + /** + * Parameters provided by the caller. This is always non-null, while a node is running. + */ + Params *params_ = nullptr; + const Context *context_ = nullptr; + /** + * Used to distribute work on separate nodes to separate threads. + */ + TaskPool *task_pool_ = nullptr; + /** + * A separate linear allocator for every thread. We could potentially reuse some memory, but that + * doesn't seem worth it yet. + */ + threading::EnumerableThreadSpecific<LinearAllocator<>> local_allocators_; + /** + * Set to false when the first execution ends. + */ + bool is_first_execution_ = true; + + friend GraphExecutorLFParams; + + public: + Executor(const GraphExecutor &self) : self_(self), loaded_inputs_(self.graph_inputs_.size()) + { + /* The indices are necessary, because they are used as keys in #node_states_. 
*/ + BLI_assert(self_.graph_.node_indices_are_valid()); + } + + ~Executor() + { + BLI_task_pool_free(task_pool_); + threading::parallel_for(node_states_.index_range(), 1024, [&](const IndexRange range) { + for (const int node_index : range) { + const Node &node = *self_.graph_.nodes()[node_index]; + NodeState &node_state = *node_states_[node_index]; + this->destruct_node_state(node, node_state); + } + }); + } + + /** + * Main entry point to the execution of this graph. + */ + void execute(Params ¶ms, const Context &context) + { + params_ = ¶ms; + context_ = &context; + BLI_SCOPED_DEFER([&]() { + /* Make sure the #params_ pointer is not dangling, even when it shouldn't be accessed by + * anyone. */ + params_ = nullptr; + context_ = nullptr; + is_first_execution_ = false; + }); + + CurrentTask current_task; + if (is_first_execution_) { + this->initialize_node_states(); + task_pool_ = BLI_task_pool_create(this, TASK_PRIORITY_HIGH); + + /* Initialize atomics to zero. */ + memset(static_cast<void *>(loaded_inputs_.data()), 0, loaded_inputs_.size() * sizeof(bool)); + + this->set_always_unused_graph_inputs(); + this->set_defaulted_graph_outputs(); + this->schedule_side_effect_nodes(current_task); + } + + this->schedule_newly_requested_outputs(current_task); + this->forward_newly_provided_inputs(current_task); + + /* Avoid using task pool when there is no parallel work to do. */ + while (!current_task.added_node_to_pool) { + if (current_task.next_node == nullptr) { + /* Nothing to do. */ + return; + } + const FunctionNode &node = *current_task.next_node; + current_task.next_node = nullptr; + this->run_node_task(node, current_task); + } + if (current_task.next_node != nullptr) { + this->add_node_to_task_pool(*current_task.next_node); + } + + BLI_task_pool_work_and_wait(task_pool_); + } + + private: + void initialize_node_states() + { + Span<const Node *> nodes = self_.graph_.nodes(); + node_states_.reinitialize(nodes.size()); + + /* Construct all node states in parallel. 
*/ + threading::parallel_for(nodes.index_range(), 256, [&](const IndexRange range) { + LinearAllocator<> &allocator = local_allocators_.local(); + for (const int i : range) { + const Node &node = *nodes[i]; + NodeState &node_state = *allocator.construct<NodeState>().release(); + node_states_[i] = &node_state; + this->construct_initial_node_state(allocator, node, node_state); + } + }); + } + + void construct_initial_node_state(LinearAllocator<> &allocator, + const Node &node, + NodeState &node_state) + { + const Span<const InputSocket *> node_inputs = node.inputs(); + const Span<const OutputSocket *> node_outputs = node.outputs(); + + node_state.inputs = allocator.construct_array<InputState>(node_inputs.size()); + node_state.outputs = allocator.construct_array<OutputState>(node_outputs.size()); + + for (const int i : node_outputs.index_range()) { + OutputState &output_state = node_state.outputs[i]; + const OutputSocket &output_socket = *node_outputs[i]; + output_state.potential_target_sockets = output_socket.targets().size(); + if (output_state.potential_target_sockets == 0) { + output_state.usage = ValueUsage::Unused; + } + } + } + + void destruct_node_state(const Node &node, NodeState &node_state) + { + if (node.is_function()) { + const LazyFunction &fn = static_cast<const FunctionNode &>(node).function(); + if (node_state.storage != nullptr) { + fn.destruct_storage(node_state.storage); + } + } + for (const int i : node.inputs().index_range()) { + InputState &input_state = node_state.inputs[i]; + const InputSocket &input_socket = node.input(i); + this->destruct_input_value_if_exists(input_state, input_socket.type()); + } + std::destroy_at(&node_state); + } + + void schedule_newly_requested_outputs(CurrentTask ¤t_task) + { + for (const int graph_output_index : self_.graph_outputs_.index_range()) { + if (params_->get_output_usage(graph_output_index) != ValueUsage::Used) { + continue; + } + if (params_->output_was_set(graph_output_index)) { + continue; + } + const 
InputSocket &socket = *self_.graph_outputs_[graph_output_index]; + const Node &node = socket.node(); + NodeState &node_state = *node_states_[node.index_in_graph()]; + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + this->set_input_required(locked_node, socket); + }); + } + } + + void set_defaulted_graph_outputs() + { + for (const int graph_output_index : self_.graph_outputs_.index_range()) { + const InputSocket &socket = *self_.graph_outputs_[graph_output_index]; + if (socket.origin() != nullptr) { + continue; + } + const CPPType &type = socket.type(); + const void *default_value = socket.default_value(); + BLI_assert(default_value != nullptr); + + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(socket, {type, default_value}, *context_); + } + + void *output_ptr = params_->get_output_data_ptr(graph_output_index); + type.copy_construct(default_value, output_ptr); + params_->output_set(graph_output_index); + } + } + + void set_always_unused_graph_inputs() + { + for (const int i : self_.graph_inputs_.index_range()) { + const OutputSocket &socket = *self_.graph_inputs_[i]; + const Node &node = socket.node(); + const NodeState &node_state = *node_states_[node.index_in_graph()]; + const OutputState &output_state = node_state.outputs[socket.index()]; + if (output_state.usage == ValueUsage::Unused) { + params_->set_input_unused(i); + } + } + } + + void schedule_side_effect_nodes(CurrentTask ¤t_task) + { + if (self_.side_effect_provider_ != nullptr) { + const Vector<const FunctionNode *> side_effect_nodes = + self_.side_effect_provider_->get_nodes_with_side_effects(*context_); + for (const FunctionNode *node : side_effect_nodes) { + NodeState &node_state = *node_states_[node->index_in_graph()]; + node_state.has_side_effects = true; + this->with_locked_node(*node, node_state, current_task, [&](LockedNode &locked_node) { + this->schedule_node(locked_node); + }); + } + } + } + + void 
forward_newly_provided_inputs(CurrentTask ¤t_task) + { + LinearAllocator<> &allocator = local_allocators_.local(); + for (const int graph_input_index : self_.graph_inputs_.index_range()) { + std::atomic<uint8_t> &was_loaded = loaded_inputs_[graph_input_index]; + if (was_loaded.load()) { + continue; + } + void *input_data = params_->try_get_input_data_ptr(graph_input_index); + if (input_data == nullptr) { + continue; + } + if (was_loaded.fetch_or(1)) { + /* The value was forwarded before. */ + continue; + } + this->forward_newly_provided_input(current_task, allocator, graph_input_index, input_data); + } + } + + void forward_newly_provided_input(CurrentTask ¤t_task, + LinearAllocator<> &allocator, + const int graph_input_index, + void *input_data) + { + const OutputSocket &socket = *self_.graph_inputs_[graph_input_index]; + const CPPType &type = socket.type(); + void *buffer = allocator.allocate(type.size(), type.alignment()); + type.move_construct(input_data, buffer); + this->forward_value_to_linked_inputs(socket, {type, buffer}, current_task); + } + + void notify_output_required(const OutputSocket &socket, CurrentTask ¤t_task) + { + const Node &node = socket.node(); + const int index_in_node = socket.index(); + NodeState &node_state = *node_states_[node.index_in_graph()]; + OutputState &output_state = node_state.outputs[index_in_node]; + + /* The notified output socket might be an input of the entire graph. In this case, notify the + * caller that the input is required. */ + if (node.is_dummy()) { + const int graph_input_index = self_.graph_inputs_.index_of(&socket); + std::atomic<uint8_t> &was_loaded = loaded_inputs_[graph_input_index]; + if (was_loaded.load()) { + return; + } + void *input_data = params_->try_get_input_data_ptr_or_request(graph_input_index); + if (input_data == nullptr) { + return; + } + if (was_loaded.fetch_or(1)) { + /* The value was forwarded already. 
*/ + return; + } + this->forward_newly_provided_input( + current_task, local_allocators_.local(), graph_input_index, input_data); + return; + } + + BLI_assert(node.is_function()); + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + if (output_state.usage == ValueUsage::Used) { + return; + } + output_state.usage = ValueUsage::Used; + this->schedule_node(locked_node); + }); + } + + void notify_output_unused(const OutputSocket &socket, CurrentTask ¤t_task) + { + const Node &node = socket.node(); + const int index_in_node = socket.index(); + NodeState &node_state = *node_states_[node.index_in_graph()]; + OutputState &output_state = node_state.outputs[index_in_node]; + + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + output_state.potential_target_sockets -= 1; + if (output_state.potential_target_sockets == 0) { + BLI_assert(output_state.usage != ValueUsage::Unused); + if (output_state.usage == ValueUsage::Maybe) { + output_state.usage = ValueUsage::Unused; + if (node.is_dummy()) { + const int graph_input_index = self_.graph_inputs_.index_of(&socket); + params_->set_input_unused(graph_input_index); + } + else { + this->schedule_node(locked_node); + } + } + } + }); + } + + void schedule_node(LockedNode &locked_node) + { + BLI_assert(locked_node.node.is_function()); + switch (locked_node.node_state.schedule_state) { + case NodeScheduleState::NotScheduled: { + /* Don't add the node to the task pool immediately, because the task pool might start + * executing it immediately (when Blender is started with a single thread). + * That would often result in a deadlock, because we are still holding the mutex of the + * current node. Also see comments in #LockedNode. 
*/ + locked_node.node_state.schedule_state = NodeScheduleState::Scheduled; + locked_node.delayed_scheduled_nodes.append( + &static_cast<const FunctionNode &>(locked_node.node)); + break; + } + case NodeScheduleState::Scheduled: { + break; + } + case NodeScheduleState::Running: { + locked_node.node_state.schedule_state = NodeScheduleState::RunningAndRescheduled; + break; + } + case NodeScheduleState::RunningAndRescheduled: { + break; + } + } + } + + void with_locked_node(const Node &node, + NodeState &node_state, + CurrentTask ¤t_task, + const FunctionRef<void(LockedNode &)> f) + { + BLI_assert(&node_state == node_states_[node.index_in_graph()]); + + LockedNode locked_node{node, node_state}; + { + std::lock_guard lock{node_state.mutex}; + threading::isolate_task([&]() { f(locked_node); }); + } + + this->send_output_required_notifications(locked_node.delayed_required_outputs, current_task); + this->send_output_unused_notifications(locked_node.delayed_unused_outputs, current_task); + this->schedule_new_nodes(locked_node.delayed_scheduled_nodes, current_task); + } + + void send_output_required_notifications(const Span<const OutputSocket *> sockets, + CurrentTask ¤t_task) + { + for (const OutputSocket *socket : sockets) { + this->notify_output_required(*socket, current_task); + } + } + + void send_output_unused_notifications(const Span<const OutputSocket *> sockets, + CurrentTask ¤t_task) + { + for (const OutputSocket *socket : sockets) { + this->notify_output_unused(*socket, current_task); + } + } + + void schedule_new_nodes(const Span<const FunctionNode *> nodes, CurrentTask ¤t_task) + { + for (const FunctionNode *node_to_schedule : nodes) { + /* Avoid a round trip through the task pool for the first node that is scheduled by the + * current node execution. Other nodes are added to the pool so that other threads can pick + * them up. 
*/ + const FunctionNode *expected = nullptr; + if (current_task.next_node.compare_exchange_strong( + expected, node_to_schedule, std::memory_order_relaxed)) { + continue; + } + this->add_node_to_task_pool(*node_to_schedule); + current_task.added_node_to_pool.store(true, std::memory_order_relaxed); + } + } + + void add_node_to_task_pool(const Node &node) + { + BLI_task_pool_push( + task_pool_, Executor::run_node_from_task_pool, (void *)&node, false, nullptr); + } + + static void run_node_from_task_pool(TaskPool *task_pool, void *task_data) + { + void *user_data = BLI_task_pool_user_data(task_pool); + Executor &executor = *static_cast<Executor *>(user_data); + const FunctionNode &node = *static_cast<const FunctionNode *>(task_data); + + /* This loop reduces the number of round trips through the task pool as long as the current + * node is scheduling more nodes. */ + CurrentTask current_task; + current_task.next_node = &node; + while (current_task.next_node != nullptr) { + const FunctionNode &node_to_run = *current_task.next_node; + current_task.next_node = nullptr; + executor.run_node_task(node_to_run, current_task); + } + } + + void run_node_task(const FunctionNode &node, CurrentTask ¤t_task) + { + NodeState &node_state = *node_states_[node.index_in_graph()]; + LinearAllocator<> &allocator = local_allocators_.local(); + const LazyFunction &fn = node.function(); + + bool node_needs_execution = false; + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + BLI_assert(node_state.schedule_state == NodeScheduleState::Scheduled); + node_state.schedule_state = NodeScheduleState::Running; + + if (node_state.node_has_finished) { + return; + } + + bool required_uncomputed_output_exists = false; + for (OutputState &output_state : node_state.outputs) { + output_state.usage_for_execution = output_state.usage; + if (output_state.usage == ValueUsage::Used && !output_state.has_been_computed) { + required_uncomputed_output_exists = true; + } + } + 
if (!required_uncomputed_output_exists && !node_state.has_side_effects) { + return; + } + + if (node_state.had_initialization) { + /* Initialize storage. */ + node_state.storage = fn.init_storage(allocator); + + /* Load unlinked inputs. */ + for (const int input_index : node.inputs().index_range()) { + const InputSocket &input_socket = node.input(input_index); + if (input_socket.origin() != nullptr) { + continue; + } + InputState &input_state = node_state.inputs[input_index]; + const CPPType &type = input_socket.type(); + const void *default_value = input_socket.default_value(); + BLI_assert(default_value != nullptr); + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(input_socket, {type, default_value}, *context_); + } + void *buffer = allocator.allocate(type.size(), type.alignment()); + type.copy_construct(default_value, buffer); + this->forward_value_to_input(locked_node, input_state, {type, buffer}); + } + + /* Request linked inputs that are always needed. */ + const Span<Input> fn_inputs = fn.inputs(); + for (const int input_index : fn_inputs.index_range()) { + const Input &fn_input = fn_inputs[input_index]; + if (fn_input.usage == ValueUsage::Used) { + const InputSocket &input_socket = node.input(input_index); + this->set_input_required(locked_node, input_socket); + } + } + + node_state.had_initialization = false; + } + + for (const int input_index : node_state.inputs.index_range()) { + InputState &input_state = node_state.inputs[input_index]; + if (input_state.was_ready_for_execution) { + continue; + } + if (input_state.value != nullptr) { + input_state.was_ready_for_execution = true; + continue; + } + if (input_state.usage == ValueUsage::Used) { + return; + } + } + + node_needs_execution = true; + }); + + if (node_needs_execution) { + /* Importantly, the node must not be locked when it is executed. 
That would result in locks + * being hold very long in some cases and results in multiple locks being hold by the same + * thread in the same graph which can lead to deadlocks. */ + this->execute_node(node, node_state, current_task); + } + + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { +#ifdef DEBUG + if (node_needs_execution) { + this->assert_expected_outputs_have_been_computed(locked_node); + } +#endif + this->finish_node_if_possible(locked_node); + const bool reschedule_requested = node_state.schedule_state == + NodeScheduleState::RunningAndRescheduled; + node_state.schedule_state = NodeScheduleState::NotScheduled; + if (reschedule_requested && !node_state.node_has_finished) { + this->schedule_node(locked_node); + } + }); + } + + void assert_expected_outputs_have_been_computed(LockedNode &locked_node) + { + const FunctionNode &node = static_cast<const FunctionNode &>(locked_node.node); + const NodeState &node_state = locked_node.node_state; + + if (node_state.missing_required_inputs > 0) { + return; + } + if (node_state.schedule_state == NodeScheduleState::RunningAndRescheduled) { + return; + } + Vector<const OutputSocket *> missing_outputs; + for (const int i : node_state.outputs.index_range()) { + const OutputState &output_state = node_state.outputs[i]; + if (output_state.usage_for_execution == ValueUsage::Used) { + if (!output_state.has_been_computed) { + missing_outputs.append(&node.output(i)); + } + } + } + if (!missing_outputs.is_empty()) { + if (self_.logger_ != nullptr) { + self_.logger_->dump_when_outputs_are_missing(node, missing_outputs, *context_); + } + BLI_assert_unreachable(); + } + } + + void finish_node_if_possible(LockedNode &locked_node) + { + const Node &node = locked_node.node; + NodeState &node_state = locked_node.node_state; + + if (node_state.node_has_finished) { + /* Was finished already. */ + return; + } + /* If there are outputs that may still be used, the node is not done yet. 
*/ + for (const OutputState &output_state : node_state.outputs) { + if (output_state.usage != ValueUsage::Unused && !output_state.has_been_computed) { + return; + } + } + /* If the node is still waiting for inputs, it is not done yet. */ + for (const InputState &input_state : node_state.inputs) { + if (input_state.usage == ValueUsage::Used && !input_state.was_ready_for_execution) { + return; + } + } + + node_state.node_has_finished = true; + + for (const int input_index : node_state.inputs.index_range()) { + const InputSocket &input_socket = node.input(input_index); + InputState &input_state = node_state.inputs[input_index]; + if (input_state.usage == ValueUsage::Maybe) { + this->set_input_unused(locked_node, input_socket); + } + else if (input_state.usage == ValueUsage::Used) { + this->destruct_input_value_if_exists(input_state, input_socket.type()); + } + } + + if (node_state.storage != nullptr) { + if (node.is_function()) { + const FunctionNode &fn_node = static_cast<const FunctionNode &>(node); + fn_node.function().destruct_storage(node_state.storage); + } + node_state.storage = nullptr; + } + } + + void destruct_input_value_if_exists(InputState &input_state, const CPPType &type) + { + if (input_state.value != nullptr) { + type.destruct(input_state.value); + input_state.value = nullptr; + } + } + + void execute_node(const FunctionNode &node, NodeState &node_state, CurrentTask ¤t_task); + + void set_input_unused_during_execution(const Node &node, + NodeState &node_state, + const int input_index, + CurrentTask ¤t_task) + { + const InputSocket &input_socket = node.input(input_index); + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + this->set_input_unused(locked_node, input_socket); + }); + } + + void set_input_unused(LockedNode &locked_node, const InputSocket &input_socket) + { + NodeState &node_state = locked_node.node_state; + const int input_index = input_socket.index(); + InputState &input_state = 
node_state.inputs[input_index]; + + BLI_assert(input_state.usage != ValueUsage::Used); + if (input_state.usage == ValueUsage::Unused) { + return; + } + input_state.usage = ValueUsage::Unused; + + this->destruct_input_value_if_exists(input_state, input_socket.type()); + if (input_state.was_ready_for_execution) { + return; + } + const OutputSocket *origin = input_socket.origin(); + if (origin != nullptr) { + locked_node.delayed_unused_outputs.append(origin); + } + } + + void *set_input_required_during_execution(const Node &node, + NodeState &node_state, + const int input_index, + CurrentTask ¤t_task) + { + const InputSocket &input_socket = node.input(input_index); + void *result; + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + result = this->set_input_required(locked_node, input_socket); + }); + return result; + } + + void *set_input_required(LockedNode &locked_node, const InputSocket &input_socket) + { + BLI_assert(&locked_node.node == &input_socket.node()); + NodeState &node_state = locked_node.node_state; + const int input_index = input_socket.index(); + InputState &input_state = node_state.inputs[input_index]; + + BLI_assert(input_state.usage != ValueUsage::Unused); + + if (input_state.value != nullptr) { + input_state.was_ready_for_execution = true; + return input_state.value; + } + if (input_state.usage == ValueUsage::Used) { + return nullptr; + } + input_state.usage = ValueUsage::Used; + node_state.missing_required_inputs += 1; + + const OutputSocket *origin_socket = input_socket.origin(); + /* Unlinked inputs are always loaded in advance. 
*/ + BLI_assert(origin_socket != nullptr); + locked_node.delayed_required_outputs.append(origin_socket); + return nullptr; + } + + void forward_value_to_linked_inputs(const OutputSocket &from_socket, + GMutablePointer value_to_forward, + CurrentTask ¤t_task) + { + BLI_assert(value_to_forward.get() != nullptr); + LinearAllocator<> &allocator = local_allocators_.local(); + const CPPType &type = *value_to_forward.type(); + + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(from_socket, value_to_forward, *context_); + } + + const Span<const InputSocket *> targets = from_socket.targets(); + for (const InputSocket *target_socket : targets) { + const Node &target_node = target_socket->node(); + NodeState &node_state = *node_states_[target_node.index_in_graph()]; + const int input_index = target_socket->index(); + InputState &input_state = node_state.inputs[input_index]; + const bool is_last_target = target_socket == targets.last(); +#ifdef DEBUG + if (input_state.value != nullptr) { + if (self_.logger_ != nullptr) { + self_.logger_->dump_when_input_is_set_twice(*target_socket, from_socket, *context_); + } + BLI_assert_unreachable(); + } +#endif + BLI_assert(!input_state.was_ready_for_execution); + BLI_assert(target_socket->type() == type); + BLI_assert(target_socket->origin() == &from_socket); + + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(*target_socket, value_to_forward, *context_); + } + if (target_node.is_dummy()) { + /* Forward the value to the outside of the graph. 
*/ + const int graph_output_index = self_.graph_outputs_.index_of_try(target_socket); + if (graph_output_index != -1 && + params_->get_output_usage(graph_output_index) != ValueUsage::Unused) { + void *dst_buffer = params_->get_output_data_ptr(graph_output_index); + if (is_last_target) { + type.move_construct(value_to_forward.get(), dst_buffer); + } + else { + type.copy_construct(value_to_forward.get(), dst_buffer); + } + params_->output_set(graph_output_index); + } + continue; + } + this->with_locked_node(target_node, node_state, current_task, [&](LockedNode &locked_node) { + if (input_state.usage == ValueUsage::Unused) { + return; + } + if (is_last_target) { + /* No need to make a copy if this is the last target. */ + this->forward_value_to_input(locked_node, input_state, value_to_forward); + value_to_forward = {}; + } + else { + void *buffer = allocator.allocate(type.size(), type.alignment()); + type.copy_construct(value_to_forward.get(), buffer); + this->forward_value_to_input(locked_node, input_state, {type, buffer}); + } + }); + } + if (value_to_forward.get() != nullptr) { + value_to_forward.destruct(); + } + } + + void forward_value_to_input(LockedNode &locked_node, + InputState &input_state, + GMutablePointer value) + { + NodeState &node_state = locked_node.node_state; + + BLI_assert(input_state.value == nullptr); + BLI_assert(!input_state.was_ready_for_execution); + input_state.value = value.get(); + + if (input_state.usage == ValueUsage::Used) { + node_state.missing_required_inputs -= 1; + if (node_state.missing_required_inputs == 0) { + this->schedule_node(locked_node); + } + } + } +}; + +class GraphExecutorLFParams final : public Params { + private: + Executor &executor_; + const Node &node_; + NodeState &node_state_; + CurrentTask ¤t_task_; + + public: + GraphExecutorLFParams(const LazyFunction &fn, + Executor &executor, + const Node &node, + NodeState &node_state, + CurrentTask ¤t_task) + : Params(fn), + executor_(executor), + node_(node), + 
node_state_(node_state), + current_task_(current_task) + { + } + + private: + void *try_get_input_data_ptr_impl(const int index) const override + { + const InputState &input_state = node_state_.inputs[index]; + if (input_state.was_ready_for_execution) { + return input_state.value; + } + return nullptr; + } + + void *try_get_input_data_ptr_or_request_impl(const int index) override + { + const InputState &input_state = node_state_.inputs[index]; + if (input_state.was_ready_for_execution) { + return input_state.value; + } + return executor_.set_input_required_during_execution(node_, node_state_, index, current_task_); + } + + void *get_output_data_ptr_impl(const int index) override + { + OutputState &output_state = node_state_.outputs[index]; + BLI_assert(!output_state.has_been_computed); + if (output_state.value == nullptr) { + LinearAllocator<> &allocator = executor_.local_allocators_.local(); + const CPPType &type = node_.output(index).type(); + output_state.value = allocator.allocate(type.size(), type.alignment()); + } + return output_state.value; + } + + void output_set_impl(const int index) override + { + OutputState &output_state = node_state_.outputs[index]; + BLI_assert(!output_state.has_been_computed); + BLI_assert(output_state.value != nullptr); + const OutputSocket &output_socket = node_.output(index); + executor_.forward_value_to_linked_inputs( + output_socket, {output_socket.type(), output_state.value}, current_task_); + output_state.value = nullptr; + output_state.has_been_computed = true; + } + + bool output_was_set_impl(const int index) const override + { + const OutputState &output_state = node_state_.outputs[index]; + return output_state.has_been_computed; + } + + ValueUsage get_output_usage_impl(const int index) const override + { + const OutputState &output_state = node_state_.outputs[index]; + return output_state.usage_for_execution; + } + + void set_input_unused_impl(const int index) override + { + 
executor_.set_input_unused_during_execution(node_, node_state_, index, current_task_); + } +}; + +/** + * Actually execute the node. + * + * Making this `inline` results in a simpler back-trace in release builds. + */ +inline void Executor::execute_node(const FunctionNode &node, + NodeState &node_state, + CurrentTask ¤t_task) +{ + const LazyFunction &fn = node.function(); + GraphExecutorLFParams node_params{fn, *this, node, node_state, current_task}; + BLI_assert(context_ != nullptr); + Context fn_context = *context_; + fn_context.storage = node_state.storage; + + if (self_.logger_ != nullptr) { + self_.logger_->log_before_node_execute(node, node_params, fn_context); + } + + fn.execute(node_params, fn_context); + + if (self_.logger_ != nullptr) { + self_.logger_->log_after_node_execute(node, node_params, fn_context); + } +} + +GraphExecutor::GraphExecutor(const Graph &graph, + const Span<const OutputSocket *> graph_inputs, + const Span<const InputSocket *> graph_outputs, + const Logger *logger, + const SideEffectProvider *side_effect_provider) + : graph_(graph), + graph_inputs_(graph_inputs), + graph_outputs_(graph_outputs), + logger_(logger), + side_effect_provider_(side_effect_provider) +{ + for (const OutputSocket *socket : graph_inputs_) { + BLI_assert(socket->node().is_dummy()); + inputs_.append({"In", socket->type(), ValueUsage::Maybe}); + } + for (const InputSocket *socket : graph_outputs_) { + BLI_assert(socket->node().is_dummy()); + outputs_.append({"Out", socket->type()}); + } +} + +void GraphExecutor::execute_impl(Params ¶ms, const Context &context) const +{ + Executor &executor = *static_cast<Executor *>(context.storage); + executor.execute(params, context); +} + +void *GraphExecutor::init_storage(LinearAllocator<> &allocator) const +{ + Executor &executor = *allocator.construct<Executor>(*this).release(); + return &executor; +} + +void GraphExecutor::destruct_storage(void *storage) const +{ + std::destroy_at(static_cast<Executor *>(storage)); +} + 
+void GraphExecutorLogger::log_socket_value(const Socket &socket, + const GPointer value, + const Context &context) const +{ + UNUSED_VARS(socket, value, context); +} + +void GraphExecutorLogger::log_before_node_execute(const FunctionNode &node, + const Params ¶ms, + const Context &context) const +{ + UNUSED_VARS(node, params, context); +} + +void GraphExecutorLogger::log_after_node_execute(const FunctionNode &node, + const Params ¶ms, + const Context &context) const +{ + UNUSED_VARS(node, params, context); +} + +Vector<const FunctionNode *> GraphExecutorSideEffectProvider::get_nodes_with_side_effects( + const Context &context) const +{ + UNUSED_VARS(context); + return {}; +} + +void GraphExecutorLogger::dump_when_outputs_are_missing(const FunctionNode &node, + Span<const OutputSocket *> missing_sockets, + const Context &context) const +{ + UNUSED_VARS(node, missing_sockets, context); +} + +void GraphExecutorLogger::dump_when_input_is_set_twice(const InputSocket &target_socket, + const OutputSocket &from_socket, + const Context &context) const +{ + UNUSED_VARS(target_socket, from_socket, context); +} + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/tests/FN_lazy_function_test.cc b/source/blender/functions/tests/FN_lazy_function_test.cc new file mode 100644 index 00000000000..8df064cd8a6 --- /dev/null +++ b/source/blender/functions/tests/FN_lazy_function_test.cc @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: Apache-2.0 */ + +#include "testing/testing.h" + +#include "FN_lazy_function_execute.hh" +#include "FN_lazy_function_graph.hh" +#include "FN_lazy_function_graph_executor.hh" + +#include "BLI_task.h" +#include "BLI_timeit.hh" + +namespace blender::fn::lazy_function::tests { + +class AddLazyFunction : public LazyFunction { + public: + AddLazyFunction() + { + debug_name_ = "Add"; + inputs_.append({"A", CPPType::get<int>()}); + inputs_.append({"B", CPPType::get<int>()}); + outputs_.append({"Result", CPPType::get<int>()}); + } + + 
void execute_impl(Params ¶ms, const Context &UNUSED(context)) const override + { + const int a = params.get_input<int>(0); + const int b = params.get_input<int>(1); + params.set_output(0, a + b); + } +}; + +class StoreValueFunction : public LazyFunction { + private: + int *dst1_; + int *dst2_; + + public: + StoreValueFunction(int *dst1, int *dst2) : dst1_(dst1), dst2_(dst2) + { + debug_name_ = "Store Value"; + inputs_.append({"A", CPPType::get<int>()}); + inputs_.append({"B", CPPType::get<int>(), ValueUsage::Maybe}); + } + + void execute_impl(Params ¶ms, const Context &UNUSED(context)) const override + { + *dst1_ = params.get_input<int>(0); + if (int *value = params.try_get_input_data_ptr_or_request<int>(1)) { + *dst2_ = *value; + } + } +}; + +class SimpleSideEffectProvider : public GraphExecutor::SideEffectProvider { + private: + Vector<const FunctionNode *> side_effect_nodes_; + + public: + SimpleSideEffectProvider(Span<const FunctionNode *> side_effect_nodes) + : side_effect_nodes_(side_effect_nodes) + { + } + + Vector<const FunctionNode *> get_nodes_with_side_effects( + const Context &UNUSED(context)) const override + { + return side_effect_nodes_; + } +}; + +TEST(lazy_function, SimpleAdd) +{ + const AddLazyFunction add_fn; + int result = 0; + execute_lazy_function_eagerly(add_fn, nullptr, std::make_tuple(30, 5), std::make_tuple(&result)); + EXPECT_EQ(result, 35); +} + +TEST(lazy_function, SideEffects) +{ + BLI_task_scheduler_init(); + int dst1 = 0; + int dst2 = 0; + + const AddLazyFunction add_fn; + const StoreValueFunction store_fn{&dst1, &dst2}; + + Graph graph; + FunctionNode &add_node_1 = graph.add_function(add_fn); + FunctionNode &add_node_2 = graph.add_function(add_fn); + FunctionNode &store_node = graph.add_function(store_fn); + DummyNode &input_node = graph.add_dummy({}, {&CPPType::get<int>()}); + + graph.add_link(input_node.output(0), add_node_1.input(0)); + graph.add_link(input_node.output(0), add_node_2.input(0)); + 
graph.add_link(add_node_1.output(0), store_node.input(0)); + graph.add_link(add_node_2.output(0), store_node.input(1)); + + const int value_10 = 10; + const int value_100 = 100; + add_node_1.input(1).set_default_value(&value_10); + add_node_2.input(1).set_default_value(&value_100); + + graph.update_node_indices(); + + SimpleSideEffectProvider side_effect_provider{{&store_node}}; + + GraphExecutor executor_fn{graph, {&input_node.output(0)}, {}, nullptr, &side_effect_provider}; + execute_lazy_function_eagerly(executor_fn, nullptr, std::make_tuple(5), std::make_tuple()); + + EXPECT_EQ(dst1, 15); + EXPECT_EQ(dst2, 105); +} + +} // namespace blender::fn::lazy_function::tests diff --git a/source/blender/geometry/CMakeLists.txt b/source/blender/geometry/CMakeLists.txt index 0f06890cbfa..9e1929b60a8 100644 --- a/source/blender/geometry/CMakeLists.txt +++ b/source/blender/geometry/CMakeLists.txt @@ -27,6 +27,7 @@ set(SRC intern/reverse_uv_sampler.cc intern/set_curve_type.cc intern/subdivide_curves.cc + intern/trim_curves.cc intern/uv_parametrizer.cc GEO_add_curves_on_mesh.hh @@ -41,6 +42,7 @@ set(SRC GEO_reverse_uv_sampler.hh GEO_set_curve_type.hh GEO_subdivide_curves.hh + GEO_trim_curves.hh GEO_uv_parametrizer.h ) diff --git a/source/blender/geometry/GEO_trim_curves.hh b/source/blender/geometry/GEO_trim_curves.hh new file mode 100644 index 00000000000..3c07b5628ea --- /dev/null +++ b/source/blender/geometry/GEO_trim_curves.hh @@ -0,0 +1,32 @@ +#include "BLI_span.hh" + +#include "BKE_curves.hh" +#include "BKE_curves_utils.hh" +#include "BKE_geometry_set.hh" + +namespace blender::geometry { + +/* + * Create a new Curves instance by trimming the input curves. Copying the selected splines + * between the start and end points. 
+ */
+bke::CurvesGeometry trim_curves(const bke::CurvesGeometry &src_curves,
+                                IndexMask selection,
+                                Span<bke::curves::CurvePoint> start_points,
+                                Span<bke::curves::CurvePoint> end_points);
+
+/**
+ * Find the point(s) and piecewise segment corresponding to the given distance along the length of
+ * the curve. Returns points on the evaluated curve for Catmull-Rom and NURBS splines.
+ *
+ * \param curves: Curve geometry to sample.
+ * \param lengths: Distance along the curve of the form [0.0, length] to determine the point for.
+ * \param curve_indices: Curve index to lookup for each 'length', negative indices are set to 0.
+ * \param is_normalized: If true, 'lengths' are normalized to the interval [0.0, 1.0].
+ */
+Array<bke::curves::CurvePoint, 12> lookup_curve_points(const bke::CurvesGeometry &curves,
+                                                       Span<float> lengths,
+                                                       Span<int64_t> curve_indices,
+                                                       bool is_normalized);
+
+} // namespace blender::geometry
diff --git a/source/blender/geometry/intern/add_curves_on_mesh.cc b/source/blender/geometry/intern/add_curves_on_mesh.cc
index e06ee55afa0..bb5e2a0a28a 100644
--- a/source/blender/geometry/intern/add_curves_on_mesh.cc
+++ b/source/blender/geometry/intern/add_curves_on_mesh.cc
@@ -372,6 +372,28 @@ AddCurvesOnMeshOutputs add_curves_on_mesh(CurvesGeometry &curves,
 
   curves.fill_curve_types(new_curves_range, CURVE_TYPE_CATMULL_ROM);
 
+  /* Explicitly set all other attributes besides those processed above to default values.
*/ + bke::MutableAttributeAccessor attributes = curves.attributes_for_write(); + Set<std::string> attributes_to_skip{{"position", + "curve_type", + "surface_uv_coordinate", + ".selection_point_float", + ".selection_curve_float"}}; + attributes.for_all( + [&](const bke::AttributeIDRef &id, const bke::AttributeMetaData /*meta_data*/) { + if (id.is_named() && attributes_to_skip.contains(id.name())) { + return true; + } + bke::GSpanAttributeWriter attribute = attributes.lookup_for_write_span(id); + const int new_elements_num = attribute.domain == ATTR_DOMAIN_POINT ? new_points_num : + new_curves_num; + const CPPType &type = attribute.span.type(); + GMutableSpan new_data = attribute.span.take_back(new_elements_num); + type.fill_assign_n(type.default_value(), new_data.data(), new_data.size()); + attribute.finish(); + return true; + }); + return outputs; } diff --git a/source/blender/geometry/intern/trim_curves.cc b/source/blender/geometry/intern/trim_curves.cc new file mode 100644 index 00000000000..9b71a95057f --- /dev/null +++ b/source/blender/geometry/intern/trim_curves.cc @@ -0,0 +1,1285 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/** \file + * \ingroup bke + */ + +#include "BLI_array_utils.hh" +#include "BLI_length_parameterize.hh" + +#include "BKE_attribute.hh" +#include "BKE_attribute_math.hh" +#include "BKE_curves.hh" +#include "BKE_curves_utils.hh" +#include "BKE_geometry_set.hh" + +#include "GEO_trim_curves.hh" + +namespace blender::geometry { + +/* -------------------------------------------------------------------- */ +/** \name Curve Enums + * \{ */ + +#define CURVE_TYPE_AS_MASK(curve_type) ((CurveTypeMask)((1 << (int)(curve_type)))) + +typedef enum CurveTypeMask { + CURVE_TYPE_MASK_CATMULL_ROM = (1 << 0), + CURVE_TYPE_MASK_POLY = (1 << 1), + CURVE_TYPE_MASK_BEZIER = (1 << 2), + CURVE_TYPE_MASK_NURBS = (1 << 3), + CURVE_TYPE_MASK_ALL = (1 << 4) - 1 +} CurveTypeMask; + +/** \} */ + +/* 
-------------------------------------------------------------------- */
+/** \name #IndexRangeCyclic Utilities
+ * \{ */
+
+/**
+ * Create a cyclical iterator for all control points within the interval [start_point, end_point]
+ * including any control point at the start or end point.
+ *
+ * \param start_point Point on the curve that defines the starting point of the interval.
+ * \param end_point Point on the curve that defines the end point of the interval (included).
+ * \param points IndexRange for the curve points.
+ */
+static bke::curves::IndexRangeCyclic get_range_between_endpoints(
+    const bke::curves::CurvePoint start_point,
+    const bke::curves::CurvePoint end_point,
+    const IndexRange points)
+{
+  const int64_t start_index = start_point.parameter == 0.0 ? start_point.index :
+                                                             start_point.next_index;
+  int64_t end_index = end_point.parameter == 0.0 ? end_point.index : end_point.next_index;
+  int64_t cycles;
+
+  if (end_point.is_controlpoint()) {
+    ++end_index;
+    if (end_index > points.last()) {
+      end_index = points.one_after_last();
+    }
+    /* end_point < start_point but parameter is irrelevant (end_point is controlpoint), and loop
+     * when equal due to increment. */
+    cycles = end_index <= start_index;
+  }
+  else {
+    cycles = end_point < start_point || end_index < start_index;
+  }
+  return bke::curves::IndexRangeCyclic(start_index, end_index, points, cycles);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Lookup Curve Points
+ * \{ */
+
+/**
+ * Find the point on the curve defined by the distance along the curve. Assumes curve resolution is
+ * constant for all curve segments and evaluated curve points are uniformly spaced between the
+ * segment endpoints in relation to the curve parameter.
+ *
+ * \param lengths: Accumulated length for the evaluated curve.
+ * \param sample_length: Distance along the curve to determine the CurvePoint for.
+ * \param cyclic: If curve is cyclic.
+ * \param resolution: Curve resolution (number of evaluated points per segment). + * \param num_curve_points: Total number of control points in the curve. + * \return: Point on the piecewise segment matching the given distance. + */ +static bke::curves::CurvePoint lookup_curve_point(const Span<float> lengths, + const float sample_length, + const bool cyclic, + const int resolution, + const int num_curve_points) +{ + BLI_assert(!cyclic || lengths.size() / resolution >= 2); + const int last_index = num_curve_points - 1; + if (sample_length <= 0.0f) { + return {0, 1, 0.0f}; + } + if (sample_length >= lengths.last()) { + return cyclic ? bke::curves::CurvePoint{last_index, 0, 1.0} : + bke::curves::CurvePoint{last_index - 1, last_index, 1.0}; + } + int eval_index; + float eval_factor; + length_parameterize::sample_at_length(lengths, sample_length, eval_index, eval_factor); + + const int index = eval_index / resolution; + const int next_index = (index == last_index) ? 0 : index + 1; + const float parameter = (eval_factor + eval_index) / resolution - index; + + return bke::curves::CurvePoint{index, next_index, parameter}; +} + +/** + * Find the point on the 'evaluated' polygonal curve. + */ +static bke::curves::CurvePoint lookup_evaluated_point(const Span<float> lengths, + const float sample_length, + const bool cyclic, + const int evaluated_size) +{ + const int last_index = evaluated_size - 1; + if (sample_length <= 0.0f) { + return {0, 1, 0.0f}; + } + if (sample_length >= lengths.last()) { + return cyclic ? bke::curves::CurvePoint{last_index, 0, 1.0} : + bke::curves::CurvePoint{last_index - 1, last_index, 1.0}; + } + + int eval_index; + float eval_factor; + length_parameterize::sample_at_length(lengths, sample_length, eval_index, eval_factor); + + const int next_eval_index = (eval_index == last_index) ? 
0 : eval_index + 1;
+  return bke::curves::CurvePoint{eval_index, next_eval_index, eval_factor};
+}
+
+/**
+ * Find the point on a Bezier curve using the 'bezier_offsets' cache.
+ */
+static bke::curves::CurvePoint lookup_bezier_point(const Span<int> bezier_offsets,
+                                                   const Span<float> lengths,
+                                                   const float sample_length,
+                                                   const bool cyclic,
+                                                   const int num_curve_points)
+{
+  const int last_index = num_curve_points - 1;
+  if (sample_length <= 0.0f) {
+    return {0, 1, 0.0f};
+  }
+  if (sample_length >= lengths.last()) {
+    return cyclic ? bke::curves::CurvePoint{last_index, 0, 1.0} :
+                    bke::curves::CurvePoint{last_index - 1, last_index, 1.0};
+  }
+  int eval_index;
+  float eval_factor;
+  length_parameterize::sample_at_length(lengths, sample_length, eval_index, eval_factor);
+
+  /* Find the segment index from the offset mapping. */
+  const int *offset = std::upper_bound(bezier_offsets.begin(), bezier_offsets.end(), eval_index);
+  const int left = offset - bezier_offsets.begin();
+  const int right = left == last_index ? 0 : left + 1;
+
+  const int prev_offset = left == 0 ? 0 : bezier_offsets[(int64_t)left - 1];
+  const float offset_in_segment = eval_factor + eval_index - prev_offset;
+  const int segment_resolution = bezier_offsets[left] - prev_offset;
+  const float parameter = std::clamp(offset_in_segment / segment_resolution, 0.0f, 1.0f);
+
+  return {left, right, parameter};
+}
+
+Array<bke::curves::CurvePoint, 12> lookup_curve_points(const bke::CurvesGeometry &curves,
+                                                       const Span<float> lengths,
+                                                       const Span<int64_t> curve_indices,
+                                                       const bool normalized_factors)
+{
+  BLI_assert(lengths.size() == curve_indices.size());
+  BLI_assert(*std::max_element(curve_indices.begin(), curve_indices.end()) < curves.curves_num());
+
+  const VArray<bool> cyclic = curves.cyclic();
+  const VArray<int> resolution = curves.resolution();
+  const VArray<int8_t> curve_types = curves.curve_types();
+
+  /* Compute curve lengths!
*/ + curves.ensure_evaluated_lengths(); + curves.ensure_evaluated_offsets(); + + /* Find the curve points referenced by the input! */ + Array<bke::curves::CurvePoint, 12> lookups(curve_indices.size()); + threading::parallel_for(curve_indices.index_range(), 128, [&](const IndexRange range) { + for (const int64_t lookup_index : range) { + const int64_t curve_i = curve_indices[lookup_index]; + + const int point_count = curves.points_num_for_curve(curve_i); + if (curve_i < 0 || point_count == 1) { + lookups[lookup_index] = {0, 0, 0.0f}; + continue; + } + + const Span<float> accumulated_lengths = curves.evaluated_lengths_for_curve(curve_i, + cyclic[curve_i]); + BLI_assert(accumulated_lengths.size() > 0); + + const float sample_length = normalized_factors ? + lengths[lookup_index] * accumulated_lengths.last() : + lengths[lookup_index]; + + const CurveType curve_type = (CurveType)curve_types[curve_i]; + + switch (curve_type) { + case CURVE_TYPE_BEZIER: { + if (bke::curves::bezier::has_vector_handles( + point_count, + curves.evaluated_points_for_curve(curve_i).size(), + cyclic[curve_i], + resolution[curve_i])) { + const Span<int> bezier_offsets = curves.bezier_evaluated_offsets_for_curve(curve_i); + lookups[lookup_index] = lookup_bezier_point( + bezier_offsets, accumulated_lengths, sample_length, cyclic[curve_i], point_count); + } + else { + lookups[lookup_index] = lookup_curve_point(accumulated_lengths, + sample_length, + cyclic[curve_i], + resolution[curve_i], + point_count); + } + break; + } + case CURVE_TYPE_CATMULL_ROM: { + lookups[lookup_index] = lookup_curve_point(accumulated_lengths, + sample_length, + cyclic[curve_i], + resolution[curve_i], + point_count); + break; + } + case CURVE_TYPE_NURBS: + case CURVE_TYPE_POLY: + default: { + /* Handle general case as an "evaluated" or polygonal curve. 
 */
+        BLI_assert(resolution[curve_i] > 0);
+        lookups[lookup_index] = lookup_evaluated_point(
+            accumulated_lengths,
+            sample_length,
+            cyclic[curve_i],
+            curves.evaluated_points_for_curve(curve_i).size());
+        break;
+      }
+    }
+  }
+  });
+  return lookups;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Transfer Curve Domain
+ * \{ */
+
+/**
+ * Determine curve type(s) for the copied curves given the supported set of types and knot modes.
+ * If a curve type is not supported the default type is set.
+ */
+static void determine_copyable_curve_types(const bke::CurvesGeometry &src_curves,
+                                           bke::CurvesGeometry &dst_curves,
+                                           const IndexMask selection,
+                                           const IndexMask selection_inverse,
+                                           const CurveTypeMask supported_curve_type_mask,
+                                           const int8_t default_curve_type = (int8_t)
+                                               CURVE_TYPE_POLY)
+{
+  const VArray<int8_t> src_curve_types = src_curves.curve_types();
+  const VArray<int8_t> src_knot_modes = src_curves.nurbs_knots_modes();
+  MutableSpan<int8_t> dst_curve_types = dst_curves.curve_types_for_write();
+
+  threading::parallel_for(selection.index_range(), 4096, [&](const IndexRange selection_range) {
+    for (const int64_t curve_i : selection.slice(selection_range)) {
+      if (supported_curve_type_mask & CURVE_TYPE_AS_MASK(src_curve_types[curve_i])) {
+        dst_curve_types[curve_i] = src_curve_types[curve_i];
+      }
+      else {
+        dst_curve_types[curve_i] = default_curve_type;
+      }
+    }
+  });
+
+  array_utils::copy(src_curve_types, selection_inverse, dst_curve_types);
+}
+
+/**
+ * Determine if a curve is treated as an evaluated curve. Curves which inherently do not support
+ * trimming are discretized (e.g. NURBS).
+ */ +static bool copy_as_evaluated_curve(const int8_t src_type, const int8_t dst_type) +{ + return src_type != CURVE_TYPE_POLY && dst_type == CURVE_TYPE_POLY; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Specialized Curve Constructors + * \{ */ + +static void compute_trim_result_offsets(const bke::CurvesGeometry &src_curves, + const IndexMask selection, + const IndexMask inverse_selection, + const Span<bke::curves::CurvePoint> start_points, + const Span<bke::curves::CurvePoint> end_points, + const VArray<int8_t> dst_curve_types, + MutableSpan<int> dst_curve_offsets, + Vector<int64_t> &r_curve_indices, + Vector<int64_t> &r_point_curve_indices) +{ + BLI_assert(r_curve_indices.size() == 0); + BLI_assert(r_point_curve_indices.size() == 0); + const VArray<bool> cyclic = src_curves.cyclic(); + const VArray<int8_t> curve_types = src_curves.curve_types(); + r_curve_indices.reserve(selection.size()); + + for (const int64_t curve_i : selection) { + + int64_t src_point_count; + + if (copy_as_evaluated_curve(curve_types[curve_i], dst_curve_types[curve_i])) { + src_point_count = src_curves.evaluated_points_for_curve(curve_i).size(); + } + else { + src_point_count = (int64_t)src_curves.points_num_for_curve(curve_i); + } + BLI_assert(src_point_count > 0); + + if (start_points[curve_i] == end_points[curve_i]) { + dst_curve_offsets[curve_i] = 1; + r_point_curve_indices.append(curve_i); + } + else { + const bke::curves::IndexRangeCyclic point_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_point_count}); + const int count = point_range.size() + !start_points[curve_i].is_controlpoint() + + !end_points[curve_i].is_controlpoint(); + dst_curve_offsets[curve_i] = count; + r_curve_indices.append(curve_i); + } + BLI_assert(dst_curve_offsets[curve_i] > 0); + } + threading::parallel_for( + inverse_selection.index_range(), 4096, [&](const IndexRange selection_range) { + for (const int64_t 
curve_i : inverse_selection.slice(selection_range)) { + dst_curve_offsets[curve_i] = src_curves.points_num_for_curve(curve_i); + } + }); + bke::curves::accumulate_counts_to_offsets(dst_curve_offsets); +} + +/* -------------------------------------------------------------------- + * Utility functions. + */ + +static void fill_bezier_data(bke::CurvesGeometry &dst_curves, const IndexMask selection) +{ + if (dst_curves.has_curve_with_type(CURVE_TYPE_BEZIER)) { + MutableSpan<float3> handle_positions_left = dst_curves.handle_positions_left_for_write(); + MutableSpan<float3> handle_positions_right = dst_curves.handle_positions_right_for_write(); + MutableSpan<int8_t> handle_types_left = dst_curves.handle_types_left_for_write(); + MutableSpan<int8_t> handle_types_right = dst_curves.handle_types_right_for_write(); + + threading::parallel_for(selection.index_range(), 4096, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange points = dst_curves.points_for_curve(curve_i); + handle_types_right.slice(points).fill((int8_t)BEZIER_HANDLE_FREE); + handle_types_left.slice(points).fill((int8_t)BEZIER_HANDLE_FREE); + handle_positions_left.slice(points).fill({0.0f, 0.0f, 0.0f}); + handle_positions_right.slice(points).fill({0.0f, 0.0f, 0.0f}); + } + }); + } +} +static void fill_nurbs_data(bke::CurvesGeometry &dst_curves, const IndexMask selection) +{ + if (dst_curves.has_curve_with_type(CURVE_TYPE_NURBS)) { + bke::curves::fill_points(dst_curves, selection, 0.0f, dst_curves.nurbs_weights_for_write()); + } +} + +template<typename T> +static int64_t copy_point_data_between_endpoints(const Span<T> src_data, + MutableSpan<T> dst_data, + const bke::curves::IndexRangeCyclic src_range, + const int64_t src_index, + int64_t dst_index) +{ + int64_t increment; + if (src_range.cycles()) { + increment = src_range.size_before_loop(); + dst_data.slice(dst_index, increment).copy_from(src_data.slice(src_index, increment)); + dst_index += increment; + + 
increment = src_range.size_after_loop(); + dst_data.slice(dst_index, increment) + .copy_from(src_data.slice(src_range.curve_range().first(), increment)); + dst_index += increment; + } + else { + increment = src_range.one_after_last() - src_range.first(); + dst_data.slice(dst_index, increment).copy_from(src_data.slice(src_index, increment)); + dst_index += increment; + } + return dst_index; +} + +/* -------------------------------------------------------------------- + * Sampling utilities. + */ + +template<typename T> +static T interpolate_catmull_rom(const Span<T> src_data, + const bke::curves::CurvePoint insertion_point, + const bool src_cyclic) +{ + BLI_assert(insertion_point.index >= 0 && insertion_point.next_index < src_data.size()); + int i0; + if (insertion_point.index == 0) { + i0 = src_cyclic ? src_data.size() - 1 : insertion_point.index; + } + else { + i0 = insertion_point.index - 1; + } + int i3 = insertion_point.next_index + 1; + if (i3 == src_data.size()) { + i3 = src_cyclic ? 0 : insertion_point.next_index; + } + return bke::curves::catmull_rom::interpolate<T>(src_data[i0], + src_data[insertion_point.index], + src_data[insertion_point.next_index], + src_data[i3], + insertion_point.parameter); +} + +static bke::curves::bezier::Insertion knot_insert_bezier( + const Span<float3> positions, + const Span<float3> handles_left, + const Span<float3> handles_right, + const bke::curves::CurvePoint insertion_point) +{ + BLI_assert( + insertion_point.index + 1 == insertion_point.next_index || + (insertion_point.next_index >= 0 && insertion_point.next_index < insertion_point.index)); + return bke::curves::bezier::insert(positions[insertion_point.index], + handles_right[insertion_point.index], + handles_left[insertion_point.next_index], + positions[insertion_point.next_index], + insertion_point.parameter); +} + +/* -------------------------------------------------------------------- + * Sample single point. 
+ */ + +template<typename T> +static void sample_linear(const Span<T> src_data, + MutableSpan<T> dst_data, + const IndexRange dst_range, + const bke::curves::CurvePoint sample_point) +{ + BLI_assert(dst_range.size() == 1); + if (sample_point.is_controlpoint()) { + /* Resolves cases where the source curve consist of a single control point. */ + const int index = sample_point.parameter == 1.0 ? sample_point.next_index : sample_point.index; + dst_data[dst_range.first()] = src_data[index]; + } + else { + dst_data[dst_range.first()] = attribute_math::mix2( + sample_point.parameter, src_data[sample_point.index], src_data[sample_point.next_index]); + } +} + +template<typename T> +static void sample_catmull_rom(const Span<T> src_data, + MutableSpan<T> dst_data, + const IndexRange dst_range, + const bke::curves::CurvePoint sample_point, + const bool src_cyclic) +{ + BLI_assert(dst_range.size() == 1); + if (sample_point.is_controlpoint()) { + /* Resolves cases where the source curve consist of a single control point. */ + const int index = sample_point.parameter == 1.0 ? sample_point.next_index : sample_point.index; + dst_data[dst_range.first()] = src_data[index]; + } + else { + dst_data[dst_range.first()] = interpolate_catmull_rom(src_data, sample_point, src_cyclic); + } +} + +static void sample_bezier(const Span<float3> src_positions, + const Span<float3> src_handles_l, + const Span<float3> src_handles_r, + const Span<int8_t> src_types_l, + const Span<int8_t> src_types_r, + MutableSpan<float3> dst_positions, + MutableSpan<float3> dst_handles_l, + MutableSpan<float3> dst_handles_r, + MutableSpan<int8_t> dst_types_l, + MutableSpan<int8_t> dst_types_r, + const IndexRange dst_range, + const bke::curves::CurvePoint sample_point) +{ + BLI_assert(dst_range.size() == 1); + if (sample_point.is_controlpoint()) { + /* Resolves cases where the source curve consist of a single control point. */ + const int index = sample_point.parameter == 1.0 ? 
sample_point.next_index : sample_point.index; + dst_positions[dst_range.first()] = src_positions[index]; + dst_handles_l[dst_range.first()] = src_handles_l[index]; + dst_handles_r[dst_range.first()] = src_handles_r[index]; + dst_types_l[dst_range.first()] = src_types_l[index]; + dst_types_r[dst_range.first()] = src_types_r[index]; + } + else { + bke::curves::bezier::Insertion insertion_point = knot_insert_bezier( + src_positions, src_handles_l, src_handles_r, sample_point); + dst_positions[dst_range.first()] = insertion_point.position; + dst_handles_l[dst_range.first()] = insertion_point.left_handle; + dst_handles_r[dst_range.first()] = insertion_point.right_handle; + dst_types_l[dst_range.first()] = BEZIER_HANDLE_FREE; + dst_types_r[dst_range.first()] = BEZIER_HANDLE_FREE; + } +} + +/* -------------------------------------------------------------------- + * Sample curve interval (trim). + */ + +/** + * Sample source curve data in the interval defined by the points [start_point, end_point]. + * Uses linear interpolation to compute the endpoints. + * + * \tparam include_start_point If False, the 'start_point' point sample will not be copied + * and not accounted for in the destination range. + * \param src_data: Source to sample from. + * \param dst_data: Destination to write samples to. + * \param src_range: Interval within [start_point, end_point] to copy from the source point domain. + * \param dst_range: Interval to copy point data to in the destination buffer. + * \param start_point: Point on the source curve to start sampling from. + * \param end_point: Last point to sample in the source curve. 
+ */ +template<typename T, bool include_start_point = true> +static void sample_interval_linear(const Span<T> src_data, + MutableSpan<T> dst_data, + const bke::curves::IndexRangeCyclic src_range, + const IndexRange dst_range, + const bke::curves::CurvePoint start_point, + const bke::curves::CurvePoint end_point) +{ + int64_t src_index = src_range.first(); + int64_t dst_index = dst_range.first(); + + if (start_point.is_controlpoint()) { + /* 'start_point' is included in the copy iteration. */ + if constexpr (!include_start_point) { + /* Skip first. */ + ++src_index; + } + } + else if constexpr (!include_start_point) { + /* Do nothing (excluded). */ + } + else { + /* General case, sample 'start_point' */ + dst_data[dst_index] = attribute_math::mix2( + start_point.parameter, src_data[start_point.index], src_data[start_point.next_index]); + ++dst_index; + } + + dst_index = copy_point_data_between_endpoints( + src_data, dst_data, src_range, src_index, dst_index); + + /* Handle last case */ + if (end_point.is_controlpoint()) { + /* 'end_point' is included in the copy iteration. */ + } + else { + dst_data[dst_index] = attribute_math::mix2( + end_point.parameter, src_data[end_point.index], src_data[end_point.next_index]); +#ifdef DEBUG + ++dst_index; +#endif + } + BLI_assert(dst_index == dst_range.one_after_last()); +} + +template<typename T, bool include_start_point = true> +static void sample_interval_catmull_rom(const Span<T> src_data, + MutableSpan<T> dst_data, + const bke::curves::IndexRangeCyclic src_range, + const IndexRange dst_range, + const bke::curves::CurvePoint start_point, + const bke::curves::CurvePoint end_point, + const bool src_cyclic) +{ + int64_t src_index = src_range.first(); + int64_t dst_index = dst_range.first(); + + if (start_point.is_controlpoint()) { + /* 'start_point' is included in the copy iteration. */ + if constexpr (!include_start_point) { + /* Skip first. 
*/ + ++src_index; + } + } + else if constexpr (!include_start_point) { + /* Do nothing (excluded). */ + } + else { + /* General case, sample 'start_point' */ + dst_data[dst_index] = interpolate_catmull_rom(src_data, start_point, src_cyclic); + ++dst_index; + } + + dst_index = copy_point_data_between_endpoints( + src_data, dst_data, src_range, src_index, dst_index); + + /* Handle last case */ + if (end_point.is_controlpoint()) { + /* 'end_point' is included in the copy iteration. */ + } + else { + dst_data[dst_index] = interpolate_catmull_rom(src_data, end_point, src_cyclic); +#ifdef DEBUG + ++dst_index; +#endif + } + BLI_assert(dst_index == dst_range.one_after_last()); +} + +template<bool include_start_point = true> +static void sample_interval_bezier(const Span<float3> src_positions, + const Span<float3> src_handles_l, + const Span<float3> src_handles_r, + const Span<int8_t> src_types_l, + const Span<int8_t> src_types_r, + MutableSpan<float3> dst_positions, + MutableSpan<float3> dst_handles_l, + MutableSpan<float3> dst_handles_r, + MutableSpan<int8_t> dst_types_l, + MutableSpan<int8_t> dst_types_r, + const bke::curves::IndexRangeCyclic src_range, + const IndexRange dst_range, + const bke::curves::CurvePoint start_point, + const bke::curves::CurvePoint end_point) +{ + bke::curves::bezier::Insertion start_point_insert; + int64_t src_index = src_range.first(); + int64_t dst_index = dst_range.first(); + + bool start_point_trimmed = false; + if (start_point.is_controlpoint()) { + /* The 'start_point' control point is included in the copy iteration. */ + if constexpr (!include_start_point) { + ++src_index; /* Skip first! */ + } + } + else if constexpr (!include_start_point) { + /* Do nothing, 'start_point' is excluded. */ + } + else { + /* General case, sample 'start_point'. 
*/ + start_point_insert = knot_insert_bezier( + src_positions, src_handles_l, src_handles_r, start_point); + dst_positions[dst_range.first()] = start_point_insert.position; + dst_handles_l[dst_range.first()] = start_point_insert.left_handle; + dst_handles_r[dst_range.first()] = start_point_insert.right_handle; + dst_types_l[dst_range.first()] = src_types_l[start_point.index]; + dst_types_r[dst_range.first()] = src_types_r[start_point.index]; + + start_point_trimmed = true; + ++dst_index; + } + + /* Copy point data between the 'start_point' and 'end_point'. */ + int64_t increment = src_range.cycles() ? src_range.size_before_loop() : + src_range.one_after_last() - src_range.first(); + + const IndexRange dst_range_to_end(dst_index, increment); + const IndexRange src_range_to_end(src_index, increment); + dst_positions.slice(dst_range_to_end).copy_from(src_positions.slice(src_range_to_end)); + dst_handles_l.slice(dst_range_to_end).copy_from(src_handles_l.slice(src_range_to_end)); + dst_handles_r.slice(dst_range_to_end).copy_from(src_handles_r.slice(src_range_to_end)); + dst_types_l.slice(dst_range_to_end).copy_from(src_types_l.slice(src_range_to_end)); + dst_types_r.slice(dst_range_to_end).copy_from(src_types_r.slice(src_range_to_end)); + dst_index += increment; + + increment = src_range.size_after_loop(); + if (src_range.cycles() && increment > 0) { + const IndexRange dst_range_looped(dst_index, increment); + const IndexRange src_range_looped(src_range.curve_range().first(), increment); + dst_positions.slice(dst_range_looped).copy_from(src_positions.slice(src_range_looped)); + dst_handles_l.slice(dst_range_looped).copy_from(src_handles_l.slice(src_range_looped)); + dst_handles_r.slice(dst_range_looped).copy_from(src_handles_r.slice(src_range_looped)); + dst_types_l.slice(dst_range_looped).copy_from(src_types_l.slice(src_range_looped)); + dst_types_r.slice(dst_range_looped).copy_from(src_types_r.slice(src_range_looped)); + dst_index += increment; + } + + if 
(start_point_trimmed) { + dst_handles_l[dst_range.first() + 1] = start_point_insert.handle_next; + /* No need to set handle type (remains the same)! */ + } + + /* Handle 'end_point' */ + bke::curves::bezier::Insertion end_point_insert; + if (end_point.is_controlpoint()) { + /* Do nothing, the 'end_point' control point is included in the copy iteration. */ + } + else { + /* Trimmed in both ends within the same (and only) segment! Ensure both end points is not a + * loop.*/ + if (start_point_trimmed && start_point.index == end_point.index && + start_point.parameter <= end_point.parameter) { + + /* Copy following segment control point. */ + dst_positions[dst_index] = src_positions[end_point.next_index]; + dst_handles_r[dst_index] = src_handles_r[end_point.next_index]; + + /* Compute interpolation in the result curve. */ + const float parameter = (end_point.parameter - start_point.parameter) / + (1.0f - start_point.parameter); + end_point_insert = knot_insert_bezier( + dst_positions, + dst_handles_l, + dst_handles_r, + {(int)dst_range.first(), (int)(dst_range.first() + 1), parameter}); + } + else { + /* General case, compute the insertion point. */ + end_point_insert = knot_insert_bezier( + src_positions, src_handles_l, src_handles_r, end_point); + } + + dst_handles_r[dst_index - 1] = end_point_insert.handle_prev; + dst_types_r[dst_index - 1] = src_types_l[end_point.index]; + + dst_handles_l[dst_index] = end_point_insert.left_handle; + dst_handles_r[dst_index] = end_point_insert.right_handle; + dst_positions[dst_index] = end_point_insert.position; + dst_types_l[dst_index] = src_types_l[end_point.next_index]; + dst_types_r[dst_index] = src_types_r[end_point.next_index]; +#ifdef DEBUG + ++dst_index; +#endif // DEBUG + } + BLI_assert(dst_index == dst_range.one_after_last()); +} + +/* -------------------------------------------------------------------- + * Convert to point curves. 
+ */ + +static void convert_point_polygonal_curves( + const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> sample_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_positions = src_curves.positions(); + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + + threading::parallel_for(selection.index_range(), 4096, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + + sample_linear<float3>( + src_positions.slice(src_points), dst_positions, dst_points, sample_points[curve_i]); + + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute_math::convert_to_static_type(attribute.meta_data.data_type, [&](auto dummy) { + using T = decltype(dummy); + sample_linear<T>(attribute.src.template typed<T>().slice(src_points), + attribute.dst.span.typed<T>(), + dst_curves.points_for_curve(curve_i), + sample_points[curve_i]); + }); + } + } + }); + + fill_bezier_data(dst_curves, selection); + fill_nurbs_data(dst_curves, selection); +} + +static void convert_point_catmull_curves( + const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> sample_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_positions = src_curves.positions(); + const VArray<bool> src_cyclic = src_curves.cyclic(); + + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + + threading::parallel_for(selection.index_range(), 4096, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + const IndexRange dst_points = 
dst_curves.points_for_curve(curve_i); + + sample_catmull_rom<float3>(src_positions.slice(src_points), + dst_positions, + dst_points, + sample_points[curve_i], + src_cyclic[curve_i]); + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute_math::convert_to_static_type(attribute.meta_data.data_type, [&](auto dummy) { + using T = decltype(dummy); + sample_catmull_rom<T>(attribute.src.template typed<T>().slice(src_points), + attribute.dst.span.typed<T>(), + dst_points, + sample_points[curve_i], + src_cyclic[curve_i]); + }); + } + } + }); + fill_bezier_data(dst_curves, selection); + fill_nurbs_data(dst_curves, selection); +} + +static void convert_point_bezier_curves( + const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> sample_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_positions = src_curves.positions(); + const VArraySpan<int8_t> src_types_l{src_curves.handle_types_left()}; + const VArraySpan<int8_t> src_types_r{src_curves.handle_types_right()}; + const Span<float3> src_handles_l = src_curves.handle_positions_left(); + const Span<float3> src_handles_r = src_curves.handle_positions_right(); + + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + MutableSpan<int8_t> dst_types_l = dst_curves.handle_types_left_for_write(); + MutableSpan<int8_t> dst_types_r = dst_curves.handle_types_right_for_write(); + MutableSpan<float3> dst_handles_l = dst_curves.handle_positions_left_for_write(); + MutableSpan<float3> dst_handles_r = dst_curves.handle_positions_right_for_write(); + + threading::parallel_for(selection.index_range(), 4096, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + + 
sample_bezier(src_positions.slice(src_points), + src_handles_l.slice(src_points), + src_handles_r.slice(src_points), + src_types_l.slice(src_points), + src_types_r.slice(src_points), + dst_positions, + dst_handles_l, + dst_handles_r, + dst_types_l, + dst_types_r, + dst_points, + sample_points[curve_i]); + + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute_math::convert_to_static_type(attribute.meta_data.data_type, [&](auto dummy) { + using T = decltype(dummy); + sample_linear<T>(attribute.src.template typed<T>().slice(src_points), + attribute.dst.span.typed<T>(), + dst_points, + sample_points[curve_i]); + }); + } + } + }); + fill_nurbs_data(dst_curves, selection); +} + +static void convert_point_evaluated_curves( + const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> evaluated_sample_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_eval_positions = src_curves.evaluated_positions(); + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + + threading::parallel_for(selection.index_range(), 4096, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + const IndexRange src_evaluated_points = src_curves.evaluated_points_for_curve(curve_i); + + sample_linear<float3>(src_eval_positions.slice(src_evaluated_points), + dst_positions, + dst_points, + evaluated_sample_points[curve_i]); + + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute_math::convert_to_static_type(attribute.meta_data.data_type, [&](auto dummy) { + using T = decltype(dummy); + GArray evaluated_data(CPPType::get<T>(), src_evaluated_points.size()); + GMutableSpan evaluated_span = evaluated_data.as_mutable_span(); + src_curves.interpolate_to_evaluated( + curve_i, 
attribute.src.slice(src_curves.points_for_curve(curve_i)), evaluated_span); + sample_linear<T>(evaluated_span.typed<T>(), + attribute.dst.span.typed<T>(), + dst_points, + evaluated_sample_points[curve_i]); + }); + } + } + }); + fill_bezier_data(dst_curves, selection); + fill_nurbs_data(dst_curves, selection); +} + +/* -------------------------------------------------------------------- + * Trim curves. + */ + +static void trim_attribute_linear(const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> start_points, + const Span<bke::curves::CurvePoint> end_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute_math::convert_to_static_type(attribute.meta_data.data_type, [&](auto dummy) { + using T = decltype(dummy); + + threading::parallel_for(selection.index_range(), 512, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + + bke::curves::IndexRangeCyclic src_sample_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_points.size()}); + sample_interval_linear<T>(attribute.src.template typed<T>().slice(src_points), + attribute.dst.span.typed<T>(), + src_sample_range, + dst_curves.points_for_curve(curve_i), + start_points[curve_i], + end_points[curve_i]); + } + }); + }); + } +} + +static void trim_polygonal_curves(const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> start_points, + const Span<bke::curves::CurvePoint> end_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_positions = src_curves.positions(); + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + + 
threading::parallel_for(selection.index_range(), 512, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + + bke::curves::IndexRangeCyclic src_sample_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_points.size()}); + sample_interval_linear<float3>(src_positions.slice(src_points), + dst_positions, + src_sample_range, + dst_points, + start_points[curve_i], + end_points[curve_i]); + } + }); + fill_bezier_data(dst_curves, selection); + fill_nurbs_data(dst_curves, selection); + trim_attribute_linear( + src_curves, dst_curves, selection, start_points, end_points, transfer_attributes); +} + +static void trim_catmull_rom_curves(const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> start_points, + const Span<bke::curves::CurvePoint> end_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_positions = src_curves.positions(); + const VArray<bool> src_cyclic = src_curves.cyclic(); + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + + threading::parallel_for(selection.index_range(), 512, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + + bke::curves::IndexRangeCyclic src_sample_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_points.size()}); + sample_interval_catmull_rom<float3>(src_positions.slice(src_points), + dst_positions, + src_sample_range, + dst_points, + start_points[curve_i], + end_points[curve_i], + src_cyclic[curve_i]); + } + }); + fill_bezier_data(dst_curves, selection); + 
fill_nurbs_data(dst_curves, selection); + + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute_math::convert_to_static_type(attribute.meta_data.data_type, [&](auto dummy) { + using T = decltype(dummy); + + threading::parallel_for(selection.index_range(), 512, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + + bke::curves::IndexRangeCyclic src_sample_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_points.size()}); + sample_interval_catmull_rom<T>(attribute.src.template typed<T>().slice(src_points), + attribute.dst.span.typed<T>(), + src_sample_range, + dst_points, + start_points[curve_i], + end_points[curve_i], + src_cyclic[curve_i]); + } + }); + }); + } +} + +static void trim_bezier_curves(const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> start_points, + const Span<bke::curves::CurvePoint> end_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_positions = src_curves.positions(); + const VArraySpan<int8_t> src_types_l{src_curves.handle_types_left()}; + const VArraySpan<int8_t> src_types_r{src_curves.handle_types_right()}; + const Span<float3> src_handles_l = src_curves.handle_positions_left(); + const Span<float3> src_handles_r = src_curves.handle_positions_right(); + + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + MutableSpan<int8_t> dst_types_l = dst_curves.handle_types_left_for_write(); + MutableSpan<int8_t> dst_types_r = dst_curves.handle_types_right_for_write(); + MutableSpan<float3> dst_handles_l = dst_curves.handle_positions_left_for_write(); + MutableSpan<float3> dst_handles_r = dst_curves.handle_positions_right_for_write(); + + 
threading::parallel_for(selection.index_range(), 512, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange src_points = src_curves.points_for_curve(curve_i); + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + + bke::curves::IndexRangeCyclic src_sample_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_points.size()}); + sample_interval_bezier(src_positions.slice(src_points), + src_handles_l.slice(src_points), + src_handles_r.slice(src_points), + src_types_l.slice(src_points), + src_types_r.slice(src_points), + dst_positions, + dst_handles_l, + dst_handles_r, + dst_types_l, + dst_types_r, + src_sample_range, + dst_points, + start_points[curve_i], + end_points[curve_i]); + } + }); + fill_nurbs_data(dst_curves, selection); + trim_attribute_linear( + src_curves, dst_curves, selection, start_points, end_points, transfer_attributes); +} + +static void trim_evaluated_curves(const bke::CurvesGeometry &src_curves, + bke::CurvesGeometry &dst_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> start_points, + const Span<bke::curves::CurvePoint> end_points, + MutableSpan<bke::AttributeTransferData> transfer_attributes) +{ + const Span<float3> src_eval_positions = src_curves.evaluated_positions(); + MutableSpan<float3> dst_positions = dst_curves.positions_for_write(); + + threading::parallel_for(selection.index_range(), 512, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + const IndexRange dst_points = dst_curves.points_for_curve(curve_i); + const IndexRange src_evaluated_points = src_curves.evaluated_points_for_curve(curve_i); + + bke::curves::IndexRangeCyclic src_sample_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_evaluated_points.size()}); + sample_interval_linear<float3>(src_eval_positions.slice(src_evaluated_points), + dst_positions, + src_sample_range, 
+ dst_points, + start_points[curve_i], + end_points[curve_i]); + } + }); + fill_bezier_data(dst_curves, selection); + fill_nurbs_data(dst_curves, selection); + + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute_math::convert_to_static_type(attribute.meta_data.data_type, [&](auto dummy) { + using T = decltype(dummy); + + threading::parallel_for(selection.index_range(), 512, [&](const IndexRange range) { + for (const int64_t curve_i : selection.slice(range)) { + /* Interpolate onto the evaluated point domain and sample the evaluated domain. */ + const IndexRange src_evaluated_points = src_curves.evaluated_points_for_curve(curve_i); + GArray evaluated_data(CPPType::get<T>(), src_evaluated_points.size()); + GMutableSpan evaluated_span = evaluated_data.as_mutable_span(); + src_curves.interpolate_to_evaluated( + curve_i, attribute.src.slice(src_curves.points_for_curve(curve_i)), evaluated_span); + bke::curves::IndexRangeCyclic src_sample_range = get_range_between_endpoints( + start_points[curve_i], end_points[curve_i], {0, src_evaluated_points.size()}); + sample_interval_linear<T>(evaluated_span.typed<T>(), + attribute.dst.span.typed<T>(), + src_sample_range, + dst_curves.points_for_curve(curve_i), + start_points[curve_i], + end_points[curve_i]); + } + }); + }); + } +} + +bke::CurvesGeometry trim_curves(const bke::CurvesGeometry &src_curves, + const IndexMask selection, + const Span<bke::curves::CurvePoint> start_points, + const Span<bke::curves::CurvePoint> end_points) +{ + BLI_assert(selection.size() > 0); + BLI_assert(selection.last() <= start_points.size()); + BLI_assert(start_points.size() == end_points.size()); + + src_curves.ensure_evaluated_offsets(); + Vector<int64_t> inverse_selection_indices; + const IndexMask inverse_selection = selection.invert(src_curves.curves_range(), + inverse_selection_indices); + + /* Create trim curves. 
*/ + bke::CurvesGeometry dst_curves(0, src_curves.curves_num()); + determine_copyable_curve_types(src_curves, + dst_curves, + selection, + inverse_selection, + (CurveTypeMask)(CURVE_TYPE_MASK_CATMULL_ROM | + CURVE_TYPE_MASK_POLY | CURVE_TYPE_MASK_BEZIER)); + + Vector<int64_t> curve_indices; + Vector<int64_t> point_curve_indices; + compute_trim_result_offsets(src_curves, + selection, + inverse_selection, + start_points, + end_points, + dst_curves.curve_types(), + dst_curves.offsets_for_write(), + curve_indices, + point_curve_indices); + /* Finalize by updating the geometry container. */ + dst_curves.resize(dst_curves.offsets().last(), dst_curves.curves_num()); + dst_curves.update_curve_types(); + + /* Populate curve domain. */ + const bke::AttributeAccessor src_attributes = src_curves.attributes(); + bke::MutableAttributeAccessor dst_attributes = dst_curves.attributes_for_write(); + bke::copy_attribute_domain(src_attributes, + dst_attributes, + selection, + ATTR_DOMAIN_CURVE, + {"cyclic", "curve_type", "nurbs_order", "knots_mode"}); + + /* Fetch custom point domain attributes for transfer (copy). */ + Vector<bke::AttributeTransferData> transfer_attributes = bke::retrieve_attributes_for_transfer( + src_attributes, + dst_attributes, + ATTR_DOMAIN_MASK_POINT, + {"position", + "handle_left", + "handle_right", + "handle_type_left", + "handle_type_right", + "nurbs_weight"}); + + auto trim_catmull = [&](IndexMask selection) { + trim_catmull_rom_curves( + src_curves, dst_curves, selection, start_points, end_points, transfer_attributes); + }; + auto trim_poly = [&](IndexMask selection) { + trim_polygonal_curves( + src_curves, dst_curves, selection, start_points, end_points, transfer_attributes); + }; + auto trim_bezier = [&](IndexMask selection) { + trim_bezier_curves( + src_curves, dst_curves, selection, start_points, end_points, transfer_attributes); + }; + auto trim_evaluated = [&](IndexMask selection) { + /* Ensure evaluated positions are available. 
*/ + src_curves.ensure_evaluated_offsets(); + src_curves.evaluated_positions(); + trim_evaluated_curves( + src_curves, dst_curves, selection, start_points, end_points, transfer_attributes); + }; + + auto single_point_catmull = [&](IndexMask selection) { + convert_point_catmull_curves( + src_curves, dst_curves, selection, start_points, transfer_attributes); + }; + auto single_point_poly = [&](IndexMask selection) { + convert_point_polygonal_curves( + src_curves, dst_curves, selection, start_points, transfer_attributes); + }; + auto single_point_bezier = [&](IndexMask selection) { + convert_point_bezier_curves( + src_curves, dst_curves, selection, start_points, transfer_attributes); + }; + auto single_point_evaluated = [&](IndexMask selection) { + convert_point_evaluated_curves( + src_curves, dst_curves, selection, start_points, transfer_attributes); + }; + + /* Populate point domain. */ + bke::curves::foreach_curve_by_type(src_curves.curve_types(), + src_curves.curve_type_counts(), + curve_indices.as_span(), + trim_catmull, + trim_poly, + trim_bezier, + trim_evaluated); + + if (point_curve_indices.size()) { + bke::curves::foreach_curve_by_type(src_curves.curve_types(), + src_curves.curve_type_counts(), + point_curve_indices.as_span(), + single_point_catmull, + single_point_poly, + single_point_bezier, + single_point_evaluated); + } + /* Cleanup/close context */ + for (bke::AttributeTransferData &attribute : transfer_attributes) { + attribute.dst.finish(); + } + + /* Copy unselected */ + if (!inverse_selection.is_empty()) { + bke::copy_attribute_domain( + src_attributes, dst_attributes, inverse_selection, ATTR_DOMAIN_CURVE); + /* Trim curves are no longer cyclic. If all curves are trimmed, this will be set implicitly. */ + dst_curves.cyclic_for_write().fill_indices(selection, false); + + /* Copy point domain. 
*/ + for (auto &attribute : bke::retrieve_attributes_for_transfer( + src_attributes, dst_attributes, ATTR_DOMAIN_MASK_POINT)) { + bke::curves::copy_point_data( + src_curves, dst_curves, inverse_selection, attribute.src, attribute.dst.span); + attribute.dst.finish(); + } + } + + dst_curves.tag_topology_changed(); + return dst_curves; +} + +} // namespace blender::geometry diff --git a/source/blender/geometry/intern/uv_parametrizer.cc b/source/blender/geometry/intern/uv_parametrizer.cc index 4f763b09bef..f074febe23a 100644 --- a/source/blender/geometry/intern/uv_parametrizer.cc +++ b/source/blender/geometry/intern/uv_parametrizer.cc @@ -307,12 +307,70 @@ static float p_vec2_angle(const float v1[2], const float v2[2], const float v3[2 { return angle_v2v2v2(v1, v2, v3); } + +/* Angles close to 0 or 180 degrees cause rows filled with zeros in the linear_solver. + * The matrix will then be rank deficient and / or have poor conditioning. + * => Reduce the maximum angle to 179 degrees, and spread the remainder to the other angles. + */ +static void fix_large_angle(const float v_fix[3], + const float v1[3], + const float v2[3], + float *r_fix, + float *r_a1, + float *r_a2) +{ + const float max_angle = (float)M_PI * (179.0f / 180.0f); + const float fix_amount = *r_fix - max_angle; + if (fix_amount < 0.0f) { + return; /* angle is reasonable, i.e. less than 179 degrees. */ + } + + /* The triangle is probably degenerate, or close to it. 
+ * Without loss of generality, transform the triangle such that + * v_fix == { 0, s}, *r_fix = 180 degrees + * v1 == {-x1, 0}, *r_a1 = 0 + * v2 == { x2, 0}, *r_a2 = 0 + * + * With `s = 0`, `x1 > 0`, `x2 > 0` + * + * Now make `s` a small number and do some math: + * tan(*r_a1) = s / x1 + * tan(*r_a2) = s / x2 + * + * Remember that `tan = sin / cos`, `sin(s) ~= s` and `cos(s) = 1` + * + * Rearrange to obtain: + * *r_a1 = fix_amount * x2 / (x1 + x2) + * *r_a2 = fix_amount * x1 / (x1 + x2) + */ + + const float dist_v1 = len_v3v3(v_fix, v1); + const float dist_v2 = len_v3v3(v_fix, v2); + const float sum = dist_v1 + dist_v2; + const float weight = (sum > 1e-20f) ? dist_v2 / sum : 0.5f; + + /* Ensure sum of angles in triangle is unchanged. */ + *r_fix -= fix_amount; + *r_a1 += fix_amount * weight; + *r_a2 += fix_amount * (1.0f - weight); +} + static void p_triangle_angles( const float v1[3], const float v2[3], const float v3[3], float *r_a1, float *r_a2, float *r_a3) { *r_a1 = p_vec_angle(v3, v1, v2); *r_a2 = p_vec_angle(v1, v2, v3); - *r_a3 = (float)M_PI - *r_a2 - *r_a1; + *r_a3 = p_vec_angle(v2, v3, v1); + + /* Fix for degenerate geometry e.g. v1 = sum(v2 + v3). See T100874 */ + fix_large_angle(v1, v2, v3, r_a1, r_a2, r_a3); + fix_large_angle(v2, v3, v1, r_a2, r_a3, r_a1); + fix_large_angle(v3, v1, v2, r_a3, r_a1, r_a2); + + /* Workaround for degenerate geometry, e.g. v1 == v2 == v3. 
*/ + *r_a1 = max_ff(*r_a1, 0.001f); + *r_a2 = max_ff(*r_a2, 0.001f); + *r_a3 = max_ff(*r_a3, 0.001f); } static void p_face_angles(PFace *f, float *r_a1, float *r_a2, float *r_a3) @@ -2266,7 +2324,6 @@ using PAbfSystem = struct PAbfSystem { float *bAlpha, *bTriangle, *bInterior; float *lambdaTriangle, *lambdaPlanar, *lambdaLength; float (*J2dt)[3], *bstar, *dstar; - float minangle, maxangle; }; static void p_abf_setup_system(PAbfSystem *sys) @@ -2294,9 +2351,6 @@ static void p_abf_setup_system(PAbfSystem *sys) for (i = 0; i < sys->ninterior; i++) { sys->lambdaLength[i] = 1.0; } - - sys->minangle = 1.0 * M_PI / 180.0; - sys->maxangle = (float)M_PI - sys->minangle; } static void p_abf_free_system(PAbfSystem *sys) @@ -2707,25 +2761,6 @@ static bool p_chart_abf_solve(PChart *chart) e3 = e2->next; p_face_angles(f, &a1, &a2, &a3); - if (a1 < sys.minangle) { - a1 = sys.minangle; - } - else if (a1 > sys.maxangle) { - a1 = sys.maxangle; - } - if (a2 < sys.minangle) { - a2 = sys.minangle; - } - else if (a2 > sys.maxangle) { - a2 = sys.maxangle; - } - if (a3 < sys.minangle) { - a3 = sys.minangle; - } - else if (a3 > sys.maxangle) { - a3 = sys.maxangle; - } - sys.alpha[e1->u.id] = sys.beta[e1->u.id] = a1; sys.alpha[e2->u.id] = sys.beta[e2->u.id] = a2; sys.alpha[e3->u.id] = sys.beta[e3->u.id] = a3; diff --git a/source/blender/gpu/GPU_buffers.h b/source/blender/gpu/GPU_buffers.h index d1d91cb7508..5cdc5f19540 100644 --- a/source/blender/gpu/GPU_buffers.h +++ b/source/blender/gpu/GPU_buffers.h @@ -49,7 +49,6 @@ typedef struct GPU_PBVH_Buffers GPU_PBVH_Buffers; */ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const struct Mesh *mesh, const struct MLoopTri *looptri, - const int *sculpt_face_sets, const int *face_indices, int face_indices_len); diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c index 8e3058b884d..78f595cbff2 100644 --- a/source/blender/gpu/intern/gpu_buffers.c +++ b/source/blender/gpu/intern/gpu_buffers.c @@ -210,13 +210,9 
@@ static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim) /** \name Mesh PBVH * \{ */ -static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt, - const bool *hide_vert, - const MLoop *mloop, - const int *sculpt_face_sets) +static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt, const bool *hide_poly) { - return (!paint_is_face_hidden(lt, hide_vert, mloop) && sculpt_face_sets && - sculpt_face_sets[lt->poly] > SCULPT_FACE_SET_NONE); + return !paint_is_face_hidden(lt, hide_poly); } void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id, @@ -233,8 +229,8 @@ void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id, GPUAttrRef vcol_refs[MAX_GPU_ATTR]; GPUAttrRef cd_uvs[MAX_GPU_ATTR]; - const bool *hide_vert = (const bool *)CustomData_get_layer_named( - &mesh->vdata, CD_PROP_BOOL, ".hide_vert"); + const bool *hide_poly = (const bool *)CustomData_get_layer_named( + &mesh->pdata, CD_PROP_BOOL, ".hide_poly"); const int *material_indices = (const int *)CustomData_get_layer_named( &mesh->pdata, CD_PROP_INT32, "material_index"); @@ -315,7 +311,7 @@ void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id, for (uint i = 0; i < buffers->face_indices_len; i++) { const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]]; - if (!gpu_pbvh_is_looptri_visible(lt, hide_vert, buffers->mloop, sculpt_face_sets)) { + if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) { continue; } @@ -355,7 +351,7 @@ void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id, buffers->mloop[lt->tri[2]].v, }; - if (!gpu_pbvh_is_looptri_visible(lt, hide_vert, buffers->mloop, sculpt_face_sets)) { + if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) { continue; } @@ -395,7 +391,7 @@ void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id, buffers->mloop[lt->tri[2]].v, }; - if (!gpu_pbvh_is_looptri_visible(lt, hide_vert, buffers->mloop, sculpt_face_sets)) { + if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) { continue; } @@ -459,7 +455,6 @@ void 
GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id, GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const Mesh *mesh, const MLoopTri *looptri, - const int *sculpt_face_sets, const int *face_indices, const int face_indices_len) { @@ -472,8 +467,8 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const Mesh *mesh, buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers"); - const bool *hide_vert = (bool *)CustomData_get_layer_named( - &mesh->vdata, CD_PROP_BOOL, ".hide_vert"); + const bool *hide_poly = (bool *)CustomData_get_layer_named( + &mesh->pdata, CD_PROP_BOOL, ".hide_poly"); /* smooth or flat for all */ buffers->smooth = polys[looptri[face_indices[0]].poly].flag & ME_SMOOTH; @@ -483,7 +478,7 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const Mesh *mesh, /* Count the number of visible triangles */ for (i = 0, tottri = 0; i < face_indices_len; i++) { const MLoopTri *lt = &looptri[face_indices[i]]; - if (gpu_pbvh_is_looptri_visible(lt, hide_vert, loops, sculpt_face_sets)) { + if (gpu_pbvh_is_looptri_visible(lt, hide_poly)) { int r_edges[3]; BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges); for (int j = 0; j < 3; j++) { @@ -516,7 +511,7 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const Mesh *mesh, const MLoopTri *lt = &looptri[face_indices[i]]; /* Skip hidden faces */ - if (!gpu_pbvh_is_looptri_visible(lt, hide_vert, loops, sculpt_face_sets)) { + if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) { continue; } diff --git a/source/blender/gpu/intern/gpu_codegen.cc b/source/blender/gpu/intern/gpu_codegen.cc index 0102b8db5b2..75e148e0a8f 100644 --- a/source/blender/gpu/intern/gpu_codegen.cc +++ b/source/blender/gpu/intern/gpu_codegen.cc @@ -11,6 +11,7 @@ #include "DNA_customdata_types.h" #include "DNA_image_types.h" +#include "DNA_material_types.h" #include "BLI_ghash.h" #include "BLI_hash_mm2a.h" @@ -20,6 +21,7 @@ #include "PIL_time.h" +#include "BKE_cryptomatte.hh" #include "BKE_material.h" #include "GPU_capabilities.h" @@ -238,6 +240,7 @@ class 
GPUCodegen { uint32_t hash_ = 0; BLI_HashMurmur2A hm2a_; ListBase ubo_inputs_ = {nullptr, nullptr}; + GPUInput *cryptomatte_input_ = nullptr; public: GPUCodegen(GPUMaterial *mat_, GPUNodeGraph *graph_) : mat(*mat_), graph(*graph_) @@ -262,11 +265,13 @@ class GPUCodegen { MEM_SAFE_FREE(output.displacement); MEM_SAFE_FREE(output.composite); MEM_SAFE_FREE(output.material_functions); + MEM_SAFE_FREE(cryptomatte_input_); delete create_info; BLI_freelistN(&ubo_inputs_); }; void generate_graphs(); + void generate_cryptomatte(); void generate_uniform_buffer(); void generate_attribs(); void generate_resources(); @@ -399,7 +404,12 @@ void GPUCodegen::generate_resources() ss << "struct NodeTree {\n"; LISTBASE_FOREACH (LinkData *, link, &ubo_inputs_) { GPUInput *input = (GPUInput *)(link->data); - ss << input->type << " u" << input->id << ";\n"; + if (input->source == GPU_SOURCE_CRYPTOMATTE) { + ss << input->type << " crypto_hash;\n"; + } + else { + ss << input->type << " u" << input->id << ";\n"; + } } ss << "};\n\n"; @@ -535,6 +545,24 @@ char *GPUCodegen::graph_serialize(eGPUNodeTag tree_tag) return eval_c_str; } +void GPUCodegen::generate_cryptomatte() +{ + cryptomatte_input_ = static_cast<GPUInput *>(MEM_callocN(sizeof(GPUInput), __func__)); + cryptomatte_input_->type = GPU_FLOAT; + cryptomatte_input_->source = GPU_SOURCE_CRYPTOMATTE; + + float material_hash = 0.0f; + Material *material = GPU_material_get_material(&mat); + if (material) { + blender::bke::cryptomatte::CryptomatteHash hash(material->id.name, + BLI_strnlen(material->id.name, MAX_NAME - 2)); + material_hash = hash.float_encoded(); + } + cryptomatte_input_->vec[0] = material_hash; + + BLI_addtail(&ubo_inputs_, BLI_genericNodeN(cryptomatte_input_)); +} + void GPUCodegen::generate_uniform_buffer() { /* Extract uniform inputs. 
*/ @@ -615,6 +643,7 @@ GPUPass *GPU_generate_pass(GPUMaterial *material, GPUCodegen codegen(material, graph); codegen.generate_graphs(); + codegen.generate_cryptomatte(); codegen.generate_uniform_buffer(); /* Cache lookup: Reuse shaders already compiled. */ diff --git a/source/blender/gpu/intern/gpu_node_graph.h b/source/blender/gpu/intern/gpu_node_graph.h index 08ff8bbef58..74afb721a1c 100644 --- a/source/blender/gpu/intern/gpu_node_graph.h +++ b/source/blender/gpu/intern/gpu_node_graph.h @@ -35,6 +35,7 @@ typedef enum eGPUDataSource { GPU_SOURCE_TEX, GPU_SOURCE_TEX_TILED_MAPPING, GPU_SOURCE_FUNCTION_CALL, + GPU_SOURCE_CRYPTOMATTE, } eGPUDataSource; typedef enum { diff --git a/source/blender/gpu/intern/gpu_shader_builder_stubs.cc b/source/blender/gpu/intern/gpu_shader_builder_stubs.cc index e15054bd045..db14d7fbeb9 100644 --- a/source/blender/gpu/intern/gpu_shader_builder_stubs.cc +++ b/source/blender/gpu/intern/gpu_shader_builder_stubs.cc @@ -136,9 +136,7 @@ eAttrDomain BKE_id_attribute_domain(const struct ID *UNUSED(id), /* -------------------------------------------------------------------- */ /** \name Stubs of BKE_paint.h * \{ */ -bool paint_is_face_hidden(const struct MLoopTri *UNUSED(lt), - const bool *UNUSED(hide_vert), - const struct MLoop *UNUSED(mloop)) +bool paint_is_face_hidden(const struct MLoopTri *UNUSED(lt), const bool *UNUSED(hide_poly)) { BLI_assert_unreachable(); return false; diff --git a/source/blender/imbuf/IMB_imbuf.h b/source/blender/imbuf/IMB_imbuf.h index 6881916d1d2..7e652e31506 100644 --- a/source/blender/imbuf/IMB_imbuf.h +++ b/source/blender/imbuf/IMB_imbuf.h @@ -809,7 +809,7 @@ bool imb_addrectImBuf(struct ImBuf *ibuf); */ void imb_freerectImBuf(struct ImBuf *ibuf); -bool imb_addrectfloatImBuf(struct ImBuf *ibuf); +bool imb_addrectfloatImBuf(struct ImBuf *ibuf, const unsigned int channels); /** * Any free `ibuf->rect` frees mipmaps to be sure, creation is in render on first request. 
*/ diff --git a/source/blender/imbuf/IMB_imbuf_types.h b/source/blender/imbuf/IMB_imbuf_types.h index 45d05e9b856..03bb11d0cf6 100644 --- a/source/blender/imbuf/IMB_imbuf_types.h +++ b/source/blender/imbuf/IMB_imbuf_types.h @@ -166,8 +166,6 @@ typedef enum eImBufFlags { * \{ */ typedef struct ImBuf { - struct ImBuf *next, *prev; /** < allow lists of #ImBufs, for caches or flip-books. */ - /* dimensions */ /** Width and Height of our image buffer. * Should be 'unsigned int' since most formats use this. diff --git a/source/blender/imbuf/intern/IMB_filetype.h b/source/blender/imbuf/intern/IMB_filetype.h index 9a0a6998fab..bd17316d173 100644 --- a/source/blender/imbuf/intern/IMB_filetype.h +++ b/source/blender/imbuf/intern/IMB_filetype.h @@ -264,6 +264,12 @@ struct ImBuf *imb_loadwebp(const unsigned char *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]); +struct ImBuf *imb_load_filepath_thumbnail_webp(const char *filepath, + const int flags, + const size_t max_thumb_size, + char colorspace[], + size_t *r_width, + size_t *r_height); bool imb_savewebp(struct ImBuf *ibuf, const char *name, int flags); /** \} */ diff --git a/source/blender/imbuf/intern/allocimbuf.c b/source/blender/imbuf/intern/allocimbuf.c index 8b9ad94de0c..42b587c3c81 100644 --- a/source/blender/imbuf/intern/allocimbuf.c +++ b/source/blender/imbuf/intern/allocimbuf.c @@ -258,7 +258,7 @@ bool addzbufImBuf(ImBuf *ibuf) IMB_freezbufImBuf(ibuf); - if ((ibuf->zbuf = imb_alloc_pixels(ibuf->x, ibuf->y, 1, sizeof(unsigned int), __func__))) { + if ((ibuf->zbuf = imb_alloc_pixels(ibuf->x, ibuf->y, 1, sizeof(uint), __func__))) { ibuf->mall |= IB_zbuf; ibuf->flags |= IB_zbuf; return true; @@ -309,7 +309,7 @@ bool imb_addencodedbufferImBuf(ImBuf *ibuf) bool imb_enlargeencodedbufferImBuf(ImBuf *ibuf) { - unsigned int newsize, encodedsize; + uint newsize, encodedsize; void *newbuffer; if (ibuf == NULL) { @@ -351,8 +351,7 @@ bool imb_enlargeencodedbufferImBuf(ImBuf *ibuf) return true; } -void 
*imb_alloc_pixels( - unsigned int x, unsigned int y, unsigned int channels, size_t typesize, const char *name) +void *imb_alloc_pixels(uint x, uint y, uint channels, size_t typesize, const char *name) { /* Protect against buffer overflow vulnerabilities from files specifying * a width and height that overflow and alloc too little memory. */ @@ -364,7 +363,7 @@ void *imb_alloc_pixels( return MEM_callocN(size, name); } -bool imb_addrectfloatImBuf(ImBuf *ibuf) +bool imb_addrectfloatImBuf(ImBuf *ibuf, const uint channels) { if (ibuf == NULL) { return false; @@ -374,8 +373,8 @@ bool imb_addrectfloatImBuf(ImBuf *ibuf) imb_freerectfloatImBuf(ibuf); /* frees mipmap too, hrm */ } - ibuf->channels = 4; - if ((ibuf->rect_float = imb_alloc_pixels(ibuf->x, ibuf->y, 4, sizeof(float), __func__))) { + ibuf->channels = channels; + if ((ibuf->rect_float = imb_alloc_pixels(ibuf->x, ibuf->y, channels, sizeof(float), __func__))) { ibuf->mall |= IB_rectfloat; ibuf->flags |= IB_rectfloat; return true; @@ -399,7 +398,7 @@ bool imb_addrectImBuf(ImBuf *ibuf) } ibuf->rect = NULL; - if ((ibuf->rect = imb_alloc_pixels(ibuf->x, ibuf->y, 4, sizeof(unsigned char), __func__))) { + if ((ibuf->rect = imb_alloc_pixels(ibuf->x, ibuf->y, 4, sizeof(uchar), __func__))) { ibuf->mall |= IB_rect; ibuf->flags |= IB_rect; if (ibuf->planes > 32) { @@ -412,8 +411,7 @@ bool imb_addrectImBuf(ImBuf *ibuf) return false; } -struct ImBuf *IMB_allocFromBufferOwn( - unsigned int *rect, float *rectf, unsigned int w, unsigned int h, unsigned int channels) +struct ImBuf *IMB_allocFromBufferOwn(uint *rect, float *rectf, uint w, uint h, uint channels) { ImBuf *ibuf = NULL; @@ -444,11 +442,8 @@ struct ImBuf *IMB_allocFromBufferOwn( return ibuf; } -struct ImBuf *IMB_allocFromBuffer(const unsigned int *rect, - const float *rectf, - unsigned int w, - unsigned int h, - unsigned int channels) +struct ImBuf *IMB_allocFromBuffer( + const uint *rect, const float *rectf, uint w, uint h, uint channels) { ImBuf *ibuf = NULL; @@ -488,8 
+483,7 @@ bool imb_addtilesImBuf(ImBuf *ibuf) } if (!ibuf->tiles) { - if ((ibuf->tiles = MEM_callocN(sizeof(unsigned int *) * ibuf->xtiles * ibuf->ytiles, - "imb_tiles"))) { + if ((ibuf->tiles = MEM_callocN(sizeof(uint *) * ibuf->xtiles * ibuf->ytiles, "imb_tiles"))) { ibuf->mall |= IB_tiles; } } @@ -497,7 +491,7 @@ bool imb_addtilesImBuf(ImBuf *ibuf) return (ibuf->tiles != NULL); } -ImBuf *IMB_allocImBuf(unsigned int x, unsigned int y, uchar planes, unsigned int flags) +ImBuf *IMB_allocImBuf(uint x, uint y, uchar planes, uint flags) { ImBuf *ibuf; @@ -513,8 +507,7 @@ ImBuf *IMB_allocImBuf(unsigned int x, unsigned int y, uchar planes, unsigned int return ibuf; } -bool IMB_initImBuf( - struct ImBuf *ibuf, unsigned int x, unsigned int y, unsigned char planes, unsigned int flags) +bool IMB_initImBuf(struct ImBuf *ibuf, uint x, uint y, uchar planes, uint flags) { memset(ibuf, 0, sizeof(ImBuf)); @@ -536,7 +529,7 @@ bool IMB_initImBuf( } if (flags & IB_rectfloat) { - if (imb_addrectfloatImBuf(ibuf) == false) { + if (imb_addrectfloatImBuf(ibuf, ibuf->channels) == false) { return false; } } @@ -678,7 +671,7 @@ size_t IMB_get_size_in_memory(ImBuf *ibuf) } if (ibuf->tiles) { - size += sizeof(unsigned int) * ibuf->ytiles * ibuf->xtiles; + size += sizeof(uint) * ibuf->ytiles * ibuf->xtiles; } return size; diff --git a/source/blender/imbuf/intern/anim_movie.c b/source/blender/imbuf/intern/anim_movie.c index 52ed68a1ff3..4e6a52f8464 100644 --- a/source/blender/imbuf/intern/anim_movie.c +++ b/source/blender/imbuf/intern/anim_movie.c @@ -97,9 +97,9 @@ static void free_anim_movie(struct anim *UNUSED(anim)) # define PATHSEPARATOR '/' #endif -static int an_stringdec(const char *string, char *head, char *tail, unsigned short *numlen) +static int an_stringdec(const char *string, char *head, char *tail, ushort *numlen) { - unsigned short len, nume, nums = 0; + ushort len, nume, nums = 0; short i; bool found = false; @@ -139,8 +139,7 @@ static int an_stringdec(const char *string, char 
*head, char *tail, unsigned sho return true; } -static void an_stringenc( - char *string, const char *head, const char *tail, unsigned short numlen, int pic) +static void an_stringenc(char *string, const char *head, const char *tail, ushort numlen, int pic) { BLI_path_sequence_encode(string, head, tail, numlen, pic); } @@ -454,7 +453,7 @@ static ImBuf *avi_fetchibuf(struct anim *anim, int position) lpbi = AVIStreamGetFrame(anim->pgf, position + AVIStreamStart(anim->pavi[anim->firstvideo])); if (lpbi) { ibuf = IMB_ibImageFromMemory( - (const unsigned char *)lpbi, 100, IB_rect, anim->colorspace, "<avi_fetchibuf>"); + (const uchar *)lpbi, 100, IB_rect, anim->colorspace, "<avi_fetchibuf>"); /* Oh brother... */ } } @@ -1568,7 +1567,7 @@ struct ImBuf *IMB_anim_absolute(struct anim *anim, { struct ImBuf *ibuf = NULL; char head[256], tail[256]; - unsigned short digits; + ushort digits; int pic; int filter_y; if (anim == NULL) { diff --git a/source/blender/imbuf/intern/cache.c b/source/blender/imbuf/intern/cache.c index 51f7dbdf41a..4e1563c62ab 100644 --- a/source/blender/imbuf/intern/cache.c +++ b/source/blender/imbuf/intern/cache.c @@ -81,11 +81,11 @@ static ImGlobalTileCache GLOBAL_CACHE; /** \name Hash Functions * \{ */ -static unsigned int imb_global_tile_hash(const void *gtile_p) +static uint imb_global_tile_hash(const void *gtile_p) { const ImGlobalTile *gtile = gtile_p; - return ((unsigned int)(intptr_t)gtile->ibuf) * 769 + gtile->tx * 53 + gtile->ty * 97; + return ((uint)(intptr_t)gtile->ibuf) * 769 + gtile->tx * 53 + gtile->ty * 97; } static bool imb_global_tile_cmp(const void *a_p, const void *b_p) @@ -96,11 +96,11 @@ static bool imb_global_tile_cmp(const void *a_p, const void *b_p) return ((a->ibuf != b->ibuf) || (a->tx != b->tx) || (a->ty != b->ty)); } -static unsigned int imb_thread_tile_hash(const void *ttile_p) +static uint imb_thread_tile_hash(const void *ttile_p) { const ImThreadTile *ttile = ttile_p; - return ((unsigned int)(intptr_t)ttile->ibuf) * 769 + 
ttile->tx * 53 + ttile->ty * 97; + return ((uint)(intptr_t)ttile->ibuf) * 769 + ttile->tx * 53 + ttile->ty * 97; } static bool imb_thread_tile_cmp(const void *a_p, const void *b_p) @@ -121,9 +121,9 @@ static void imb_global_cache_tile_load(ImGlobalTile *gtile) { ImBuf *ibuf = gtile->ibuf; int toffs = ibuf->xtiles * gtile->ty + gtile->tx; - unsigned int *rect; + uint *rect; - rect = MEM_callocN(sizeof(unsigned int) * ibuf->tilex * ibuf->tiley, "imb_tile"); + rect = MEM_callocN(sizeof(uint) * ibuf->tilex * ibuf->tiley, "imb_tile"); imb_loadtile(ibuf, gtile->tx, gtile->ty, rect); ibuf->tiles[toffs] = rect; } @@ -136,7 +136,7 @@ static void imb_global_cache_tile_unload(ImGlobalTile *gtile) MEM_freeN(ibuf->tiles[toffs]); ibuf->tiles[toffs] = NULL; - GLOBAL_CACHE.totmem -= sizeof(unsigned int) * ibuf->tilex * ibuf->tiley; + GLOBAL_CACHE.totmem -= sizeof(uint) * ibuf->tilex * ibuf->tiley; } void imb_tile_cache_tile_free(ImBuf *ibuf, int tx, int ty) @@ -343,7 +343,7 @@ static ImGlobalTile *imb_global_cache_get_tile(ImBuf *ibuf, BLI_addhead(&GLOBAL_CACHE.tiles, gtile); /* mark as being loaded and unlock to allow other threads to load too */ - GLOBAL_CACHE.totmem += sizeof(unsigned int) * ibuf->tilex * ibuf->tiley; + GLOBAL_CACHE.totmem += sizeof(uint) * ibuf->tilex * ibuf->tiley; BLI_mutex_unlock(&GLOBAL_CACHE.mutex); @@ -363,10 +363,7 @@ static ImGlobalTile *imb_global_cache_get_tile(ImBuf *ibuf, /** \name Per-Thread Cache * \{ */ -static unsigned int *imb_thread_cache_get_tile(ImThreadTileCache *cache, - ImBuf *ibuf, - int tx, - int ty) +static uint *imb_thread_cache_get_tile(ImThreadTileCache *cache, ImBuf *ibuf, int tx, int ty) { ImThreadTile *ttile, lookuptile; ImGlobalTile *gtile, *replacetile; @@ -418,7 +415,7 @@ static unsigned int *imb_thread_cache_get_tile(ImThreadTileCache *cache, return ibuf->tiles[toffs]; } -unsigned int *IMB_gettile(ImBuf *ibuf, int tx, int ty, int thread) +uint *IMB_gettile(ImBuf *ibuf, int tx, int ty, int thread) { return 
imb_thread_cache_get_tile(&GLOBAL_CACHE.thread_cache[thread + 1], ibuf, tx, ty); } @@ -427,7 +424,7 @@ void IMB_tiles_to_rect(ImBuf *ibuf) { ImBuf *mipbuf; ImGlobalTile *gtile; - unsigned int *to, *from; + uint *to, *from; int a, tx, ty, y, w, h; for (a = 0; a < ibuf->miptot; a++) { @@ -435,8 +432,7 @@ void IMB_tiles_to_rect(ImBuf *ibuf) /* don't call imb_addrectImBuf, it frees all mipmaps */ if (!mipbuf->rect) { - if ((mipbuf->rect = MEM_callocN(ibuf->x * ibuf->y * sizeof(unsigned int), - "imb_addrectImBuf"))) { + if ((mipbuf->rect = MEM_callocN(ibuf->x * ibuf->y * sizeof(uint), "imb_addrectImBuf"))) { mipbuf->mall |= IB_rect; mipbuf->flags |= IB_rect; } @@ -460,7 +456,7 @@ void IMB_tiles_to_rect(ImBuf *ibuf) h = (ty == mipbuf->ytiles - 1) ? mipbuf->y - ty * mipbuf->tiley : mipbuf->tiley; for (y = 0; y < h; y++) { - memcpy(to, from, sizeof(unsigned int) * w); + memcpy(to, from, sizeof(uint) * w); from += mipbuf->tilex; to += mipbuf->x; } diff --git a/source/blender/imbuf/intern/cineon/cineon_dpx.c b/source/blender/imbuf/intern/cineon/cineon_dpx.c index 1a99d2a34d9..3bff8184b19 100644 --- a/source/blender/imbuf/intern/cineon/cineon_dpx.c +++ b/source/blender/imbuf/intern/cineon/cineon_dpx.c @@ -21,11 +21,8 @@ #include "MEM_guardedalloc.h" -static struct ImBuf *imb_load_dpx_cineon(const unsigned char *mem, - size_t size, - int use_cineon, - int flags, - char colorspace[IM_MAX_SPACE]) +static struct ImBuf *imb_load_dpx_cineon( + const uchar *mem, size_t size, int use_cineon, int flags, char colorspace[IM_MAX_SPACE]) { ImBuf *ibuf; LogImageFile *image; @@ -74,7 +71,7 @@ static int imb_save_dpx_cineon(ImBuf *ibuf, const char *filepath, int use_cineon LogImageFile *logImage; float *fbuf; float *fbuf_ptr; - unsigned char *rect_ptr; + uchar *rect_ptr; int x, y, depth, bitspersample, rvalue; if (flags & IB_mem) { @@ -153,7 +150,7 @@ static int imb_save_dpx_cineon(ImBuf *ibuf, const char *filepath, int use_cineon for (y = 0; y < ibuf->y; y++) { for (x = 0; x < ibuf->x; x++) 
{ fbuf_ptr = fbuf + 4 * ((ibuf->y - y - 1) * ibuf->x + x); - rect_ptr = (unsigned char *)ibuf->rect + 4 * (y * ibuf->x + x); + rect_ptr = (uchar *)ibuf->rect + 4 * (y * ibuf->x + x); fbuf_ptr[0] = (float)rect_ptr[0] / 255.0f; fbuf_ptr[1] = (float)rect_ptr[1] / 255.0f; fbuf_ptr[2] = (float)rect_ptr[2] / 255.0f; @@ -173,15 +170,12 @@ bool imb_save_cineon(struct ImBuf *buf, const char *filepath, int flags) return imb_save_dpx_cineon(buf, filepath, 1, flags); } -bool imb_is_a_cineon(const unsigned char *buf, size_t size) +bool imb_is_a_cineon(const uchar *buf, size_t size) { return logImageIsCineon(buf, size); } -ImBuf *imb_load_cineon(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +ImBuf *imb_load_cineon(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { if (!imb_is_a_cineon(mem, size)) { return NULL; @@ -194,15 +188,12 @@ bool imb_save_dpx(struct ImBuf *buf, const char *filepath, int flags) return imb_save_dpx_cineon(buf, filepath, 0, flags); } -bool imb_is_a_dpx(const unsigned char *buf, size_t size) +bool imb_is_a_dpx(const uchar *buf, size_t size) { return logImageIsDpx(buf, size); } -ImBuf *imb_load_dpx(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +ImBuf *imb_load_dpx(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { if (!imb_is_a_dpx(mem, size)) { return NULL; diff --git a/source/blender/imbuf/intern/cineon/cineonlib.c b/source/blender/imbuf/intern/cineon/cineonlib.c index fa05f155b30..6417d92644f 100644 --- a/source/blender/imbuf/intern/cineon/cineonlib.c +++ b/source/blender/imbuf/intern/cineon/cineonlib.c @@ -121,13 +121,13 @@ static void fillCineonMainHeader(LogImageFile *cineon, /* we leave it blank */ } -LogImageFile *cineonOpen(const unsigned char *byteStuff, int fromMemory, size_t bufferSize) +LogImageFile *cineonOpen(const uchar *byteStuff, int fromMemory, size_t bufferSize) { CineonMainHeader header; LogImageFile *cineon = 
(LogImageFile *)MEM_mallocN(sizeof(LogImageFile), __func__); const char *filepath = (const char *)byteStuff; int i; - unsigned int dataOffset; + uint dataOffset; if (cineon == NULL) { if (verbose) { @@ -158,8 +158,8 @@ LogImageFile *cineonOpen(const unsigned char *byteStuff, int fromMemory, size_t cineon->memBufferSize = 0; } else { - cineon->memBuffer = (unsigned char *)byteStuff; - cineon->memCursor = (unsigned char *)byteStuff; + cineon->memBuffer = (uchar *)byteStuff; + cineon->memCursor = (uchar *)byteStuff; cineon->memBufferSize = bufferSize; } @@ -187,7 +187,7 @@ LogImageFile *cineonOpen(const unsigned char *byteStuff, int fromMemory, size_t else { if (verbose) { printf("Cineon: Bad magic number %lu in \"%s\".\n", - (unsigned long)header.fileHeader.magic_num, + (ulong)header.fileHeader.magic_num, byteStuff); } logImageClose(cineon); @@ -296,7 +296,7 @@ LogImageFile *cineonOpen(const unsigned char *byteStuff, int fromMemory, size_t } if (cineon->element[i].refHighData == CINEON_UNDEFINED_U32) { - cineon->element[i].refHighData = (unsigned int)cineon->element[i].maxValue; + cineon->element[i].refHighData = (uint)cineon->element[i].maxValue; } if (cineon->element[i].refLowQuantity == CINEON_UNDEFINED_R32 || @@ -353,7 +353,7 @@ LogImageFile *cineonCreate( { CineonMainHeader header; const char *shortFilename = NULL; - /* unsigned char pad[6044]; */ + /* uchar pad[6044]; */ LogImageFile *cineon = (LogImageFile *)MEM_mallocN(sizeof(LogImageFile), __func__); if (cineon == NULL) { diff --git a/source/blender/imbuf/intern/cineon/cineonlib.h b/source/blender/imbuf/intern/cineon/cineonlib.h index 13d40461728..ac0cc15590d 100644 --- a/source/blender/imbuf/intern/cineon/cineonlib.h +++ b/source/blender/imbuf/intern/cineon/cineonlib.h @@ -38,10 +38,10 @@ typedef struct { } CineonFileHeader; typedef struct { - unsigned char descriptor1; - unsigned char descriptor2; - unsigned char bits_per_sample; - unsigned char filler; + uchar descriptor1; + uchar descriptor2; + uchar 
bits_per_sample; + uchar filler; unsigned int pixels_per_line; unsigned int lines_per_image; unsigned int ref_low_data; @@ -51,8 +51,8 @@ typedef struct { } CineonElementHeader; typedef struct { - unsigned char orientation; - unsigned char elements_per_image; + uchar orientation; + uchar elements_per_image; unsigned short filler; CineonElementHeader element[8]; float white_point_x; @@ -65,10 +65,10 @@ typedef struct { float blue_primary_y; char label[200]; char reserved[28]; - unsigned char interleave; - unsigned char packing; - unsigned char data_sign; - unsigned char sense; + uchar interleave; + uchar packing; + uchar data_sign; + uchar sense; unsigned int line_padding; unsigned int element_padding; char reserved2[20]; @@ -90,10 +90,10 @@ typedef struct { } CineonOriginationHeader; typedef struct { - unsigned char film_code; - unsigned char film_type; - unsigned char edge_code_perforation_offset; - unsigned char filler; + uchar film_code; + uchar film_type; + uchar edge_code_perforation_offset; + uchar filler; unsigned int prefix; unsigned int count; char format[32]; @@ -112,7 +112,7 @@ typedef struct { } CineonMainHeader; void cineonSetVerbose(int); -LogImageFile *cineonOpen(const unsigned char *byteStuff, int fromMemory, size_t bufferSize); +LogImageFile *cineonOpen(const uchar *byteStuff, int fromMemory, size_t bufferSize); LogImageFile *cineonCreate( const char *filepath, int width, int height, int bitsPerSample, const char *creator); diff --git a/source/blender/imbuf/intern/cineon/dpxlib.c b/source/blender/imbuf/intern/cineon/dpxlib.c index 4c780032f0b..494bf37cfe7 100644 --- a/source/blender/imbuf/intern/cineon/dpxlib.c +++ b/source/blender/imbuf/intern/cineon/dpxlib.c @@ -119,7 +119,7 @@ static void fillDpxMainHeader(LogImageFile *dpx, header->televisionHeader.integration_times = swap_float(DPX_UNDEFINED_R32, dpx->isMSB); } -LogImageFile *dpxOpen(const unsigned char *byteStuff, int fromMemory, size_t bufferSize) +LogImageFile *dpxOpen(const uchar 
*byteStuff, int fromMemory, size_t bufferSize) { DpxMainHeader header; LogImageFile *dpx = (LogImageFile *)MEM_mallocN(sizeof(LogImageFile), __func__); @@ -155,8 +155,8 @@ LogImageFile *dpxOpen(const unsigned char *byteStuff, int fromMemory, size_t buf dpx->memBufferSize = 0; } else { - dpx->memBuffer = (unsigned char *)byteStuff; - dpx->memCursor = (unsigned char *)byteStuff; + dpx->memBuffer = (uchar *)byteStuff; + dpx->memCursor = (uchar *)byteStuff; dpx->memBufferSize = bufferSize; } @@ -320,7 +320,7 @@ LogImageFile *dpxOpen(const unsigned char *byteStuff, int fromMemory, size_t buf } if (dpx->element[i].refHighData == DPX_UNDEFINED_U32) { - dpx->element[i].refHighData = (unsigned int)dpx->element[i].maxValue; + dpx->element[i].refHighData = (uint)dpx->element[i].maxValue; } if (IS_DPX_UNDEFINED_R32(dpx->element[i].refLowQuantity)) { @@ -418,7 +418,7 @@ LogImageFile *dpxCreate(const char *filepath, { DpxMainHeader header; const char *shortFilename = NULL; - unsigned char pad[6044]; + uchar pad[6044]; LogImageFile *dpx = (LogImageFile *)MEM_mallocN(sizeof(LogImageFile), __func__); if (dpx == NULL) { diff --git a/source/blender/imbuf/intern/cineon/logImageCore.c b/source/blender/imbuf/intern/cineon/logImageCore.c index e693aa6f891..8188d0d04b9 100644 --- a/source/blender/imbuf/intern/cineon/logImageCore.c +++ b/source/blender/imbuf/intern/cineon/logImageCore.c @@ -81,29 +81,29 @@ void logImageSetVerbose(int verbosity) * IO stuff */ -int logImageIsDpx(const void *buffer, const unsigned int size) +int logImageIsDpx(const void *buffer, const uint size) { - unsigned int magicNum; + uint magicNum; if (size < sizeof(magicNum)) { return 0; } - magicNum = *(unsigned int *)buffer; + magicNum = *(uint *)buffer; return (magicNum == DPX_FILE_MAGIC || magicNum == swap_uint(DPX_FILE_MAGIC, 1)); } -int logImageIsCineon(const void *buffer, const unsigned int size) +int logImageIsCineon(const void *buffer, const uint size) { - unsigned int magicNum; + uint magicNum; if (size < 
sizeof(magicNum)) { return 0; } - magicNum = *(unsigned int *)buffer; + magicNum = *(uint *)buffer; return (magicNum == CINEON_FILE_MAGIC || magicNum == swap_uint(CINEON_FILE_MAGIC, 1)); } LogImageFile *logImageOpenFromFile(const char *filepath, int cineon) { - unsigned int magicNum; + uint magicNum; FILE *f = BLI_fopen(filepath, "rb"); (void)cineon; @@ -120,16 +120,16 @@ LogImageFile *logImageOpenFromFile(const char *filepath, int cineon) fclose(f); if (logImageIsDpx(&magicNum, sizeof(magicNum))) { - return dpxOpen((const unsigned char *)filepath, 0, 0); + return dpxOpen((const uchar *)filepath, 0, 0); } if (logImageIsCineon(&magicNum, sizeof(magicNum))) { - return cineonOpen((const unsigned char *)filepath, 0, 0); + return cineonOpen((const uchar *)filepath, 0, 0); } return NULL; } -LogImageFile *logImageOpenFromMemory(const unsigned char *buffer, unsigned int size) +LogImageFile *logImageOpenFromMemory(const uchar *buffer, uint size) { if (logImageIsDpx(buffer, size)) { return dpxOpen(buffer, 1, size); @@ -276,9 +276,9 @@ int logImageSetDataRGBA(LogImageFile *logImage, float *data, int dataIsLinearRGB static int logImageSetData8(LogImageFile *logImage, LogImageElement logElement, float *data) { size_t rowLength = getRowLength(logImage->width, logElement); - unsigned char *row; + uchar *row; - row = (unsigned char *)MEM_mallocN(rowLength, __func__); + row = (uchar *)MEM_mallocN(rowLength, __func__); if (row == NULL) { if (verbose) { printf("DPX/Cineon: Cannot allocate row.\n"); @@ -289,7 +289,7 @@ static int logImageSetData8(LogImageFile *logImage, LogImageElement logElement, for (size_t y = 0; y < logImage->height; y++) { for (size_t x = 0; x < logImage->width * logImage->depth; x++) { - row[x] = (unsigned char)float_uint(data[y * logImage->width * logImage->depth + x], 255); + row[x] = (uchar)float_uint(data[y * logImage->width * logImage->depth + x], 255); } if (logimage_fwrite(row, rowLength, 1, logImage) == 0) { @@ -307,10 +307,10 @@ static int 
logImageSetData8(LogImageFile *logImage, LogImageElement logElement, static int logImageSetData10(LogImageFile *logImage, LogImageElement logElement, float *data) { size_t rowLength = getRowLength(logImage->width, logElement); - unsigned int pixel, index; - unsigned int *row; + uint pixel, index; + uint *row; - row = (unsigned int *)MEM_mallocN(rowLength, __func__); + row = (uint *)MEM_mallocN(rowLength, __func__); if (row == NULL) { if (verbose) { printf("DPX/Cineon: Cannot allocate row.\n"); @@ -324,8 +324,7 @@ static int logImageSetData10(LogImageFile *logImage, LogImageElement logElement, pixel = 0; for (size_t x = 0; x < logImage->width * logImage->depth; x++) { - pixel |= (unsigned int)float_uint(data[y * logImage->width * logImage->depth + x], 1023) - << offset; + pixel |= (uint)float_uint(data[y * logImage->width * logImage->depth + x], 1023) << offset; offset -= 10; if (offset < 0) { row[index] = swap_uint(pixel, logImage->isMSB); @@ -353,9 +352,9 @@ static int logImageSetData10(LogImageFile *logImage, LogImageElement logElement, static int logImageSetData12(LogImageFile *logImage, LogImageElement logElement, float *data) { size_t rowLength = getRowLength(logImage->width, logElement); - unsigned short *row; + ushort *row; - row = (unsigned short *)MEM_mallocN(rowLength, __func__); + row = (ushort *)MEM_mallocN(rowLength, __func__); if (row == NULL) { if (verbose) { printf("DPX/Cineon: Cannot allocate row.\n"); @@ -366,7 +365,7 @@ static int logImageSetData12(LogImageFile *logImage, LogImageElement logElement, for (size_t y = 0; y < logImage->height; y++) { for (size_t x = 0; x < logImage->width * logImage->depth; x++) { row[x] = swap_ushort( - ((unsigned short)float_uint(data[y * logImage->width * logImage->depth + x], 4095)) << 4, + ((ushort)float_uint(data[y * logImage->width * logImage->depth + x], 4095)) << 4, logImage->isMSB); } @@ -385,9 +384,9 @@ static int logImageSetData12(LogImageFile *logImage, LogImageElement logElement, static int 
logImageSetData16(LogImageFile *logImage, LogImageElement logElement, float *data) { size_t rowLength = getRowLength(logImage->width, logElement); - unsigned short *row; + ushort *row; - row = (unsigned short *)MEM_mallocN(rowLength, __func__); + row = (ushort *)MEM_mallocN(rowLength, __func__); if (row == NULL) { if (verbose) { printf("DPX/Cineon: Cannot allocate row.\n"); @@ -398,7 +397,7 @@ static int logImageSetData16(LogImageFile *logImage, LogImageElement logElement, for (size_t y = 0; y < logImage->height; y++) { for (size_t x = 0; x < logImage->width * logImage->depth; x++) { row[x] = swap_ushort( - (unsigned short)float_uint(data[y * logImage->width * logImage->depth + x], 65535), + (ushort)float_uint(data[y * logImage->width * logImage->depth + x], 65535), logImage->isMSB); } @@ -425,11 +424,11 @@ int logImageGetDataRGBA(LogImageFile *logImage, float *data, int dataIsLinearRGB float *elementData[8]; float *elementData_ptr[8]; float *mergedData; - unsigned int sampleIndex; + uint sampleIndex; LogImageElement mergedElement; /* Determine the depth of the picture and if there's a separate alpha element. - * If the element is supported, load it into an unsigned ints array. */ + * If the element is supported, load it into an `uint` array. 
*/ memset(&elementData, 0, 8 * sizeof(float *)); hasAlpha = 0; @@ -695,7 +694,7 @@ static int logImageElementGetData(LogImageFile *logImage, LogImageElement logEle static int logImageElementGetData1(LogImageFile *logImage, LogImageElement logElement, float *data) { - unsigned int pixel; + uint pixel; /* seek at the right place */ if (logimage_fseek(logImage, logElement.dataOffset, SEEK_SET) != 0) { @@ -727,7 +726,7 @@ static int logImageElementGetData1(LogImageFile *logImage, LogImageElement logEl static int logImageElementGetData8(LogImageFile *logImage, LogImageElement logElement, float *data) { size_t rowLength = getRowLength(logImage->width, logElement); - unsigned char pixel; + uchar pixel; /* extract required pixels */ for (size_t y = 0; y < logImage->height; y++) { @@ -756,7 +755,7 @@ static int logImageElementGetData10(LogImageFile *logImage, LogImageElement logElement, float *data) { - unsigned int pixel; + uint pixel; /* seek to data */ if (logimage_fseek(logImage, logElement.dataOffset, SEEK_SET) != 0) { @@ -829,15 +828,14 @@ static int logImageElementGetData10Packed(LogImageFile *logImage, float *data) { size_t rowLength = getRowLength(logImage->width, logElement); - unsigned int pixel, oldPixel; + uint pixel, oldPixel; /* converting bytes to pixels */ for (size_t y = 0; y < logImage->height; y++) { /* seek to data */ if (logimage_fseek(logImage, y * rowLength + logElement.dataOffset, SEEK_SET) != 0) { if (verbose) { - printf("DPX/Cineon: Couldn't seek at %u\n", - (unsigned int)(y * rowLength + logElement.dataOffset)); + printf("DPX/Cineon: Couldn't seek at %u\n", (uint)(y * rowLength + logElement.dataOffset)); } return 1; } @@ -884,9 +882,9 @@ static int logImageElementGetData12(LogImageFile *logImage, LogImageElement logElement, float *data) { - unsigned int sampleIndex; - unsigned int numSamples = logImage->width * logImage->height * logElement.depth; - unsigned short pixel; + uint sampleIndex; + uint numSamples = logImage->width * logImage->height * 
logElement.depth; + ushort pixel; /* seek to data */ if (logimage_fseek(logImage, logElement.dataOffset, SEEK_SET) != 0) { @@ -923,15 +921,14 @@ static int logImageElementGetData12Packed(LogImageFile *logImage, float *data) { size_t rowLength = getRowLength(logImage->width, logElement); - unsigned int pixel, oldPixel; + uint pixel, oldPixel; /* converting bytes to pixels */ for (size_t y = 0; y < logImage->height; y++) { /* seek to data */ if (logimage_fseek(logImage, y * rowLength + logElement.dataOffset, SEEK_SET) != 0) { if (verbose) { - printf("DPX/Cineon: Couldn't seek at %u\n", - (unsigned int)(y * rowLength + logElement.dataOffset)); + printf("DPX/Cineon: Couldn't seek at %u\n", (uint)(y * rowLength + logElement.dataOffset)); } return 1; } @@ -978,9 +975,9 @@ static int logImageElementGetData16(LogImageFile *logImage, LogImageElement logElement, float *data) { - unsigned int numSamples = logImage->width * logImage->height * logElement.depth; - unsigned int sampleIndex; - unsigned short pixel; + uint numSamples = logImage->width * logImage->height * logElement.depth; + uint sampleIndex; + ushort pixel; /* seek to data */ if (logimage_fseek(logImage, logElement.dataOffset, SEEK_SET) != 0) { @@ -1076,8 +1073,8 @@ static float *getLinToLogLut(LogImageFile *logImage, LogImageElement logElement) { float *lut; float gain, negativeFilmGamma, offset, step; - unsigned int lutsize = (unsigned int)(logElement.maxValue + 1); - unsigned int i; + uint lutsize = (uint)(logElement.maxValue + 1); + uint i; lut = MEM_mallocN(sizeof(float) * lutsize, "getLinToLogLut"); @@ -1104,8 +1101,8 @@ static float *getLogToLinLut(LogImageFile *logImage, LogImageElement logElement) float *lut; float breakPoint, gain, kneeGain, kneeOffset, negativeFilmGamma, offset, step, softClip; /* float filmGamma; unused */ - unsigned int lutsize = (unsigned int)(logElement.maxValue + 1); - unsigned int i; + uint lutsize = (uint)(logElement.maxValue + 1); + uint i; lut = MEM_mallocN(sizeof(float) * 
lutsize, "getLogToLinLut"); @@ -1154,8 +1151,8 @@ static float *getLogToLinLut(LogImageFile *logImage, LogImageElement logElement) static float *getLinToSrgbLut(LogImageElement logElement) { float col, *lut; - unsigned int lutsize = (unsigned int)(logElement.maxValue + 1); - unsigned int i; + uint lutsize = (uint)(logElement.maxValue + 1); + uint i; lut = MEM_mallocN(sizeof(float) * lutsize, "getLogToLinLut"); @@ -1175,8 +1172,8 @@ static float *getLinToSrgbLut(LogImageElement logElement) static float *getSrgbToLinLut(LogImageElement logElement) { float col, *lut; - unsigned int lutsize = (unsigned int)(logElement.maxValue + 1); - unsigned int i; + uint lutsize = (uint)(logElement.maxValue + 1); + uint i; lut = MEM_mallocN(sizeof(float) * lutsize, "getLogToLinLut"); @@ -1199,7 +1196,7 @@ static int convertRGBA_RGB(float *src, LogImageElement logElement, int elementIsSource) { - unsigned int i; + uint i; float *src_ptr = src; float *dst_ptr = dst; @@ -1254,7 +1251,7 @@ static int convertRGB_RGBA(float *src, LogImageElement logElement, int elementIsSource) { - unsigned int i; + uint i; float *src_ptr = src; float *dst_ptr = dst; @@ -1309,7 +1306,7 @@ static int convertRGBA_RGBA(float *src, LogImageElement logElement, int elementIsSource) { - unsigned int i; + uint i; float *src_ptr = src; float *dst_ptr = dst; @@ -1354,7 +1351,7 @@ static int convertABGR_RGBA(float *src, LogImageElement logElement, int elementIsSource) { - unsigned int i; + uint i; float *src_ptr = src; float *dst_ptr = dst; @@ -1407,7 +1404,7 @@ static int convertCbYCr_RGBA(float *src, LogImageFile *logImage, LogImageElement logElement) { - unsigned int i; + uint i; float conversionMatrix[9], refLowData, y, cb, cr; float *src_ptr = src; float *dst_ptr = dst; @@ -1439,7 +1436,7 @@ static int convertCbYCrA_RGBA(float *src, LogImageFile *logImage, LogImageElement logElement) { - unsigned int i; + uint i; float conversionMatrix[9], refLowData, y, cb, cr, a; float *src_ptr = src; float *dst_ptr = dst; @@ 
-1472,7 +1469,7 @@ static int convertCbYCrY_RGBA(float *src, LogImageFile *logImage, LogImageElement logElement) { - unsigned int i; + uint i; float conversionMatrix[9], refLowData, y1, y2, cb, cr; float *src_ptr = src; float *dst_ptr = dst; @@ -1524,7 +1521,7 @@ static int convertCbYACrYA_RGBA(float *src, LogImageFile *logImage, LogImageElement logElement) { - unsigned int i; + uint i; float conversionMatrix[9], refLowData, y1, y2, cb, cr, a1, a2; float *src_ptr = src; float *dst_ptr = dst; @@ -1578,7 +1575,7 @@ static int convertLuminance_RGBA(float *src, LogImageFile *logImage, LogImageElement logElement) { - unsigned int i; + uint i; float conversionMatrix[9], value, refLowData; float *src_ptr = src; float *dst_ptr = dst; @@ -1604,7 +1601,7 @@ static int convertYA_RGBA(float *src, LogImageFile *logImage, LogImageElement logElement) { - unsigned int i; + uint i; float conversionMatrix[9], value, refLowData; float *src_ptr = src; float *dst_ptr = dst; @@ -1629,7 +1626,7 @@ static int convertLogElementToRGBA( float *src, float *dst, LogImageFile *logImage, LogImageElement logElement, int dstIsLinearRGB) { int rvalue; - unsigned int i; + uint i; float *src_ptr; float *dst_ptr; @@ -1698,7 +1695,7 @@ static int convertLogElementToRGBA( static int convertRGBAToLogElement( float *src, float *dst, LogImageFile *logImage, LogImageElement logElement, int srcIsLinearRGB) { - unsigned int i; + uint i; int rvalue; float *srgbSrc; float *srgbSrc_ptr; diff --git a/source/blender/imbuf/intern/cineon/logmemfile.c b/source/blender/imbuf/intern/cineon/logmemfile.c index f5bd87f96d1..6c24d67b33f 100644 --- a/source/blender/imbuf/intern/cineon/logmemfile.c +++ b/source/blender/imbuf/intern/cineon/logmemfile.c @@ -44,7 +44,7 @@ int logimage_fseek(LogImageFile *logFile, intptr_t offset, int origin) return 0; } -int logimage_fwrite(void *buffer, size_t size, unsigned int count, LogImageFile *logFile) +int logimage_fwrite(void *buffer, size_t size, uint count, LogImageFile *logFile) { 
if (logFile->file) { return fwrite(buffer, size, count, logFile->file); @@ -54,13 +54,13 @@ int logimage_fwrite(void *buffer, size_t size, unsigned int count, LogImageFile return count; } -int logimage_fread(void *buffer, size_t size, unsigned int count, LogImageFile *logFile) +int logimage_fread(void *buffer, size_t size, uint count, LogImageFile *logFile) { if (logFile->file) { return fread(buffer, size, count, logFile->file); } /* we're reading from memory */ - unsigned char *buf = (unsigned char *)buffer; + uchar *buf = (uchar *)buffer; uintptr_t pos = (uintptr_t)logFile->memCursor - (uintptr_t)logFile->memBuffer; size_t total_size = size * count; if (pos + total_size > logFile->memBufferSize) { @@ -77,38 +77,38 @@ int logimage_fread(void *buffer, size_t size, unsigned int count, LogImageFile * return count; } -int logimage_read_uchar(unsigned char *x, LogImageFile *logFile) +int logimage_read_uchar(uchar *x, LogImageFile *logFile) { uintptr_t pos = (uintptr_t)logFile->memCursor - (uintptr_t)logFile->memBuffer; - if (pos + sizeof(unsigned char) > logFile->memBufferSize) { + if (pos + sizeof(uchar) > logFile->memBufferSize) { return 1; } - *x = *(unsigned char *)logFile->memCursor; - logFile->memCursor += sizeof(unsigned char); + *x = *(uchar *)logFile->memCursor; + logFile->memCursor += sizeof(uchar); return 0; } -int logimage_read_ushort(unsigned short *x, LogImageFile *logFile) +int logimage_read_ushort(ushort *x, LogImageFile *logFile) { uintptr_t pos = (uintptr_t)logFile->memCursor - (uintptr_t)logFile->memBuffer; - if (pos + sizeof(unsigned short) > logFile->memBufferSize) { + if (pos + sizeof(ushort) > logFile->memBufferSize) { return 1; } - *x = *(unsigned short *)logFile->memCursor; - logFile->memCursor += sizeof(unsigned short); + *x = *(ushort *)logFile->memCursor; + logFile->memCursor += sizeof(ushort); return 0; } -int logimage_read_uint(unsigned int *x, LogImageFile *logFile) +int logimage_read_uint(uint *x, LogImageFile *logFile) { uintptr_t pos = 
(uintptr_t)logFile->memCursor - (uintptr_t)logFile->memBuffer; - if (pos + sizeof(unsigned int) > logFile->memBufferSize) { + if (pos + sizeof(uint) > logFile->memBufferSize) { return 1; } - *x = *(unsigned int *)logFile->memCursor; - logFile->memCursor += sizeof(unsigned int); + *x = *(uint *)logFile->memCursor; + logFile->memCursor += sizeof(uint); return 0; } diff --git a/source/blender/imbuf/intern/colormanagement.c b/source/blender/imbuf/intern/colormanagement.c index b62bdd5521d..ea5f4ec275d 100644 --- a/source/blender/imbuf/intern/colormanagement.c +++ b/source/blender/imbuf/intern/colormanagement.c @@ -235,11 +235,11 @@ static ColormanageCacheData *colormanage_cachedata_get(const ImBuf *ibuf) return ibuf->colormanage_cache->data; } -static unsigned int colormanage_hashhash(const void *key_v) +static uint colormanage_hashhash(const void *key_v) { const ColormanageCacheKey *key = key_v; - unsigned int rval = (key->display << 16) | (key->view % 0xffff); + uint rval = (key->display << 16) | (key->view % 0xffff); return rval; } @@ -336,11 +336,10 @@ static ImBuf *colormanage_cache_get_ibuf(ImBuf *ibuf, return cache_ibuf; } -static unsigned char *colormanage_cache_get( - ImBuf *ibuf, - const ColormanageCacheViewSettings *view_settings, - const ColormanageCacheDisplaySettings *display_settings, - void **cache_handle) +static uchar *colormanage_cache_get(ImBuf *ibuf, + const ColormanageCacheViewSettings *view_settings, + const ColormanageCacheDisplaySettings *display_settings, + void **cache_handle) { ColormanageCacheKey key; ImBuf *cache_ibuf; @@ -383,7 +382,7 @@ static unsigned char *colormanage_cache_get( return NULL; } - return (unsigned char *)cache_ibuf->rect; + return (uchar *)cache_ibuf->rect; } return NULL; @@ -392,7 +391,7 @@ static unsigned char *colormanage_cache_get( static void colormanage_cache_put(ImBuf *ibuf, const ColormanageCacheViewSettings *view_settings, const ColormanageCacheDisplaySettings *display_settings, - unsigned char *display_buffer, 
+ uchar *display_buffer, void **cache_handle) { ColormanageCacheKey key; @@ -410,7 +409,7 @@ static void colormanage_cache_put(ImBuf *ibuf, /* buffer itself */ cache_ibuf = IMB_allocImBuf(ibuf->x, ibuf->y, ibuf->planes, 0); - cache_ibuf->rect = (unsigned int *)display_buffer; + cache_ibuf->rect = (uint *)display_buffer; cache_ibuf->mall |= IB_rect; cache_ibuf->flags |= IB_rect; @@ -1441,10 +1440,10 @@ typedef struct DisplayBufferThread { ColormanageProcessor *cm_processor; const float *buffer; - unsigned char *byte_buffer; + uchar *byte_buffer; float *display_buffer; - unsigned char *display_buffer_byte; + uchar *display_buffer_byte; int width; int start_line; @@ -1463,10 +1462,10 @@ typedef struct DisplayBufferInitData { ImBuf *ibuf; ColormanageProcessor *cm_processor; const float *buffer; - unsigned char *byte_buffer; + uchar *byte_buffer; float *display_buffer; - unsigned char *display_buffer_byte; + uchar *display_buffer_byte; int width; @@ -1539,13 +1538,13 @@ static void display_buffer_apply_get_linear_buffer(DisplayBufferThread *handle, bool predivide = handle->predivide; if (!handle->buffer) { - unsigned char *byte_buffer = handle->byte_buffer; + uchar *byte_buffer = handle->byte_buffer; const char *from_colorspace = handle->byte_colorspace; const char *to_colorspace = global_role_scene_linear; float *fp; - unsigned char *cp; + uchar *cp; const size_t i_last = ((size_t)width) * height; size_t i; @@ -1608,7 +1607,7 @@ static void *do_display_buffer_apply_thread(void *handle_v) DisplayBufferThread *handle = (DisplayBufferThread *)handle_v; ColormanageProcessor *cm_processor = handle->cm_processor; float *display_buffer = handle->display_buffer; - unsigned char *display_buffer_byte = handle->display_buffer_byte; + uchar *display_buffer_byte = handle->display_buffer_byte; int channels = handle->channels; int width = handle->width; int height = handle->tot_line; @@ -1698,9 +1697,9 @@ static void *do_display_buffer_apply_thread(void *handle_v) static void 
display_buffer_apply_threaded(ImBuf *ibuf, const float *buffer, - unsigned char *byte_buffer, + uchar *byte_buffer, float *display_buffer, - unsigned char *display_buffer_byte, + uchar *display_buffer_byte, ColormanageProcessor *cm_processor) { DisplayBufferInitData init_data; @@ -1761,7 +1760,7 @@ static bool is_ibuf_rect_in_display_space(ImBuf *ibuf, static void colormanage_display_buffer_process_ex( ImBuf *ibuf, float *display_buffer, - unsigned char *display_buffer_byte, + uchar *display_buffer_byte, const ColorManagedViewSettings *view_settings, const ColorManagedDisplaySettings *display_settings) { @@ -1783,7 +1782,7 @@ static void colormanage_display_buffer_process_ex( display_buffer_apply_threaded(ibuf, ibuf->rect_float, - (unsigned char *)ibuf->rect, + (uchar *)ibuf->rect, display_buffer, display_buffer_byte, cm_processor); @@ -1794,7 +1793,7 @@ static void colormanage_display_buffer_process_ex( } static void colormanage_display_buffer_process(ImBuf *ibuf, - unsigned char *display_buffer, + uchar *display_buffer, const ColorManagedViewSettings *view_settings, const ColorManagedDisplaySettings *display_settings) { @@ -1810,7 +1809,7 @@ static void colormanage_display_buffer_process(ImBuf *ibuf, typedef struct ProcessorTransformThread { ColormanageProcessor *cm_processor; - unsigned char *byte_buffer; + uchar *byte_buffer; float *float_buffer; int width; int start_line; @@ -1822,7 +1821,7 @@ typedef struct ProcessorTransformThread { typedef struct ProcessorTransformInit { ColormanageProcessor *cm_processor; - unsigned char *byte_buffer; + uchar *byte_buffer; float *float_buffer; int width; int height; @@ -1871,7 +1870,7 @@ static void processor_transform_init_handle(void *handle_v, static void *do_processor_transform_thread(void *handle_v) { ProcessorTransformThread *handle = (ProcessorTransformThread *)handle_v; - unsigned char *byte_buffer = handle->byte_buffer; + uchar *byte_buffer = handle->byte_buffer; float *float_buffer = handle->float_buffer; const 
int channels = handle->channels; const int width = handle->width; @@ -1907,7 +1906,7 @@ static void *do_processor_transform_thread(void *handle_v) return NULL; } -static void processor_transform_apply_threaded(unsigned char *byte_buffer, +static void processor_transform_apply_threaded(uchar *byte_buffer, float *float_buffer, const int width, const int height, @@ -1942,7 +1941,7 @@ static void processor_transform_apply_threaded(unsigned char *byte_buffer, /* Convert the whole buffer from specified by name color space to another - * internal implementation. */ -static void colormanagement_transform_ex(unsigned char *byte_buffer, +static void colormanagement_transform_ex(uchar *byte_buffer, float *float_buffer, int width, int height, @@ -2008,7 +2007,7 @@ void IMB_colormanagement_transform_threaded(float *buffer, NULL, buffer, width, height, channels, from_colorspace, to_colorspace, predivide, true); } -void IMB_colormanagement_transform_byte(unsigned char *buffer, +void IMB_colormanagement_transform_byte(uchar *buffer, int width, int height, int channels, @@ -2018,7 +2017,7 @@ void IMB_colormanagement_transform_byte(unsigned char *buffer, colormanagement_transform_ex( buffer, NULL, width, height, channels, from_colorspace, to_colorspace, false, false); } -void IMB_colormanagement_transform_byte_threaded(unsigned char *buffer, +void IMB_colormanagement_transform_byte_threaded(uchar *buffer, int width, int height, int channels, @@ -2030,7 +2029,7 @@ void IMB_colormanagement_transform_byte_threaded(unsigned char *buffer, } void IMB_colormanagement_transform_from_byte(float *float_buffer, - unsigned char *byte_buffer, + uchar *byte_buffer, int width, int height, int channels, @@ -2050,7 +2049,7 @@ void IMB_colormanagement_transform_from_byte(float *float_buffer, float_buffer, width, height, channels, from_colorspace, to_colorspace, true); } void IMB_colormanagement_transform_from_byte_threaded(float *float_buffer, - unsigned char *byte_buffer, + uchar *byte_buffer, int 
width, int height, int channels, @@ -2205,7 +2204,7 @@ void IMB_colormanagement_colorspace_to_scene_linear(float *buffer, } } -void IMB_colormanagement_imbuf_to_byte_texture(unsigned char *out_buffer, +void IMB_colormanagement_imbuf_to_byte_texture(uchar *out_buffer, const int offset_x, const int offset_y, const int width, @@ -2220,14 +2219,14 @@ void IMB_colormanagement_imbuf_to_byte_texture(unsigned char *out_buffer, IMB_colormanagement_space_is_scene_linear(ibuf->rect_colorspace) || IMB_colormanagement_space_is_data(ibuf->rect_colorspace)); - const unsigned char *in_buffer = (unsigned char *)ibuf->rect; + const uchar *in_buffer = (uchar *)ibuf->rect; const bool use_premultiply = IMB_alpha_affects_rgb(ibuf) && store_premultiplied; for (int y = 0; y < height; y++) { const size_t in_offset = (offset_y + y) * ibuf->x + offset_x; const size_t out_offset = y * width; - const unsigned char *in = in_buffer + in_offset * 4; - unsigned char *out = out_buffer + out_offset * 4; + const uchar *in = in_buffer + in_offset * 4; + uchar *out = out_buffer + out_offset * 4; if (use_premultiply) { /* Premultiply only. */ @@ -2305,7 +2304,7 @@ void IMB_colormanagement_imbuf_to_float_texture(float *out_buffer, } else { /* Byte source buffer. */ - const unsigned char *in_buffer = (unsigned char *)ibuf->rect; + const uchar *in_buffer = (uchar *)ibuf->rect; const bool use_premultiply = IMB_alpha_affects_rgb(ibuf) && store_premultiplied; /* TODO(brecht): make this multi-threaded, or at least process in batches. */ @@ -2317,7 +2316,7 @@ void IMB_colormanagement_imbuf_to_float_texture(float *out_buffer, for (int y = 0; y < height; y++) { const size_t in_offset = (offset_y + y) * ibuf->x + offset_x; const size_t out_offset = y * width; - const unsigned char *in = in_buffer + in_offset * 4; + const uchar *in = in_buffer + in_offset * 4; float *out = out_buffer + out_offset * 4; /* Convert to scene linear, to sRGB and premultiply. 
*/ @@ -2458,7 +2457,7 @@ static void colormanagement_imbuf_make_display_space( } colormanage_display_buffer_process_ex( - ibuf, ibuf->rect_float, (unsigned char *)ibuf->rect, view_settings, display_settings); + ibuf, ibuf->rect_float, (uchar *)ibuf->rect, view_settings, display_settings); } void IMB_colormanagement_imbuf_make_display_space( @@ -2545,10 +2544,8 @@ ImBuf *IMB_colormanagement_imbuf_for_write(ImBuf *ibuf, } if (colormanaged_ibuf->rect) { - IMB_alpha_under_color_byte((unsigned char *)colormanaged_ibuf->rect, - colormanaged_ibuf->x, - colormanaged_ibuf->y, - color); + IMB_alpha_under_color_byte( + (uchar *)colormanaged_ibuf->rect, colormanaged_ibuf->x, colormanaged_ibuf->y, color); } } @@ -2603,7 +2600,7 @@ ImBuf *IMB_colormanagement_imbuf_for_write(ImBuf *ibuf, if (colormanaged_ibuf->rect) { /* Byte to byte. */ - IMB_colormanagement_transform_byte_threaded((unsigned char *)colormanaged_ibuf->rect, + IMB_colormanagement_transform_byte_threaded((uchar *)colormanaged_ibuf->rect, colormanaged_ibuf->x, colormanaged_ibuf->y, colormanaged_ibuf->channels, @@ -2650,12 +2647,12 @@ ImBuf *IMB_colormanagement_imbuf_for_write(ImBuf *ibuf, /** \name Public Display Buffers Interfaces * \{ */ -unsigned char *IMB_display_buffer_acquire(ImBuf *ibuf, - const ColorManagedViewSettings *view_settings, - const ColorManagedDisplaySettings *display_settings, - void **cache_handle) +uchar *IMB_display_buffer_acquire(ImBuf *ibuf, + const ColorManagedViewSettings *view_settings, + const ColorManagedDisplaySettings *display_settings, + void **cache_handle) { - unsigned char *display_buffer; + uchar *display_buffer; size_t buffer_size; ColormanageCacheViewSettings cache_view_settings; ColormanageCacheDisplaySettings cache_display_settings; @@ -2683,7 +2680,7 @@ unsigned char *IMB_display_buffer_acquire(ImBuf *ibuf, */ if (ibuf->rect_float == NULL && ibuf->rect_colorspace && ibuf->channels == 4) { if (is_ibuf_rect_in_display_space(ibuf, applied_view_settings, display_settings)) { - 
return (unsigned char *)ibuf->rect; + return (uchar *)ibuf->rect; } } @@ -2694,7 +2691,7 @@ unsigned char *IMB_display_buffer_acquire(ImBuf *ibuf, if ((ibuf->userflags & IB_DISPLAY_BUFFER_INVALID) == 0) { IMB_partial_display_buffer_update_threaded(ibuf, ibuf->rect_float, - (unsigned char *)ibuf->rect, + (uchar *)ibuf->rect, ibuf->x, 0, 0, @@ -2713,14 +2710,14 @@ unsigned char *IMB_display_buffer_acquire(ImBuf *ibuf, /* ensure color management bit fields exists */ if (!ibuf->display_buffer_flags) { - ibuf->display_buffer_flags = MEM_callocN(sizeof(unsigned int) * global_tot_display, + ibuf->display_buffer_flags = MEM_callocN(sizeof(uint) * global_tot_display, "imbuf display_buffer_flags"); } else if (ibuf->userflags & IB_DISPLAY_BUFFER_INVALID) { /* all display buffers were marked as invalid from other areas, * now propagate this flag to internal color management routines */ - memset(ibuf->display_buffer_flags, 0, global_tot_display * sizeof(unsigned int)); + memset(ibuf->display_buffer_flags, 0, global_tot_display * sizeof(uint)); ibuf->userflags &= ~IB_DISPLAY_BUFFER_INVALID; } @@ -2747,7 +2744,7 @@ unsigned char *IMB_display_buffer_acquire(ImBuf *ibuf, return display_buffer; } -unsigned char *IMB_display_buffer_acquire_ctx(const bContext *C, ImBuf *ibuf, void **cache_handle) +uchar *IMB_display_buffer_acquire_ctx(const bContext *C, ImBuf *ibuf, void **cache_handle) { ColorManagedViewSettings *view_settings; ColorManagedDisplaySettings *display_settings; @@ -2757,7 +2754,7 @@ unsigned char *IMB_display_buffer_acquire_ctx(const bContext *C, ImBuf *ibuf, vo return IMB_display_buffer_acquire(ibuf, view_settings, display_settings, cache_handle); } -void IMB_display_buffer_transform_apply(unsigned char *display_buffer, +void IMB_display_buffer_transform_apply(uchar *display_buffer, float *linear_buffer, int width, int height, @@ -3396,9 +3393,9 @@ void IMB_colormanagement_colorspace_items_add(EnumPropertyItem **items, int *tot */ static void 
partial_buffer_update_rect(ImBuf *ibuf, - unsigned char *display_buffer, + uchar *display_buffer, const float *linear_buffer, - const unsigned char *byte_buffer, + const uchar *byte_buffer, int display_stride, int linear_stride, int linear_offset_x, @@ -3547,9 +3544,9 @@ static void partial_buffer_update_rect(ImBuf *ibuf, typedef struct PartialThreadData { ImBuf *ibuf; - unsigned char *display_buffer; + uchar *display_buffer; const float *linear_buffer; - const unsigned char *byte_buffer; + const uchar *byte_buffer; int display_stride; int linear_stride; int linear_offset_x, linear_offset_y; @@ -3580,7 +3577,7 @@ static void partial_buffer_update_rect_thread_do(void *data_v, int scanline) static void imb_partial_display_buffer_update_ex( ImBuf *ibuf, const float *linear_buffer, - const unsigned char *byte_buffer, + const uchar *byte_buffer, int stride, int offset_x, int offset_y, @@ -3595,7 +3592,7 @@ static void imb_partial_display_buffer_update_ex( ColormanageCacheViewSettings cache_view_settings; ColormanageCacheDisplaySettings cache_display_settings; void *cache_handle = NULL; - unsigned char *display_buffer = NULL; + uchar *display_buffer = NULL; int buffer_width = ibuf->x; if (ibuf->display_buffer_flags) { @@ -3621,7 +3618,7 @@ static void imb_partial_display_buffer_update_ex( buffer_width = ibuf->x; /* Mark all other buffers as invalid. 
*/ - memset(ibuf->display_buffer_flags, 0, global_tot_display * sizeof(unsigned int)); + memset(ibuf->display_buffer_flags, 0, global_tot_display * sizeof(uint)); ibuf->display_buffer_flags[display_index] |= view_flag; BLI_thread_unlock(LOCK_COLORMANAGE); @@ -3689,7 +3686,7 @@ static void imb_partial_display_buffer_update_ex( void IMB_partial_display_buffer_update(ImBuf *ibuf, const float *linear_buffer, - const unsigned char *byte_buffer, + const uchar *byte_buffer, int stride, int offset_x, int offset_y, @@ -3718,7 +3715,7 @@ void IMB_partial_display_buffer_update(ImBuf *ibuf, void IMB_partial_display_buffer_update_threaded( struct ImBuf *ibuf, const float *linear_buffer, - const unsigned char *byte_buffer, + const uchar *byte_buffer, int stride, int offset_x, int offset_y, @@ -3925,7 +3922,7 @@ void IMB_colormanagement_processor_apply(ColormanageProcessor *cm_processor, } void IMB_colormanagement_processor_apply_byte( - ColormanageProcessor *cm_processor, unsigned char *buffer, int width, int height, int channels) + ColormanageProcessor *cm_processor, uchar *buffer, int width, int height, int channels) { /* TODO(sergey): Would be nice to support arbitrary channels configurations, * but for now it's not so important. 
diff --git a/source/blender/imbuf/intern/colormanagement_inline.c b/source/blender/imbuf/intern/colormanagement_inline.c index 3c6c0f5fd0a..df513a7330c 100644 --- a/source/blender/imbuf/intern/colormanagement_inline.c +++ b/source/blender/imbuf/intern/colormanagement_inline.c @@ -21,7 +21,7 @@ float IMB_colormanagement_get_luminance(const float rgb[3]) return dot_v3v3(imbuf_luma_coefficients, rgb); } -unsigned char IMB_colormanagement_get_luminance_byte(const unsigned char rgb[3]) +uchar IMB_colormanagement_get_luminance_byte(const uchar rgb[3]) { float rgbf[3]; float val; diff --git a/source/blender/imbuf/intern/dds/BlockDXT.cpp b/source/blender/imbuf/intern/dds/BlockDXT.cpp index 4048a78e5cf..2d198135a66 100644 --- a/source/blender/imbuf/intern/dds/BlockDXT.cpp +++ b/source/blender/imbuf/intern/dds/BlockDXT.cpp @@ -34,6 +34,8 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ +#include <BLI_sys_types.h> /* For `uint`. */ + #include <BlockDXT.h> #include <ColorBlock.h> #include <Common.h> @@ -576,7 +578,7 @@ void mem_read(Stream &mem, BlockDXT1 &block) void mem_read(Stream &mem, AlphaBlockDXT3 &block) { - for (unsigned short &alpha : block.row) { + for (ushort &alpha : block.row) { mem_read(mem, alpha); } } diff --git a/source/blender/imbuf/intern/dds/DirectDrawSurface.cpp b/source/blender/imbuf/intern/dds/DirectDrawSurface.cpp index ce5dd4927be..4e5dc9ce560 100644 --- a/source/blender/imbuf/intern/dds/DirectDrawSurface.cpp +++ b/source/blender/imbuf/intern/dds/DirectDrawSurface.cpp @@ -867,7 +867,7 @@ uint DDSHeader::d3d9Format() const return findD3D9Format(pf.bitcount, pf.rmask, pf.gmask, pf.bmask, pf.amask); } -DirectDrawSurface::DirectDrawSurface(unsigned char *mem, uint size) : stream(mem, size), header() +DirectDrawSurface::DirectDrawSurface(uchar *mem, uint size) : stream(mem, size), header() { mem_read(stream, header); @@ -1112,7 +1112,7 @@ void *DirectDrawSurface::readData(uint &rsize) uint size = 
stream.size - header_size; rsize = size; - unsigned char *data = (unsigned char *)malloc(sizeof(*data) * size); + uchar *data = (uchar *)malloc(sizeof(*data) * size); stream.seek(header_size); mem_read(stream, data, size); @@ -1158,7 +1158,7 @@ void DirectDrawSurface::readLinearImage(Image *img) for (uint y = 0; y < h; y++) { for (uint x = 0; x < w; x++) { uint c = 0; - mem_read(stream, (unsigned char *)(&c), byteCount); + mem_read(stream, (uchar *)(&c), byteCount); Color32 pixel(0, 0, 0, 0xFF); pixel.r = PixelFormat::convert((c & header.pf.rmask) >> rshift, rsize, 8); diff --git a/source/blender/imbuf/intern/dds/FlipDXT.cpp b/source/blender/imbuf/intern/dds/FlipDXT.cpp index fc978bff788..3d2b7e51a46 100644 --- a/source/blender/imbuf/intern/dds/FlipDXT.cpp +++ b/source/blender/imbuf/intern/dds/FlipDXT.cpp @@ -104,19 +104,19 @@ static void FlipDXT5BlockFull(uint8_t *block) * bits = bits_0 + 256 * (bits_1 + 256 * (bits_2 + 256 * (bits_3 + * 256 * (bits_4 + 256 * bits_5)))) * - * bits is a 48-bit unsigned integer, from which a three-bit control code + * bits is a 48-bit unsigned-integer, from which a three-bit control code * is extracted for a texel at location (x,y) in the block using: * * code(x,y) = bits[3*(4*y+x)+1..3*(4*y+x)+0] * * where bit 47 is the most significant and bit 0 is the least * significant bit. */ - unsigned int line_0_1 = block[2] + 256 * (block[3] + 256 * block[4]); - unsigned int line_2_3 = block[5] + 256 * (block[6] + 256 * block[7]); + uint line_0_1 = block[2] + 256 * (block[3] + 256 * block[4]); + uint line_2_3 = block[5] + 256 * (block[6] + 256 * block[7]); /* swap lines 0 and 1 in line_0_1. */ - unsigned int line_1_0 = ((line_0_1 & 0x000fff) << 12) | ((line_0_1 & 0xfff000) >> 12); + uint line_1_0 = ((line_0_1 & 0x000fff) << 12) | ((line_0_1 & 0xfff000) >> 12); /* swap lines 2 and 3 in line_2_3. 
*/ - unsigned int line_3_2 = ((line_2_3 & 0x000fff) << 12) | ((line_2_3 & 0xfff000) >> 12); + uint line_3_2 = ((line_2_3 & 0x000fff) << 12) | ((line_2_3 & 0xfff000) >> 12); block[2] = line_3_2 & 0xff; block[3] = (line_3_2 & 0xff00) >> 8; @@ -133,21 +133,21 @@ static void FlipDXT5BlockFull(uint8_t *block) static void FlipDXT5BlockHalf(uint8_t *block) { /* See layout above. */ - unsigned int line_0_1 = block[2] + 256 * (block[3] + 256 * block[4]); - unsigned int line_1_0 = ((line_0_1 & 0x000fff) << 12) | ((line_0_1 & 0xfff000) >> 12); + uint line_0_1 = block[2] + 256 * (block[3] + 256 * block[4]); + uint line_1_0 = ((line_0_1 & 0x000fff) << 12) | ((line_0_1 & 0xfff000) >> 12); block[2] = line_1_0 & 0xff; block[3] = (line_1_0 & 0xff00) >> 8; block[4] = (line_1_0 & 0xff0000) >> 16; FlipDXT1BlockHalf(block + 8); } -int FlipDXTCImage(unsigned int width, - unsigned int height, - unsigned int levels, +int FlipDXTCImage(uint width, + uint height, + uint levels, int fourcc, uint8_t *data, int data_size, - unsigned int *r_num_valid_levels) + uint *r_num_valid_levels) { *r_num_valid_levels = 0; @@ -162,7 +162,7 @@ int FlipDXTCImage(unsigned int width, FlipBlockFunction full_block_function; FlipBlockFunction half_block_function; - unsigned int block_bytes = 0; + uint block_bytes = 0; switch (fourcc) { case FOURCC_DXT1: @@ -186,15 +186,15 @@ int FlipDXTCImage(unsigned int width, *r_num_valid_levels = levels; - unsigned int mip_width = width; - unsigned int mip_height = height; + uint mip_width = width; + uint mip_height = height; const uint8_t *data_end = data + data_size; - for (unsigned int i = 0; i < levels; i++) { - unsigned int blocks_per_row = (mip_width + 3) / 4; - unsigned int blocks_per_col = (mip_height + 3) / 4; - unsigned int blocks = blocks_per_row * blocks_per_col; + for (uint i = 0; i < levels; i++) { + uint blocks_per_row = (mip_width + 3) / 4; + uint blocks_per_col = (mip_height + 3) / 4; + uint blocks = blocks_per_row * blocks_per_col; if (data + block_bytes * 
blocks > data_end) { /* Stop flipping when running out of data to be modified, avoiding possible buffer overrun @@ -209,23 +209,23 @@ int FlipDXTCImage(unsigned int width, } if (mip_height == 2) { /* flip the first 2 lines in each block. */ - for (unsigned int i = 0; i < blocks_per_row; i++) { + for (uint i = 0; i < blocks_per_row; i++) { half_block_function(data + i * block_bytes); } } else { /* flip each block. */ - for (unsigned int i = 0; i < blocks; i++) { + for (uint i = 0; i < blocks; i++) { full_block_function(data + i * block_bytes); } /* Swap each block line in the first half of the image with the * corresponding one in the second half. * note that this is a no-op if mip_height is 4. */ - unsigned int row_bytes = block_bytes * blocks_per_row; + uint row_bytes = block_bytes * blocks_per_row; uint8_t *temp_line = new uint8_t[row_bytes]; - for (unsigned int y = 0; y < blocks_per_col / 2; y++) { + for (uint y = 0; y < blocks_per_col / 2; y++) { uint8_t *line1 = data + y * row_bytes; uint8_t *line2 = data + (blocks_per_col - y - 1) * row_bytes; diff --git a/source/blender/imbuf/intern/dds/Stream.cpp b/source/blender/imbuf/intern/dds/Stream.cpp index 566891dac8b..44b7e6d8f42 100644 --- a/source/blender/imbuf/intern/dds/Stream.cpp +++ b/source/blender/imbuf/intern/dds/Stream.cpp @@ -4,6 +4,8 @@ * \ingroup imbdds */ +#include "BLI_sys_types.h" /* For `uint`. */ + #include <Stream.h> #include <cstdio> /* printf */ @@ -12,7 +14,7 @@ static const char *msg_error_seek = "DDS: trying to seek beyond end of stream (corrupt file?)"; static const char *msg_error_read = "DDS: trying to read beyond end of stream (corrupt file?)"; -inline bool is_read_within_bounds(const Stream &mem, unsigned int count) +inline bool is_read_within_bounds(const Stream &mem, uint count) { if (mem.pos >= mem.size) { /* No more data remained in the memory buffer. 
*/ @@ -27,7 +29,7 @@ inline bool is_read_within_bounds(const Stream &mem, unsigned int count) return true; } -unsigned int Stream::seek(unsigned int p) +uint Stream::seek(uint p) { if (p > size) { set_failed(msg_error_seek); @@ -39,7 +41,7 @@ unsigned int Stream::seek(unsigned int p) return pos; } -unsigned int mem_read(Stream &mem, unsigned long long &i) +uint mem_read(Stream &mem, unsigned long long &i) { if (!is_read_within_bounds(mem, 8)) { mem.set_failed(msg_error_seek); @@ -50,7 +52,7 @@ unsigned int mem_read(Stream &mem, unsigned long long &i) return 8; } -unsigned int mem_read(Stream &mem, unsigned int &i) +uint mem_read(Stream &mem, uint &i) { if (!is_read_within_bounds(mem, 4)) { mem.set_failed(msg_error_read); @@ -61,7 +63,7 @@ unsigned int mem_read(Stream &mem, unsigned int &i) return 4; } -unsigned int mem_read(Stream &mem, unsigned short &i) +uint mem_read(Stream &mem, ushort &i) { if (!is_read_within_bounds(mem, 2)) { mem.set_failed(msg_error_read); @@ -72,7 +74,7 @@ unsigned int mem_read(Stream &mem, unsigned short &i) return 2; } -unsigned int mem_read(Stream &mem, unsigned char &i) +uint mem_read(Stream &mem, uchar &i) { if (!is_read_within_bounds(mem, 1)) { mem.set_failed(msg_error_read); @@ -83,7 +85,7 @@ unsigned int mem_read(Stream &mem, unsigned char &i) return 1; } -unsigned int mem_read(Stream &mem, unsigned char *i, unsigned int count) +uint mem_read(Stream &mem, uchar *i, uint count) { if (!is_read_within_bounds(mem, count)) { mem.set_failed(msg_error_read); diff --git a/source/blender/imbuf/intern/dds/dds_api.cpp b/source/blender/imbuf/intern/dds/dds_api.cpp index e9a13573116..213e10cf744 100644 --- a/source/blender/imbuf/intern/dds/dds_api.cpp +++ b/source/blender/imbuf/intern/dds/dds_api.cpp @@ -58,7 +58,7 @@ bool imb_save_dds(struct ImBuf *ibuf, const char *name, int /*flags*/) return true; } -bool imb_is_a_dds(const unsigned char *mem, const size_t size) +bool imb_is_a_dds(const uchar *mem, const size_t size) { if (size < 8) { return 
false; @@ -75,19 +75,16 @@ bool imb_is_a_dds(const unsigned char *mem, const size_t size) return true; } -struct ImBuf *imb_load_dds(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +struct ImBuf *imb_load_dds(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { struct ImBuf *ibuf = nullptr; - DirectDrawSurface dds((unsigned char *)mem, size); /* reads header */ - unsigned char bits_per_pixel; - unsigned int *rect; + DirectDrawSurface dds((uchar *)mem, size); /* reads header */ + uchar bits_per_pixel; + uint *rect; Image img; - unsigned int numpixels = 0; + uint numpixels = 0; int col; - unsigned char *cp = (unsigned char *)&col; + uchar *cp = (uchar *)&col; Color32 pixel; Color32 *pixels = nullptr; @@ -128,7 +125,7 @@ struct ImBuf *imb_load_dds(const unsigned char *mem, bits_per_pixel = 24; if (img.format() == Image::Format_ARGB) { /* check that there is effectively an alpha channel */ - for (unsigned int i = 0; i < numpixels; i++) { + for (uint i = 0; i < numpixels; i++) { pixel = pixels[i]; if (pixel.a != 255) { bits_per_pixel = 32; @@ -156,7 +153,7 @@ struct ImBuf *imb_load_dds(const unsigned char *mem, rect = ibuf->rect; cp[3] = 0xff; /* default alpha if alpha channel is not present */ - for (unsigned int i = 0; i < numpixels; i++) { + for (uint i = 0; i < numpixels; i++) { pixel = pixels[i]; cp[0] = pixel.r; /* set R component of col */ cp[1] = pixel.g; /* set G component of col */ @@ -168,7 +165,7 @@ struct ImBuf *imb_load_dds(const unsigned char *mem, } if (ibuf->dds_data.fourcc != FOURCC_DDS) { - ibuf->dds_data.data = (unsigned char *)dds.readData(ibuf->dds_data.size); + ibuf->dds_data.data = (uchar *)dds.readData(ibuf->dds_data.size); /* flip compressed texture */ if (ibuf->dds_data.data) { diff --git a/source/blender/imbuf/intern/divers.c b/source/blender/imbuf/intern/divers.c index 13c8f0887b3..61ef9c111d7 100644 --- a/source/blender/imbuf/intern/divers.c +++ 
b/source/blender/imbuf/intern/divers.c @@ -48,7 +48,7 @@ static void clear_dither_context(DitherContext *di) /** \name Generic Buffer Conversion * \{ */ -MINLINE void ushort_to_byte_v4(uchar b[4], const unsigned short us[4]) +MINLINE void ushort_to_byte_v4(uchar b[4], const ushort us[4]) { b[0] = unit_ushort_to_uchar(us[0]); b[1] = unit_ushort_to_uchar(us[1]); @@ -56,13 +56,13 @@ MINLINE void ushort_to_byte_v4(uchar b[4], const unsigned short us[4]) b[3] = unit_ushort_to_uchar(us[3]); } -MINLINE unsigned char ftochar(float value) +MINLINE uchar ftochar(float value) { return unit_float_to_uchar_clamp(value); } MINLINE void ushort_to_byte_dither_v4( - uchar b[4], const unsigned short us[4], DitherContext *di, float s, float t) + uchar b[4], const ushort us[4], DitherContext *di, float s, float t) { #define USHORTTOFLOAT(val) ((float)val / 65535.0f) float dither_value = dither_random_value(s, t) * 0.0033f * di->dither; @@ -192,7 +192,7 @@ void IMB_buffer_byte_from_float(uchar *rect_to, } else if (profile_to == IB_PROFILE_SRGB) { /* convert from linear to sRGB */ - unsigned short us[4]; + ushort us[4]; float straight[4]; if (dither && predivide) { @@ -729,7 +729,7 @@ void IMB_rect_from_float(ImBuf *ibuf) } /* convert float to byte */ - IMB_buffer_byte_from_float((unsigned char *)ibuf->rect, + IMB_buffer_byte_from_float((uchar *)ibuf->rect, buffer, ibuf->channels, ibuf->dither, @@ -768,7 +768,7 @@ void IMB_float_from_rect_ex(struct ImBuf *dst, float *rect_float = dst->rect_float; rect_float += (region_to_update->xmin + region_to_update->ymin * dst->x) * 4; - unsigned char *rect = (unsigned char *)src->rect; + uchar *rect = (uchar *)src->rect; rect += (region_to_update->xmin + region_to_update->ymin * dst->x) * 4; const int region_width = BLI_rcti_size_x(region_to_update); const int region_height = BLI_rcti_size_y(region_to_update); @@ -889,7 +889,7 @@ void IMB_buffer_float_premultiply(float *buf, int width, int height) void IMB_saturation(ImBuf *ibuf, float sat) { 
size_t i; - unsigned char *rct = (unsigned char *)ibuf->rect; + uchar *rct = (uchar *)ibuf->rect; float *rct_fl = ibuf->rect_float; float hsv[3]; diff --git a/source/blender/imbuf/intern/filetype.c b/source/blender/imbuf/intern/filetype.c index 92fa980cd7f..e1d2bea4ae9 100644 --- a/source/blender/imbuf/intern/filetype.c +++ b/source/blender/imbuf/intern/filetype.c @@ -217,7 +217,7 @@ const ImFileType IMB_FILE_TYPES[] = { .is_a = imb_is_a_webp, .load = imb_loadwebp, .load_filepath = NULL, - .load_filepath_thumbnail = NULL, + .load_filepath_thumbnail = imb_load_filepath_thumbnail_webp, .save = imb_savewebp, .load_tile = NULL, .flag = 0, diff --git a/source/blender/imbuf/intern/filter.c b/source/blender/imbuf/intern/filter.c index 91c69d3abc8..67de467bd93 100644 --- a/source/blender/imbuf/intern/filter.c +++ b/source/blender/imbuf/intern/filter.c @@ -18,9 +18,9 @@ #include "imbuf.h" -static void filtrow(unsigned char *point, int x) +static void filtrow(uchar *point, int x) { - unsigned int c1, c2, c3, error; + uint c1, c2, c3, error; if (x > 1) { c1 = c2 = *point; @@ -56,10 +56,10 @@ static void filtrowf(float *point, int x) } } -static void filtcolum(unsigned char *point, int y, int skip) +static void filtcolum(uchar *point, int y, int skip) { - unsigned int c1, c2, c3, error; - unsigned char *point2; + uint c1, c2, c3, error; + uchar *point2; if (y > 1) { c1 = c2 = *point; @@ -101,11 +101,11 @@ static void filtcolumf(float *point, int y, int skip) void IMB_filtery(struct ImBuf *ibuf) { - unsigned char *point; + uchar *point; float *pointf; int x, y, skip; - point = (unsigned char *)ibuf->rect; + point = (uchar *)ibuf->rect; pointf = ibuf->rect_float; x = ibuf->x; @@ -142,11 +142,11 @@ void IMB_filtery(struct ImBuf *ibuf) void imb_filterx(struct ImBuf *ibuf) { - unsigned char *point; + uchar *point; float *pointf; int x, y, skip; - point = (unsigned char *)ibuf->rect; + point = (uchar *)ibuf->rect; pointf = ibuf->rect_float; x = ibuf->x; @@ -395,7 +395,7 @@ static 
int check_pixel_assigned( res = mask[index] != 0 ? 1 : 0; } else if ((is_float && ((const float *)buffer)[alpha_index] != 0.0f) || - (!is_float && ((const unsigned char *)buffer)[alpha_index] != 0)) { + (!is_float && ((const uchar *)buffer)[alpha_index] != 0)) { res = 1; } } @@ -408,7 +408,7 @@ void IMB_filter_extend(struct ImBuf *ibuf, char *mask, int filter) const int width = ibuf->x; const int height = ibuf->y; const int depth = 4; /* always 4 channels */ - const int chsize = ibuf->rect_float ? sizeof(float) : sizeof(unsigned char); + const int chsize = ibuf->rect_float ? sizeof(float) : sizeof(uchar); const size_t bsize = ((size_t)width) * height * depth * chsize; const bool is_float = (ibuf->rect_float != NULL); void *dstbuf = (void *)MEM_dupallocN(ibuf->rect_float ? (void *)ibuf->rect_float : @@ -478,7 +478,7 @@ void IMB_filter_extend(struct ImBuf *ibuf, char *mask, int filter) } else { for (c = 0; c < depth; c++) { - tmp[c] = (float)((const unsigned char *)srcbuf)[depth * tmpindex + c]; + tmp[c] = (float)((const uchar *)srcbuf)[depth * tmpindex + c]; } } @@ -505,8 +505,10 @@ void IMB_filter_extend(struct ImBuf *ibuf, char *mask, int filter) } else { for (c = 0; c < depth; c++) { - ((unsigned char *)dstbuf)[depth * index + c] = - acc[c] > 255 ? 255 : (acc[c] < 0 ? 0 : (unsigned char)roundf(acc[c])); + ((uchar *)dstbuf)[depth * index + c] = acc[c] > 255 ? + 255 : + (acc[c] < 0 ? 0 : + (uchar)roundf(acc[c])); } } @@ -613,7 +615,7 @@ ImBuf *IMB_getmipmap(ImBuf *ibuf, int level) return (level == 0) ? 
ibuf : ibuf->mipmap[level - 1]; } -void IMB_premultiply_rect(unsigned int *rect, char planes, int w, int h) +void IMB_premultiply_rect(uint *rect, char planes, int w, int h) { char *cp; int x, y, val; @@ -674,7 +676,7 @@ void IMB_premultiply_alpha(ImBuf *ibuf) } } -void IMB_unpremultiply_rect(unsigned int *rect, char planes, int w, int h) +void IMB_unpremultiply_rect(uint *rect, char planes, int w, int h) { char *cp; int x, y; diff --git a/source/blender/imbuf/intern/imageprocess.c b/source/blender/imbuf/intern/imageprocess.c index 13bf3697946..4530959e5ac 100644 --- a/source/blender/imbuf/intern/imageprocess.c +++ b/source/blender/imbuf/intern/imageprocess.c @@ -26,7 +26,7 @@ void IMB_convert_rgba_to_abgr(struct ImBuf *ibuf) { size_t size; - unsigned char rt, *cp = (unsigned char *)ibuf->rect; + uchar rt, *cp = (uchar *)ibuf->rect; float rtf, *cpf = ibuf->rect_float; if (ibuf->rect) { @@ -58,14 +58,13 @@ void IMB_convert_rgba_to_abgr(struct ImBuf *ibuf) } } -static void pixel_from_buffer( - const struct ImBuf *ibuf, unsigned char **outI, float **outF, int x, int y) +static void pixel_from_buffer(const struct ImBuf *ibuf, uchar **outI, float **outF, int x, int y) { size_t offset = ((size_t)ibuf->x) * y * 4 + 4 * x; if (ibuf->rect) { - *outI = (unsigned char *)ibuf->rect + offset; + *outI = (uchar *)ibuf->rect + offset; } if (ibuf->rect_float) { @@ -78,19 +77,19 @@ static void pixel_from_buffer( * \{ */ void bicubic_interpolation_color( - const struct ImBuf *in, unsigned char outI[4], float outF[4], float u, float v) + const struct ImBuf *in, uchar outI[4], float outF[4], float u, float v) { if (outF) { BLI_bicubic_interpolation_fl(in->rect_float, outF, in->x, in->y, 4, u, v); } else { - BLI_bicubic_interpolation_char((unsigned char *)in->rect, outI, in->x, in->y, 4, u, v); + BLI_bicubic_interpolation_char((uchar *)in->rect, outI, in->x, in->y, 4, u, v); } } void bicubic_interpolation(const ImBuf *in, ImBuf *out, float u, float v, int xout, int yout) { - unsigned 
char *outI = NULL; + uchar *outI = NULL; float *outF = NULL; if (in == NULL || (in->rect == NULL && in->rect_float == NULL)) { @@ -110,7 +109,7 @@ void bicubic_interpolation(const ImBuf *in, ImBuf *out, float u, float v, int xo * \{ */ void bilinear_interpolation_color_fl( - const struct ImBuf *in, unsigned char UNUSED(outI[4]), float outF[4], float u, float v) + const struct ImBuf *in, uchar UNUSED(outI[4]), float outF[4], float u, float v) { BLI_assert(outF); BLI_assert(in->rect_float); @@ -118,21 +117,21 @@ void bilinear_interpolation_color_fl( } void bilinear_interpolation_color_char( - const struct ImBuf *in, unsigned char outI[4], float UNUSED(outF[4]), float u, float v) + const struct ImBuf *in, uchar outI[4], float UNUSED(outF[4]), float u, float v) { BLI_assert(outI); BLI_assert(in->rect); - BLI_bilinear_interpolation_char((unsigned char *)in->rect, outI, in->x, in->y, 4, u, v); + BLI_bilinear_interpolation_char((uchar *)in->rect, outI, in->x, in->y, 4, u, v); } void bilinear_interpolation_color( - const struct ImBuf *in, unsigned char outI[4], float outF[4], float u, float v) + const struct ImBuf *in, uchar outI[4], float outF[4], float u, float v) { if (outF) { BLI_bilinear_interpolation_fl(in->rect_float, outF, in->x, in->y, 4, u, v); } else { - BLI_bilinear_interpolation_char((unsigned char *)in->rect, outI, in->x, in->y, 4, u, v); + BLI_bilinear_interpolation_char((uchar *)in->rect, outI, in->x, in->y, 4, u, v); } } @@ -140,10 +139,10 @@ void bilinear_interpolation_color( /* BILINEAR INTERPOLATION */ void bilinear_interpolation_color_wrap( - const struct ImBuf *in, unsigned char outI[4], float outF[4], float u, float v) + const struct ImBuf *in, uchar outI[4], float outF[4], float u, float v) { float *row1, *row2, *row3, *row4, a, b; - unsigned char *row1I, *row2I, *row3I, *row4I; + uchar *row1I, *row2I, *row3I, *row4I; float a_b, ma_b, a_mb, ma_mb; int y1, y2, x1, x2; @@ -198,10 +197,10 @@ void bilinear_interpolation_color_wrap( } if (outI) { /* 
sample including outside of edges of image */ - row1I = (unsigned char *)in->rect + ((size_t)in->x) * y1 * 4 + 4 * x1; - row2I = (unsigned char *)in->rect + ((size_t)in->x) * y2 * 4 + 4 * x1; - row3I = (unsigned char *)in->rect + ((size_t)in->x) * y1 * 4 + 4 * x2; - row4I = (unsigned char *)in->rect + ((size_t)in->x) * y2 * 4 + 4 * x2; + row1I = (uchar *)in->rect + ((size_t)in->x) * y1 * 4 + 4 * x1; + row2I = (uchar *)in->rect + ((size_t)in->x) * y2 * 4 + 4 * x1; + row3I = (uchar *)in->rect + ((size_t)in->x) * y1 * 4 + 4 * x2; + row4I = (uchar *)in->rect + ((size_t)in->x) * y2 * 4 + 4 * x2; /* Tested with white images and this should not wrap back to zero. */ outI[0] = roundf(ma_mb * row1I[0] + a_mb * row3I[0] + ma_b * row2I[0] + a_b * row4I[0]); @@ -213,7 +212,7 @@ void bilinear_interpolation_color_wrap( void bilinear_interpolation(const ImBuf *in, ImBuf *out, float u, float v, int xout, int yout) { - unsigned char *outI = NULL; + uchar *outI = NULL; float *outF = NULL; if (in == NULL || (in->rect == NULL && in->rect_float == NULL)) { @@ -233,7 +232,7 @@ void bilinear_interpolation(const ImBuf *in, ImBuf *out, float u, float v, int x * \{ */ void nearest_interpolation_color_char( - const struct ImBuf *in, unsigned char outI[4], float UNUSED(outF[4]), float u, float v) + const struct ImBuf *in, uchar outI[4], float UNUSED(outF[4]), float u, float v) { BLI_assert(outI); BLI_assert(in->rect); @@ -248,7 +247,7 @@ void nearest_interpolation_color_char( } const size_t offset = ((size_t)in->x * y1 + x1) * 4; - const unsigned char *dataI = (unsigned char *)in->rect + offset; + const uchar *dataI = (uchar *)in->rect + offset; outI[0] = dataI[0]; outI[1] = dataI[1]; outI[2] = dataI[2]; @@ -256,7 +255,7 @@ void nearest_interpolation_color_char( } void nearest_interpolation_color_fl( - const struct ImBuf *in, unsigned char UNUSED(outI[4]), float outF[4], float u, float v) + const struct ImBuf *in, uchar UNUSED(outI[4]), float outF[4], float u, float v) { BLI_assert(outF); 
BLI_assert(in->rect_float); @@ -276,7 +275,7 @@ void nearest_interpolation_color_fl( } void nearest_interpolation_color( - const struct ImBuf *in, unsigned char outI[4], float outF[4], float u, float v) + const struct ImBuf *in, uchar outI[4], float outF[4], float u, float v) { if (outF) { nearest_interpolation_color_fl(in, outI, outF, u, v); @@ -287,10 +286,10 @@ void nearest_interpolation_color( } void nearest_interpolation_color_wrap( - const struct ImBuf *in, unsigned char outI[4], float outF[4], float u, float v) + const struct ImBuf *in, uchar outI[4], float outF[4], float u, float v) { const float *dataF; - unsigned char *dataI; + uchar *dataI; int y, x; /* ImBuf in must have a valid rect or rect_float, assume this is already checked */ @@ -309,7 +308,7 @@ void nearest_interpolation_color_wrap( y += in->y; } - dataI = (unsigned char *)in->rect + ((size_t)in->x) * y * 4 + 4 * x; + dataI = (uchar *)in->rect + ((size_t)in->x) * y * 4 + 4 * x; if (outI) { outI[0] = dataI[0]; outI[1] = dataI[1]; @@ -327,7 +326,7 @@ void nearest_interpolation_color_wrap( void nearest_interpolation(const ImBuf *in, ImBuf *out, float u, float v, int xout, int yout) { - unsigned char *outI = NULL; + uchar *outI = NULL; float *outF = NULL; if (in == NULL || (in->rect == NULL && in->rect_float == NULL)) { @@ -446,10 +445,10 @@ void IMB_alpha_under_color_float(float *rect_float, int x, int y, float backcol[ } } -void IMB_alpha_under_color_byte(unsigned char *rect, int x, int y, const float backcol[3]) +void IMB_alpha_under_color_byte(uchar *rect, int x, int y, const float backcol[3]) { size_t a = ((size_t)x) * y; - unsigned char *cp = rect; + uchar *cp = rect; while (a--) { if (cp[3] == 255) { @@ -487,7 +486,7 @@ void IMB_sampleImageAtLocation(ImBuf *ibuf, float x, float y, bool make_linear_r nearest_interpolation_color(ibuf, NULL, color, x, y); } else { - unsigned char byte_color[4]; + uchar byte_color[4]; nearest_interpolation_color(ibuf, byte_color, NULL, x, y); 
rgba_uchar_to_float(color, byte_color); if (make_linear_rgb) { diff --git a/source/blender/imbuf/intern/jp2.c b/source/blender/imbuf/intern/jp2.c index a14c94d5d62..f57d4382672 100644 --- a/source/blender/imbuf/intern/jp2.c +++ b/source/blender/imbuf/intern/jp2.c @@ -39,7 +39,7 @@ typedef struct img_folder { float *rates; } img_fol_t; -static bool check_jp2(const unsigned char *mem, const size_t size) /* J2K_CFMT */ +static bool check_jp2(const uchar *mem, const size_t size) /* J2K_CFMT */ { if (size < sizeof(JP2_HEAD)) { return false; @@ -47,7 +47,7 @@ static bool check_jp2(const unsigned char *mem, const size_t size) /* J2K_CFMT * return memcmp(JP2_HEAD, mem, sizeof(JP2_HEAD)) ? 0 : 1; } -static bool check_j2k(const unsigned char *mem, const size_t size) /* J2K_CFMT */ +static bool check_j2k(const uchar *mem, const size_t size) /* J2K_CFMT */ { if (size < sizeof(J2K_HEAD)) { return false; @@ -55,8 +55,7 @@ static bool check_j2k(const unsigned char *mem, const size_t size) /* J2K_CFMT * return memcmp(J2K_HEAD, mem, sizeof(J2K_HEAD)) ? 
0 : 1; } -static OPJ_CODEC_FORMAT format_from_header(const unsigned char mem[JP2_FILEHEADER_SIZE], - const size_t size) +static OPJ_CODEC_FORMAT format_from_header(const uchar mem[JP2_FILEHEADER_SIZE], const size_t size) { if (check_jp2(mem, size)) { return OPJ_CODEC_JP2; @@ -68,7 +67,7 @@ static OPJ_CODEC_FORMAT format_from_header(const unsigned char mem[JP2_FILEHEADE return OPJ_CODEC_UNKNOWN; } -bool imb_is_a_jp2(const unsigned char *buf, size_t size) +bool imb_is_a_jp2(const uchar *buf, size_t size) { return (check_jp2(buf, size) || check_j2k(buf, size)); } @@ -102,11 +101,11 @@ static void info_callback(const char *msg, void *client_data) #endif #define PIXEL_LOOPER_BEGIN(_rect) \ - for (y = h - 1; y != (unsigned int)(-1); y--) { \ + for (y = h - 1; y != (uint)(-1); y--) { \ for (i = y * w, i_next = (y + 1) * w; i < i_next; i++, _rect += 4) { #define PIXEL_LOOPER_BEGIN_CHANNELS(_rect, _channels) \ - for (y = h - 1; y != (unsigned int)(-1); y--) { \ + for (y = h - 1; y != (uint)(-1); y--) { \ for (i = y * w, i_next = (y + 1) * w; i < i_next; i++, _rect += _channels) { #define PIXEL_LOOPER_END \ @@ -119,8 +118,8 @@ static void info_callback(const char *msg, void *client_data) * \{ */ struct BufInfo { - const unsigned char *buf; - const unsigned char *cur; + const uchar *buf; + const uchar *cur; OPJ_OFF_T len; }; @@ -300,10 +299,7 @@ static ImBuf *imb_load_jp2_stream(opj_stream_t *stream, int flags, char colorspace[IM_MAX_SPACE]); -ImBuf *imb_load_jp2(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +ImBuf *imb_load_jp2(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { const OPJ_CODEC_FORMAT format = (size > JP2_FILEHEADER_SIZE) ? 
format_from_header(mem, size) : OPJ_CODEC_UNKNOWN; @@ -322,7 +318,7 @@ ImBuf *imb_load_jp2(const unsigned char *mem, ImBuf *imb_load_jp2_filepath(const char *filepath, int flags, char colorspace[IM_MAX_SPACE]) { FILE *p_file = NULL; - unsigned char mem[JP2_FILEHEADER_SIZE]; + uchar mem[JP2_FILEHEADER_SIZE]; opj_stream_t *stream = opj_stream_create_from_file( filepath, OPJ_J2K_STREAM_CHUNK_SIZE, true, &p_file); if (stream) { @@ -358,8 +354,8 @@ static ImBuf *imb_load_jp2_stream(opj_stream_t *stream, long signed_offsets[4] = {0, 0, 0, 0}; int float_divs[4] = {1, 1, 1, 1}; - unsigned int i, i_next, w, h, planes; - unsigned int y; + uint i, i_next, w, h, planes; + uint y; int *r, *g, *b, *a; /* matching 'opj_image_comp.data' type */ opj_dparameters_t parameters; /* decompression parameters */ @@ -509,7 +505,7 @@ static ImBuf *imb_load_jp2_stream(opj_stream_t *stream, } } else { - unsigned char *rect_uchar = (unsigned char *)ibuf->rect; + uchar *rect_uchar = (uchar *)ibuf->rect; if (image->numcomps < 3) { r = image->comps[0].data; @@ -599,11 +595,11 @@ static opj_image_t *rawtoimage(const char *filename, (_val) <= 0.0f ? 0 : ((_val) >= 1.0f ? 
65535 : (int)(65535.0f * (_val))) #else -BLI_INLINE int UPSAMPLE_8_TO_12(const unsigned char _val) +BLI_INLINE int UPSAMPLE_8_TO_12(const uchar _val) { return (_val << 4) | (_val & ((1 << 4) - 1)); } -BLI_INLINE int UPSAMPLE_8_TO_16(const unsigned char _val) +BLI_INLINE int UPSAMPLE_8_TO_16(const uchar _val) { return (_val << 8) + _val; } @@ -811,14 +807,14 @@ static float channel_colormanage_noop(float value) static opj_image_t *ibuftoimage(ImBuf *ibuf, opj_cparameters_t *parameters) { - unsigned char *rect_uchar; + uchar *rect_uchar; float *rect_float, from_straight[4]; - unsigned int subsampling_dx = parameters->subsampling_dx; - unsigned int subsampling_dy = parameters->subsampling_dy; + uint subsampling_dx = parameters->subsampling_dx; + uint subsampling_dy = parameters->subsampling_dy; - unsigned int i, i_next, numcomps, w, h, prec; - unsigned int y; + uint i, i_next, numcomps, w, h, prec; + uint y; int *r, *g, *b, *a; /* matching 'opj_image_comp.data' type */ OPJ_COLOR_SPACE color_space; opj_image_cmptparm_t cmptparm[4]; /* maximum of 4 components */ @@ -910,7 +906,7 @@ static opj_image_t *ibuftoimage(ImBuf *ibuf, opj_cparameters_t *parameters) image->y1 = image->y0 + (h - 1) * subsampling_dy + 1 + image->y0; /* set image data */ - rect_uchar = (unsigned char *)ibuf->rect; + rect_uchar = (uchar *)ibuf->rect; rect_float = ibuf->rect_float; /* set the destination channels */ diff --git a/source/blender/imbuf/intern/jpeg.c b/source/blender/imbuf/intern/jpeg.c index 06f9202a1c6..e03765fea92 100644 --- a/source/blender/imbuf/intern/jpeg.c +++ b/source/blender/imbuf/intern/jpeg.c @@ -37,7 +37,7 @@ static void init_source(j_decompress_ptr cinfo); static boolean fill_input_buffer(j_decompress_ptr cinfo); static void skip_input_data(j_decompress_ptr cinfo, long num_bytes); static void term_source(j_decompress_ptr cinfo); -static void memory_source(j_decompress_ptr cinfo, const unsigned char *buffer, size_t size); +static void memory_source(j_decompress_ptr cinfo, 
const uchar *buffer, size_t size); static boolean handle_app1(j_decompress_ptr cinfo); static ImBuf *ibJpegImageFromCinfo(struct jpeg_decompress_struct *cinfo, int flags, @@ -48,7 +48,7 @@ static ImBuf *ibJpegImageFromCinfo(struct jpeg_decompress_struct *cinfo, static const uchar jpeg_default_quality = 75; static uchar ibuf_quality; -bool imb_is_a_jpeg(const unsigned char *mem, const size_t size) +bool imb_is_a_jpeg(const uchar *mem, const size_t size) { const char magic[2] = {0xFF, 0xD8}; if (size < sizeof(magic)) { @@ -89,7 +89,7 @@ static void jpeg_error(j_common_ptr cinfo) #if 0 typedef struct { - unsigned char *buffer; + uchar *buffer; int filled; } buffer_struct; #endif @@ -97,7 +97,7 @@ typedef struct { typedef struct { struct jpeg_source_mgr pub; /* public fields */ - const unsigned char *buffer; + const uchar *buffer; int size; JOCTET terminal[2]; } my_source_mgr; @@ -144,7 +144,7 @@ static void term_source(j_decompress_ptr cinfo) (void)cinfo; /* unused */ } -static void memory_source(j_decompress_ptr cinfo, const unsigned char *buffer, size_t size) +static void memory_source(j_decompress_ptr cinfo, const uchar *buffer, size_t size) { my_src_ptr src; @@ -205,11 +205,11 @@ static void memory_source(j_decompress_ptr cinfo, const unsigned char *buffer, s MAKESTMT(MAKE_BYTE_AVAIL(cinfo, action); bytes_in_buffer--; V = GETJOCTET(*next_input_byte++);) /* As above, but read two bytes interpreted as an unsigned 16-bit integer. - * V should be declared unsigned int or perhaps INT32. + * V should be declared `uint` or perhaps INT32. 
*/ #define INPUT_2BYTES(cinfo, V, action) \ MAKESTMT(MAKE_BYTE_AVAIL(cinfo, action); bytes_in_buffer--; \ - V = ((unsigned int)GETJOCTET(*next_input_byte++)) << 8; \ + V = ((uint)GETJOCTET(*next_input_byte++)) << 8; \ MAKE_BYTE_AVAIL(cinfo, action); \ bytes_in_buffer--; \ V += GETJOCTET(*next_input_byte++);) @@ -445,10 +445,7 @@ static ImBuf *ibJpegImageFromCinfo(struct jpeg_decompress_struct *cinfo, return ibuf; } -ImBuf *imb_load_jpeg(const unsigned char *buffer, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +ImBuf *imb_load_jpeg(const uchar *buffer, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { struct jpeg_decompress_struct _cinfo, *cinfo = &_cinfo; struct my_error_mgr jerr; @@ -521,7 +518,7 @@ struct ImBuf *imb_thumbnail_jpeg(const char *filepath, if ((fgetc(infile) == JPEG_MARKER_MSB) && (fgetc(infile) == JPEG_MARKER_SOI) && (fgetc(infile) == JPEG_MARKER_MSB) && (fgetc(infile) == JPEG_MARKER_APP1)) { /* This is a JPEG in EXIF format (SOI + APP1), not JFIF (SOI + APP0). */ - unsigned int i = JPEG_APP1_MAX; + uint i = JPEG_APP1_MAX; /* All EXIF data is within this 64K header segment. Skip ahead until next SOI for thumbnail. 
*/ while (!((fgetc(infile) == JPEG_MARKER_MSB) && (fgetc(infile) == JPEG_MARKER_SOI)) && !feof(infile) && i--) { diff --git a/source/blender/imbuf/intern/moviecache.cc b/source/blender/imbuf/intern/moviecache.cc index 91a7dfdfae2..54d95578120 100644 --- a/source/blender/imbuf/intern/moviecache.cc +++ b/source/blender/imbuf/intern/moviecache.cc @@ -81,7 +81,7 @@ struct MovieCacheItem { bool added_empty; }; -static unsigned int moviecache_hashhash(const void *keyv) +static uint moviecache_hashhash(const void *keyv) { const MovieCacheKey *key = (const MovieCacheKey *)keyv; diff --git a/source/blender/imbuf/intern/oiio/openimageio_api.cpp b/source/blender/imbuf/intern/oiio/openimageio_api.cpp index e887424d7b2..5c7b7d9fae4 100644 --- a/source/blender/imbuf/intern/oiio/openimageio_api.cpp +++ b/source/blender/imbuf/intern/oiio/openimageio_api.cpp @@ -32,7 +32,7 @@ OIIO_NAMESPACE_USING using std::string; using std::unique_ptr; -using uchar = unsigned char; +using uchar = unsigned char; template<class T, class Q> static void fill_all_channels(T *pixels, int width, int height, int components, Q alpha) @@ -147,9 +147,9 @@ static ImBuf *imb_oiio_load_image_float( extern "C" { -bool imb_is_a_photoshop(const unsigned char *mem, size_t size) +bool imb_is_a_photoshop(const uchar *mem, size_t size) { - const unsigned char magic[4] = {'8', 'B', 'P', 'S'}; + const uchar magic[4] = {'8', 'B', 'P', 'S'}; if (size < sizeof(magic)) { return false; } } diff --git a/source/blender/imbuf/intern/openexr/openexr_api.cpp b/source/blender/imbuf/intern/openexr/openexr_api.cpp index eb6ce5df794..b4ccdfab9a5 100644 --- a/source/blender/imbuf/intern/openexr/openexr_api.cpp +++ b/source/blender/imbuf/intern/openexr/openexr_api.cpp @@ -122,8 +122,7 @@ static void imb_exr_type_by_channels(ChannelList &channels, class IMemStream : public Imf::IStream { public: - IMemStream(unsigned char *exrbuf, size_t exrsize) - : IStream("<memory>"), _exrpos(0), _exrsize(exrsize) + IMemStream(uchar *exrbuf, size_t exrsize) : 
IStream("<memory>"), _exrpos(0), _exrsize(exrsize) { _exrbuf = exrbuf; } @@ -156,7 +155,7 @@ class IMemStream : public Imf::IStream { private: exr_file_offset_t _exrpos; exr_file_offset_t _exrsize; - unsigned char *_exrbuf; + uchar *_exrbuf; }; /* Memory-Mapped Input Stream */ @@ -178,7 +177,7 @@ class IMMapStream : public Imf::IStream { throw IEX_NAMESPACE::InputExc("BLI_mmap_open failed"); } close(file); - _exrbuf = (unsigned char *)BLI_mmap_get_pointer(_mmap_file); + _exrbuf = (uchar *)BLI_mmap_get_pointer(_mmap_file); } ~IMMapStream() override @@ -216,7 +215,7 @@ class IMMapStream : public Imf::IStream { BLI_mmap_file *_mmap_file; exr_file_offset_t _exrpos; exr_file_offset_t _exrsize; - unsigned char *_exrbuf; + uchar *_exrbuf; }; /* File Input Stream */ @@ -395,7 +394,7 @@ static half float_to_half_safe(const float value) extern "C" { -bool imb_is_a_openexr(const unsigned char *mem, const size_t size) +bool imb_is_a_openexr(const uchar *mem, const size_t size) { /* No define is exposed for this size. 
*/ if (size < 4) { @@ -547,10 +546,10 @@ static bool imb_save_openexr_half(ImBuf *ibuf, const char *name, const int flags } } else { - unsigned char *from; + uchar *from; for (int i = ibuf->y - 1; i >= 0; i--) { - from = (unsigned char *)ibuf->rect + 4 * i * width; + from = (uchar *)ibuf->rect + 4 * i * width; for (int j = ibuf->x; j > 0; j--) { to->r = srgb_to_linearrgb((float)from[0] / 255.0f); @@ -1670,29 +1669,29 @@ static bool imb_exr_multilayer_parse_channels_from_file(ExrHandle *data) if (ELEM(pass->totchan, 3, 4)) { if (pass->chan[0]->chan_id == 'B' || pass->chan[1]->chan_id == 'B' || pass->chan[2]->chan_id == 'B') { - lookup[(unsigned int)'R'] = 0; - lookup[(unsigned int)'G'] = 1; - lookup[(unsigned int)'B'] = 2; - lookup[(unsigned int)'A'] = 3; + lookup[(uint)'R'] = 0; + lookup[(uint)'G'] = 1; + lookup[(uint)'B'] = 2; + lookup[(uint)'A'] = 3; } else if (pass->chan[0]->chan_id == 'Y' || pass->chan[1]->chan_id == 'Y' || pass->chan[2]->chan_id == 'Y') { - lookup[(unsigned int)'X'] = 0; - lookup[(unsigned int)'Y'] = 1; - lookup[(unsigned int)'Z'] = 2; - lookup[(unsigned int)'W'] = 3; + lookup[(uint)'X'] = 0; + lookup[(uint)'Y'] = 1; + lookup[(uint)'Z'] = 2; + lookup[(uint)'W'] = 3; } else { - lookup[(unsigned int)'U'] = 0; - lookup[(unsigned int)'V'] = 1; - lookup[(unsigned int)'A'] = 2; + lookup[(uint)'U'] = 0; + lookup[(uint)'V'] = 1; + lookup[(uint)'A'] = 2; } for (int a = 0; a < pass->totchan; a++) { echan = pass->chan[a]; - echan->rect = pass->rect + lookup[(unsigned int)echan->chan_id]; + echan->rect = pass->rect + lookup[(uint)echan->chan_id]; echan->xstride = pass->totchan; echan->ystride = data->width * pass->totchan; - pass->chan_id[(unsigned int)lookup[(unsigned int)echan->chan_id]] = echan->chan_id; + pass->chan_id[(uint)lookup[(uint)echan->chan_id]] = echan->chan_id; } } else { /* unknown */ @@ -1969,7 +1968,7 @@ bool IMB_exr_has_multilayer(void *handle) return imb_exr_is_multi(*data->ifile); } -struct ImBuf *imb_load_openexr(const unsigned char 
*mem, +struct ImBuf *imb_load_openexr(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) @@ -1987,7 +1986,7 @@ struct ImBuf *imb_load_openexr(const unsigned char *mem, try { bool is_multi; - membuf = new IMemStream((unsigned char *)mem, size); + membuf = new IMemStream((uchar *)mem, size); file = new MultiPartInputFile(*membuf); Box2i dw = file->header(0).dataWindow(); @@ -2058,7 +2057,7 @@ struct ImBuf *imb_load_openexr(const unsigned char *mem, size_t xstride = sizeof(float[4]); size_t ystride = -xstride * width; - imb_addrectfloatImBuf(ibuf); + imb_addrectfloatImBuf(ibuf, 4); /* Inverse correct first pixel for data-window * coordinates (- dw.min.y because of y flip). */ @@ -2209,7 +2208,7 @@ struct ImBuf *imb_load_filepath_thumbnail_openexr(const char *filepath, if (file->header().hasPreviewImage()) { const Imf::PreviewImage &preview = file->header().previewImage(); ImBuf *ibuf = IMB_allocFromBuffer( - (unsigned int *)preview.pixels(), nullptr, preview.width(), preview.height(), 4); + (uint *)preview.pixels(), nullptr, preview.width(), preview.height(), 4); delete file; delete stream; IMB_flipy(ibuf); diff --git a/source/blender/imbuf/intern/png.c b/source/blender/imbuf/intern/png.c index 4d6dfac0ba0..df6959ca90b 100644 --- a/source/blender/imbuf/intern/png.c +++ b/source/blender/imbuf/intern/png.c @@ -31,21 +31,21 @@ #include "IMB_colormanagement_intern.h" typedef struct PNGReadStruct { - const unsigned char *data; - unsigned int size; - unsigned int seek; + const uchar *data; + uint size; + uint seek; } PNGReadStruct; static void ReadData(png_structp png_ptr, png_bytep data, png_size_t length); static void WriteData(png_structp png_ptr, png_bytep data, png_size_t length); static void Flush(png_structp png_ptr); -BLI_INLINE unsigned short UPSAMPLE_8_TO_16(const unsigned char _val) +BLI_INLINE ushort UPSAMPLE_8_TO_16(const uchar _val) { return (_val << 8) + _val; } -bool imb_is_a_png(const unsigned char *mem, size_t size) +bool 
imb_is_a_png(const uchar *mem, size_t size) { const int num_to_check = 8; if (size < num_to_check) { @@ -102,7 +102,7 @@ static float channel_colormanage_noop(float value) } /* wrap to avoid macro calling functions multiple times */ -BLI_INLINE unsigned short ftoshort(float val) +BLI_INLINE ushort ftoshort(float val) { return unit_float_to_ushort_clamp(val); } @@ -112,9 +112,9 @@ bool imb_savepng(struct ImBuf *ibuf, const char *filepath, int flags) png_structp png_ptr; png_infop info_ptr; - unsigned char *pixels = NULL; - unsigned char *from, *to; - unsigned short *pixels16 = NULL, *to16; + uchar *pixels = NULL; + uchar *from, *to; + ushort *pixels16 = NULL, *to16; float *from_float, from_straight[4]; png_bytepp row_pointers = NULL; int i, bytesperpixel, color_type = PNG_COLOR_TYPE_GRAY; @@ -169,10 +169,10 @@ bool imb_savepng(struct ImBuf *ibuf, const char *filepath, int flags) /* copy image data */ num_bytes = ((size_t)ibuf->x) * ibuf->y * bytesperpixel; if (is_16bit) { - pixels16 = MEM_mallocN(num_bytes * sizeof(unsigned short), "png 16bit pixels"); + pixels16 = MEM_mallocN(num_bytes * sizeof(ushort), "png 16bit pixels"); } else { - pixels = MEM_mallocN(num_bytes * sizeof(unsigned char), "png 8bit pixels"); + pixels = MEM_mallocN(num_bytes * sizeof(uchar), "png 8bit pixels"); } if (pixels == NULL && pixels16 == NULL) { printf( @@ -210,7 +210,7 @@ bool imb_savepng(struct ImBuf *ibuf, const char *filepath, int flags) return 0; } - from = (unsigned char *)ibuf->rect; + from = (uchar *)ibuf->rect; to = pixels; from_float = ibuf->rect_float; to16 = pixels16; @@ -453,8 +453,8 @@ bool imb_savepng(struct ImBuf *ibuf, const char *filepath, int flags) if (ibuf->ppm[0] > 0.0 && ibuf->ppm[1] > 0.0) { png_set_pHYs(png_ptr, info_ptr, - (unsigned int)(ibuf->ppm[0] + 0.5), - (unsigned int)(ibuf->ppm[1] + 0.5), + (uint)(ibuf->ppm[0] + 0.5), + (uint)(ibuf->ppm[1] + 0.5), PNG_RESOLUTION_METER); } @@ -468,15 +468,15 @@ bool imb_savepng(struct ImBuf *ibuf, const char *filepath, int 
flags) /* set the individual row-pointers to point at the correct offsets */ if (is_16bit) { for (i = 0; i < ibuf->y; i++) { - row_pointers[ibuf->y - 1 - i] = (png_bytep)((unsigned short *)pixels16 + + row_pointers[ibuf->y - 1 - i] = (png_bytep)((ushort *)pixels16 + (((size_t)i) * ibuf->x) * bytesperpixel); } } else { for (i = 0; i < ibuf->y; i++) { - row_pointers[ibuf->y - 1 - i] = (png_bytep)((unsigned char *)pixels + - (((size_t)i) * ibuf->x) * bytesperpixel * - sizeof(unsigned char)); + row_pointers[ibuf->y - 1 - i] = (png_bytep)((uchar *)pixels + (((size_t)i) * ibuf->x) * + bytesperpixel * + sizeof(uchar)); } } @@ -521,22 +521,22 @@ static void imb_png_error(png_structp UNUSED(png_ptr), png_const_charp message) fprintf(stderr, "libpng error: %s\n", message); } -ImBuf *imb_loadpng(const unsigned char *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) +ImBuf *imb_loadpng(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { struct ImBuf *ibuf = NULL; png_structp png_ptr; png_infop info_ptr; - unsigned char *pixels = NULL; - unsigned short *pixels16 = NULL; + uchar *pixels = NULL; + ushort *pixels16 = NULL; png_bytepp row_pointers = NULL; png_uint_32 width, height; int bit_depth, color_type; PNGReadStruct ps; - unsigned char *from, *to; - unsigned short *from16; + uchar *from, *to; + ushort *from16; float *to_float; - unsigned int channels; + uint channels; if (imb_is_a_png(mem, size) == 0) { return NULL; @@ -646,7 +646,7 @@ ImBuf *imb_loadpng(const unsigned char *mem, size_t size, int flags, char colors if (ibuf && ((flags & IB_test) == 0)) { if (bit_depth == 16) { - imb_addrectfloatImBuf(ibuf); + imb_addrectfloatImBuf(ibuf, 4); png_set_swap(png_ptr); pixels16 = imb_alloc_pixels(ibuf->x, ibuf->y, channels, sizeof(png_uint_16), "pixels"); @@ -718,7 +718,7 @@ ImBuf *imb_loadpng(const unsigned char *mem, size_t size, int flags, char colors else { imb_addrectImBuf(ibuf); - pixels = imb_alloc_pixels(ibuf->x, ibuf->y, channels, 
sizeof(unsigned char), "pixels"); + pixels = imb_alloc_pixels(ibuf->x, ibuf->y, channels, sizeof(uchar), "pixels"); if (pixels == NULL || ibuf->rect == NULL) { printf("Cannot allocate pixels array\n"); longjmp(png_jmpbuf(png_ptr), 1); @@ -733,16 +733,16 @@ ImBuf *imb_loadpng(const unsigned char *mem, size_t size, int flags, char colors /* set the individual row-pointers to point at the correct offsets */ for (int i = 0; i < ibuf->y; i++) { - row_pointers[ibuf->y - 1 - i] = (png_bytep)((unsigned char *)pixels + - (((size_t)i) * ibuf->x) * channels * - sizeof(unsigned char)); + row_pointers[ibuf->y - 1 - i] = (png_bytep)((uchar *)pixels + (((size_t)i) * ibuf->x) * + channels * + sizeof(uchar)); } png_read_image(png_ptr, row_pointers); /* copy image data */ - to = (unsigned char *)ibuf->rect; + to = (uchar *)ibuf->rect; from = pixels; switch (channels) { diff --git a/source/blender/imbuf/intern/radiance_hdr.c b/source/blender/imbuf/intern/radiance_hdr.c index aa07edf5c3a..00ef12a54f8 100644 --- a/source/blender/imbuf/intern/radiance_hdr.c +++ b/source/blender/imbuf/intern/radiance_hdr.c @@ -33,7 +33,7 @@ #define BLU 2 #define EXP 3 #define COLXS 128 -typedef unsigned char RGBE[4]; +typedef uchar RGBE[4]; typedef float fCOLOR[3]; /* copy source -> dest */ @@ -41,10 +41,7 @@ typedef float fCOLOR[3]; (c2[RED] = c1[RED], c2[GRN] = c1[GRN], c2[BLU] = c1[BLU], c2[EXP] = c1[EXP]) /* read routines */ -static const unsigned char *oldreadcolrs(RGBE *scan, - const unsigned char *mem, - int xmax, - const unsigned char *mem_eof) +static const uchar *oldreadcolrs(RGBE *scan, const uchar *mem, int xmax, const uchar *mem_eof) { size_t i, rshift = 0, len = xmax; while (len > 0) { @@ -72,10 +69,7 @@ static const unsigned char *oldreadcolrs(RGBE *scan, return mem; } -static const unsigned char *freadcolrs(RGBE *scan, - const unsigned char *mem, - int xmax, - const unsigned char *mem_eof) +static const uchar *freadcolrs(RGBE *scan, const uchar *mem, int xmax, const uchar *mem_eof) { if 
(UNLIKELY(mem_eof - mem < 4)) { return NULL; @@ -118,7 +112,7 @@ static const unsigned char *freadcolrs(RGBE *scan, } val = *mem++; while (code--) { - scan[j++][i] = (unsigned char)val; + scan[j++][i] = (uchar)val; } } else { @@ -167,16 +161,16 @@ static void FLOAT2RGBE(const fCOLOR fcol, RGBE rgbe) } else { d = (float)frexp(d, &e) * 256.0f / d; - rgbe[RED] = (unsigned char)(fcol[RED] * d); - rgbe[GRN] = (unsigned char)(fcol[GRN] * d); - rgbe[BLU] = (unsigned char)(fcol[BLU] * d); - rgbe[EXP] = (unsigned char)(e + COLXS); + rgbe[RED] = (uchar)(fcol[RED] * d); + rgbe[GRN] = (uchar)(fcol[GRN] * d); + rgbe[BLU] = (uchar)(fcol[BLU] * d); + rgbe[EXP] = (uchar)(e + COLXS); } } /* ImBuf read */ -bool imb_is_a_hdr(const unsigned char *buf, const size_t size) +bool imb_is_a_hdr(const uchar *buf, const size_t size) { /* NOTE: `#?RADIANCE` is used by other programs such as `ImageMagik`, * Although there are some files in the wild that only use `#?` (from looking online). @@ -187,17 +181,14 @@ bool imb_is_a_hdr(const unsigned char *buf, const size_t size) * * See: http://paulbourke.net/dataformats/pic/ */ - const unsigned char magic[2] = {'#', '?'}; + const uchar magic[2] = {'#', '?'}; if (size < sizeof(magic)) { return false; } return memcmp(buf, magic, sizeof(magic)) == 0; } -struct ImBuf *imb_loadhdr(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +struct ImBuf *imb_loadhdr(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { struct ImBuf *ibuf; RGBE *sline; @@ -205,7 +196,7 @@ struct ImBuf *imb_loadhdr(const unsigned char *mem, float *rect_float; int found = 0; int width = 0, height = 0; - const unsigned char *ptr, *mem_eof = mem + size; + const uchar *ptr, *mem_eof = mem + size; char oriY[3], oriX[3]; if (!imb_is_a_hdr(mem, size)) { @@ -246,7 +237,7 @@ struct ImBuf *imb_loadhdr(const unsigned char *mem, * since the format uses RLE compression. Can cause excessive memory allocation to occur. 
*/ /* find end of this line, data right behind it */ - ptr = (const unsigned char *)strchr((const char *)&mem[x], '\n'); + ptr = (const uchar *)strchr((const char *)&mem[x], '\n'); if (ptr == NULL || ptr >= mem_eof) { return NULL; } @@ -306,7 +297,7 @@ struct ImBuf *imb_loadhdr(const unsigned char *mem, /* ImBuf write */ static int fwritecolrs( - FILE *file, int width, int channels, const unsigned char *ibufscan, const float *fpscan) + FILE *file, int width, int channels, const uchar *ibufscan, const float *fpscan) { int beg, c2, count = 0; fCOLOR fcol; @@ -343,8 +334,8 @@ static int fwritecolrs( /* put magic header */ putc(2, file); putc(2, file); - putc((unsigned char)(width >> 8), file); - putc((unsigned char)(width & 255), file); + putc((uchar)(width >> 8), file); + putc((uchar)(width & 255), file); /* put components separately */ for (size_t i = 0; i < 4; i++) { for (size_t j = 0; j < width; j += count) { /* find next run */ @@ -362,8 +353,8 @@ static int fwritecolrs( c2 = j + 1; while (rgbe_scan[c2++][i] == rgbe_scan[j][i]) { if (c2 == beg) { /* short run */ - putc((unsigned char)(128 + beg - j), file); - putc((unsigned char)(rgbe_scan[j][i]), file); + putc((uchar)(128 + beg - j), file); + putc((uchar)(rgbe_scan[j][i]), file); j = beg; break; } @@ -373,13 +364,13 @@ static int fwritecolrs( if ((c2 = beg - j) > 128) { c2 = 128; } - putc((unsigned char)(c2), file); + putc((uchar)(c2), file); while (c2--) { putc(rgbe_scan[j++][i], file); } } if (count >= MINRUN) { /* write out run */ - putc((unsigned char)(128 + count), file); + putc((uchar)(128 + count), file); putc(rgbe_scan[beg][i], file); } else { @@ -411,7 +402,7 @@ bool imb_savehdr(struct ImBuf *ibuf, const char *filepath, int flags) FILE *file = BLI_fopen(filepath, "wb"); float *fp = NULL; size_t width = ibuf->x, height = ibuf->y; - unsigned char *cp = NULL; + uchar *cp = NULL; (void)flags; /* unused */ @@ -422,7 +413,7 @@ bool imb_savehdr(struct ImBuf *ibuf, const char *filepath, int flags) 
writeHeader(file, width, height); if (ibuf->rect) { - cp = (unsigned char *)ibuf->rect + ibuf->channels * (height - 1) * width; + cp = (uchar *)ibuf->rect + ibuf->channels * (height - 1) * width; } if (ibuf->rect_float) { fp = ibuf->rect_float + ibuf->channels * (height - 1) * width; diff --git a/source/blender/imbuf/intern/readimage.c b/source/blender/imbuf/intern/readimage.c index b33e9dc4e0e..a9b79ad6d19 100644 --- a/source/blender/imbuf/intern/readimage.c +++ b/source/blender/imbuf/intern/readimage.c @@ -81,11 +81,8 @@ static void imb_handle_alpha(ImBuf *ibuf, colormanage_imbuf_make_linear(ibuf, effective_colorspace); } -ImBuf *IMB_ibImageFromMemory(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE], - const char *descr) +ImBuf *IMB_ibImageFromMemory( + const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE], const char *descr) { ImBuf *ibuf; const ImFileType *type; @@ -157,7 +154,7 @@ ImBuf *IMB_loadifffile( int file, const char *filepath, int flags, char colorspace[IM_MAX_SPACE], const char *descr) { ImBuf *ibuf; - unsigned char *mem; + uchar *mem; size_t size; if (file == -1) { @@ -319,9 +316,9 @@ ImBuf *IMB_testiffname(const char *filepath, int flags) return ibuf; } -static void imb_loadtilefile(ImBuf *ibuf, int file, int tx, int ty, unsigned int *rect) +static void imb_loadtilefile(ImBuf *ibuf, int file, int tx, int ty, uint *rect) { - unsigned char *mem; + uchar *mem; size_t size; if (file == -1) { @@ -352,7 +349,7 @@ static void imb_loadtilefile(ImBuf *ibuf, int file, int tx, int ty, unsigned int imb_mmap_unlock(); } -void imb_loadtile(ImBuf *ibuf, int tx, int ty, unsigned int *rect) +void imb_loadtile(ImBuf *ibuf, int tx, int ty, uint *rect) { int file; diff --git a/source/blender/imbuf/intern/rectop.c b/source/blender/imbuf/intern/rectop.c index 2f864534d61..4159aa851c4 100644 --- a/source/blender/imbuf/intern/rectop.c +++ b/source/blender/imbuf/intern/rectop.c @@ -21,9 +21,9 @@ #include 
"MEM_guardedalloc.h" -void IMB_blend_color_byte(unsigned char dst[4], - const unsigned char src1[4], - const unsigned char src2[4], +void IMB_blend_color_byte(uchar dst[4], + const uchar src1[4], + const uchar src2[4], IMB_BlendMode mode) { switch (mode) { @@ -487,17 +487,15 @@ void IMB_rectcpy(ImBuf *dbuf, false); } -typedef void (*IMB_blend_func)(unsigned char *dst, - const unsigned char *src1, - const unsigned char *src2); +typedef void (*IMB_blend_func)(uchar *dst, const uchar *src1, const uchar *src2); typedef void (*IMB_blend_func_float)(float *dst, const float *src1, const float *src2); void IMB_rectblend(ImBuf *dbuf, const ImBuf *obuf, const ImBuf *sbuf, - unsigned short *dmask, - const unsigned short *curvemask, - const unsigned short *texmask, + ushort *dmask, + const ushort *curvemask, + const ushort *texmask, float mask_max, int destx, int desty, @@ -510,11 +508,11 @@ void IMB_rectblend(ImBuf *dbuf, IMB_BlendMode mode, bool accumulate) { - unsigned int *drect = NULL, *orect = NULL, *srect = NULL, *dr, * or, *sr; + uint *drect = NULL, *orect = NULL, *srect = NULL, *dr, * or, *sr; float *drectf = NULL, *orectf = NULL, *srectf = NULL, *drf, *orf, *srf; - const unsigned short *cmaskrect = curvemask, *cmr; - unsigned short *dmaskrect = dmask, *dmr; - const unsigned short *texmaskrect = texmask, *tmr; + const ushort *cmaskrect = curvemask, *cmr; + ushort *dmaskrect = dmask, *dmr; + const ushort *texmaskrect = texmask, *tmr; int srcskip, destskip, origskip, x; IMB_blend_func func = NULL; IMB_blend_func_float func_float = NULL; @@ -766,7 +764,7 @@ void IMB_rectblend(ImBuf *dbuf, if (dmaskrect) { dmr = dmaskrect; for (x = width; x > 0; x--, dr++, or ++, sr++, dmr++, cmr++) { - unsigned char *src = (unsigned char *)sr; + uchar *src = (uchar *)sr; float mask_lim = mask_max * (*cmr); if (texmaskrect) { @@ -786,7 +784,7 @@ void IMB_rectblend(ImBuf *dbuf, mask = min_ff(mask, 65535.0); if (mask > *dmr) { - unsigned char mask_src[4]; + uchar mask_src[4]; *dmr = mask; 
@@ -797,11 +795,11 @@ void IMB_rectblend(ImBuf *dbuf, if (mode == IMB_BLEND_INTERPOLATE) { mask_src[3] = src[3]; blend_color_interpolate_byte( - (unsigned char *)dr, (unsigned char *) or, mask_src, mask / 65535.0f); + (uchar *)dr, (uchar *) or, mask_src, mask / 65535.0f); } else { mask_src[3] = divide_round_i(src[3] * mask, 65535); - func((unsigned char *)dr, (unsigned char *) or, mask_src); + func((uchar *)dr, (uchar *) or, mask_src); } } } @@ -811,7 +809,7 @@ void IMB_rectblend(ImBuf *dbuf, /* no destination mask buffer, do regular blend with masktexture if present */ else { for (x = width; x > 0; x--, dr++, or ++, sr++, cmr++) { - unsigned char *src = (unsigned char *)sr; + uchar *src = (uchar *)sr; float mask = (float)mask_max * ((float)(*cmr)); if (texmaskrect) { @@ -821,7 +819,7 @@ void IMB_rectblend(ImBuf *dbuf, mask = min_ff(mask, 65535.0); if (src[3] && (mask > 0.0f)) { - unsigned char mask_src[4]; + uchar mask_src[4]; mask_src[0] = src[0]; mask_src[1] = src[1]; @@ -830,11 +828,11 @@ void IMB_rectblend(ImBuf *dbuf, if (mode == IMB_BLEND_INTERPOLATE) { mask_src[3] = src[3]; blend_color_interpolate_byte( - (unsigned char *)dr, (unsigned char *) or, mask_src, mask / 65535.0f); + (uchar *)dr, (uchar *) or, mask_src, mask / 65535.0f); } else { mask_src[3] = divide_round_i(src[3] * mask, 65535); - func((unsigned char *)dr, (unsigned char *) or, mask_src); + func((uchar *)dr, (uchar *) or, mask_src); } } } @@ -848,8 +846,8 @@ void IMB_rectblend(ImBuf *dbuf, else { /* regular blending */ for (x = width; x > 0; x--, dr++, or ++, sr++) { - if (((unsigned char *)sr)[3]) { - func((unsigned char *)dr, (unsigned char *) or, (unsigned char *)sr); + if (((uchar *)sr)[3]) { + func((uchar *)dr, (uchar *) or, (uchar *)sr); } } } @@ -956,8 +954,8 @@ void IMB_rectblend(ImBuf *dbuf, typedef struct RectBlendThreadData { ImBuf *dbuf; const ImBuf *obuf, *sbuf; - unsigned short *dmask; - const unsigned short *curvemask, *texmask; + ushort *dmask; + const ushort *curvemask, 
*texmask; float mask_max; int destx, desty, origx, origy; int srcx, srcy, width; @@ -991,9 +989,9 @@ static void rectblend_thread_do(void *data_v, int scanline) void IMB_rectblend_threaded(ImBuf *dbuf, const ImBuf *obuf, const ImBuf *sbuf, - unsigned short *dmask, - const unsigned short *curvemask, - const unsigned short *texmask, + ushort *dmask, + const ushort *curvemask, + const ushort *texmask, float mask_max, int destx, int desty, @@ -1052,7 +1050,7 @@ void IMB_rectfill(ImBuf *drect, const float col[4]) int num; if (drect->rect) { - unsigned int *rrect = drect->rect; + uint *rrect = drect->rect; char ccol[4]; ccol[0] = (int)(col[0] * 255); @@ -1062,7 +1060,7 @@ void IMB_rectfill(ImBuf *drect, const float col[4]) num = drect->x * drect->y; for (; num > 0; num--) { - *rrect++ = *((unsigned int *)ccol); + *rrect++ = *((uint *)ccol); } } @@ -1106,15 +1104,15 @@ void IMB_rectfill_area_replace( return; } - unsigned char col_char[4] = {col[0] * 255, col[1] * 255, col[2] * 255, col[3] * 255}; + uchar col_char[4] = {col[0] * 255, col[1] * 255, col[2] * 255, col[3] * 255}; for (int y = y1; y < y2; y++) { for (int x = x1; x < x2; x++) { size_t offset = ((size_t)ibuf->x) * y * 4 + 4 * x; if (ibuf->rect) { - unsigned char *rrect = (unsigned char *)ibuf->rect + offset; - memcpy(rrect, &col_char, sizeof(unsigned char) * 4); + uchar *rrect = (uchar *)ibuf->rect + offset; + memcpy(rrect, &col_char, sizeof(uchar) * 4); } if (ibuf->rect_float) { @@ -1125,7 +1123,7 @@ void IMB_rectfill_area_replace( } } -void buf_rectfill_area(unsigned char *rect, +void buf_rectfill_area(uchar *rect, float *rectf, int width, int height, @@ -1165,8 +1163,8 @@ void buf_rectfill_area(unsigned char *rect, aich = ai / 255.0f; if (rect) { - unsigned char *pixel; - unsigned char chr = 0, chg = 0, chb = 0; + uchar *pixel; + uchar chr = 0, chg = 0, chb = 0; float fr = 0, fg = 0, fb = 0; const int alphaint = unit_float_to_uchar_clamp(a); @@ -1247,16 +1245,8 @@ void IMB_rectfill_area(ImBuf *ibuf, if (!ibuf) 
{ return; } - buf_rectfill_area((unsigned char *)ibuf->rect, - ibuf->rect_float, - ibuf->x, - ibuf->y, - col, - display, - x1, - y1, - x2, - y2); + buf_rectfill_area( + (uchar *)ibuf->rect, ibuf->rect_float, ibuf->x, ibuf->y, col, display, x1, y1, x2, y2); } void IMB_rectfill_alpha(ImBuf *ibuf, const float value) @@ -1271,8 +1261,8 @@ void IMB_rectfill_alpha(ImBuf *ibuf, const float value) } if (ibuf->rect) { - const unsigned char cvalue = value * 255; - unsigned char *cbuf = ((unsigned char *)ibuf->rect) + 3; + const uchar cvalue = value * 255; + uchar *cbuf = ((uchar *)ibuf->rect) + 3; for (i = ibuf->x * ibuf->y; i > 0; i--, cbuf += 4) { *cbuf = cvalue; } diff --git a/source/blender/imbuf/intern/rotate.c b/source/blender/imbuf/intern/rotate.c index ac07ce85526..7081bf2ad26 100644 --- a/source/blender/imbuf/intern/rotate.c +++ b/source/blender/imbuf/intern/rotate.c @@ -22,7 +22,7 @@ void IMB_flipy(struct ImBuf *ibuf) } if (ibuf->rect) { - unsigned int *top, *bottom, *line; + uint *top, *bottom, *line; x_size = ibuf->x; y_size = ibuf->y; @@ -88,7 +88,7 @@ void IMB_flipx(struct ImBuf *ibuf) for (yi = y - 1; yi >= 0; yi--) { const size_t x_offset = (size_t)x * yi; for (xr = x - 1, xl = 0; xr >= xl; xr--, xl++) { - SWAP(unsigned int, ibuf->rect[x_offset + xr], ibuf->rect[x_offset + xl]); + SWAP(uint, ibuf->rect[x_offset + xr], ibuf->rect[x_offset + xl]); } } } diff --git a/source/blender/imbuf/intern/scaling.c b/source/blender/imbuf/intern/scaling.c index f4abc668402..05bee77a6cb 100644 --- a/source/blender/imbuf/intern/scaling.c +++ b/source/blender/imbuf/intern/scaling.c @@ -324,10 +324,9 @@ struct ImBuf *IMB_double_y(struct ImBuf *ibuf1) /* pretty much specific functions which converts uchar <-> ushort but assumes * ushort range of 255*255 which is more convenient here */ -MINLINE void straight_uchar_to_premul_ushort(unsigned short result[4], - const unsigned char color[4]) +MINLINE void straight_uchar_to_premul_ushort(ushort result[4], const uchar color[4]) { - 
unsigned short alpha = color[3]; + ushort alpha = color[3]; result[0] = color[0] * alpha; result[1] = color[1] * alpha; @@ -335,7 +334,7 @@ MINLINE void straight_uchar_to_premul_ushort(unsigned short result[4], result[3] = alpha * 256; } -MINLINE void premul_ushort_to_straight_uchar(unsigned char *result, const unsigned short color[4]) +MINLINE void premul_ushort_to_straight_uchar(uchar *result, const ushort color[4]) { if (color[3] <= 255) { result[0] = unit_ushort_to_uchar(color[0]); @@ -344,7 +343,7 @@ MINLINE void premul_ushort_to_straight_uchar(unsigned char *result, const unsign result[3] = unit_ushort_to_uchar(color[3]); } else { - unsigned short alpha = color[3] / 256; + ushort alpha = color[3] / 256; result[0] = unit_ushort_to_uchar((ushort)(color[0] / alpha * 256)); result[1] = unit_ushort_to_uchar((ushort)(color[1] / alpha * 256)); @@ -373,25 +372,25 @@ void imb_onehalf_no_alloc(struct ImBuf *ibuf2, struct ImBuf *ibuf1) } if (do_rect) { - unsigned char *cp1, *cp2, *dest; + uchar *cp1, *cp2, *dest; - cp1 = (unsigned char *)ibuf1->rect; - dest = (unsigned char *)ibuf2->rect; + cp1 = (uchar *)ibuf1->rect; + dest = (uchar *)ibuf2->rect; for (y = ibuf2->y; y > 0; y--) { cp2 = cp1 + (ibuf1->x << 2); for (x = ibuf2->x; x > 0; x--) { - unsigned short p1i[8], p2i[8], desti[4]; + ushort p1i[8], p2i[8], desti[4]; straight_uchar_to_premul_ushort(p1i, cp1); straight_uchar_to_premul_ushort(p2i, cp2); straight_uchar_to_premul_ushort(p1i + 4, cp1 + 4); straight_uchar_to_premul_ushort(p2i + 4, cp2 + 4); - desti[0] = ((unsigned int)p1i[0] + p2i[0] + p1i[4] + p2i[4]) >> 2; - desti[1] = ((unsigned int)p1i[1] + p2i[1] + p1i[5] + p2i[5]) >> 2; - desti[2] = ((unsigned int)p1i[2] + p2i[2] + p1i[6] + p2i[6]) >> 2; - desti[3] = ((unsigned int)p1i[3] + p2i[3] + p1i[7] + p2i[7]) >> 2; + desti[0] = ((uint)p1i[0] + p2i[0] + p1i[4] + p2i[4]) >> 2; + desti[1] = ((uint)p1i[1] + p2i[1] + p1i[5] + p2i[5]) >> 2; + desti[2] = ((uint)p1i[2] + p2i[2] + p1i[6] + p2i[6]) >> 2; + desti[3] = 
((uint)p1i[3] + p2i[3] + p1i[7] + p2i[7]) >> 2; premul_ushort_to_straight_uchar(dest, desti); @@ -460,12 +459,8 @@ ImBuf *IMB_onehalf(struct ImBuf *ibuf1) /* q_scale_linear_interpolation helper functions */ -static void enlarge_picture_byte(unsigned char *src, - unsigned char *dst, - int src_width, - int src_height, - int dst_width, - int dst_height) +static void enlarge_picture_byte( + uchar *src, uchar *dst, int src_width, int src_height, int dst_width, int dst_height) { double ratiox = (double)(dst_width - 1.0) / (double)(src_width - 1.001); double ratioy = (double)(dst_height - 1.0) / (double)(src_height - 1.001); @@ -477,8 +472,8 @@ static void enlarge_picture_byte(unsigned char *src, y_src = 0; for (y_dst = 0; y_dst < dst_height; y_dst++) { - unsigned char *line1 = src + (y_src >> 16) * 4 * src_width; - unsigned char *line2 = line1 + 4 * src_width; + uchar *line1 = src + (y_src >> 16) * 4 * src_width; + uchar *line2 = line1 + 4 * src_width; uintptr_t weight1y = 65536 - (y_src & 0xffff); uintptr_t weight2y = 65536 - weight1y; @@ -491,7 +486,7 @@ static void enlarge_picture_byte(unsigned char *src, uintptr_t weight1x = 65536 - (x_src & 0xffff); uintptr_t weight2x = 65536 - weight1x; - unsigned long x = (x_src >> 16) * 4; + ulong x = (x_src >> 16) * 4; *dst++ = ((((line1[x] * weight1y) >> 16) * weight1x) >> 16) + ((((line2[x] * weight2y) >> 16) * weight1x) >> 16) + @@ -528,19 +523,15 @@ struct scale_outpix_byte { uintptr_t weight; }; -static void shrink_picture_byte(unsigned char *src, - unsigned char *dst, - int src_width, - int src_height, - int dst_width, - int dst_height) +static void shrink_picture_byte( + uchar *src, uchar *dst, int src_width, int src_height, int dst_width, int dst_height) { double ratiox = (double)(dst_width) / (double)(src_width); double ratioy = (double)(dst_height) / (double)(src_height); uintptr_t x_src, dx_dst, x_dst; uintptr_t y_src, dy_dst, y_dst; intptr_t y_counter; - unsigned char *dst_begin = dst; + uchar *dst_begin = dst; 
struct scale_outpix_byte *dst_line1 = NULL; struct scale_outpix_byte *dst_line2 = NULL; @@ -556,7 +547,7 @@ static void shrink_picture_byte(unsigned char *src, y_dst = 0; y_counter = 65536; for (y_src = 0; y_src < src_height; y_src++) { - unsigned char *line = src + y_src * 4 * src_width; + uchar *line = src + y_src * 4 * src_width; uintptr_t weight1y = 65535 - (y_dst & 0xffff); uintptr_t weight2y = 65535 - weight1y; x_dst = 0; @@ -643,12 +634,8 @@ static void shrink_picture_byte(unsigned char *src, MEM_freeN(dst_line2); } -static void q_scale_byte(unsigned char *in, - unsigned char *out, - int in_width, - int in_height, - int dst_width, - int dst_height) +static void q_scale_byte( + uchar *in, uchar *out, int in_width, int in_height, int dst_width, int dst_height) { if (dst_width > in_width && dst_height > in_height) { enlarge_picture_byte(in, out, in_width, in_height, dst_width, dst_height); @@ -868,12 +855,12 @@ static bool q_scale_linear_interpolation(struct ImBuf *ibuf, int newx, int newy) } if (ibuf->rect) { - unsigned char *newrect = MEM_mallocN(sizeof(int) * newx * newy, "q_scale rect"); - q_scale_byte((unsigned char *)ibuf->rect, newrect, ibuf->x, ibuf->y, newx, newy); + uchar *newrect = MEM_mallocN(sizeof(int) * newx * newy, "q_scale rect"); + q_scale_byte((uchar *)ibuf->rect, newrect, ibuf->x, ibuf->y, newx, newy); imb_freerectImBuf(ibuf); ibuf->mall |= IB_rect; - ibuf->rect = (unsigned int *)newrect; + ibuf->rect = (uint *)newrect; } if (ibuf->rect_float) { float *newrect = MEM_mallocN(sizeof(float[4]) * newx * newy, "q_scale rectfloat"); @@ -1014,7 +1001,7 @@ static ImBuf *scaledownx(struct ImBuf *ibuf, int newx) BLI_assert((uchar *)rect - ((uchar *)ibuf->rect) == rect_size); /* see bug T26502. 
*/ imb_freerectImBuf(ibuf); ibuf->mall |= IB_rect; - ibuf->rect = (unsigned int *)_newrect; + ibuf->rect = (uint *)_newrect; } if (do_float) { // printf("%ld %ld\n", rectf - ibuf->rect_float, rect_size); @@ -1156,7 +1143,7 @@ static ImBuf *scaledowny(struct ImBuf *ibuf, int newy) BLI_assert((uchar *)rect - ((uchar *)ibuf->rect) == rect_size); /* see bug T26502. */ imb_freerectImBuf(ibuf); ibuf->mall |= IB_rect; - ibuf->rect = (unsigned int *)_newrect; + ibuf->rect = (uint *)_newrect; } if (do_float) { // printf("%ld %ld\n", rectf - ibuf->rect_float, rect_size); @@ -1361,7 +1348,7 @@ static ImBuf *scaleupx(struct ImBuf *ibuf, int newx) if (do_rect) { imb_freerectImBuf(ibuf); ibuf->mall |= IB_rect; - ibuf->rect = (unsigned int *)_newrect; + ibuf->rect = (uint *)_newrect; } if (do_float) { imb_freerectfloatImBuf(ibuf); @@ -1564,7 +1551,7 @@ static ImBuf *scaleupy(struct ImBuf *ibuf, int newy) if (do_rect) { imb_freerectImBuf(ibuf); ibuf->mall |= IB_rect; - ibuf->rect = (unsigned int *)_newrect; + ibuf->rect = (uint *)_newrect; } if (do_float) { imb_freerectfloatImBuf(ibuf); @@ -1641,7 +1628,7 @@ static void scalefast_Z_ImBuf(ImBuf *ibuf, int newx, int newy) } } -bool IMB_scaleImBuf(struct ImBuf *ibuf, unsigned int newx, unsigned int newy) +bool IMB_scaleImBuf(struct ImBuf *ibuf, uint newx, uint newy) { BLI_assert_msg(newx > 0 && newy > 0, "Images must be at least 1 on both dimensions!"); @@ -1686,11 +1673,11 @@ struct imbufRGBA { float r, g, b, a; }; -bool IMB_scalefastImBuf(struct ImBuf *ibuf, unsigned int newx, unsigned int newy) +bool IMB_scalefastImBuf(struct ImBuf *ibuf, uint newx, uint newy) { BLI_assert_msg(newx > 0 && newy > 0, "Images must be at least 1 on both dimensions!"); - unsigned int *rect, *_newrect, *newrect; + uint *rect, *_newrect, *newrect; struct imbufRGBA *rectf, *_newrectf, *newrectf; int x, y; bool do_float = false, do_rect = false; @@ -1789,23 +1776,23 @@ bool IMB_scalefastImBuf(struct ImBuf *ibuf, unsigned int newx, unsigned int newy typedef 
struct ScaleTreadInitData { ImBuf *ibuf; - unsigned int newx; - unsigned int newy; + uint newx; + uint newy; - unsigned char *byte_buffer; + uchar *byte_buffer; float *float_buffer; } ScaleTreadInitData; typedef struct ScaleThreadData { ImBuf *ibuf; - unsigned int newx; - unsigned int newy; + uint newx; + uint newy; int start_line; int tot_line; - unsigned char *byte_buffer; + uchar *byte_buffer; float *float_buffer; } ScaleThreadData; @@ -1844,9 +1831,8 @@ static void *do_scale_thread(void *data_v) int offset = y * data->newx + x; if (data->byte_buffer) { - unsigned char *pixel = data->byte_buffer + 4 * offset; - BLI_bilinear_interpolation_char( - (unsigned char *)ibuf->rect, pixel, ibuf->x, ibuf->y, 4, u, v); + uchar *pixel = data->byte_buffer + 4 * offset; + BLI_bilinear_interpolation_char((uchar *)ibuf->rect, pixel, ibuf->x, ibuf->y, 4, u, v); } if (data->float_buffer) { @@ -1860,7 +1846,7 @@ static void *do_scale_thread(void *data_v) return NULL; } -void IMB_scaleImBuf_threaded(ImBuf *ibuf, unsigned int newx, unsigned int newy) +void IMB_scaleImBuf_threaded(ImBuf *ibuf, uint newx, uint newy) { BLI_assert_msg(newx > 0 && newy > 0, "Images must be at least 1 on both dimensions!"); @@ -1893,7 +1879,7 @@ void IMB_scaleImBuf_threaded(ImBuf *ibuf, unsigned int newx, unsigned int newy) if (ibuf->rect) { imb_freerectImBuf(ibuf); ibuf->mall |= IB_rect; - ibuf->rect = (unsigned int *)init_data.byte_buffer; + ibuf->rect = (uint *)init_data.byte_buffer; } if (ibuf->rect_float) { diff --git a/source/blender/imbuf/intern/stereoimbuf.c b/source/blender/imbuf/intern/stereoimbuf.c index 2a0baaf6172..eb2701b5b9c 100644 --- a/source/blender/imbuf/intern/stereoimbuf.c +++ b/source/blender/imbuf/intern/stereoimbuf.c @@ -650,8 +650,8 @@ static void imb_stereo3d_squeeze_rect( IMB_stereo3d_write_dimensions(s3d->display_mode, false, x, y, &width, &height); ibuf = IMB_allocImBuf(width, height, channels, IB_rect); - IMB_buffer_byte_from_byte((unsigned char *)ibuf->rect, - (unsigned char 
*)rect, + IMB_buffer_byte_from_byte((uchar *)ibuf->rect, + (uchar *)rect, IB_PROFILE_SRGB, IB_PROFILE_SRGB, false, @@ -661,7 +661,7 @@ static void imb_stereo3d_squeeze_rect( width); IMB_scaleImBuf_threaded(ibuf, x, y); - memcpy(rect, ibuf->rect, x * y * sizeof(unsigned int)); + memcpy(rect, ibuf->rect, x * y * sizeof(uint)); IMB_freeImBuf(ibuf); } @@ -761,11 +761,14 @@ ImBuf *IMB_stereo3d_ImBuf(const ImageFormatData *im_format, ImBuf *ibuf_left, Im IMB_stereo3d_write_dimensions( im_format->stereo3d_format.display_mode, false, ibuf_left->x, ibuf_left->y, &width, &height); - ibuf_stereo = IMB_allocImBuf( - width, height, ibuf_left->planes, (is_float ? IB_rectfloat : IB_rect)); + ibuf_stereo = IMB_allocImBuf(width, height, ibuf_left->planes, 0); - ibuf_stereo->rect_colorspace = ibuf_left->rect_colorspace; - ibuf_stereo->float_colorspace = ibuf_left->float_colorspace; + if (is_float) { + imb_addrectfloatImBuf(ibuf_stereo, ibuf_left->channels); + } + else { + imb_addrectImBuf(ibuf_stereo); + } ibuf_stereo->flags = ibuf_left->flags; @@ -773,7 +776,7 @@ ImBuf *IMB_stereo3d_ImBuf(const ImageFormatData *im_format, ImBuf *ibuf_left, Im is_float, ibuf_left->x, ibuf_left->y, - 4, + ibuf_left->channels, (int *)ibuf_left->rect, (int *)ibuf_right->rect, (int *)ibuf_stereo->rect, @@ -1286,10 +1289,17 @@ void IMB_ImBufFromStereo3d(const Stereo3dFormat *s3d, &width, &height); - ibuf_left = IMB_allocImBuf( - width, height, ibuf_stereo3d->planes, (is_float ? IB_rectfloat : IB_rect)); - ibuf_right = IMB_allocImBuf( - width, height, ibuf_stereo3d->planes, (is_float ? 
IB_rectfloat : IB_rect)); + ibuf_left = IMB_allocImBuf(width, height, ibuf_stereo3d->planes, 0); + ibuf_right = IMB_allocImBuf(width, height, ibuf_stereo3d->planes, 0); + + if (is_float) { + imb_addrectfloatImBuf(ibuf_left, ibuf_stereo3d->channels); + imb_addrectfloatImBuf(ibuf_right, ibuf_stereo3d->channels); + } + else { + imb_addrectImBuf(ibuf_left); + imb_addrectImBuf(ibuf_right); + } ibuf_left->flags = ibuf_stereo3d->flags; ibuf_right->flags = ibuf_stereo3d->flags; @@ -1307,7 +1317,7 @@ void IMB_ImBufFromStereo3d(const Stereo3dFormat *s3d, is_float, ibuf_left->x, ibuf_left->y, - 4, + ibuf_left->channels, (int *)ibuf_left->rect, (int *)ibuf_right->rect, (int *)ibuf_stereo3d->rect, diff --git a/source/blender/imbuf/intern/targa.c b/source/blender/imbuf/intern/targa.c index 7cf90cd12e2..ed6e6e9866d 100644 --- a/source/blender/imbuf/intern/targa.c +++ b/source/blender/imbuf/intern/targa.c @@ -30,18 +30,18 @@ /***/ typedef struct TARGA { - unsigned char numid; - unsigned char maptyp; - unsigned char imgtyp; + uchar numid; + uchar maptyp; + uchar imgtyp; short maporig; short mapsize; - unsigned char mapbits; + uchar mapbits; short xorig; short yorig; short xsize; short ysize; - unsigned char pixsize; - unsigned char imgdes; + uchar pixsize; + uchar imgdes; } TARGA; /** @@ -54,7 +54,7 @@ typedef struct TARGA { /***/ -static int tga_out1(unsigned int data, FILE *file) +static int tga_out1(uint data, FILE *file) { uchar *p; @@ -65,7 +65,7 @@ static int tga_out1(unsigned int data, FILE *file) return ~EOF; } -static int tga_out2(unsigned int data, FILE *file) +static int tga_out2(uint data, FILE *file) { uchar *p; @@ -79,7 +79,7 @@ static int tga_out2(unsigned int data, FILE *file) return ~EOF; } -static int tga_out3(unsigned int data, FILE *file) +static int tga_out3(uint data, FILE *file) { uchar *p; @@ -96,7 +96,7 @@ static int tga_out3(unsigned int data, FILE *file) return ~EOF; } -static int tga_out4(unsigned int data, FILE *file) +static int tga_out4(uint data, 
FILE *file) { uchar *p; @@ -117,11 +117,11 @@ static int tga_out4(unsigned int data, FILE *file) return ~EOF; } -static bool makebody_tga(ImBuf *ibuf, FILE *file, int (*out)(unsigned int, FILE *)) +static bool makebody_tga(ImBuf *ibuf, FILE *file, int (*out)(uint, FILE *)) { int last, this; int copy, bytes; - unsigned int *rect, *rectstart, *temp; + uint *rect, *rectstart, *temp; int y; for (y = 0; y < ibuf->y; y++) { @@ -345,7 +345,7 @@ bool imb_savetarga(struct ImBuf *ibuf, const char *filepath, int UNUSED(flags)) return ok; } -static bool checktarga(TARGA *tga, const unsigned char *mem, const size_t size) +static bool checktarga(TARGA *tga, const uchar *mem, const size_t size) { if (size < TARGA_HEADER_SIZE) { return false; @@ -397,14 +397,14 @@ static bool checktarga(TARGA *tga, const unsigned char *mem, const size_t size) return true; } -bool imb_is_a_targa(const unsigned char *buf, size_t size) +bool imb_is_a_targa(const uchar *buf, size_t size) { TARGA tga; return checktarga(&tga, buf, size); } -static void complete_partial_load(struct ImBuf *ibuf, unsigned int *rect) +static void complete_partial_load(struct ImBuf *ibuf, uint *rect) { int size = (ibuf->x * ibuf->y) - (rect - ibuf->rect); if (size) { @@ -420,11 +420,11 @@ static void complete_partial_load(struct ImBuf *ibuf, unsigned int *rect) } } -static void decodetarga(struct ImBuf *ibuf, const unsigned char *mem, size_t mem_size, int psize) +static void decodetarga(struct ImBuf *ibuf, const uchar *mem, size_t mem_size, int psize) { - const unsigned char *mem_end = mem + mem_size; + const uchar *mem_end = mem + mem_size; int count, col, size; - unsigned int *rect; + uint *rect; uchar *cp = (uchar *)&col; if (ibuf == NULL) { @@ -545,11 +545,11 @@ partial_load: complete_partial_load(ibuf, rect); } -static void ldtarga(struct ImBuf *ibuf, const unsigned char *mem, size_t mem_size, int psize) +static void ldtarga(struct ImBuf *ibuf, const uchar *mem, size_t mem_size, int psize) { - const unsigned char 
*mem_end = mem + mem_size; + const uchar *mem_end = mem + mem_size; int col, size; - unsigned int *rect; + uint *rect; uchar *cp = (uchar *)&col; if (ibuf == NULL) { @@ -609,15 +609,12 @@ partial_load: complete_partial_load(ibuf, rect); } -ImBuf *imb_loadtarga(const unsigned char *mem, - size_t mem_size, - int flags, - char colorspace[IM_MAX_SPACE]) +ImBuf *imb_loadtarga(const uchar *mem, size_t mem_size, int flags, char colorspace[IM_MAX_SPACE]) { TARGA tga; struct ImBuf *ibuf; int count, size; - unsigned int *rect, *cmap = NULL /*, mincol = 0*/, cmap_max = 0; + uint *rect, *cmap = NULL /*, mincol = 0*/, cmap_max = 0; int32_t cp_data; uchar *cp = (uchar *)&cp_data; @@ -650,7 +647,7 @@ ImBuf *imb_loadtarga(const unsigned char *mem, /* Load color map. */ // mincol = tga.maporig; /* UNUSED */ cmap_max = tga.mapsize; - cmap = MEM_callocN(sizeof(unsigned int) * cmap_max, "targa cmap"); + cmap = MEM_callocN(sizeof(uint) * cmap_max, "targa cmap"); for (count = 0; count < cmap_max; count++) { switch (tga.mapbits >> 3) { @@ -753,7 +750,7 @@ ImBuf *imb_loadtarga(const unsigned char *mem, } if (tga.pixsize == 16) { - unsigned int col; + uint col; rect = ibuf->rect; for (size = ibuf->x * ibuf->y; size > 0; size--, rect++) { col = *rect; @@ -773,10 +770,10 @@ ImBuf *imb_loadtarga(const unsigned char *mem, if (ELEM(tga.imgtyp, 3, 11)) { uchar *crect; - unsigned int *lrect, col; + uint *lrect, col; crect = (uchar *)ibuf->rect; - lrect = (unsigned int *)ibuf->rect; + lrect = (uint *)ibuf->rect; for (size = ibuf->x * ibuf->y; size > 0; size--) { col = *lrect++; diff --git a/source/blender/imbuf/intern/thumbs.c b/source/blender/imbuf/intern/thumbs.c index 6f39009d38d..d535bd00501 100644 --- a/source/blender/imbuf/intern/thumbs.c +++ b/source/blender/imbuf/intern/thumbs.c @@ -136,7 +136,7 @@ typedef enum { /* Don't lose comment alignment. 
*/ /* clang-format off */ -static const unsigned char acceptable[96] = { +static const uchar acceptable[96] = { /* A table of the ASCII chars from space (32) to DEL (127) */ /* ! " # $ % & ' ( ) * + , - . / */ 0x00,0x3F,0x20,0x20,0x28,0x00,0x2C,0x3F,0x3F,0x3F,0x3F,0x2A,0x28,0x3F,0x3F,0x1C, @@ -176,7 +176,7 @@ static void escape_uri_string(const char *string, escaped_string_size -= 1; for (q = escaped_string, p = string; (*p != '\0') && escaped_string_size; p++) { - c = (unsigned char)*p; + c = (uchar)*p; if (!ACCEPTABLE(c)) { if (escaped_string_size < 3) { @@ -227,7 +227,7 @@ static bool uri_from_filename(const char *path, char *uri) return 0; } /* on windows, using always uppercase drive/volume letter in uri */ - vol[0] = (unsigned char)toupper(path[0]); + vol[0] = (uchar)toupper(path[0]); vol[1] = ':'; vol[2] = '\0'; strcat(orig_uri, vol); @@ -256,7 +256,7 @@ static bool thumbpathname_from_uri( if (r_name) { char hexdigest[33]; - unsigned char digest[16]; + uchar digest[16]; BLI_hash_md5_buffer(uri, strlen(uri), digest); hexdigest[0] = '\0'; BLI_snprintf(r_name, name_len, "%s.png", BLI_hash_md5_to_hexdigest(digest, hexdigest)); diff --git a/source/blender/imbuf/intern/thumbs_font.c b/source/blender/imbuf/intern/thumbs_font.c index c0a33f608a5..65848bfb55e 100644 --- a/source/blender/imbuf/intern/thumbs_font.c +++ b/source/blender/imbuf/intern/thumbs_font.c @@ -41,7 +41,7 @@ void IMB_thumb_ensure_translations(void) } } -struct ImBuf *IMB_thumb_load_font(const char *filepath, unsigned int x, unsigned int y) +struct ImBuf *IMB_thumb_load_font(const char *filepath, uint x, uint y) { const int font_size = y / 4; @@ -66,7 +66,7 @@ struct ImBuf *IMB_thumb_load_font(const char *filepath, unsigned int x, unsigned ARRAY_SIZE(thumb_str), font_color, font_size, - (unsigned char *)ibuf->rect, + (uchar *)ibuf->rect, ibuf->x, ibuf->y, ibuf->channels); @@ -83,7 +83,7 @@ bool IMB_thumb_load_font_get_hash(char *r_hash) int draw_str_lines = ARRAY_SIZE(thumb_str); int i; - unsigned 
char digest[16]; + uchar digest[16]; len += BLI_strncpy_rlen(str + len, THUMB_DEFAULT_HASH, sizeof(buf) - len); diff --git a/source/blender/imbuf/intern/tiff.c b/source/blender/imbuf/intern/tiff.c index 1989566fc32..f4829386aac 100644 --- a/source/blender/imbuf/intern/tiff.c +++ b/source/blender/imbuf/intern/tiff.c @@ -60,7 +60,7 @@ static void imb_tiff_DummyUnmapProc(thandle_t fd, tdata_t base, toff_t size); /** Structure for in-memory TIFF file. */ typedef struct ImbTIFFMemFile { /** Location of first byte of TIFF file. */ - const unsigned char *mem; + const uchar *mem; /** Current offset within the file. */ toff_t offset; /** Size of the TIFF file. */ @@ -262,7 +262,7 @@ static toff_t imb_tiff_SizeProc(thandle_t handle) return (toff_t)(mfile->size); } -static TIFF *imb_tiff_client_open(ImbTIFFMemFile *memFile, const unsigned char *mem, size_t size) +static TIFF *imb_tiff_client_open(ImbTIFFMemFile *memFile, const uchar *mem, size_t size) { /* open the TIFF client layer interface to the in-memory file */ memFile->mem = mem; @@ -303,7 +303,7 @@ static TIFF *imb_tiff_client_open(ImbTIFFMemFile *memFile, const unsigned char * * hence my manual comparison. - Jonathan Merritt (lancelet) 4th Sept 2005. 
*/ #define IMB_TIFF_NCB 4 /* number of comparison bytes used */ -bool imb_is_a_tiff(const unsigned char *buf, size_t size) +bool imb_is_a_tiff(const uchar *buf, size_t size) { const char big_endian[IMB_TIFF_NCB] = {0x4d, 0x4d, 0x00, 0x2a}; const char lil_endian[IMB_TIFF_NCB] = {0x49, 0x49, 0x2a, 0x00}; @@ -315,10 +315,7 @@ bool imb_is_a_tiff(const unsigned char *buf, size_t size) (memcmp(lil_endian, buf, IMB_TIFF_NCB) == 0)); } -static void scanline_contig_16bit(float *rectf, - const unsigned short *sbuf, - int scanline_w, - int spp) +static void scanline_contig_16bit(float *rectf, const ushort *sbuf, int scanline_w, int spp) { int i; for (i = 0; i < scanline_w; i++) { @@ -340,10 +337,7 @@ static void scanline_contig_32bit(float *rectf, const float *fbuf, int scanline_ } } -static void scanline_separate_16bit(float *rectf, - const unsigned short *sbuf, - int scanline_w, - int chan) +static void scanline_separate_16bit(float *rectf, const ushort *sbuf, int scanline_w, int chan) { int i; for (i = 0; i < scanline_w; i++) { @@ -392,7 +386,7 @@ static int imb_read_tiff_pixels(ImBuf *ibuf, TIFF *image) size_t scanline; int ib_flag = 0, row, chan; float *fbuf = NULL; - unsigned short *sbuf = NULL; + ushort *sbuf = NULL; TIFFGetField(image, TIFFTAG_BITSPERSAMPLE, &bitspersample); TIFFGetField(image, TIFFTAG_SAMPLESPERPIXEL, &spp); /* number of 'channels' */ @@ -410,7 +404,7 @@ static int imb_read_tiff_pixels(ImBuf *ibuf, TIFF *image) * So let's keep this thing here for until proper solution is found (sergey) */ - unsigned short extraSampleTypes[1]; + ushort extraSampleTypes[1]; extraSampleTypes[0] = EXTRASAMPLE_ASSOCALPHA; TIFFSetField(image, TIFFTAG_EXTRASAMPLES, 1, extraSampleTypes); } @@ -428,7 +422,7 @@ static int imb_read_tiff_pixels(ImBuf *ibuf, TIFF *image) } else if (bitspersample == 16) { ib_flag = IB_rectfloat; - sbuf = (unsigned short *)_TIFFmalloc(scanline); + sbuf = (ushort *)_TIFFmalloc(scanline); if (!sbuf) { goto cleanup; } @@ -539,10 +533,7 @@ void 
imb_inittiff(void) } } -ImBuf *imb_loadtiff(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +ImBuf *imb_loadtiff(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { TIFF *image = NULL; ImBuf *ibuf = NULL, *hbuf; @@ -589,7 +580,7 @@ ImBuf *imb_loadtiff(const unsigned char *mem, /* get alpha mode from file header */ if (flags & IB_alphamode_detect) { if (spp == 4) { - unsigned short extra, *extraSampleTypes; + ushort extra, *extraSampleTypes; const int found = TIFFGetField(image, TIFFTAG_EXTRASAMPLES, &extra, &extraSampleTypes); if (found && (extraSampleTypes[0] == EXTRASAMPLE_ASSOCALPHA)) { @@ -661,8 +652,7 @@ ImBuf *imb_loadtiff(const unsigned char *mem, return ibuf; } -void imb_loadtiletiff( - ImBuf *ibuf, const unsigned char *mem, size_t size, int tx, int ty, unsigned int *rect) +void imb_loadtiletiff(ImBuf *ibuf, const uchar *mem, size_t size, int tx, int ty, uint *rect) { TIFF *image = NULL; uint32_t width, height; @@ -723,9 +713,9 @@ bool imb_savetiff(ImBuf *ibuf, const char *filepath, int flags) TIFF *image = NULL; uint16_t samplesperpixel, bitspersample; size_t npixels; - unsigned char *pixels = NULL; - unsigned char *from = NULL, *to = NULL; - unsigned short *pixels16 = NULL, *to16 = NULL; + uchar *pixels = NULL; + uchar *from = NULL, *to = NULL; + ushort *pixels16 = NULL, *to16 = NULL; float *fromf = NULL; float xres, yres; int x, y, from_i, to_i, i; @@ -786,10 +776,10 @@ bool imb_savetiff(ImBuf *ibuf, const char *filepath, int flags) /* allocate array for pixel data */ npixels = ibuf->x * ibuf->y; if (bitspersample == 16) { - pixels16 = (unsigned short *)_TIFFmalloc(npixels * samplesperpixel * sizeof(unsigned short)); + pixels16 = (ushort *)_TIFFmalloc(npixels * samplesperpixel * sizeof(ushort)); } else { - pixels = (unsigned char *)_TIFFmalloc(npixels * samplesperpixel * sizeof(unsigned char)); + pixels = (uchar *)_TIFFmalloc(npixels * samplesperpixel * sizeof(uchar)); } if (pixels == NULL && 
pixels16 == NULL) { @@ -804,7 +794,7 @@ bool imb_savetiff(ImBuf *ibuf, const char *filepath, int flags) to16 = pixels16; } else { - from = (unsigned char *)ibuf->rect; + from = (uchar *)ibuf->rect; to = pixels; } @@ -813,7 +803,7 @@ bool imb_savetiff(ImBuf *ibuf, const char *filepath, int flags) TIFFSetField(image, TIFFTAG_SAMPLESPERPIXEL, samplesperpixel); if (samplesperpixel == 4) { - unsigned short extraSampleTypes[1]; + ushort extraSampleTypes[1]; if (bitspersample == 16) { extraSampleTypes[0] = EXTRASAMPLE_ASSOCALPHA; @@ -908,7 +898,7 @@ bool imb_savetiff(ImBuf *ibuf, const char *filepath, int flags) TIFFSetField(image, TIFFTAG_RESOLUTIONUNIT, RESUNIT_INCH); if (TIFFWriteEncodedStrip(image, 0, - (bitspersample == 16) ? (unsigned char *)pixels16 : pixels, + (bitspersample == 16) ? (uchar *)pixels16 : pixels, (size_t)ibuf->x * ibuf->y * samplesperpixel * bitspersample / 8) == -1) { fprintf(stderr, "imb_savetiff: Could not write encoded TIFF.\n"); diff --git a/source/blender/imbuf/intern/transform.cc b/source/blender/imbuf/intern/transform.cc index d64a48569ae..276d31c0557 100644 --- a/source/blender/imbuf/intern/transform.cc +++ b/source/blender/imbuf/intern/transform.cc @@ -147,7 +147,7 @@ class NoDiscard : public BaseDiscard { template< /** * \brief Kind of buffer. - * Possible options: float, unsigned char. + * Possible options: float, uchar. 
*/ typename StorageType = float, @@ -170,10 +170,9 @@ class PixelPointer { if constexpr (std::is_same_v<StorageType, float>) { pointer = image_buffer->rect_float + offset; } - else if constexpr (std::is_same_v<StorageType, unsigned char>) { - pointer = const_cast<unsigned char *>( - static_cast<const unsigned char *>(static_cast<const void *>(image_buffer->rect)) + - offset); + else if constexpr (std::is_same_v<StorageType, uchar>) { + pointer = const_cast<uchar *>( + static_cast<const uchar *>(static_cast<const void *>(image_buffer->rect)) + offset); } else { pointer = nullptr; @@ -264,7 +263,7 @@ template< /** \brief Interpolation mode to use when sampling. */ eIMBInterpolationFilterMode Filter, - /** \brief storage type of a single pixel channel (unsigned char or float). */ + /** \brief storage type of a single pixel channel (uchar or float). */ typename StorageType, /** * \brief number of channels if the image to read. @@ -294,14 +293,14 @@ class Sampler { const float wrapped_v = uv_wrapper.modify_v(source, v); bilinear_interpolation_color_fl(source, nullptr, r_sample.data(), wrapped_u, wrapped_v); } - else if constexpr (Filter == IMB_FILTER_NEAREST && - std::is_same_v<StorageType, unsigned char> && NumChannels == 4) { + else if constexpr (Filter == IMB_FILTER_NEAREST && std::is_same_v<StorageType, uchar> && + NumChannels == 4) { const float wrapped_u = uv_wrapper.modify_u(source, u); const float wrapped_v = uv_wrapper.modify_v(source, v); nearest_interpolation_color_char(source, r_sample.data(), nullptr, wrapped_u, wrapped_v); } - else if constexpr (Filter == IMB_FILTER_BILINEAR && - std::is_same_v<StorageType, unsigned char> && NumChannels == 4) { + else if constexpr (Filter == IMB_FILTER_BILINEAR && std::is_same_v<StorageType, uchar> && + NumChannels == 4) { const float wrapped_u = uv_wrapper.modify_u(source, u); const float wrapped_v = uv_wrapper.modify_v(source, v); bilinear_interpolation_color_char(source, r_sample.data(), nullptr, wrapped_u, wrapped_v); 
@@ -374,7 +373,7 @@ class Sampler { * * Template class to convert and store a sample in a PixelPointer. * It supports: - * - 4 channel unsigned char -> 4 channel unsigned char. + * - 4 channel uchar -> 4 channel uchar. * - 4 channel float -> 4 channel float. * - 3 channel float -> 4 channel float. * - 2 channel float -> 4 channel float. @@ -392,7 +391,7 @@ class ChannelConverter { */ void convert_and_store(const SampleType &sample, PixelType &pixel_pointer) { - if constexpr (std::is_same_v<StorageType, unsigned char>) { + if constexpr (std::is_same_v<StorageType, uchar>) { BLI_STATIC_ASSERT(SourceNumChannels == 4, "Unsigned chars always have 4 channels."); BLI_STATIC_ASSERT(DestinationNumChannels == 4, "Unsigned chars always have 4 channels."); @@ -550,8 +549,8 @@ static void transform_threaded(TransformUserData *user_data, const eIMBTransform scanline_func = get_scanline_function<Filter>(user_data, mode); } else if (user_data->dst->rect && user_data->src->rect) { - /* Number of channels is always 4 when using unsigned char buffers (sRGB + straight alpha). */ - scanline_func = get_scanline_function<Filter, unsigned char, 4, 4>(mode); + /* Number of channels is always 4 when using uchar buffers (sRGB + straight alpha). */ + scanline_func = get_scanline_function<Filter, uchar, 4, 4>(mode); } if (scanline_func != nullptr) { diff --git a/source/blender/imbuf/intern/util.c b/source/blender/imbuf/intern/util.c index ffa989a29b4..2870ff56c0a 100644 --- a/source/blender/imbuf/intern/util.c +++ b/source/blender/imbuf/intern/util.c @@ -106,8 +106,7 @@ const char *imb_ext_audio[] = { /* Increased from 32 to 64 because of the bitmaps header size. 
*/ #define HEADER_SIZE 64 -static ssize_t imb_ispic_read_header_from_filepath(const char *filepath, - unsigned char buf[HEADER_SIZE]) +static ssize_t imb_ispic_read_header_from_filepath(const char *filepath, uchar buf[HEADER_SIZE]) { BLI_stat_t st; int fp; @@ -135,7 +134,7 @@ static ssize_t imb_ispic_read_header_from_filepath(const char *filepath, return size; } -int IMB_ispic_type_from_memory(const unsigned char *buf, const size_t buf_size) +int IMB_ispic_type_from_memory(const uchar *buf, const size_t buf_size) { for (const ImFileType *type = IMB_FILE_TYPES; type < IMB_FILE_TYPES_LAST; type++) { if (type->is_a != NULL) { @@ -150,7 +149,7 @@ int IMB_ispic_type_from_memory(const unsigned char *buf, const size_t buf_size) int IMB_ispic_type(const char *filepath) { - unsigned char buf[HEADER_SIZE]; + uchar buf[HEADER_SIZE]; const ssize_t buf_size = imb_ispic_read_header_from_filepath(filepath, buf); if (buf_size <= 0) { return IMB_FTYPE_NONE; @@ -160,7 +159,7 @@ int IMB_ispic_type(const char *filepath) bool IMB_ispic_type_matches(const char *filepath, int filetype) { - unsigned char buf[HEADER_SIZE]; + uchar buf[HEADER_SIZE]; const ssize_t buf_size = imb_ispic_read_header_from_filepath(filepath, buf); if (buf_size <= 0) { return false; @@ -251,7 +250,7 @@ const char *IMB_ffmpeg_last_error(void) static int isffmpeg(const char *filepath) { AVFormatContext *pFormatCtx = NULL; - unsigned int i; + uint i; int videoStream; const AVCodec *pCodec; diff --git a/source/blender/imbuf/intern/webp.c b/source/blender/imbuf/intern/webp.c index 19fe2373ea0..27c26fb19c1 100644 --- a/source/blender/imbuf/intern/webp.c +++ b/source/blender/imbuf/intern/webp.c @@ -4,14 +4,23 @@ * \ingroup imbuf */ +#ifdef _WIN32 +# include <io.h> +#else +# include <unistd.h> +#endif + +#include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <webp/decode.h> #include <webp/encode.h> #include "BLI_fileops.h" +#include "BLI_mmap.h" #include "BLI_utildefines.h" +#include "IMB_allocimbuf.h" 
#include "IMB_colormanagement.h" #include "IMB_colormanagement_intern.h" #include "IMB_filetype.h" @@ -20,7 +29,7 @@ #include "MEM_guardedalloc.h" -bool imb_is_a_webp(const unsigned char *buf, size_t size) +bool imb_is_a_webp(const uchar *buf, size_t size) { if (WebPGetInfo(buf, size, NULL, NULL)) { return true; @@ -28,10 +37,7 @@ bool imb_is_a_webp(const unsigned char *buf, size_t size) return false; } -ImBuf *imb_loadwebp(const unsigned char *mem, - size_t size, - int flags, - char colorspace[IM_MAX_SPACE]) +ImBuf *imb_loadwebp(const uchar *mem, size_t size, int flags, char colorspace[IM_MAX_SPACE]) { if (!imb_is_a_webp(mem, size)) { return NULL; @@ -57,7 +63,7 @@ ImBuf *imb_loadwebp(const unsigned char *mem, ibuf->ftype = IMB_FTYPE_WEBP; imb_addrectImBuf(ibuf); /* Flip the image during decoding to match Blender. */ - unsigned char *last_row = (unsigned char *)(ibuf->rect + (ibuf->y - 1) * ibuf->x); + uchar *last_row = (uchar *)(ibuf->rect + (ibuf->y - 1) * ibuf->x); if (WebPDecodeRGBAInto(mem, size, last_row, (size_t)(ibuf->x) * ibuf->y * 4, -4 * ibuf->x) == NULL) { fprintf(stderr, "WebP: Failed to decode image\n"); @@ -67,10 +73,93 @@ ImBuf *imb_loadwebp(const unsigned char *mem, return ibuf; } +struct ImBuf *imb_load_filepath_thumbnail_webp(const char *filepath, + const int UNUSED(flags), + const size_t max_thumb_size, + char colorspace[], + size_t *r_width, + size_t *r_height) +{ + const int file = BLI_open(filepath, O_BINARY | O_RDONLY, 0); + if (file == -1) { + return NULL; + } + + const size_t data_size = BLI_file_descriptor_size(file); + + imb_mmap_lock(); + BLI_mmap_file *mmap_file = BLI_mmap_open(file); + imb_mmap_unlock(); + close(file); + if (mmap_file == NULL) { + return NULL; + } + + const uchar *data = BLI_mmap_get_pointer(mmap_file); + + WebPDecoderConfig config; + if (!data || !WebPInitDecoderConfig(&config) || + WebPGetFeatures(data, data_size, &config.input) != VP8_STATUS_OK) { + fprintf(stderr, "WebP: Invalid file\n"); + imb_mmap_lock(); + 
BLI_mmap_free(mmap_file); + imb_mmap_unlock(); + return NULL; + } + + /* Return full size of the image. */ + *r_width = (size_t)config.input.width; + *r_height = (size_t)config.input.height; + + const float scale = (float)max_thumb_size / MAX2(config.input.width, config.input.height); + const int dest_w = (int)(config.input.width * scale); + const int dest_h = (int)(config.input.height * scale); + + colorspace_set_default_role(colorspace, IM_MAX_SPACE, COLOR_ROLE_DEFAULT_BYTE); + struct ImBuf *ibuf = IMB_allocImBuf(dest_w, dest_h, 32, IB_rect); + if (ibuf == NULL) { + fprintf(stderr, "WebP: Failed to allocate image memory\n"); + imb_mmap_lock(); + BLI_mmap_free(mmap_file); + imb_mmap_unlock(); + return NULL; + } + + config.options.no_fancy_upsampling = 1; + config.options.use_scaling = 1; + config.options.scaled_width = dest_w; + config.options.scaled_height = dest_h; + config.options.bypass_filtering = 1; + config.options.use_threads = 0; + config.options.flip = 1; + config.output.is_external_memory = 1; + config.output.colorspace = MODE_RGBA; + config.output.u.RGBA.rgba = (uint8_t *)ibuf->rect; + config.output.u.RGBA.stride = 4 * ibuf->x; + config.output.u.RGBA.size = (size_t)(config.output.u.RGBA.stride * ibuf->y); + + if (WebPDecode(data, data_size, &config) != VP8_STATUS_OK) { + fprintf(stderr, "WebP: Failed to decode image\n"); + imb_mmap_lock(); + BLI_mmap_free(mmap_file); + imb_mmap_unlock(); + return NULL; + } + + /* Free the output buffer. 
*/ + WebPFreeDecBuffer(&config.output); + + imb_mmap_lock(); + BLI_mmap_free(mmap_file); + imb_mmap_unlock(); + + return ibuf; +} + bool imb_savewebp(struct ImBuf *ibuf, const char *name, int UNUSED(flags)) { const int bytesperpixel = (ibuf->planes + 7) >> 3; - unsigned char *encoded_data, *last_row; + uchar *encoded_data, *last_row; size_t encoded_data_size; if (bytesperpixel == 3) { @@ -84,7 +173,7 @@ bool imb_savewebp(struct ImBuf *ibuf, const char *name, int UNUSED(flags)) rgb_rect[i * 3 + 2] = rgba_rect[i * 4 + 2]; } - last_row = (unsigned char *)(rgb_rect + (ibuf->y - 1) * ibuf->x * 3); + last_row = (uchar *)(rgb_rect + (ibuf->y - 1) * ibuf->x * 3); if (ibuf->foptions.quality == 100.0f) { encoded_data_size = WebPEncodeLosslessRGB( @@ -97,7 +186,7 @@ bool imb_savewebp(struct ImBuf *ibuf, const char *name, int UNUSED(flags)) MEM_freeN(rgb_rect); } else if (bytesperpixel == 4) { - last_row = (unsigned char *)(ibuf->rect + (ibuf->y - 1) * ibuf->x); + last_row = (uchar *)(ibuf->rect + (ibuf->y - 1) * ibuf->x); if (ibuf->foptions.quality == 100.0f) { encoded_data_size = WebPEncodeLosslessRGBA( diff --git a/source/blender/io/usd/usd.h b/source/blender/io/usd/usd.h index a07315d8b4e..3494d8ffdc3 100644 --- a/source/blender/io/usd/usd.h +++ b/source/blender/io/usd/usd.h @@ -52,7 +52,7 @@ struct USDImportParams { bool import_materials; bool import_meshes; bool import_volumes; - char *prim_path_mask; + char prim_path_mask[1024]; bool import_subdiv; bool import_instance_proxies; bool create_collection; diff --git a/source/blender/io/wavefront_obj/IO_wavefront_obj.h b/source/blender/io/wavefront_obj/IO_wavefront_obj.h index 544630c9cc0..0a92bbca477 100644 --- a/source/blender/io/wavefront_obj/IO_wavefront_obj.h +++ b/source/blender/io/wavefront_obj/IO_wavefront_obj.h @@ -48,18 +48,15 @@ struct OBJExportParams { bool export_triangulated_mesh; bool export_curves_as_nurbs; ePathReferenceMode path_mode; + bool export_pbr_extensions; /* Grouping options. 
*/ bool export_object_groups; bool export_material_groups; bool export_vertex_groups; - /** - * Calculate smooth groups from sharp edges. - */ + /* Calculate smooth groups from sharp edges. */ bool export_smooth_groups; - /** - * Create bitflags instead of the default "0"/"1" group IDs. - */ + /* Create bitflags instead of the default "0"/"1" group IDs. */ bool smooth_groups_bitflags; }; diff --git a/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc b/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc index 4d934960010..f2547e6fc14 100644 --- a/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc +++ b/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc @@ -493,11 +493,14 @@ void OBJWriter::write_nurbs_curve(FormatHandler &fh, const OBJCurve &obj_nurbs_d static const char *tex_map_type_to_string[] = { "map_Kd", + "map_Pm", "map_Ks", "map_Ns", - "map_d", + "map_Pr", + "map_Ps", "map_refl", "map_Ke", + "map_d", "map_Bump", }; BLI_STATIC_ASSERT(ARRAY_SIZE(tex_map_type_to_string) == (int)MTLTexMapType::Count, @@ -553,29 +556,64 @@ StringRefNull MTLWriter::mtl_file_path() const return mtl_filepath_; } -void MTLWriter::write_bsdf_properties(const MTLMaterial &mtl) +void MTLWriter::write_bsdf_properties(const MTLMaterial &mtl, bool write_pbr) { /* For various material properties, we only capture information * coming from the texture, or the default value of the socket. * When the texture is present, do not emit the default value. */ - if (!mtl.tex_map_of_type(MTLTexMapType::Ns).is_valid()) { - fmt_handler_.write_mtl_float("Ns", mtl.Ns); + + /* Do not write Ns & Ka when writing in PBR mode. 
*/ + if (!write_pbr) { + if (!mtl.tex_map_of_type(MTLTexMapType::SpecularExponent).is_valid()) { + fmt_handler_.write_mtl_float("Ns", mtl.spec_exponent); + } + fmt_handler_.write_mtl_float3( + "Ka", mtl.ambient_color.x, mtl.ambient_color.y, mtl.ambient_color.z); + } + if (!mtl.tex_map_of_type(MTLTexMapType::Color).is_valid()) { + fmt_handler_.write_mtl_float3("Kd", mtl.color.x, mtl.color.y, mtl.color.z); } - fmt_handler_.write_mtl_float3("Ka", mtl.Ka.x, mtl.Ka.y, mtl.Ka.z); - if (!mtl.tex_map_of_type(MTLTexMapType::Kd).is_valid()) { - fmt_handler_.write_mtl_float3("Kd", mtl.Kd.x, mtl.Kd.y, mtl.Kd.z); + if (!mtl.tex_map_of_type(MTLTexMapType::Specular).is_valid()) { + fmt_handler_.write_mtl_float3("Ks", mtl.spec_color.x, mtl.spec_color.y, mtl.spec_color.z); } - if (!mtl.tex_map_of_type(MTLTexMapType::Ks).is_valid()) { - fmt_handler_.write_mtl_float3("Ks", mtl.Ks.x, mtl.Ks.y, mtl.Ks.z); + if (!mtl.tex_map_of_type(MTLTexMapType::Emission).is_valid()) { + fmt_handler_.write_mtl_float3( + "Ke", mtl.emission_color.x, mtl.emission_color.y, mtl.emission_color.z); } - if (!mtl.tex_map_of_type(MTLTexMapType::Ke).is_valid()) { - fmt_handler_.write_mtl_float3("Ke", mtl.Ke.x, mtl.Ke.y, mtl.Ke.z); + fmt_handler_.write_mtl_float("Ni", mtl.ior); + if (!mtl.tex_map_of_type(MTLTexMapType::Alpha).is_valid()) { + fmt_handler_.write_mtl_float("d", mtl.alpha); } - fmt_handler_.write_mtl_float("Ni", mtl.Ni); - if (!mtl.tex_map_of_type(MTLTexMapType::d).is_valid()) { - fmt_handler_.write_mtl_float("d", mtl.d); + fmt_handler_.write_mtl_illum(mtl.illum_mode); + + if (write_pbr) { + if (!mtl.tex_map_of_type(MTLTexMapType::Roughness).is_valid() && mtl.roughness >= 0.0f) { + fmt_handler_.write_mtl_float("Pr", mtl.roughness); + } + if (!mtl.tex_map_of_type(MTLTexMapType::Metallic).is_valid() && mtl.metallic >= 0.0f) { + fmt_handler_.write_mtl_float("Pm", mtl.metallic); + } + if (!mtl.tex_map_of_type(MTLTexMapType::Sheen).is_valid() && mtl.sheen >= 0.0f) { + fmt_handler_.write_mtl_float("Ps", 
mtl.sheen); + } + if (mtl.cc_thickness >= 0.0f) { + fmt_handler_.write_mtl_float("Pc", mtl.cc_thickness); + } + if (mtl.cc_roughness >= 0.0f) { + fmt_handler_.write_mtl_float("Pcr", mtl.cc_roughness); + } + if (mtl.aniso >= 0.0f) { + fmt_handler_.write_mtl_float("aniso", mtl.aniso); + } + if (mtl.aniso_rot >= 0.0f) { + fmt_handler_.write_mtl_float("anisor", mtl.aniso_rot); + } + if (mtl.transmit_color.x > 0.0f || mtl.transmit_color.y > 0.0f || + mtl.transmit_color.z > 0.0f) { + fmt_handler_.write_mtl_float3( + "Tf", mtl.transmit_color.x, mtl.transmit_color.y, mtl.transmit_color.z); + } } - fmt_handler_.write_mtl_illum(mtl.illum); } void MTLWriter::write_texture_map(const MTLMaterial &mtl_material, @@ -594,8 +632,8 @@ void MTLWriter::write_texture_map(const MTLMaterial &mtl_material, if (texture_map.scale != float3{1.0f, 1.0f, 1.0f}) { options.append(" -s ").append(float3_to_string(texture_map.scale)); } - if (texture_key == MTLTexMapType::bump && mtl_material.map_Bump_strength > 0.0001f) { - options.append(" -bm ").append(std::to_string(mtl_material.map_Bump_strength)); + if (texture_key == MTLTexMapType::Normal && mtl_material.normal_strength > 0.0001f) { + options.append(" -bm ").append(std::to_string(mtl_material.normal_strength)); } std::string path = path_reference( @@ -606,9 +644,21 @@ void MTLWriter::write_texture_map(const MTLMaterial &mtl_material, fmt_handler_.write_mtl_map(tex_map_type_to_string[(int)texture_key], options, path); } +static bool is_pbr_map(MTLTexMapType type) +{ + return type == MTLTexMapType::Metallic || type == MTLTexMapType::Roughness || + type == MTLTexMapType::Sheen; +} + +static bool is_non_pbr_map(MTLTexMapType type) +{ + return type == MTLTexMapType::SpecularExponent || type == MTLTexMapType::Reflection; +} + void MTLWriter::write_materials(const char *blen_filepath, ePathReferenceMode path_mode, - const char *dest_dir) + const char *dest_dir, + bool write_pbr) { if (mtlmaterials_.size() == 0) { return; @@ -626,12 +676,18 @@ void 
MTLWriter::write_materials(const char *blen_filepath, for (const MTLMaterial &mtlmat : mtlmaterials_) { fmt_handler_.write_string(""); fmt_handler_.write_mtl_newmtl(mtlmat.name); - write_bsdf_properties(mtlmat); + write_bsdf_properties(mtlmat, write_pbr); for (int key = 0; key < (int)MTLTexMapType::Count; key++) { const MTLTexMap &tex = mtlmat.texture_maps[key]; if (!tex.is_valid()) { continue; } + if (!write_pbr && is_pbr_map((MTLTexMapType)key)) { + continue; + } + if (write_pbr && is_non_pbr_map((MTLTexMapType)key)) { + continue; + } write_texture_map( mtlmat, (MTLTexMapType)key, tex, blen_filedir, dest_dir, path_mode, copy_set); } diff --git a/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.hh b/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.hh index 4544037fbc1..eda4576297b 100644 --- a/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.hh +++ b/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.hh @@ -186,7 +186,8 @@ class MTLWriter : NonMovable, NonCopyable { */ void write_materials(const char *blen_filepath, ePathReferenceMode path_mode, - const char *dest_dir); + const char *dest_dir, + bool write_pbr); StringRefNull mtl_file_path() const; /** * Add the materials of the given object to #MTLWriter, de-duplicating @@ -203,7 +204,7 @@ class MTLWriter : NonMovable, NonCopyable { /** * Write properties sourced from p-BSDF node or #Object.Material. */ - void write_bsdf_properties(const MTLMaterial &mtl_material); + void write_bsdf_properties(const MTLMaterial &mtl_material, bool write_pbr); /** * Write a texture map in the form "map_XX -s 1. 1. 1. -o 0. 0. 0. [-bm 1.] path/to/image". 
*/ diff --git a/source/blender/io/wavefront_obj/exporter/obj_export_mtl.cc b/source/blender/io/wavefront_obj/exporter/obj_export_mtl.cc index 6a02695c304..f8c7da75a70 100644 --- a/source/blender/io/wavefront_obj/exporter/obj_export_mtl.cc +++ b/source/blender/io/wavefront_obj/exporter/obj_export_mtl.cc @@ -23,11 +23,14 @@ namespace blender::io::obj { const char *tex_map_type_to_socket_id[] = { "Base Color", + "Metallic", "Specular", + "Roughness", /* Map specular exponent to roughness. */ "Roughness", - "Alpha", - "Metallic", + "Sheen", + "Metallic", /* Map reflection to metallic. */ "Emission", + "Alpha", "Normal", }; BLI_STATIC_ASSERT(ARRAY_SIZE(tex_map_type_to_socket_id) == (int)MTLTexMapType::Count, @@ -188,7 +191,6 @@ static void store_bsdf_properties(const bNode *bsdf_node, const Material *material, MTLMaterial &r_mtl_mat) { - /* If p-BSDF is not present, fallback to #Object.Material. */ float roughness = material->roughness; if (bsdf_node) { copy_property_from_node(SOCK_FLOAT, bsdf_node, "Roughness", {&roughness, 1}); @@ -212,11 +214,11 @@ static void store_bsdf_properties(const bNode *bsdf_node, copy_property_from_node(SOCK_FLOAT, bsdf_node, "IOR", {&refraction_index, 1}); } - float dissolved = material->a; + float alpha = material->a; if (bsdf_node) { - copy_property_from_node(SOCK_FLOAT, bsdf_node, "Alpha", {&dissolved, 1}); + copy_property_from_node(SOCK_FLOAT, bsdf_node, "Alpha", {&alpha, 1}); } - const bool transparent = dissolved != 1.0f; + const bool transparent = alpha != 1.0f; float3 diffuse_col = {material->r, material->g, material->b}; if (bsdf_node) { @@ -231,6 +233,22 @@ static void store_bsdf_properties(const bNode *bsdf_node, } mul_v3_fl(emission_col, emission_strength); + float sheen = -1.0f; + float clearcoat = -1.0f; + float clearcoat_roughness = -1.0f; + float aniso = -1.0f; + float aniso_rot = -1.0f; + float transmission = -1.0f; + if (bsdf_node) { + copy_property_from_node(SOCK_FLOAT, bsdf_node, "Sheen", {&sheen, 1}); + 
copy_property_from_node(SOCK_FLOAT, bsdf_node, "Clearcoat", {&clearcoat, 1}); + copy_property_from_node( + SOCK_FLOAT, bsdf_node, "Clearcoat Roughness", {&clearcoat_roughness, 1}); + copy_property_from_node(SOCK_FLOAT, bsdf_node, "Anisotropic", {&aniso, 1}); + copy_property_from_node(SOCK_FLOAT, bsdf_node, "Anisotropic Rotation", {&aniso_rot, 1}); + copy_property_from_node(SOCK_FLOAT, bsdf_node, "Transmission", {&transmission, 1}); + } + /* See https://wikipedia.org/wiki/Wavefront_.obj_file for all possible values of `illum`. */ /* Highlight on. */ int illum = 2; @@ -253,19 +271,27 @@ static void store_bsdf_properties(const bNode *bsdf_node, /* Transparency: Glass on, Reflection: Ray trace off */ illum = 9; } - r_mtl_mat.Ns = spec_exponent; + r_mtl_mat.spec_exponent = spec_exponent; if (metallic != 0.0f) { - r_mtl_mat.Ka = {metallic, metallic, metallic}; + r_mtl_mat.ambient_color = {metallic, metallic, metallic}; } else { - r_mtl_mat.Ka = {1.0f, 1.0f, 1.0f}; + r_mtl_mat.ambient_color = {1.0f, 1.0f, 1.0f}; } - r_mtl_mat.Kd = diffuse_col; - r_mtl_mat.Ks = {specular, specular, specular}; - r_mtl_mat.Ke = emission_col; - r_mtl_mat.Ni = refraction_index; - r_mtl_mat.d = dissolved; - r_mtl_mat.illum = illum; + r_mtl_mat.color = diffuse_col; + r_mtl_mat.spec_color = {specular, specular, specular}; + r_mtl_mat.emission_color = emission_col; + r_mtl_mat.ior = refraction_index; + r_mtl_mat.alpha = alpha; + r_mtl_mat.illum_mode = illum; + r_mtl_mat.roughness = roughness; + r_mtl_mat.metallic = metallic; + r_mtl_mat.sheen = sheen; + r_mtl_mat.cc_thickness = clearcoat; + r_mtl_mat.cc_roughness = clearcoat_roughness; + r_mtl_mat.aniso = aniso; + r_mtl_mat.aniso_rot = aniso_rot; + r_mtl_mat.transmit_color = {transmission, transmission, transmission}; } /** @@ -291,7 +317,7 @@ static void store_image_textures(const bNode *bsdf_node, Vector<const bNodeSocket *> linked_sockets; const bNode *normal_map_node{nullptr}; - if (key == (int)MTLTexMapType::bump) { + if (key == 
(int)MTLTexMapType::Normal) { /* Find sockets linked to destination "Normal" socket in P-BSDF node. */ linked_sockets_to_dest_id(bsdf_node, *node_tree, "Normal", linked_sockets); /* Among the linked sockets, find Normal Map shader node. */ @@ -302,7 +328,7 @@ static void store_image_textures(const bNode *bsdf_node, } else { /* Skip emission map if emission strength is zero. */ - if (key == (int)MTLTexMapType::Ke) { + if (key == (int)MTLTexMapType::Emission) { float emission_strength = 0.0f; copy_property_from_node( SOCK_FLOAT, bsdf_node, "Emission Strength", {&emission_strength, 1}); @@ -331,7 +357,7 @@ static void store_image_textures(const bNode *bsdf_node, if (normal_map_node) { copy_property_from_node( - SOCK_FLOAT, normal_map_node, "Strength", {&r_mtl_mat.map_Bump_strength, 1}); + SOCK_FLOAT, normal_map_node, "Strength", {&r_mtl_mat.normal_strength, 1}); } /* Texture transform options. Only translation (origin offset, "-o") and scale * ("-o") are supported. */ diff --git a/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh b/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh index d8eafff107b..9c1bc2f0f8f 100644 --- a/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh +++ b/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh @@ -13,7 +13,19 @@ struct Material; namespace blender::io::obj { -enum class MTLTexMapType { Kd = 0, Ks, Ns, d, refl, Ke, bump, Count }; +enum class MTLTexMapType { + Color = 0, + Metallic, + Specular, + SpecularExponent, + Roughness, + Sheen, + Reflection, + Emission, + Alpha, + Normal, + Count +}; extern const char *tex_map_type_to_socket_id[]; struct MTLTexMap { @@ -47,17 +59,26 @@ struct MTLMaterial { std::string name; /* Always check for negative values while importing or exporting. Use defaults if * any value is negative. 
*/ - float Ns{-1.0f}; - float3 Ka{-1.0f}; - float3 Kd{-1.0f}; - float3 Ks{-1.0f}; - float3 Ke{-1.0f}; - float Ni{-1.0f}; - float d{-1.0f}; - int illum{-1}; + float spec_exponent{-1.0f}; /* `Ns` */ + float3 ambient_color{-1.0f}; /* `Ka` */ + float3 color{-1.0f}; /* `Kd` */ + float3 spec_color{-1.0f}; /* `Ks` */ + float3 emission_color{-1.0f}; /* `Ke` */ + float ior{-1.0f}; /* `Ni` */ + float alpha{-1.0f}; /* `d` */ + float3 transmit_color{-1.0f}; /* `Kt` / `Tf` */ + float roughness{-1.0f}; /* `Pr` */ + float metallic{-1.0f}; /* `Pm` */ + float sheen{-1.0f}; /* `Ps` */ + float cc_thickness{-1.0f}; /* `Pc` */ + float cc_roughness{-1.0f}; /* `Pcr` */ + float aniso{-1.0f}; /* `aniso` */ + float aniso_rot{-1.0f}; /* `anisor` */ + + int illum_mode{-1}; MTLTexMap texture_maps[(int)MTLTexMapType::Count]; - /** Only used for Normal Map node: "map_Bump". */ - float map_Bump_strength{-1.0f}; + /* Only used for Normal Map node: `map_Bump`. */ + float normal_strength{-1.0f}; }; MTLMaterial mtlmaterial_for_material(const Material *material); diff --git a/source/blender/io/wavefront_obj/exporter/obj_exporter.cc b/source/blender/io/wavefront_obj/exporter/obj_exporter.cc index 294ea81fd58..a51c017f81d 100644 --- a/source/blender/io/wavefront_obj/exporter/obj_exporter.cc +++ b/source/blender/io/wavefront_obj/exporter/obj_exporter.cc @@ -293,7 +293,10 @@ void export_frame(Depsgraph *depsgraph, const OBJExportParams &export_params, co } BLI_path_slash_native(dest_dir); BLI_path_normalize(nullptr, dest_dir); - mtl_writer->write_materials(export_params.blen_filepath, export_params.path_mode, dest_dir); + mtl_writer->write_materials(export_params.blen_filepath, + export_params.path_mode, + dest_dir, + export_params.export_pbr_extensions); } write_nurbs_curve_objects(std::move(exportable_as_nurbs), *frame_writer); } diff --git a/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc b/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc index 
2ad8a09bd90..f92f9894f75 100644 --- a/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc +++ b/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc @@ -596,26 +596,35 @@ void OBJParser::parse(Vector<std::unique_ptr<Geometry>> &r_all_geometries, static MTLTexMapType mtl_line_start_to_texture_type(const char *&p, const char *end) { if (parse_keyword(p, end, "map_Kd")) { - return MTLTexMapType::Kd; + return MTLTexMapType::Color; } if (parse_keyword(p, end, "map_Ks")) { - return MTLTexMapType::Ks; + return MTLTexMapType::Specular; } if (parse_keyword(p, end, "map_Ns")) { - return MTLTexMapType::Ns; + return MTLTexMapType::SpecularExponent; } if (parse_keyword(p, end, "map_d")) { - return MTLTexMapType::d; + return MTLTexMapType::Alpha; } if (parse_keyword(p, end, "refl") || parse_keyword(p, end, "map_refl")) { - return MTLTexMapType::refl; + return MTLTexMapType::Reflection; } if (parse_keyword(p, end, "map_Ke")) { - return MTLTexMapType::Ke; + return MTLTexMapType::Emission; } if (parse_keyword(p, end, "bump") || parse_keyword(p, end, "map_Bump") || parse_keyword(p, end, "map_bump")) { - return MTLTexMapType::bump; + return MTLTexMapType::Normal; + } + if (parse_keyword(p, end, "map_Pr")) { + return MTLTexMapType::Roughness; + } + if (parse_keyword(p, end, "map_Pm")) { + return MTLTexMapType::Metallic; + } + if (parse_keyword(p, end, "map_Ps")) { + return MTLTexMapType::Sheen; } return MTLTexMapType::Count; } @@ -647,7 +656,7 @@ static bool parse_texture_option(const char *&p, return true; } if (parse_keyword(p, end, "-bm")) { - p = parse_float(p, end, 1.0f, material->map_Bump_strength, true, true); + p = parse_float(p, end, 1.0f, material->normal_strength, true, true); return true; } if (parse_keyword(p, end, "-type")) { @@ -780,31 +789,55 @@ void MTLParser::parse_and_store(Map<string, std::unique_ptr<MTLMaterial>> &r_mat } else if (material != nullptr) { if (parse_keyword(p, end, "Ns")) { - parse_float(p, end, 324.0f, material->Ns); + 
parse_float(p, end, 324.0f, material->spec_exponent); } else if (parse_keyword(p, end, "Ka")) { - parse_floats(p, end, 0.0f, material->Ka, 3); + parse_floats(p, end, 0.0f, material->ambient_color, 3); } else if (parse_keyword(p, end, "Kd")) { - parse_floats(p, end, 0.8f, material->Kd, 3); + parse_floats(p, end, 0.8f, material->color, 3); } else if (parse_keyword(p, end, "Ks")) { - parse_floats(p, end, 0.5f, material->Ks, 3); + parse_floats(p, end, 0.5f, material->spec_color, 3); } else if (parse_keyword(p, end, "Ke")) { - parse_floats(p, end, 0.0f, material->Ke, 3); + parse_floats(p, end, 0.0f, material->emission_color, 3); } else if (parse_keyword(p, end, "Ni")) { - parse_float(p, end, 1.45f, material->Ni); + parse_float(p, end, 1.45f, material->ior); } else if (parse_keyword(p, end, "d")) { - parse_float(p, end, 1.0f, material->d); + parse_float(p, end, 1.0f, material->alpha); } else if (parse_keyword(p, end, "illum")) { /* Some files incorrectly use a float (T60135). */ float val; parse_float(p, end, 1.0f, val); - material->illum = val; + material->illum_mode = val; + } + else if (parse_keyword(p, end, "Pr")) { + parse_float(p, end, 0.5f, material->roughness); + } + else if (parse_keyword(p, end, "Pm")) { + parse_float(p, end, 0.0f, material->metallic); + } + else if (parse_keyword(p, end, "Ps")) { + parse_float(p, end, 0.0f, material->sheen); + } + else if (parse_keyword(p, end, "Pc")) { + parse_float(p, end, 0.0f, material->cc_thickness); + } + else if (parse_keyword(p, end, "Pcr")) { + parse_float(p, end, 0.0f, material->cc_roughness); + } + else if (parse_keyword(p, end, "aniso")) { + parse_float(p, end, 0.0f, material->aniso); + } + else if (parse_keyword(p, end, "anisor")) { + parse_float(p, end, 0.0f, material->aniso_rot); + } + else if (parse_keyword(p, end, "Kt") || parse_keyword(p, end, "Tf")) { + parse_floats(p, end, 0.0f, material->transmit_color, 3); } else { parse_texture_map(p, end, material, mtl_dir_path_); diff --git 
a/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc b/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc index 0922a71979e..c471b2002de 100644 --- a/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc +++ b/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc @@ -178,7 +178,7 @@ static void link_sockets(bNodeTree *ntree, static void set_bsdf_socket_values(bNode *bsdf, Material *mat, const MTLMaterial &mtl_mat) { - const int illum = mtl_mat.illum; + const int illum = mtl_mat.illum_mode; bool do_highlight = false; bool do_tranparency = false; bool do_reflection = false; @@ -244,21 +244,23 @@ static void set_bsdf_socket_values(bNode *bsdf, Material *mat, const MTLMaterial /* Approximations for trying to map obj/mtl material model into * Principled BSDF: */ /* Specular: average of Ks components. */ - float specular = (mtl_mat.Ks[0] + mtl_mat.Ks[1] + mtl_mat.Ks[2]) / 3; + float specular = (mtl_mat.spec_color[0] + mtl_mat.spec_color[1] + mtl_mat.spec_color[2]) / 3; if (specular < 0.0f) { specular = do_highlight ? 1.0f : 0.0f; } /* Roughness: map 0..1000 range to 1..0 and apply non-linearity. */ float roughness; - if (mtl_mat.Ns < 0.0f) { + if (mtl_mat.spec_exponent < 0.0f) { roughness = do_highlight ? 0.0f : 1.0f; } else { - float clamped_ns = std::max(0.0f, std::min(1000.0f, mtl_mat.Ns)); + float clamped_ns = std::max(0.0f, std::min(1000.0f, mtl_mat.spec_exponent)); roughness = 1.0f - sqrt(clamped_ns / 1000.0f); } - /* Metallic: average of Ka components. */ - float metallic = (mtl_mat.Ka[0] + mtl_mat.Ka[1] + mtl_mat.Ka[2]) / 3; + /* Metallic: average of `Ka` components. 
*/ + float metallic = (mtl_mat.ambient_color[0] + mtl_mat.ambient_color[1] + + mtl_mat.ambient_color[2]) / + 3; if (do_reflection) { if (metallic < 0.0f) { metallic = 1.0f; @@ -268,7 +270,7 @@ static void set_bsdf_socket_values(bNode *bsdf, Material *mat, const MTLMaterial metallic = 0.0f; } - float ior = mtl_mat.Ni; + float ior = mtl_mat.ior; if (ior < 0) { if (do_tranparency) { ior = 1.0f; @@ -277,12 +279,20 @@ static void set_bsdf_socket_values(bNode *bsdf, Material *mat, const MTLMaterial ior = 1.5f; } } - float alpha = mtl_mat.d; + float alpha = mtl_mat.alpha; if (do_tranparency && alpha < 0) { alpha = 1.0f; } - float3 base_color = {mtl_mat.Kd[0], mtl_mat.Kd[1], mtl_mat.Kd[2]}; + /* PBR values, when present, override the ones calculated above. */ + if (mtl_mat.roughness >= 0) { + roughness = mtl_mat.roughness; + } + if (mtl_mat.metallic >= 0) { + metallic = mtl_mat.metallic; + } + + float3 base_color = mtl_mat.color; if (base_color.x >= 0 && base_color.y >= 0 && base_color.z >= 0) { set_property_of_socket(SOCK_RGBA, "Base Color", {base_color, 3}, bsdf); /* Viewport shading uses legacy r,g,b base color. 
*/ @@ -291,11 +301,11 @@ static void set_bsdf_socket_values(bNode *bsdf, Material *mat, const MTLMaterial mat->b = base_color.z; } - float3 emission_color = {mtl_mat.Ke[0], mtl_mat.Ke[1], mtl_mat.Ke[2]}; + float3 emission_color = mtl_mat.emission_color; if (emission_color.x >= 0 && emission_color.y >= 0 && emission_color.z >= 0) { set_property_of_socket(SOCK_RGBA, "Emission", {emission_color, 3}, bsdf); } - if (mtl_mat.tex_map_of_type(MTLTexMapType::Ke).is_valid()) { + if (mtl_mat.tex_map_of_type(MTLTexMapType::Emission).is_valid()) { set_property_of_socket(SOCK_FLOAT, "Emission Strength", {1.0f}, bsdf); } set_property_of_socket(SOCK_FLOAT, "Specular", {specular}, bsdf); @@ -312,6 +322,30 @@ static void set_bsdf_socket_values(bNode *bsdf, Material *mat, const MTLMaterial if (do_tranparency || (alpha >= 0.0f && alpha < 1.0f)) { mat->blend_method = MA_BM_BLEND; } + + if (mtl_mat.sheen >= 0) { + set_property_of_socket(SOCK_FLOAT, "Sheen", {mtl_mat.sheen}, bsdf); + } + if (mtl_mat.cc_thickness >= 0) { + set_property_of_socket(SOCK_FLOAT, "Clearcoat", {mtl_mat.cc_thickness}, bsdf); + } + if (mtl_mat.cc_roughness >= 0) { + set_property_of_socket(SOCK_FLOAT, "Clearcoat Roughness", {mtl_mat.cc_roughness}, bsdf); + } + if (mtl_mat.aniso >= 0) { + set_property_of_socket(SOCK_FLOAT, "Anisotropic", {mtl_mat.aniso}, bsdf); + } + if (mtl_mat.aniso_rot >= 0) { + set_property_of_socket(SOCK_FLOAT, "Anisotropic Rotation", {mtl_mat.aniso_rot}, bsdf); + } + + /* Transmission: average of transmission color. */ + float transmission = (mtl_mat.transmit_color[0] + mtl_mat.transmit_color[1] + + mtl_mat.transmit_color[2]) / + 3; + if (transmission >= 0) { + set_property_of_socket(SOCK_FLOAT, "Transmission", {transmission}, bsdf); + } } static void add_image_textures(Main *bmain, @@ -341,9 +375,9 @@ static void add_image_textures(Main *bmain, /* Add normal map node if needed. 
*/ bNode *normal_map = nullptr; - if (key == (int)MTLTexMapType::bump) { + if (key == (int)MTLTexMapType::Normal) { normal_map = add_node(ntree, SH_NODE_NORMAL_MAP, node_locx_normalmap, node_locy); - const float bump = std::max(0.0f, mtl_mat.map_Bump_strength); + const float bump = std::max(0.0f, mtl_mat.normal_strength); set_property_of_socket(SOCK_FLOAT, "Strength", {bump}, normal_map); } @@ -362,7 +396,7 @@ static void add_image_textures(Main *bmain, link_sockets(ntree, image_node, "Color", normal_map, "Color"); link_sockets(ntree, normal_map, "Normal", bsdf, "Normal"); } - else if (key == (int)MTLTexMapType::d) { + else if (key == (int)MTLTexMapType::Alpha) { link_sockets(ntree, image_node, "Alpha", bsdf, tex_map_type_to_socket_id[key]); mat->blend_method = MA_BM_BLEND; } diff --git a/source/blender/io/wavefront_obj/tests/obj_exporter_tests.cc b/source/blender/io/wavefront_obj/tests/obj_exporter_tests.cc index 0fd711bdac6..dcba78ac99e 100644 --- a/source/blender/io/wavefront_obj/tests/obj_exporter_tests.cc +++ b/source/blender/io/wavefront_obj/tests/obj_exporter_tests.cc @@ -527,4 +527,27 @@ TEST_F(obj_exporter_regression_test, all_objects_mat_groups) _export.params); } +TEST_F(obj_exporter_regression_test, materials_without_pbr) +{ + OBJExportParamsDefault _export; + _export.params.export_normals = false; + _export.params.path_mode = PATH_REFERENCE_RELATIVE; + compare_obj_export_to_golden("io_tests/blend_geometry/materials_pbr.blend", + "io_tests/obj/materials_without_pbr.obj", + "io_tests/obj/materials_without_pbr.mtl", + _export.params); +} + +TEST_F(obj_exporter_regression_test, materials_pbr) +{ + OBJExportParamsDefault _export; + _export.params.export_normals = false; + _export.params.path_mode = PATH_REFERENCE_RELATIVE; + _export.params.export_pbr_extensions = true; + compare_obj_export_to_golden("io_tests/blend_geometry/materials_pbr.blend", + "io_tests/obj/materials_pbr.obj", + "io_tests/obj/materials_pbr.mtl", + _export.params); +} + } // namespace 
blender::io::obj diff --git a/source/blender/io/wavefront_obj/tests/obj_exporter_tests.hh b/source/blender/io/wavefront_obj/tests/obj_exporter_tests.hh index 7d3b41ed527..006d86312b6 100644 --- a/source/blender/io/wavefront_obj/tests/obj_exporter_tests.hh +++ b/source/blender/io/wavefront_obj/tests/obj_exporter_tests.hh @@ -31,6 +31,7 @@ struct OBJExportParamsDefault { params.path_mode = PATH_REFERENCE_AUTO; params.export_triangulated_mesh = false; params.export_curves_as_nurbs = false; + params.export_pbr_extensions = false; params.export_object_groups = false; params.export_material_groups = false; diff --git a/source/blender/io/wavefront_obj/tests/obj_mtl_parser_tests.cc b/source/blender/io/wavefront_obj/tests/obj_mtl_parser_tests.cc index 5691aa5bea1..e473d629673 100644 --- a/source/blender/io/wavefront_obj/tests/obj_mtl_parser_tests.cc +++ b/source/blender/io/wavefront_obj/tests/obj_mtl_parser_tests.cc @@ -50,15 +50,23 @@ class obj_mtl_parser_test : public testing::Test { } const MTLMaterial &got = *materials.lookup(exp.name); const float tol = 0.0001f; - EXPECT_V3_NEAR(exp.Ka, got.Ka, tol); - EXPECT_V3_NEAR(exp.Kd, got.Kd, tol); - EXPECT_V3_NEAR(exp.Ks, got.Ks, tol); - EXPECT_V3_NEAR(exp.Ke, got.Ke, tol); - EXPECT_NEAR(exp.Ns, got.Ns, tol); - EXPECT_NEAR(exp.Ni, got.Ni, tol); - EXPECT_NEAR(exp.d, got.d, tol); - EXPECT_NEAR(exp.map_Bump_strength, got.map_Bump_strength, tol); - EXPECT_EQ(exp.illum, got.illum); + EXPECT_V3_NEAR(exp.ambient_color, got.ambient_color, tol); + EXPECT_V3_NEAR(exp.color, got.color, tol); + EXPECT_V3_NEAR(exp.spec_color, got.spec_color, tol); + EXPECT_V3_NEAR(exp.emission_color, got.emission_color, tol); + EXPECT_V3_NEAR(exp.transmit_color, got.transmit_color, tol); + EXPECT_NEAR(exp.spec_exponent, got.spec_exponent, tol); + EXPECT_NEAR(exp.ior, got.ior, tol); + EXPECT_NEAR(exp.alpha, got.alpha, tol); + EXPECT_NEAR(exp.normal_strength, got.normal_strength, tol); + EXPECT_EQ(exp.illum_mode, got.illum_mode); + EXPECT_NEAR(exp.roughness, 
got.roughness, tol); + EXPECT_NEAR(exp.metallic, got.metallic, tol); + EXPECT_NEAR(exp.sheen, got.sheen, tol); + EXPECT_NEAR(exp.cc_thickness, got.cc_thickness, tol); + EXPECT_NEAR(exp.cc_roughness, got.cc_roughness, tol); + EXPECT_NEAR(exp.aniso, got.aniso, tol); + EXPECT_NEAR(exp.aniso_rot, got.aniso_rot, tol); for (int key = 0; key < (int)MTLTexMapType::Count; key++) { const MTLTexMap &exp_tex = exp.texture_maps[key]; const MTLTexMap &got_tex = got.texture_maps[key]; @@ -102,20 +110,20 @@ TEST_F(obj_mtl_parser_test, string_newlines_whitespace) "map_Ks sometex_s_spaces_after_name.png \t \r\n"; MTLMaterial mat[6]; mat[0].name = "simple"; - mat[0].Ka = {0.1f, 0.2f, 0.3f}; - mat[0].illum = 4; + mat[0].ambient_color = {0.1f, 0.2f, 0.3f}; + mat[0].illum_mode = 4; mat[1].name = "tab_indentation"; - mat[1].Kd = {0.2f, 0.3f, 0.4f}; + mat[1].color = {0.2f, 0.3f, 0.4f}; mat[2].name = "space_after_name"; - mat[2].Ks = {0.4f, 0.5f, 0.6f}; + mat[2].spec_color = {0.4f, 0.5f, 0.6f}; mat[3].name = "space_before_name"; mat[4].name = "indented_values"; - mat[4].Ka = {0.5f, 0.6f, 0.7f}; - mat[4].Kd = {0.6f, 0.7f, 0.8f}; + mat[4].ambient_color = {0.5f, 0.6f, 0.7f}; + mat[4].color = {0.6f, 0.7f, 0.8f}; mat[5].name = "crlf_ending"; - mat[5].Ns = 5.0f; - mat[5].tex_map_of_type(MTLTexMapType::Kd).image_path = "sometex_d.png"; - mat[5].tex_map_of_type(MTLTexMapType::Ks).image_path = "sometex_s_spaces_after_name.png"; + mat[5].spec_exponent = 5.0f; + mat[5].tex_map_of_type(MTLTexMapType::Color).image_path = "sometex_d.png"; + mat[5].tex_map_of_type(MTLTexMapType::Specular).image_path = "sometex_s_spaces_after_name.png"; check_string(text, mat, ARRAY_SIZE(mat)); } @@ -123,8 +131,8 @@ TEST_F(obj_mtl_parser_test, cube) { MTLMaterial mat; mat.name = "red"; - mat.Ka = {0.2f, 0.2f, 0.2f}; - mat.Kd = {1, 0, 0}; + mat.ambient_color = {0.2f, 0.2f, 0.2f}; + mat.color = {1, 0, 0}; check("cube.mtl", &mat, 1); } @@ -132,28 +140,28 @@ TEST_F(obj_mtl_parser_test, all_objects) { MTLMaterial mat[7]; for 
(auto &m : mat) { - m.Ka = {1, 1, 1}; - m.Ks = {0.5f, 0.5f, 0.5f}; - m.Ke = {0, 0, 0}; - m.Ns = 250; - m.Ni = 1; - m.d = 1; - m.illum = 2; + m.ambient_color = {1, 1, 1}; + m.spec_color = {0.5f, 0.5f, 0.5f}; + m.emission_color = {0, 0, 0}; + m.spec_exponent = 250; + m.ior = 1; + m.alpha = 1; + m.illum_mode = 2; } mat[0].name = "Blue"; - mat[0].Kd = {0, 0, 1}; + mat[0].color = {0, 0, 1}; mat[1].name = "BlueDark"; - mat[1].Kd = {0, 0, 0.5f}; + mat[1].color = {0, 0, 0.5f}; mat[2].name = "Green"; - mat[2].Kd = {0, 1, 0}; + mat[2].color = {0, 1, 0}; mat[3].name = "GreenDark"; - mat[3].Kd = {0, 0.5f, 0}; + mat[3].color = {0, 0.5f, 0}; mat[4].name = "Material"; - mat[4].Kd = {0.8f, 0.8f, 0.8f}; + mat[4].color = {0.8f, 0.8f, 0.8f}; mat[5].name = "Red"; - mat[5].Kd = {1, 0, 0}; + mat[5].color = {1, 0, 0}; mat[6].name = "RedDark"; - mat[6].Kd = {0.5f, 0, 0}; + mat[6].color = {0.5f, 0, 0}; check("all_objects.mtl", mat, ARRAY_SIZE(mat)); } @@ -161,92 +169,101 @@ TEST_F(obj_mtl_parser_test, materials) { MTLMaterial mat[6]; mat[0].name = "no_textures_red"; - mat[0].Ka = {0.3f, 0.3f, 0.3f}; - mat[0].Kd = {0.8f, 0.3f, 0.1f}; - mat[0].Ns = 5.624998f; + mat[0].ambient_color = {0.3f, 0.3f, 0.3f}; + mat[0].color = {0.8f, 0.3f, 0.1f}; + mat[0].spec_exponent = 5.624998f; mat[1].name = "four_maps"; - mat[1].Ka = {1, 1, 1}; - mat[1].Kd = {0.8f, 0.8f, 0.8f}; - mat[1].Ks = {0.5f, 0.5f, 0.5f}; - mat[1].Ke = {0, 0, 0}; - mat[1].Ns = 1000; - mat[1].Ni = 1.45f; - mat[1].d = 1; - mat[1].illum = 2; - mat[1].map_Bump_strength = 1; + mat[1].ambient_color = {1, 1, 1}; + mat[1].color = {0.8f, 0.8f, 0.8f}; + mat[1].spec_color = {0.5f, 0.5f, 0.5f}; + mat[1].emission_color = {0, 0, 0}; + mat[1].spec_exponent = 1000; + mat[1].ior = 1.45f; + mat[1].alpha = 1; + mat[1].illum_mode = 2; + mat[1].normal_strength = 1; { - MTLTexMap &kd = mat[1].tex_map_of_type(MTLTexMapType::Kd); + MTLTexMap &kd = mat[1].tex_map_of_type(MTLTexMapType::Color); kd.image_path = "texture.png"; - MTLTexMap &ns = 
mat[1].tex_map_of_type(MTLTexMapType::Ns); + MTLTexMap &ns = mat[1].tex_map_of_type(MTLTexMapType::SpecularExponent); ns.image_path = "sometexture_Roughness.png"; - MTLTexMap &refl = mat[1].tex_map_of_type(MTLTexMapType::refl); + MTLTexMap &refl = mat[1].tex_map_of_type(MTLTexMapType::Reflection); refl.image_path = "sometexture_Metallic.png"; - MTLTexMap &bump = mat[1].tex_map_of_type(MTLTexMapType::bump); + MTLTexMap &bump = mat[1].tex_map_of_type(MTLTexMapType::Normal); bump.image_path = "sometexture_Normal.png"; } mat[2].name = "Clay"; - mat[2].Ka = {1, 1, 1}; - mat[2].Kd = {0.8f, 0.682657f, 0.536371f}; - mat[2].Ks = {0.5f, 0.5f, 0.5f}; - mat[2].Ke = {0, 0, 0}; - mat[2].Ns = 440.924042f; - mat[2].Ni = 1.45f; - mat[2].d = 1; - mat[2].illum = 2; + mat[2].ambient_color = {1, 1, 1}; + mat[2].color = {0.8f, 0.682657f, 0.536371f}; + mat[2].spec_color = {0.5f, 0.5f, 0.5f}; + mat[2].emission_color = {0, 0, 0}; + mat[2].spec_exponent = 440.924042f; + mat[2].ior = 1.45f; + mat[2].alpha = 1; + mat[2].illum_mode = 2; mat[3].name = "Hat"; - mat[3].Ka = {1, 1, 1}; - mat[3].Kd = {0.8f, 0.8f, 0.8f}; - mat[3].Ks = {0.5f, 0.5f, 0.5f}; - mat[3].Ns = 800; - mat[3].map_Bump_strength = 0.5f; + mat[3].ambient_color = {1, 1, 1}; + mat[3].color = {0.8f, 0.8f, 0.8f}; + mat[3].spec_color = {0.5f, 0.5f, 0.5f}; + mat[3].spec_exponent = 800; + mat[3].normal_strength = 0.5f; { - MTLTexMap &kd = mat[3].tex_map_of_type(MTLTexMapType::Kd); + MTLTexMap &kd = mat[3].tex_map_of_type(MTLTexMapType::Color); kd.image_path = "someHatTexture_BaseColor.jpg"; - MTLTexMap &ns = mat[3].tex_map_of_type(MTLTexMapType::Ns); + MTLTexMap &ns = mat[3].tex_map_of_type(MTLTexMapType::SpecularExponent); ns.image_path = "someHatTexture_Roughness.jpg"; - MTLTexMap &refl = mat[3].tex_map_of_type(MTLTexMapType::refl); + MTLTexMap &refl = mat[3].tex_map_of_type(MTLTexMapType::Reflection); refl.image_path = "someHatTexture_Metalness.jpg"; - MTLTexMap &bump = mat[3].tex_map_of_type(MTLTexMapType::bump); + MTLTexMap &bump = 
mat[3].tex_map_of_type(MTLTexMapType::Normal); bump.image_path = "someHatTexture_Normal.jpg"; } mat[4].name = "Parser_Test"; - mat[4].Ka = {0.1f, 0.2f, 0.3f}; - mat[4].Kd = {0.4f, 0.5f, 0.6f}; - mat[4].Ks = {0.7f, 0.8f, 0.9f}; - mat[4].illum = 6; - mat[4].Ns = 15.5; - mat[4].Ni = 1.5; - mat[4].d = 0.5; - mat[4].map_Bump_strength = 0.1f; + mat[4].ambient_color = {0.1f, 0.2f, 0.3f}; + mat[4].color = {0.4f, 0.5f, 0.6f}; + mat[4].spec_color = {0.7f, 0.8f, 0.9f}; + mat[4].illum_mode = 6; + mat[4].spec_exponent = 15.5; + mat[4].ior = 1.5; + mat[4].alpha = 0.5; + mat[4].normal_strength = 0.1f; + mat[4].transmit_color = {0.1f, 0.3f, 0.5f}; + mat[4].normal_strength = 0.1f; + mat[4].roughness = 0.2f; + mat[4].metallic = 0.3f; + mat[4].sheen = 0.4f; + mat[4].cc_thickness = 0.5f; + mat[4].cc_roughness = 0.6f; + mat[4].aniso = 0.7f; + mat[4].aniso_rot = 0.8f; { - MTLTexMap &kd = mat[4].tex_map_of_type(MTLTexMapType::Kd); + MTLTexMap &kd = mat[4].tex_map_of_type(MTLTexMapType::Color); kd.image_path = "sometex_d.png"; - MTLTexMap &ns = mat[4].tex_map_of_type(MTLTexMapType::Ns); + MTLTexMap &ns = mat[4].tex_map_of_type(MTLTexMapType::SpecularExponent); ns.image_path = "sometex_ns.psd"; - MTLTexMap &refl = mat[4].tex_map_of_type(MTLTexMapType::refl); + MTLTexMap &refl = mat[4].tex_map_of_type(MTLTexMapType::Reflection); refl.image_path = "clouds.tiff"; refl.scale = {1.5f, 2.5f, 3.5f}; refl.translation = {4.5f, 5.5f, 6.5f}; refl.projection_type = SHD_PROJ_SPHERE; - MTLTexMap &bump = mat[4].tex_map_of_type(MTLTexMapType::bump); + MTLTexMap &bump = mat[4].tex_map_of_type(MTLTexMapType::Normal); bump.image_path = "somebump.tga"; bump.scale = {3, 4, 5}; } mat[5].name = "Parser_ScaleOffset_Test"; { - MTLTexMap &kd = mat[5].tex_map_of_type(MTLTexMapType::Kd); + MTLTexMap &kd = mat[5].tex_map_of_type(MTLTexMapType::Color); kd.translation = {2.5f, 0.0f, 0.0f}; kd.image_path = "OffsetOneValue.png"; - MTLTexMap &ks = mat[5].tex_map_of_type(MTLTexMapType::Ks); + MTLTexMap &ks = 
mat[5].tex_map_of_type(MTLTexMapType::Specular); ks.scale = {1.5f, 2.5f, 1.0f}; ks.translation = {3.5f, 4.5f, 0.0f}; ks.image_path = "ScaleOffsetBothTwovalues.png"; - MTLTexMap &ns = mat[5].tex_map_of_type(MTLTexMapType::Ns); + MTLTexMap &ns = mat[5].tex_map_of_type(MTLTexMapType::SpecularExponent); ns.scale = {0.5f, 1.0f, 1.0f}; ns.image_path = "1.Value.png"; } @@ -254,4 +271,75 @@ TEST_F(obj_mtl_parser_test, materials) check("materials.mtl", mat, ARRAY_SIZE(mat)); } +TEST_F(obj_mtl_parser_test, materials_without_pbr) +{ + MTLMaterial mat[2]; + mat[0].name = "Mat1"; + mat[0].spec_exponent = 360.0f; + mat[0].ambient_color = {0.9f, 0.9f, 0.9f}; + mat[0].color = {0.8f, 0.276449f, 0.101911f}; + mat[0].spec_color = {0.25f, 0.25f, 0.25f}; + mat[0].emission_color = {0, 0, 0}; + mat[0].ior = 1.45f; + mat[0].alpha = 1; + mat[0].illum_mode = 3; + + mat[1].name = "Mat2"; + mat[1].ambient_color = {1, 1, 1}; + mat[1].color = {0.8f, 0.8f, 0.8f}; + mat[1].spec_color = {0.5f, 0.5f, 0.5f}; + mat[1].ior = 1.45f; + mat[1].alpha = 1; + mat[1].illum_mode = 2; + { + MTLTexMap &ns = mat[1].tex_map_of_type(MTLTexMapType::SpecularExponent); + ns.image_path = "../blend_geometry/texture_roughness.png"; + MTLTexMap &ke = mat[1].tex_map_of_type(MTLTexMapType::Emission); + ke.image_path = "../blend_geometry/texture_illum.png"; + } + + check("materials_without_pbr.mtl", mat, ARRAY_SIZE(mat)); +} + +TEST_F(obj_mtl_parser_test, materials_pbr) +{ + MTLMaterial mat[2]; + mat[0].name = "Mat1"; + mat[0].color = {0.8f, 0.276449f, 0.101911f}; + mat[0].spec_color = {0.25f, 0.25f, 0.25f}; + mat[0].emission_color = {0, 0, 0}; + mat[0].ior = 1.45f; + mat[0].alpha = 1; + mat[0].illum_mode = 3; + mat[0].roughness = 0.4f; + mat[0].metallic = 0.9f; + mat[0].sheen = 0.3f; + mat[0].cc_thickness = 0.393182f; + mat[0].cc_roughness = 0.05f; + mat[0].aniso = 0.2f; + mat[0].aniso_rot = 0.0f; + + mat[1].name = "Mat2"; + mat[1].color = {0.8f, 0.8f, 0.8f}; + mat[1].spec_color = {0.5f, 0.5f, 0.5f}; + mat[1].ior = 1.45f; 
+ mat[1].alpha = 1; + mat[1].illum_mode = 2; + mat[1].metallic = 0.0f; + mat[1].cc_thickness = 0.3f; + mat[1].cc_roughness = 0.4f; + mat[1].aniso = 0.8f; + mat[1].aniso_rot = 0.7f; + { + MTLTexMap &pr = mat[1].tex_map_of_type(MTLTexMapType::Roughness); + pr.image_path = "../blend_geometry/texture_roughness.png"; + MTLTexMap &ps = mat[1].tex_map_of_type(MTLTexMapType::Sheen); + ps.image_path = "../blend_geometry/texture_checker.png"; + MTLTexMap &ke = mat[1].tex_map_of_type(MTLTexMapType::Emission); + ke.image_path = "../blend_geometry/texture_illum.png"; + } + + check("materials_pbr.mtl", mat, ARRAY_SIZE(mat)); +} + } // namespace blender::io::obj diff --git a/source/blender/makesdna/DNA_layer_types.h b/source/blender/makesdna/DNA_layer_types.h index 2176df7f4ec..b9aadcaf183 100644 --- a/source/blender/makesdna/DNA_layer_types.h +++ b/source/blender/makesdna/DNA_layer_types.h @@ -34,10 +34,18 @@ typedef enum eViewLayerEEVEEPassType { EEVEE_RENDER_PASS_AO = (1 << 13), EEVEE_RENDER_PASS_BLOOM = (1 << 14), EEVEE_RENDER_PASS_AOV = (1 << 15), + /* + * TODO(jbakker): Clean up conflicting bits after EEVEE has been removed. + * EEVEE_RENDER_PASS_CRYPTOMATTE is for EEVEE, EEVEE_RENDER_PASS_CRYPTOMATTE_* are for + * EEVEE-Next.
+ */ EEVEE_RENDER_PASS_CRYPTOMATTE = (1 << 16), - EEVEE_RENDER_PASS_VECTOR = (1 << 17), + EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT = (1 << 16), + EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET = (1 << 17), + EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL = (1 << 18), + EEVEE_RENDER_PASS_VECTOR = (1 << 19), } eViewLayerEEVEEPassType; -#define EEVEE_RENDER_PASS_MAX_BIT 18 +#define EEVEE_RENDER_PASS_MAX_BIT 20 /* #ViewLayerAOV.type */ typedef enum eViewLayerAOVType { diff --git a/source/blender/makesdna/DNA_meshdata_types.h b/source/blender/makesdna/DNA_meshdata_types.h index e621343b818..77cb27083ab 100644 --- a/source/blender/makesdna/DNA_meshdata_types.h +++ b/source/blender/makesdna/DNA_meshdata_types.h @@ -29,7 +29,7 @@ typedef struct MVert { /** * Deprecated bevel weight storage, now located in #CD_BWEIGHT, except for file read and write. */ - char bweight DNA_DEPRECATED; + char bweight_legacy; char _pad[2]; } MVert; @@ -55,7 +55,7 @@ typedef struct MEdge { /** * Deprecated bevel weight storage, now located in #CD_BWEIGHT, except for file read and write. */ - char bweight DNA_DEPRECATED; + char bweight_legacy; short flag; } MEdge; @@ -83,7 +83,7 @@ typedef struct MPoly { /** Keep signed since we need to subtract when getting the previous loop. */ int totloop; /** Deprecated material index. Now stored in the "material_index" attribute, but kept for IO. */ - short mat_nr DNA_DEPRECATED; + short mat_nr_legacy; char flag, _pad; } MPoly; diff --git a/source/blender/makesdna/DNA_node_types.h b/source/blender/makesdna/DNA_node_types.h index b19210968d9..735f5c7b20a 100644 --- a/source/blender/makesdna/DNA_node_types.h +++ b/source/blender/makesdna/DNA_node_types.h @@ -637,6 +637,9 @@ typedef struct bNodeTree { /** A span containing all nodes in the node tree. */ blender::Span<bNode *> all_nodes(); blender::Span<const bNode *> all_nodes() const; + /** A span containing all group nodes in the node tree. 
*/ + blender::Span<bNode *> group_nodes(); + blender::Span<const bNode *> group_nodes() const; /** A span containing all input sockets in the node tree. */ blender::Span<bNodeSocket *> all_input_sockets(); blender::Span<const bNodeSocket *> all_input_sockets() const; diff --git a/source/blender/makesdna/DNA_scene_types.h b/source/blender/makesdna/DNA_scene_types.h index 40345c31fef..f184460cba4 100644 --- a/source/blender/makesdna/DNA_scene_types.h +++ b/source/blender/makesdna/DNA_scene_types.h @@ -304,6 +304,10 @@ typedef enum eScenePassType { #define RE_PASSNAME_BLOOM "BloomCol" #define RE_PASSNAME_VOLUME_LIGHT "VolumeDir" +#define RE_PASSNAME_CRYPTOMATTE_OBJECT "CryptoObject" +#define RE_PASSNAME_CRYPTOMATTE_ASSET "CryptoAsset" +#define RE_PASSNAME_CRYPTOMATTE_MATERIAL "CryptoMaterial" + /** View - MultiView. */ typedef struct SceneRenderView { struct SceneRenderView *next, *prev; diff --git a/source/blender/makesdna/intern/dna_rename_defs.h b/source/blender/makesdna/intern/dna_rename_defs.h index f25ff5fbbb8..257e60eae98 100644 --- a/source/blender/makesdna/intern/dna_rename_defs.h +++ b/source/blender/makesdna/intern/dna_rename_defs.h @@ -97,6 +97,9 @@ DNA_STRUCT_RENAME_ELEM(Object, dupfacesca, instance_faces_scale) DNA_STRUCT_RENAME_ELEM(Object, restrictflag, visibility_flag) DNA_STRUCT_RENAME_ELEM(Object, size, scale) DNA_STRUCT_RENAME_ELEM(Object_Runtime, crazyspace_num_verts, crazyspace_verts_num) +DNA_STRUCT_RENAME_ELEM(MEdge, bweight, bweight_legacy) +DNA_STRUCT_RENAME_ELEM(MPoly, mat_nr, mat_nr_legacy) +DNA_STRUCT_RENAME_ELEM(MVert, bweight, bweight_legacy) DNA_STRUCT_RENAME_ELEM(ParticleSettings, child_nbr, child_percent) DNA_STRUCT_RENAME_ELEM(ParticleSettings, dup_group, instance_collection) DNA_STRUCT_RENAME_ELEM(ParticleSettings, dup_ob, instance_object) diff --git a/source/blender/makesrna/intern/rna_object.c b/source/blender/makesrna/intern/rna_object.c index 6cbc24db2d8..cfc3a832166 100644 --- a/source/blender/makesrna/intern/rna_object.c +++ 
b/source/blender/makesrna/intern/rna_object.c @@ -500,7 +500,7 @@ void rna_Object_data_update(Main *bmain, Scene *scene, PointerRNA *ptr) Object *object = (Object *)ptr->data; if (object->mode == OB_MODE_SCULPT) { - BKE_sculpt_ensure_orig_mesh_data(scene, object); + BKE_sculpt_ensure_orig_mesh_data(object); } rna_Object_internal_update_data_dependency(bmain, scene, ptr); diff --git a/source/blender/makesrna/intern/rna_space.c b/source/blender/makesrna/intern/rna_space.c index aa5de2094a0..268aacfccc6 100644 --- a/source/blender/makesrna/intern/rna_space.c +++ b/source/blender/makesrna/intern/rna_space.c @@ -455,6 +455,9 @@ static const EnumPropertyItem rna_enum_view3dshading_render_pass_type_items[] = RNA_ENUM_ITEM_HEADING(N_("Data"), NULL), {EEVEE_RENDER_PASS_NORMAL, "NORMAL", 0, "Normal", ""}, {EEVEE_RENDER_PASS_MIST, "MIST", 0, "Mist", ""}, + {EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT, "CryptoObject", 0, "CryptoObject", ""}, + {EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET, "CryptoAsset", 0, "CryptoAsset", ""}, + {EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL, "CryptoMaterial", 0, "CryptoMaterial", ""}, RNA_ENUM_ITEM_HEADING(N_("Shader AOV"), NULL), {EEVEE_RENDER_PASS_AOV, "AOV", 0, "AOV", ""}, @@ -1423,6 +1426,7 @@ static const EnumPropertyItem *rna_3DViewShading_render_pass_itemf(bContext *C, const bool bloom_enabled = scene->eevee.flag & SCE_EEVEE_BLOOM_ENABLED; const bool aov_available = BKE_view_layer_has_valid_aov(view_layer); + const bool eevee_next_active = STREQ(scene->r.engine, "BLENDER_EEVEE_NEXT"); int totitem = 0; EnumPropertyItem *result = NULL; @@ -1443,6 +1447,12 @@ static const EnumPropertyItem *rna_3DViewShading_render_pass_itemf(bContext *C, aov_template.value++; } } + else if (ELEM(item->value, + EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT, + EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET, + EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL) && + !eevee_next_active) { + } else if (!((!bloom_enabled && (item->value == EEVEE_RENDER_PASS_BLOOM || STREQ(item->name, "Effects"))) || (!aov_available 
&& STREQ(item->name, "Shader AOV")))) { diff --git a/source/blender/makesrna/intern/rna_userdef.c b/source/blender/makesrna/intern/rna_userdef.c index 61d4edccb06..0031e023d39 100644 --- a/source/blender/makesrna/intern/rna_userdef.c +++ b/source/blender/makesrna/intern/rna_userdef.c @@ -6377,7 +6377,7 @@ static void rna_def_userdef_experimental(BlenderRNA *brna) RNA_def_property_boolean_sdna(prop, NULL, "use_viewport_debug", 1); RNA_def_property_ui_text(prop, "Viewport Debug", - "Enable viewport debugging options for developpers in the overlays " + "Enable viewport debugging options for developers in the overlays " "pop-over"); RNA_def_property_update(prop, 0, "rna_userdef_ui_update"); } diff --git a/source/blender/modifiers/CMakeLists.txt b/source/blender/modifiers/CMakeLists.txt index 73daabec9b3..8bace2e048c 100644 --- a/source/blender/modifiers/CMakeLists.txt +++ b/source/blender/modifiers/CMakeLists.txt @@ -65,7 +65,6 @@ set(SRC intern/MOD_mirror.c intern/MOD_multires.c intern/MOD_nodes.cc - intern/MOD_nodes_evaluator.cc intern/MOD_none.c intern/MOD_normal_edit.c intern/MOD_ocean.c @@ -105,7 +104,6 @@ set(SRC MOD_modifiertypes.h MOD_nodes.h intern/MOD_meshcache_util.h - intern/MOD_nodes_evaluator.hh intern/MOD_solidify_util.h intern/MOD_ui_common.h intern/MOD_util.h diff --git a/source/blender/modifiers/intern/MOD_nodes.cc b/source/blender/modifiers/intern/MOD_nodes.cc index 2908fbf5597..ffd78a90638 100644 --- a/source/blender/modifiers/intern/MOD_nodes.cc +++ b/source/blender/modifiers/intern/MOD_nodes.cc @@ -36,6 +36,7 @@ #include "DNA_windowmanager_types.h" #include "BKE_attribute_math.hh" +#include "BKE_compute_contexts.hh" #include "BKE_customdata.h" #include "BKE_geometry_fields.hh" #include "BKE_geometry_set_instances.hh" @@ -73,7 +74,6 @@ #include "MOD_modifiertypes.h" #include "MOD_nodes.h" -#include "MOD_nodes_evaluator.hh" #include "MOD_ui_common.h" #include "ED_object.h" @@ -81,15 +81,18 @@ #include "ED_spreadsheet.h" #include "ED_undo.h" 
-#include "NOD_derived_node_tree.hh" #include "NOD_geometry.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_lazy_function.hh" #include "NOD_node_declaration.hh" #include "FN_field.hh" #include "FN_field_cpp_type.hh" +#include "FN_lazy_function_execute.hh" +#include "FN_lazy_function_graph_executor.hh" #include "FN_multi_function.hh" +namespace lf = blender::fn::lazy_function; + using blender::Array; using blender::ColorGeometry4f; using blender::CPPType; @@ -106,6 +109,7 @@ using blender::MultiValueMap; using blender::MutableSpan; using blender::Set; using blender::Span; +using blender::Stack; using blender::StringRef; using blender::StringRefNull; using blender::Vector; @@ -117,11 +121,17 @@ using blender::fn::ValueOrFieldCPPType; using blender::nodes::FieldInferencingInterface; using blender::nodes::GeoNodeExecParams; using blender::nodes::InputSocketFieldType; +using blender::nodes::geo_eval_log::GeoModifierLog; using blender::threading::EnumerableThreadSpecific; using namespace blender::fn::multi_function_types; -using namespace blender::nodes::derived_node_tree_types; -using geo_log::eNamedAttrUsage; -using geo_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryInfoLog; +using blender::nodes::geo_eval_log::GeoNodeLog; +using blender::nodes::geo_eval_log::GeoTreeLog; +using blender::nodes::geo_eval_log::NamedAttributeUsage; +using blender::nodes::geo_eval_log::NodeWarning; +using blender::nodes::geo_eval_log::NodeWarningType; +using blender::nodes::geo_eval_log::ValueLog; static void initData(ModifierData *md) { @@ -756,36 +766,37 @@ void MOD_nodes_update_interface(Object *object, NodesModifierData *nmd) } static void initialize_group_input(NodesModifierData &nmd, - const bNodeSocket &socket, + const bNodeSocket &interface_socket, + const int input_index, void *r_value) { - const bNodeSocketType &socket_type = *socket.typeinfo; - const bNodeSocket 
&bsocket = socket; - const eNodeSocketDatatype socket_data_type = static_cast<eNodeSocketDatatype>(bsocket.type); + const bNodeSocketType &socket_type = *interface_socket.typeinfo; + const eNodeSocketDatatype socket_data_type = static_cast<eNodeSocketDatatype>( + interface_socket.type); if (nmd.settings.properties == nullptr) { - socket_type.get_geometry_nodes_cpp_value(bsocket, r_value); + socket_type.get_geometry_nodes_cpp_value(interface_socket, r_value); return; } const IDProperty *property = IDP_GetPropertyFromGroup(nmd.settings.properties, - socket.identifier); + interface_socket.identifier); if (property == nullptr) { - socket_type.get_geometry_nodes_cpp_value(bsocket, r_value); + socket_type.get_geometry_nodes_cpp_value(interface_socket, r_value); return; } - if (!id_property_type_matches_socket(bsocket, *property)) { - socket_type.get_geometry_nodes_cpp_value(bsocket, r_value); + if (!id_property_type_matches_socket(interface_socket, *property)) { + socket_type.get_geometry_nodes_cpp_value(interface_socket, r_value); return; } - if (!input_has_attribute_toggle(*nmd.node_group, socket.runtime->index_in_node)) { + if (!input_has_attribute_toggle(*nmd.node_group, input_index)) { init_socket_cpp_value_from_property(*property, socket_data_type, r_value); return; } const IDProperty *property_use_attribute = IDP_GetPropertyFromGroup( - nmd.settings.properties, (socket.identifier + use_attribute_suffix).c_str()); + nmd.settings.properties, (interface_socket.identifier + use_attribute_suffix).c_str()); const IDProperty *property_attribute_name = IDP_GetPropertyFromGroup( - nmd.settings.properties, (socket.identifier + attribute_name_suffix).c_str()); + nmd.settings.properties, (interface_socket.identifier + attribute_name_suffix).c_str()); if (property_use_attribute == nullptr || property_attribute_name == nullptr) { init_socket_cpp_value_from_property(*property, socket_data_type, r_value); return; @@ -831,13 +842,25 @@ static Vector<SpaceSpreadsheet *> 
find_spreadsheet_editors(Main *bmain) return spreadsheets; } -static void find_sockets_to_preview_for_spreadsheet(SpaceSpreadsheet *sspreadsheet, - NodesModifierData *nmd, - const ModifierEvalContext *ctx, - const DerivedNodeTree &tree, - Set<DSocket> &r_sockets_to_preview) +static const lf::FunctionNode &find_viewer_lf_node(const bNode &viewer_bnode) +{ + return *blender::nodes::ensure_geometry_nodes_lazy_function_graph(viewer_bnode.owner_tree()) + ->mapping.viewer_node_map.lookup(&viewer_bnode); +} +static const lf::FunctionNode &find_group_lf_node(const bNode &group_bnode) +{ + return *blender::nodes::ensure_geometry_nodes_lazy_function_graph(group_bnode.owner_tree()) + ->mapping.group_node_map.lookup(&group_bnode); +} + +static void find_side_effect_nodes_for_spreadsheet( + const SpaceSpreadsheet &sspreadsheet, + const NodesModifierData &nmd, + const ModifierEvalContext &ctx, + const bNodeTree &root_tree, + MultiValueMap<blender::ComputeContextHash, const lf::FunctionNode *> &r_side_effect_nodes) { - Vector<SpreadsheetContext *> context_path = sspreadsheet->context_path; + Vector<SpreadsheetContext *> context_path = sspreadsheet.context_path; if (context_path.size() < 3) { return; } @@ -848,11 +871,11 @@ static void find_sockets_to_preview_for_spreadsheet(SpaceSpreadsheet *sspreadshe return; } SpreadsheetContextObject *object_context = (SpreadsheetContextObject *)context_path[0]; - if (object_context->object != DEG_get_original_object(ctx->object)) { + if (object_context->object != DEG_get_original_object(ctx.object)) { return; } SpreadsheetContextModifier *modifier_context = (SpreadsheetContextModifier *)context_path[1]; - if (StringRef(modifier_context->modifier_name) != nmd->modifier.name) { + if (StringRef(modifier_context->modifier_name) != nmd.modifier.name) { return; } for (SpreadsheetContext *context : context_path.as_span().drop_front(2)) { @@ -861,61 +884,77 @@ static void find_sockets_to_preview_for_spreadsheet(SpaceSpreadsheet *sspreadshe } } - 
Span<SpreadsheetContextNode *> nested_group_contexts = + blender::ComputeContextBuilder compute_context_builder; + compute_context_builder.push<blender::bke::ModifierComputeContext>(nmd.modifier.name); + + const Span<SpreadsheetContextNode *> nested_group_contexts = context_path.as_span().drop_front(2).drop_back(1).cast<SpreadsheetContextNode *>(); - SpreadsheetContextNode *last_context = (SpreadsheetContextNode *)context_path.last(); + const SpreadsheetContextNode *last_context = (SpreadsheetContextNode *)context_path.last(); - const DTreeContext *context = &tree.root_context(); + Stack<const bNode *> group_node_stack; + const bNodeTree *group = &root_tree; for (SpreadsheetContextNode *node_context : nested_group_contexts) { - const bNodeTree &btree = context->btree(); const bNode *found_node = nullptr; - for (const bNode *bnode : btree.all_nodes()) { - if (STREQ(bnode->name, node_context->node_name)) { - found_node = bnode; + for (const bNode *node : group->group_nodes()) { + if (STREQ(node->name, node_context->node_name)) { + found_node = node; break; } } if (found_node == nullptr) { return; } - context = context->child_context(*found_node); - if (context == nullptr) { + if (found_node->id == nullptr) { return; } + group_node_stack.push(found_node); + group = reinterpret_cast<const bNodeTree *>(found_node->id); + compute_context_builder.push<blender::bke::NodeGroupComputeContext>(node_context->node_name); } - const bNodeTree &btree = context->btree(); - for (const bNode *bnode : btree.nodes_by_type("GeometryNodeViewer")) { - if (STREQ(bnode->name, last_context->node_name)) { - const DNode viewer_node{context, bnode}; - for (const bNodeSocket *input_socket : bnode->input_sockets()) { - if (input_socket->is_available() && input_socket->is_logically_linked()) { - r_sockets_to_preview.add(DSocket{context, input_socket}); - } - } + const bNode *found_viewer_node = nullptr; + for (const bNode *viewer_node : group->nodes_by_type("GeometryNodeViewer")) { + if 
(STREQ(viewer_node->name, last_context->node_name)) { + found_viewer_node = viewer_node; + break; } } + if (found_viewer_node == nullptr) { + return; + } + + /* Not only mark the viewer node as having side effects, but also all group nodes it is contained + * in. */ + r_side_effect_nodes.add(compute_context_builder.hash(), + &find_viewer_lf_node(*found_viewer_node)); + compute_context_builder.pop(); + while (!compute_context_builder.is_empty()) { + r_side_effect_nodes.add(compute_context_builder.hash(), + &find_group_lf_node(*group_node_stack.pop())); + compute_context_builder.pop(); + } } -static void find_sockets_to_preview(NodesModifierData *nmd, - const ModifierEvalContext *ctx, - const DerivedNodeTree &tree, - Set<DSocket> &r_sockets_to_preview) +static void find_side_effect_nodes( + const NodesModifierData &nmd, + const ModifierEvalContext &ctx, + const bNodeTree &tree, + MultiValueMap<blender::ComputeContextHash, const lf::FunctionNode *> &r_side_effect_nodes) { - Main *bmain = DEG_get_bmain(ctx->depsgraph); + Main *bmain = DEG_get_bmain(ctx.depsgraph); /* Based on every visible spreadsheet context path, get a list of sockets that need to have their * intermediate geometries cached for display. */ Vector<SpaceSpreadsheet *> spreadsheets = find_spreadsheet_editors(bmain); for (SpaceSpreadsheet *sspreadsheet : spreadsheets) { - find_sockets_to_preview_for_spreadsheet(sspreadsheet, nmd, ctx, tree, r_sockets_to_preview); + find_side_effect_nodes_for_spreadsheet(*sspreadsheet, nmd, ctx, tree, r_side_effect_nodes); } } static void clear_runtime_data(NodesModifierData *nmd) { if (nmd->runtime_eval_log != nullptr) { - delete (geo_log::ModifierLog *)nmd->runtime_eval_log; + delete static_cast<GeoModifierLog *>(nmd->runtime_eval_log); nmd->runtime_eval_log = nullptr; } } @@ -1079,92 +1118,104 @@ static void store_output_attributes(GeometrySet &geometry, /** * Evaluate a node group to compute the output geometry. 
*/ -static GeometrySet compute_geometry(const DerivedNodeTree &tree, - Span<const bNode *> group_input_nodes, - const bNode &output_node, - GeometrySet input_geometry_set, - NodesModifierData *nmd, - const ModifierEvalContext *ctx) +static GeometrySet compute_geometry( + const bNodeTree &btree, + const blender::nodes::GeometryNodesLazyFunctionGraphInfo &lf_graph_info, + const bNode &output_node, + GeometrySet input_geometry_set, + NodesModifierData *nmd, + const ModifierEvalContext *ctx) { - blender::ResourceScope scope; - blender::LinearAllocator<> &allocator = scope.linear_allocator(); - blender::nodes::NodeMultiFunctions mf_by_node{tree}; + const blender::nodes::GeometryNodeLazyFunctionGraphMapping &mapping = lf_graph_info.mapping; + + Span<const lf::OutputSocket *> graph_inputs = mapping.group_input_sockets; + Vector<const lf::InputSocket *> graph_outputs; + for (const bNodeSocket *bsocket : output_node.input_sockets().drop_back(1)) { + const lf::InputSocket &socket = mapping.dummy_socket_map.lookup(bsocket)->as_input(); + graph_outputs.append(&socket); + } - Map<DOutputSocket, GMutablePointer> group_inputs; + Array<GMutablePointer> param_inputs(graph_inputs.size()); + Array<GMutablePointer> param_outputs(graph_outputs.size()); + Array<std::optional<lf::ValueUsage>> param_input_usages(graph_inputs.size()); + Array<lf::ValueUsage> param_output_usages(graph_outputs.size(), lf::ValueUsage::Used); + Array<bool> param_set_outputs(graph_outputs.size(), false); - const DTreeContext *root_context = &tree.root_context(); - for (const bNode *group_input_node : group_input_nodes) { - Span<const bNodeSocket *> group_input_sockets = group_input_node->output_sockets().drop_back( - 1); - if (group_input_sockets.is_empty()) { - continue; - } + blender::nodes::GeometryNodesLazyFunctionLogger lf_logger(lf_graph_info); + blender::nodes::GeometryNodesLazyFunctionSideEffectProvider lf_side_effect_provider( + lf_graph_info); - Span<const bNodeSocket *> remaining_input_sockets = 
group_input_sockets; + lf::GraphExecutor graph_executor{ + lf_graph_info.graph, graph_inputs, graph_outputs, &lf_logger, &lf_side_effect_provider}; - /* If the group expects a geometry as first input, use the geometry that has been passed to - * modifier. */ - const bNodeSocket *first_input_socket = group_input_sockets[0]; - if (first_input_socket->type == SOCK_GEOMETRY) { - GeometrySet *geometry_set_in = - allocator.construct<GeometrySet>(input_geometry_set).release(); - group_inputs.add_new({root_context, first_input_socket}, geometry_set_in); - remaining_input_sockets = remaining_input_sockets.drop_front(1); + blender::nodes::GeoNodesModifierData geo_nodes_modifier_data; + geo_nodes_modifier_data.depsgraph = ctx->depsgraph; + geo_nodes_modifier_data.self_object = ctx->object; + auto eval_log = std::make_unique<GeoModifierLog>(); + if (logging_enabled(ctx)) { + geo_nodes_modifier_data.eval_log = eval_log.get(); + } + MultiValueMap<blender::ComputeContextHash, const lf::FunctionNode *> r_side_effect_nodes; + find_side_effect_nodes(*nmd, *ctx, btree, r_side_effect_nodes); + geo_nodes_modifier_data.side_effect_nodes = &r_side_effect_nodes; + blender::nodes::GeoNodesLFUserData user_data; + user_data.modifier_data = &geo_nodes_modifier_data; + blender::bke::ModifierComputeContext modifier_compute_context{nullptr, nmd->modifier.name}; + user_data.compute_context = &modifier_compute_context; + + blender::LinearAllocator<> allocator; + Vector<GMutablePointer> inputs_to_destruct; + + int input_index; + LISTBASE_FOREACH_INDEX (bNodeSocket *, interface_socket, &btree.inputs, input_index) { + if (interface_socket->type == SOCK_GEOMETRY && input_index == 0) { + param_inputs[input_index] = &input_geometry_set; + continue; } - /* Initialize remaining group inputs. 
*/ - for (const bNodeSocket *socket : remaining_input_sockets) { - const CPPType &cpp_type = *socket->typeinfo->geometry_nodes_cpp_type; - void *value_in = allocator.allocate(cpp_type.size(), cpp_type.alignment()); - initialize_group_input(*nmd, *socket, value_in); - group_inputs.add_new({root_context, socket}, {cpp_type, value_in}); - } + const CPPType *type = interface_socket->typeinfo->geometry_nodes_cpp_type; + BLI_assert(type != nullptr); + void *value = allocator.allocate(type->size(), type->alignment()); + initialize_group_input(*nmd, *interface_socket, input_index, value); + param_inputs[input_index] = {type, value}; + inputs_to_destruct.append({type, value}); } - Vector<DInputSocket> group_outputs; - for (const bNodeSocket *socket_ref : output_node.input_sockets().drop_back(1)) { - group_outputs.append({root_context, socket_ref}); + for (const int i : graph_outputs.index_range()) { + const lf::InputSocket &socket = *graph_outputs[i]; + const CPPType &type = socket.type(); + void *buffer = allocator.allocate(type.size(), type.alignment()); + param_outputs[i] = {type, buffer}; } - std::optional<geo_log::GeoLogger> geo_logger; - - blender::modifiers::geometry_nodes::GeometryNodesEvaluationParams eval_params; - - if (logging_enabled(ctx)) { - Set<DSocket> preview_sockets; - find_sockets_to_preview(nmd, ctx, tree, preview_sockets); - eval_params.force_compute_sockets.extend(preview_sockets.begin(), preview_sockets.end()); - geo_logger.emplace(std::move(preview_sockets)); + lf::Context lf_context; + lf_context.storage = graph_executor.init_storage(allocator); + lf_context.user_data = &user_data; + lf::BasicParams lf_params{graph_executor, + param_inputs, + param_outputs, + param_input_usages, + param_output_usages, + param_set_outputs}; + graph_executor.execute(lf_params, lf_context); + graph_executor.destruct_storage(lf_context.storage); - geo_logger->log_input_geometry(input_geometry_set); + for (GMutablePointer &ptr : inputs_to_destruct) { + ptr.destruct(); } 
- /* Don't keep a reference to the input geometry components to avoid copies during evaluation. */ - input_geometry_set.clear(); - - eval_params.input_values = group_inputs; - eval_params.output_sockets = group_outputs; - eval_params.mf_by_node = &mf_by_node; - eval_params.modifier_ = nmd; - eval_params.depsgraph = ctx->depsgraph; - eval_params.self_object = ctx->object; - eval_params.geo_logger = geo_logger.has_value() ? &*geo_logger : nullptr; - blender::modifiers::geometry_nodes::evaluate_geometry_nodes(eval_params); + GeometrySet output_geometry_set = std::move(*static_cast<GeometrySet *>(param_outputs[0].get())); + store_output_attributes(output_geometry_set, *nmd, output_node, param_outputs); - GeometrySet output_geometry_set = std::move(*eval_params.r_output_values[0].get<GeometrySet>()); - - if (geo_logger.has_value()) { - geo_logger->log_output_geometry(output_geometry_set); - NodesModifierData *nmd_orig = (NodesModifierData *)BKE_modifier_get_original(ctx->object, - &nmd->modifier); - clear_runtime_data(nmd_orig); - nmd_orig->runtime_eval_log = new geo_log::ModifierLog(*geo_logger); + for (GMutablePointer &ptr : param_outputs) { + ptr.destruct(); } - store_output_attributes(output_geometry_set, *nmd, output_node, eval_params.r_output_values); - - for (GMutablePointer value : eval_params.r_output_values) { - value.destruct(); + if (logging_enabled(ctx)) { + NodesModifierData *nmd_orig = reinterpret_cast<NodesModifierData *>( + BKE_modifier_get_original(ctx->object, &nmd->modifier)); + delete static_cast<GeoModifierLog *>(nmd_orig->runtime_eval_log); + nmd_orig->runtime_eval_log = eval_log.release(); } return output_geometry_set; @@ -1225,27 +1276,18 @@ static void modifyGeometry(ModifierData *md, return; } + const bNodeTree &tree = *nmd->node_group; + tree.ensure_topology_cache(); check_property_socket_sync(ctx->object, md); - const bNodeTree &root_tree_ref = *nmd->node_group; - DerivedNodeTree tree{root_tree_ref}; - - if (tree.has_link_cycles()) { - 
BKE_modifier_set_error(ctx->object, md, "Node group has cycles"); - geometry_set.clear(); - return; - } - - Span<const bNode *> input_nodes = root_tree_ref.nodes_by_type("NodeGroupInput"); - Span<const bNode *> output_nodes = root_tree_ref.nodes_by_type("NodeGroupOutput"); - if (output_nodes.size() != 1) { - BKE_modifier_set_error(ctx->object, md, "Node group must have a single output node"); + const bNode *output_node = tree.group_output_node(); + if (output_node == nullptr) { + BKE_modifier_set_error(ctx->object, md, "Node group must have a group output node"); geometry_set.clear(); return; } - const bNode &output_node = *output_nodes[0]; - Span<const bNodeSocket *> group_outputs = output_node.input_sockets().drop_back(1); + Span<const bNodeSocket *> group_outputs = output_node->input_sockets().drop_back(1); if (group_outputs.is_empty()) { BKE_modifier_set_error(ctx->object, md, "Node group must have an output socket"); geometry_set.clear(); @@ -1259,6 +1301,14 @@ static void modifyGeometry(ModifierData *md, return; } + const blender::nodes::GeometryNodesLazyFunctionGraphInfo *lf_graph_info = + blender::nodes::ensure_geometry_nodes_lazy_function_graph(tree); + if (lf_graph_info == nullptr) { + BKE_modifier_set_error(ctx->object, md, "Cannot evaluate node group"); + geometry_set.clear(); + return; + } + bool use_orig_index_verts = false; bool use_orig_index_edges = false; bool use_orig_index_polys = false; @@ -1270,7 +1320,7 @@ static void modifyGeometry(ModifierData *md, } geometry_set = compute_geometry( - tree, input_nodes, output_node, std::move(geometry_set), nmd, ctx); + tree, *lf_graph_info, *output_node, std::move(geometry_set), nmd, ctx); if (geometry_set.has_mesh()) { /* Add #CD_ORIGINDEX layers if they don't exist already. 
This is required because the @@ -1342,6 +1392,16 @@ static NodesModifierData *get_modifier_data(Main &bmain, return reinterpret_cast<NodesModifierData *>(md); } +static GeoTreeLog *get_root_tree_log(const NodesModifierData &nmd) +{ + if (nmd.runtime_eval_log == nullptr) { + return nullptr; + } + GeoModifierLog &modifier_log = *static_cast<GeoModifierLog *>(nmd.runtime_eval_log); + blender::bke::ModifierComputeContext compute_context{nullptr, nmd.modifier.name}; + return &modifier_log.get_tree_log(compute_context.hash()); +} + static void attribute_search_update_fn( const bContext *C, void *arg, const char *str, uiSearchItems *items, const bool is_first) { @@ -1350,27 +1410,52 @@ static void attribute_search_update_fn( if (nmd == nullptr) { return; } - const geo_log::ModifierLog *modifier_log = static_cast<const geo_log::ModifierLog *>( - nmd->runtime_eval_log); - if (modifier_log == nullptr) { + if (nmd->node_group == nullptr) { return; } - const geo_log::GeometryValueLog *geometry_log = data.is_output ? - modifier_log->output_geometry_log() : - modifier_log->input_geometry_log(); - if (geometry_log == nullptr) { + GeoTreeLog *tree_log = get_root_tree_log(*nmd); + if (tree_log == nullptr) { return; } + tree_log->ensure_existing_attributes(); + nmd->node_group->ensure_topology_cache(); - Span<GeometryAttributeInfo> infos = geometry_log->attributes(); - - /* The shared attribute search code expects a span of pointers, so convert to that. 
*/ - Array<const GeometryAttributeInfo *> info_ptrs(infos.size()); - for (const int i : infos.index_range()) { - info_ptrs[i] = &infos[i]; + Vector<const bNodeSocket *> sockets_to_check; + if (data.is_output) { + for (const bNode *node : nmd->node_group->nodes_by_type("NodeGroupOutput")) { + for (const bNodeSocket *socket : node->input_sockets()) { + if (socket->type == SOCK_GEOMETRY) { + sockets_to_check.append(socket); + } + } + } + } + else { + for (const bNode *node : nmd->node_group->nodes_by_type("NodeGroupInput")) { + for (const bNodeSocket *socket : node->output_sockets()) { + if (socket->type == SOCK_GEOMETRY) { + sockets_to_check.append(socket); + } + } + } + } + Set<StringRef> names; + Vector<const GeometryAttributeInfo *> attributes; + for (const bNodeSocket *socket : sockets_to_check) { + const ValueLog *value_log = tree_log->find_socket_value_log(*socket); + if (value_log == nullptr) { + continue; + } + if (const GeometryInfoLog *geo_log = dynamic_cast<const GeometryInfoLog *>(value_log)) { + for (const GeometryAttributeInfo &attribute : geo_log->attributes) { + if (names.add(attribute.name)) { + attributes.append(&attribute); + } + } + } } blender::ui::attribute_search_add_items( - str, data.is_output, info_ptrs.as_span(), items, is_first); + str, data.is_output, attributes.as_span(), items, is_first); } static void attribute_search_exec_fn(bContext *C, void *data_v, void *item_v) @@ -1401,8 +1486,7 @@ static void add_attribute_search_button(const bContext &C, const bNodeSocket &socket, const bool is_output) { - const geo_log::ModifierLog *log = static_cast<geo_log::ModifierLog *>(nmd.runtime_eval_log); - if (log == nullptr) { + if (nmd.runtime_eval_log == nullptr) { uiItemR(layout, md_ptr, rna_path_attribute_name.c_str(), 0, "", ICON_NONE); return; } @@ -1627,15 +1711,14 @@ static void panel_draw(const bContext *C, Panel *panel) } /* Draw node warnings. 
*/ - if (nmd->runtime_eval_log != nullptr) { - const geo_log::ModifierLog &log = *static_cast<geo_log::ModifierLog *>(nmd->runtime_eval_log); - log.foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::NodeWarning &warning : node_log.warnings()) { - if (warning.type != geo_log::NodeWarningType::Info) { - uiItemL(layout, warning.message.c_str(), ICON_ERROR); - } + GeoTreeLog *tree_log = get_root_tree_log(*nmd); + if (tree_log != nullptr) { + tree_log->ensure_node_warnings(); + for (const NodeWarning &warning : tree_log->all_warnings) { + if (warning.type != NodeWarningType::Info) { + uiItemL(layout, warning.message.c_str(), ICON_ERROR); } - }); + } } modifier_panel_end(layout, ptr); @@ -1672,17 +1755,14 @@ static void internal_dependencies_panel_draw(const bContext *UNUSED(C), Panel *p PointerRNA *ptr = modifier_panel_get_property_pointers(panel, nullptr); NodesModifierData *nmd = static_cast<NodesModifierData *>(ptr->data); - if (nmd->runtime_eval_log == nullptr) { + GeoTreeLog *tree_log = get_root_tree_log(*nmd); + if (tree_log == nullptr) { return; } - const geo_log::ModifierLog &log = *static_cast<geo_log::ModifierLog *>(nmd->runtime_eval_log); - Map<std::string, eNamedAttrUsage> usage_by_attribute; - log.foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::UsedNamedAttribute &used_attribute : node_log.used_named_attributes()) { - usage_by_attribute.lookup_or_add_as(used_attribute.name, - used_attribute.usage) |= used_attribute.usage; - } - }); + + tree_log->ensure_used_named_attributes(); + const Map<std::string, NamedAttributeUsage> &usage_by_attribute = + tree_log->used_named_attributes; if (usage_by_attribute.is_empty()) { uiItemL(layout, IFACE_("No named attributes used"), ICON_INFO); @@ -1691,7 +1771,7 @@ static void internal_dependencies_panel_draw(const bContext *UNUSED(C), Panel *p struct NameWithUsage { StringRefNull name; - eNamedAttrUsage usage; + NamedAttributeUsage usage; }; Vector<NameWithUsage> 
sorted_used_attribute; @@ -1706,20 +1786,20 @@ static void internal_dependencies_panel_draw(const bContext *UNUSED(C), Panel *p for (const NameWithUsage &attribute : sorted_used_attribute) { const StringRefNull attribute_name = attribute.name; - const eNamedAttrUsage usage = attribute.usage; + const NamedAttributeUsage usage = attribute.usage; /* #uiLayoutRowWithHeading doesn't seem to work in this case. */ uiLayout *split = uiLayoutSplit(layout, 0.4f, false); std::stringstream ss; Vector<std::string> usages; - if ((usage & eNamedAttrUsage::Read) != eNamedAttrUsage::None) { + if ((usage & NamedAttributeUsage::Read) != NamedAttributeUsage::None) { usages.append(TIP_("Read")); } - if ((usage & eNamedAttrUsage::Write) != eNamedAttrUsage::None) { + if ((usage & NamedAttributeUsage::Write) != NamedAttributeUsage::None) { usages.append(TIP_("Write")); } - if ((usage & eNamedAttrUsage::Remove) != eNamedAttrUsage::None) { + if ((usage & NamedAttributeUsage::Remove) != NamedAttributeUsage::None) { usages.append(TIP_("Remove")); } for (const int i : usages.index_range()) { diff --git a/source/blender/modifiers/intern/MOD_nodes_evaluator.cc b/source/blender/modifiers/intern/MOD_nodes_evaluator.cc deleted file mode 100644 index dd7c87ca499..00000000000 --- a/source/blender/modifiers/intern/MOD_nodes_evaluator.cc +++ /dev/null @@ -1,1929 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#include "MOD_nodes_evaluator.hh" - -#include "BKE_node.h" -#include "BKE_type_conversions.hh" - -#include "NOD_geometry_exec.hh" -#include "NOD_socket_declarations.hh" - -#include "DEG_depsgraph_query.h" - -#include "FN_field.hh" -#include "FN_field_cpp_type.hh" -#include "FN_multi_function.hh" - -#include "BLT_translation.h" - -#include "BLI_enumerable_thread_specific.hh" -#include "BLI_generic_value_map.hh" -#include "BLI_stack.hh" -#include "BLI_task.h" -#include "BLI_task.hh" -#include "BLI_vector_set.hh" - -#include <chrono> - -namespace blender::modifiers::geometry_nodes { - 
-using fn::Field; -using fn::GField; -using fn::ValueOrField; -using fn::ValueOrFieldCPPType; -using nodes::GeoNodeExecParams; -using namespace fn::multi_function_types; - -enum class ValueUsage : uint8_t { - /* The value is definitely used. */ - Required, - /* The value may be used. */ - Maybe, - /* The value will definitely not be used. */ - Unused, -}; - -struct SingleInputValue { - /** - * Points either to null or to a value of the type of input. - */ - void *value = nullptr; -}; - -struct MultiInputValue { - /** - * Ordered sockets connected to this multi-input. - */ - Vector<DSocket> origins; - /** - * A value for every origin socket. The order is determined by #origins. - * Note, the same origin can occur multiple times. However, it is guaranteed that values coming - * from the same origin have the same value (the pointer is different, but they point to values - * that would compare equal). - */ - Vector<void *> values; - /** - * Number of non-null values. - */ - int provided_value_count = 0; - - bool all_values_available() const - { - return this->missing_values() == 0; - } - - int missing_values() const - { - return this->values.size() - this->provided_value_count; - } - - void add_value(const DSocket origin, void *value) - { - const int index = this->find_available_index(origin); - this->values[index] = value; - this->provided_value_count++; - } - - private: - int find_available_index(DSocket origin) const - { - for (const int i : origins.index_range()) { - if (values[i] != nullptr) { - continue; - } - if (origins[i] != origin) { - continue; - } - return i; - } - BLI_assert_unreachable(); - return -1; - } -}; - -struct InputState { - - /** - * Type of the socket. If this is null, the socket should just be ignored. - */ - const CPPType *type = nullptr; - - /** - * Value of this input socket. By default, the value is empty. When other nodes are done - * computing their outputs, the computed values will be forwarded to linked input sockets. 
- * The value will then live here until it is consumed by the node or it was found that the value - * is not needed anymore. - * Whether the `single` or `multi` value is used depends on the socket. - */ - union { - SingleInputValue *single; - MultiInputValue *multi; - } value; - - /** - * How the node intends to use this input. By default all inputs may be used. Based on which - * outputs are used, a node can tell the evaluator that an input will definitely be used or is - * never used. This allows the evaluator to free values early, avoid copies and other unnecessary - * computations. - */ - ValueUsage usage = ValueUsage::Maybe; - - /** - * True when this input is/was used for an execution. While a node is running, only the inputs - * that have this set to true are allowed to be used. This makes sure that inputs created while - * the node is running correctly trigger the node to run again. Furthermore, it gives the node a - * consistent view of which inputs are available that does not change unexpectedly. - * - * While the node is running, this can be checked without a lock, because no one is writing to - * it. If this is true, the value can be read without a lock as well, because the value is not - * changed by others anymore. - */ - bool was_ready_for_execution = false; - - /** - * True when this input has to be computed for logging/debugging purposes, regardless of whether - * it is needed for some output. - */ - bool force_compute = false; -}; - -struct OutputState { - /** - * If this output has been computed and forwarded already. If this is true, the value is not - * computed/forwarded again. - */ - bool has_been_computed = false; - - /** - * Keeps track of how the output value is used. If a connected input becomes required, this - * output has to become required as well. The output becomes ignored when it has zero potential - * users that are counted below. 
- */ - ValueUsage output_usage = ValueUsage::Maybe; - - /** - * This is a copy of `output_usage` that is done right before node execution starts. This is - * done so that the node gets a consistent view of what outputs are used, even when this changes - * while the node is running (the node might be reevaluated in that case). - * - * While the node is running, this can be checked without a lock, because no one is writing to - * it. - */ - ValueUsage output_usage_for_execution = ValueUsage::Maybe; - - /** - * Counts how many times the value from this output might be used. If this number reaches zero, - * the output is not needed anymore. - */ - int potential_users = 0; -}; - -enum class NodeScheduleState { - /** - * Default state of every node. - */ - NotScheduled, - /** - * The node has been added to the task group and will be executed by it in the future. - */ - Scheduled, - /** - * The node is currently running. - */ - Running, - /** - * The node is running and has been rescheduled while running. In this case the node will run - * again. However, we don't add it to the task group immediately, because then the node might run - * twice at the same time, which is not allowed. Instead, once the node is done running, it will - * reschedule itself. - */ - RunningAndRescheduled, -}; - -struct NodeState { - /** - * Needs to be locked when any data in this state is accessed that is not explicitly marked as - * otherwise. - */ - std::mutex mutex; - - /** - * States of the individual input and output sockets. One can index into these arrays without - * locking. However, to access the data inside a lock is generally necessary. - * - * These spans have to be indexed with the socket index. Unavailable sockets have a state as - * well. Maybe we can handle unavailable sockets differently in Blender in general, so I did not - * want to add complexity around it here. 
- */ - MutableSpan<InputState> inputs; - MutableSpan<OutputState> outputs; - - /** - * Most nodes have inputs that are always required. Those have special handling to avoid an extra - * call to the node execution function. - */ - bool non_lazy_inputs_handled = false; - - /** - * Used to check that nodes that don't support laziness do not run more than once. - */ - bool has_been_executed = false; - - /** - * Becomes true when the node will never be executed again and its inputs are destructed. - * Generally, a node has finished once all of its outputs with (potential) users have been - * computed. - */ - bool node_has_finished = false; - - /** - * Counts the number of values that still have to be forwarded to this node until it should run - * again. It counts values from a multi input socket separately. - * This is used as an optimization so that nodes are not scheduled unnecessarily in many cases. - */ - int missing_required_inputs = 0; - - /** - * A node is always in one specific schedule state. This helps to ensure that the same node does - * not run twice at the same time accidentally. - */ - NodeScheduleState schedule_state = NodeScheduleState::NotScheduled; -}; - -/** - * Container for a node and its state. Packing them into a single struct allows the use of - * `VectorSet` instead of a `Map` for `node_states_` which simplifies parallel loops over all - * states. - * - * Equality operators and a hash function for `DNode` are provided so that one can lookup this type - * in `node_states_` just with a `DNode`. - */ -struct NodeWithState { - DNode node; - /* Store a pointer instead of `NodeState` directly to keep it small and movable. 
*/ - NodeState *state = nullptr; - - friend bool operator==(const NodeWithState &a, const NodeWithState &b) - { - return a.node == b.node; - } - - friend bool operator==(const NodeWithState &a, const DNode &b) - { - return a.node == b; - } - - friend bool operator==(const DNode &a, const NodeWithState &b) - { - return a == b.node; - } - - uint64_t hash() const - { - return node.hash(); - } - - static uint64_t hash_as(const DNode &node) - { - return node.hash(); - } -}; - -class GeometryNodesEvaluator; - -/** - * Utility class that wraps a node whose state is locked. Having this is a separate class is useful - * because it allows methods to communicate that they expect the node to be locked. - */ -class LockedNode : NonCopyable, NonMovable { - public: - /** - * This is the node that is currently locked. - */ - const DNode node; - NodeState &node_state; - - /** - * Used to delay notifying (and therefore locking) other nodes until the current node is not - * locked anymore. This might not be strictly necessary to avoid deadlocks in the current code, - * but it is a good measure to avoid accidentally adding a deadlock later on. By not locking - * more than one node per thread at a time, deadlocks are avoided. - * - * The notifications will be send right after the node is not locked anymore. - */ - Vector<DOutputSocket> delayed_required_outputs; - Vector<DOutputSocket> delayed_unused_outputs; - Vector<DNode> delayed_scheduled_nodes; - - LockedNode(const DNode node, NodeState &node_state) : node(node), node_state(node_state) - { - } -}; - -static const CPPType *get_socket_cpp_type(const bNodeSocket &socket) -{ - const bNodeSocketType *typeinfo = socket.typeinfo; - if (typeinfo->geometry_nodes_cpp_type == nullptr) { - return nullptr; - } - const CPPType *type = typeinfo->geometry_nodes_cpp_type; - if (type == nullptr) { - return nullptr; - } - /* The evaluator only supports types that have special member functions. 
*/ - if (!type->has_special_member_functions()) { - return nullptr; - } - return type; -} - -static const CPPType *get_socket_cpp_type(const DSocket socket) -{ - return get_socket_cpp_type(*socket); -} - -/** - * \note This is not supposed to be a long term solution. Eventually we want that nodes can - * specify more complex defaults (other than just single values) in their socket declarations. - */ -static bool get_implicit_socket_input(const bNodeSocket &socket, void *r_value) -{ - const bNode &node = socket.owner_node(); - const nodes::NodeDeclaration *node_declaration = node.runtime->declaration; - if (node_declaration == nullptr) { - return false; - } - const nodes::SocketDeclaration &socket_declaration = *node_declaration->inputs()[socket.index()]; - if (socket_declaration.input_field_type() == nodes::InputSocketFieldType::Implicit) { - const bNode &bnode = socket.owner_node(); - if (socket.typeinfo->type == SOCK_VECTOR) { - if (bnode.type == GEO_NODE_SET_CURVE_HANDLES) { - StringRef side = ((NodeGeometrySetCurveHandlePositions *)bnode.storage)->mode == - GEO_NODE_CURVE_HANDLE_LEFT ? 
- "handle_left" : - "handle_right"; - new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>(side)); - return true; - } - if (bnode.type == GEO_NODE_EXTRUDE_MESH) { - new (r_value) - ValueOrField<float3>(Field<float3>(std::make_shared<bke::NormalFieldInput>())); - return true; - } - new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>("position")); - return true; - } - if (socket.typeinfo->type == SOCK_INT) { - if (ELEM(bnode.type, FN_NODE_RANDOM_VALUE, GEO_NODE_INSTANCE_ON_POINTS)) { - new (r_value) - ValueOrField<int>(Field<int>(std::make_shared<bke::IDAttributeFieldInput>())); - return true; - } - new (r_value) ValueOrField<int>(Field<int>(std::make_shared<fn::IndexFieldInput>())); - return true; - } - } - return false; -} - -static void get_socket_value(const bNodeSocket &socket, void *r_value) -{ - if (get_implicit_socket_input(socket, r_value)) { - return; - } - - const bNodeSocketType *typeinfo = socket.typeinfo; - typeinfo->get_geometry_nodes_cpp_value(socket, r_value); -} - -static bool node_supports_laziness(const DNode node) -{ - return node->typeinfo->geometry_node_execute_supports_laziness; -} - -struct NodeTaskRunState { - /** The node that should be run on the same thread after the current node finished. */ - DNode next_node_to_run; -}; - -/** Implements the callbacks that might be called when a node is executed. 
*/ -class NodeParamsProvider : public nodes::GeoNodeExecParamsProvider { - private: - GeometryNodesEvaluator &evaluator_; - NodeState &node_state_; - NodeTaskRunState *run_state_; - - public: - NodeParamsProvider(GeometryNodesEvaluator &evaluator, - DNode dnode, - NodeState &node_state, - NodeTaskRunState *run_state); - - bool can_get_input(StringRef identifier) const override; - bool can_set_output(StringRef identifier) const override; - GMutablePointer extract_input(StringRef identifier) override; - Vector<GMutablePointer> extract_multi_input(StringRef identifier) override; - GPointer get_input(StringRef identifier) const override; - GMutablePointer alloc_output_value(const CPPType &type) override; - void set_output(StringRef identifier, GMutablePointer value) override; - void set_input_unused(StringRef identifier) override; - bool output_is_required(StringRef identifier) const override; - - bool lazy_require_input(StringRef identifier) override; - bool lazy_output_is_required(StringRef identifier) const override; - - void set_default_remaining_outputs() override; -}; - -class GeometryNodesEvaluator { - private: - /** - * This allocator lives on after the evaluator has been destructed. Therefore outputs of the - * entire evaluator should be allocated here. - */ - LinearAllocator<> &outer_allocator_; - /** - * A local linear allocator for each thread. Only use this for values that do not need to live - * longer than the lifetime of the evaluator itself. Considerations for the future: - * - We could use an allocator that can free here, some temporary values don't live long. - * - If we ever run into false sharing bottlenecks, we could use local allocators that allocate - * on cache line boundaries. Note, just because a value is allocated in one specific thread, - * does not mean that it will only be used by that thread. 
- */ - threading::EnumerableThreadSpecific<LinearAllocator<>> local_allocators_; - - /** - * Every node that is reachable from the output gets its own state. Once all states have been - * constructed, this map can be used for lookups from multiple threads. - */ - VectorSet<NodeWithState> node_states_; - - /** - * Contains all the tasks for the nodes that are currently scheduled. - */ - TaskPool *task_pool_ = nullptr; - - GeometryNodesEvaluationParams ¶ms_; - const blender::bke::DataTypeConversions &conversions_; - - friend NodeParamsProvider; - - public: - GeometryNodesEvaluator(GeometryNodesEvaluationParams ¶ms) - : outer_allocator_(params.allocator), - params_(params), - conversions_(blender::bke::get_implicit_type_conversions()) - { - } - - void execute() - { - task_pool_ = BLI_task_pool_create(this, TASK_PRIORITY_HIGH); - - this->create_states_for_reachable_nodes(); - this->forward_group_inputs(); - this->schedule_initial_nodes(); - - /* This runs until all initially requested inputs have been computed. */ - BLI_task_pool_work_and_wait(task_pool_); - BLI_task_pool_free(task_pool_); - - this->extract_group_outputs(); - this->destruct_node_states(); - } - - void create_states_for_reachable_nodes() - { - /* This does a depth first search for all the nodes that are reachable from the group - * outputs. This finds all nodes that are relevant. */ - Stack<DNode> nodes_to_check; - /* Start at the output sockets. */ - for (const DInputSocket &socket : params_.output_sockets) { - nodes_to_check.push(socket.node()); - } - for (const DSocket &socket : params_.force_compute_sockets) { - nodes_to_check.push(socket.node()); - } - /* Use the local allocator because the states do not need to outlive the evaluator. */ - LinearAllocator<> &allocator = local_allocators_.local(); - while (!nodes_to_check.is_empty()) { - const DNode node = nodes_to_check.pop(); - if (node_states_.contains_as(node)) { - /* This node has been handled already. 
*/ - continue; - } - /* Create a new state for the node. */ - NodeState &node_state = *allocator.construct<NodeState>().release(); - node_states_.add_new({node, &node_state}); - - /* Push all linked origins on the stack. */ - for (const bNodeSocket *input : node->input_sockets()) { - const DInputSocket dinput{node.context(), input}; - dinput.foreach_origin_socket( - [&](const DSocket origin) { nodes_to_check.push(origin.node()); }); - } - } - - /* Initialize the more complex parts of the node states in parallel. At this point no new - * node states are added anymore, so it is safe to lookup states from `node_states_` from - * multiple threads. */ - threading::parallel_for( - IndexRange(node_states_.size()), 50, [&, this](const IndexRange range) { - LinearAllocator<> &allocator = this->local_allocators_.local(); - for (const NodeWithState &item : node_states_.as_span().slice(range)) { - this->initialize_node_state(item.node, *item.state, allocator); - } - }); - - /* Mark input sockets that have to be computed. */ - for (const DSocket &socket : params_.force_compute_sockets) { - NodeState &node_state = *node_states_.lookup_key_as(socket.node()).state; - if (socket->is_input()) { - node_state.inputs[socket->index()].force_compute = true; - } - } - } - - void initialize_node_state(const DNode node, NodeState &node_state, LinearAllocator<> &allocator) - { - /* Construct arrays of the correct size. */ - node_state.inputs = allocator.construct_array<InputState>(node->input_sockets().size()); - node_state.outputs = allocator.construct_array<OutputState>(node->output_sockets().size()); - - /* Initialize input states. */ - for (const int i : node->input_sockets().index_range()) { - InputState &input_state = node_state.inputs[i]; - const DInputSocket socket = node.input(i); - if (!socket->is_available()) { - /* Unavailable sockets should never be used. 
*/ - input_state.type = nullptr; - input_state.usage = ValueUsage::Unused; - continue; - } - const CPPType *type = get_socket_cpp_type(socket); - input_state.type = type; - if (type == nullptr) { - /* This is not a known data socket, it shouldn't be used. */ - input_state.usage = ValueUsage::Unused; - continue; - } - /* Construct the correct struct that can hold the input(s). */ - if (socket->is_multi_input()) { - input_state.value.multi = allocator.construct<MultiInputValue>().release(); - MultiInputValue &multi_value = *input_state.value.multi; - /* Count how many values should be added until the socket is complete. */ - socket.foreach_origin_socket([&](DSocket origin) { multi_value.origins.append(origin); }); - /* If no links are connected, we do read the value from socket itself. */ - if (multi_value.origins.is_empty()) { - multi_value.origins.append(socket); - } - multi_value.values.resize(multi_value.origins.size(), nullptr); - } - else { - input_state.value.single = allocator.construct<SingleInputValue>().release(); - } - } - /* Initialize output states. */ - for (const int i : node->output_sockets().index_range()) { - OutputState &output_state = node_state.outputs[i]; - const DOutputSocket socket = node.output(i); - if (!socket->is_available()) { - /* Unavailable outputs should never be used. */ - output_state.output_usage = ValueUsage::Unused; - continue; - } - const CPPType *type = get_socket_cpp_type(socket); - if (type == nullptr) { - /* Non data sockets should never be used. */ - output_state.output_usage = ValueUsage::Unused; - continue; - } - /* Count the number of potential users for this socket. */ - socket.foreach_target_socket( - [&, this](const DInputSocket target_socket, - const DOutputSocket::TargetSocketPathInfo &UNUSED(path_info)) { - const DNode target_node = target_socket.node(); - if (!this->node_states_.contains_as(target_node)) { - /* The target node is not computed because it is not computed to the output. 
*/ - return; - } - output_state.potential_users += 1; - }); - if (output_state.potential_users == 0) { - /* If it does not have any potential users, it is unused. It might become required again in - * `schedule_initial_nodes`. */ - output_state.output_usage = ValueUsage::Unused; - } - } - } - - void destruct_node_states() - { - threading::parallel_for( - IndexRange(node_states_.size()), 50, [&, this](const IndexRange range) { - for (const NodeWithState &item : node_states_.as_span().slice(range)) { - this->destruct_node_state(item.node, *item.state); - } - }); - } - - void destruct_node_state(const DNode node, NodeState &node_state) - { - /* Need to destruct stuff manually, because it's allocated by a custom allocator. */ - for (const int i : node->input_sockets().index_range()) { - InputState &input_state = node_state.inputs[i]; - if (input_state.type == nullptr) { - continue; - } - const bNodeSocket &bsocket = node->input_socket(i); - if (bsocket.is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - for (void *value : multi_value.values) { - if (value != nullptr) { - input_state.type->destruct(value); - } - } - multi_value.~MultiInputValue(); - } - else { - SingleInputValue &single_value = *input_state.value.single; - void *value = single_value.value; - if (value != nullptr) { - input_state.type->destruct(value); - } - single_value.~SingleInputValue(); - } - } - - destruct_n(node_state.inputs.data(), node_state.inputs.size()); - destruct_n(node_state.outputs.data(), node_state.outputs.size()); - - node_state.~NodeState(); - } - - void forward_group_inputs() - { - for (auto &&item : params_.input_values.items()) { - const DOutputSocket socket = item.key; - GMutablePointer value = item.value; - - const DNode node = socket.node(); - if (!node_states_.contains_as(node)) { - /* The socket is not connected to any output. 
*/ - this->log_socket_value({socket}, value); - value.destruct(); - continue; - } - this->forward_output(socket, value, nullptr); - } - } - - void schedule_initial_nodes() - { - for (const DInputSocket &socket : params_.output_sockets) { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - this->with_locked_node(node, node_state, nullptr, [&](LockedNode &locked_node) { - /* Setting an input as required will schedule any linked node. */ - this->set_input_required(locked_node, socket); - }); - } - for (const DSocket socket : params_.force_compute_sockets) { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - this->with_locked_node(node, node_state, nullptr, [&](LockedNode &locked_node) { - if (socket->is_input()) { - this->set_input_required(locked_node, DInputSocket(socket)); - } - else { - OutputState &output_state = node_state.outputs[socket->index()]; - output_state.output_usage = ValueUsage::Required; - this->schedule_node(locked_node); - } - }); - } - } - - void schedule_node(LockedNode &locked_node) - { - switch (locked_node.node_state.schedule_state) { - case NodeScheduleState::NotScheduled: { - /* The node will be scheduled once it is not locked anymore. We could schedule the node - * right here, but that would result in a deadlock if the task pool decides to run the task - * immediately (this only happens when Blender is started with a single thread). */ - locked_node.node_state.schedule_state = NodeScheduleState::Scheduled; - locked_node.delayed_scheduled_nodes.append(locked_node.node); - break; - } - case NodeScheduleState::Scheduled: { - /* Scheduled already, nothing to do. */ - break; - } - case NodeScheduleState::Running: { - /* Reschedule node while it is running. - * The node will reschedule itself when it is done. 
*/ - locked_node.node_state.schedule_state = NodeScheduleState::RunningAndRescheduled; - break; - } - case NodeScheduleState::RunningAndRescheduled: { - /* Scheduled already, nothing to do. */ - break; - } - } - } - - static void run_node_from_task_pool(TaskPool *task_pool, void *task_data) - { - void *user_data = BLI_task_pool_user_data(task_pool); - GeometryNodesEvaluator &evaluator = *(GeometryNodesEvaluator *)user_data; - const NodeWithState *root_node_with_state = (const NodeWithState *)task_data; - - /* First, the node provided by the task pool is executed. During the execution other nodes - * might be scheduled. One of those nodes is not added to the task pool but is executed in the - * loop below directly. This has two main benefits: - * - Fewer round trips through the task pool which add threading overhead. - * - Helps with cpu cache efficiency, because a thread is more likely to process data that it - * has processed shortly before. - */ - DNode next_node_to_run = root_node_with_state->node; - while (next_node_to_run) { - NodeTaskRunState run_state; - evaluator.node_task_run(next_node_to_run, &run_state); - next_node_to_run = run_state.next_node_to_run; - } - } - - void node_task_run(const DNode node, NodeTaskRunState *run_state) - { - /* These nodes are sometimes scheduled. We could also check for them in other places, but - * it's the easiest to do it here. */ - if (ELEM(node->type, NODE_GROUP_INPUT, NODE_GROUP_OUTPUT)) { - return; - } - - NodeState &node_state = *node_states_.lookup_key_as(node).state; - - const bool do_execute_node = this->node_task_preprocessing(node, node_state, run_state); - - /* Only execute the node if all prerequisites are met. There has to be an output that is - * required and all required inputs have to be provided already. 
*/ - if (do_execute_node) { - this->execute_node(node, node_state, run_state); - } - - this->node_task_postprocessing(node, node_state, do_execute_node, run_state); - } - - bool node_task_preprocessing(const DNode node, - NodeState &node_state, - NodeTaskRunState *run_state) - { - bool do_execute_node = false; - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - BLI_assert(node_state.schedule_state == NodeScheduleState::Scheduled); - node_state.schedule_state = NodeScheduleState::Running; - - /* Early return if the node has finished already. */ - if (locked_node.node_state.node_has_finished) { - return; - } - /* Prepare outputs and check if actually any new outputs have to be computed. */ - if (!this->prepare_node_outputs_for_execution(locked_node)) { - return; - } - /* Initialize inputs that don't support laziness. This is done after at least one output is - * required and before we check that all required inputs are provided. This reduces the - * number of "round-trips" through the task pool by one for most nodes. */ - if (!node_state.non_lazy_inputs_handled) { - this->require_non_lazy_inputs(locked_node); - node_state.non_lazy_inputs_handled = true; - } - /* Prepare inputs and check if all required inputs are provided. */ - if (!this->prepare_node_inputs_for_execution(locked_node)) { - return; - } - do_execute_node = true; - }); - return do_execute_node; - } - - /* A node is finished when it has computed all outputs that may be used have been computed and - * when no input is still forced to be computed. */ - bool finish_node_if_possible(LockedNode &locked_node) - { - if (locked_node.node_state.node_has_finished) { - /* Early return in case this node is known to have finished already. */ - return true; - } - - /* Check if there is any output that might be used but has not been computed yet. 
*/ - for (OutputState &output_state : locked_node.node_state.outputs) { - if (output_state.has_been_computed) { - continue; - } - if (output_state.output_usage != ValueUsage::Unused) { - return false; - } - } - - /* Check if there is an input that still has to be computed. */ - for (InputState &input_state : locked_node.node_state.inputs) { - if (input_state.force_compute) { - if (!input_state.was_ready_for_execution) { - return false; - } - } - } - - /* If there are no remaining outputs, all the inputs can be destructed and/or can become - * unused. This can also trigger a chain reaction where nodes to the left become finished - * too. */ - for (const int i : locked_node.node->input_sockets().index_range()) { - const DInputSocket socket = locked_node.node.input(i); - InputState &input_state = locked_node.node_state.inputs[i]; - if (input_state.usage == ValueUsage::Maybe) { - this->set_input_unused(locked_node, socket); - } - else if (input_state.usage == ValueUsage::Required) { - /* The value was required, so it cannot become unused. However, we can destruct the - * value. */ - this->destruct_input_value_if_exists(locked_node, socket); - } - } - locked_node.node_state.node_has_finished = true; - return true; - } - - bool prepare_node_outputs_for_execution(LockedNode &locked_node) - { - bool execution_is_necessary = false; - for (OutputState &output_state : locked_node.node_state.outputs) { - /* Update the output usage for execution to the latest value. */ - output_state.output_usage_for_execution = output_state.output_usage; - if (!output_state.has_been_computed) { - if (output_state.output_usage == ValueUsage::Required) { - /* Only evaluate when there is an output that is required but has not been computed. 
*/ - execution_is_necessary = true; - } - } - } - return execution_is_necessary; - } - - void require_non_lazy_inputs(LockedNode &locked_node) - { - this->foreach_non_lazy_input(locked_node, [&](const DInputSocket socket) { - this->set_input_required(locked_node, socket); - }); - } - - void foreach_non_lazy_input(LockedNode &locked_node, FunctionRef<void(DInputSocket socket)> fn) - { - if (node_supports_laziness(locked_node.node)) { - /* In the future only some of the inputs may support laziness. */ - return; - } - /* Nodes that don't support laziness require all inputs. */ - for (const int i : locked_node.node->input_sockets().index_range()) { - InputState &input_state = locked_node.node_state.inputs[i]; - if (input_state.type == nullptr) { - /* Ignore unavailable/non-data sockets. */ - continue; - } - fn(locked_node.node.input(i)); - } - } - - /** - * Checks if requested inputs are available and "marks" all the inputs that are available - * during the node execution. Inputs that are provided after this function ends but before the - * node is executed, cannot be read by the node in the execution (note that this only affects - * nodes that support lazy inputs). - */ - bool prepare_node_inputs_for_execution(LockedNode &locked_node) - { - for (const int i : locked_node.node_state.inputs.index_range()) { - InputState &input_state = locked_node.node_state.inputs[i]; - if (input_state.type == nullptr) { - /* Ignore unavailable and non-data sockets. */ - continue; - } - const DInputSocket socket = locked_node.node.input(i); - const bool is_required = input_state.usage == ValueUsage::Required; - - /* No need to check this socket again. */ - if (input_state.was_ready_for_execution) { - continue; - } - - if (socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - /* Checks if all the linked sockets have been provided already. 
*/ - if (multi_value.all_values_available()) { - input_state.was_ready_for_execution = true; - } - else if (is_required) { - /* The input is required but is not fully provided yet. Therefore the node cannot be - * executed yet. */ - return false; - } - } - else { - SingleInputValue &single_value = *input_state.value.single; - if (single_value.value != nullptr) { - input_state.was_ready_for_execution = true; - } - else if (is_required) { - /* The input is required but has not been provided yet. Therefore the node cannot be - * executed yet. */ - return false; - } - } - } - /* All required inputs have been provided. */ - return true; - } - - /** - * Actually execute the node. All the required inputs are available and at least one output is - * required. - */ - void execute_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) - { - const bNode &bnode = *node; - - if (node_state.has_been_executed) { - if (!node_supports_laziness(node)) { - /* Nodes that don't support laziness must not be executed more than once. */ - BLI_assert_unreachable(); - } - } - node_state.has_been_executed = true; - - /* Use the geometry node execute callback if it exists. */ - if (bnode.typeinfo->geometry_node_execute != nullptr) { - this->execute_geometry_node(node, node_state, run_state); - return; - } - - /* Use the multi-function implementation if it exists. 
*/ - const nodes::NodeMultiFunctions::Item &fn_item = params_.mf_by_node->try_get(node); - if (fn_item.fn != nullptr) { - this->execute_multi_function_node(node, fn_item, node_state, run_state); - return; - } - - this->execute_unknown_node(node, node_state, run_state); - } - - void execute_geometry_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) - { - using Clock = std::chrono::steady_clock; - const bNode &bnode = *node; - - NodeParamsProvider params_provider{*this, node, node_state, run_state}; - GeoNodeExecParams params{params_provider}; - Clock::time_point begin = Clock::now(); - bnode.typeinfo->geometry_node_execute(params); - Clock::time_point end = Clock::now(); - const std::chrono::microseconds duration = - std::chrono::duration_cast<std::chrono::microseconds>(end - begin); - if (params_.geo_logger != nullptr) { - params_.geo_logger->local().log_execution_time(node, duration); - } - } - - void execute_multi_function_node(const DNode node, - const nodes::NodeMultiFunctions::Item &fn_item, - NodeState &node_state, - NodeTaskRunState *run_state) - { - LinearAllocator<> &allocator = local_allocators_.local(); - - bool any_input_is_field = false; - Vector<const void *, 16> input_values; - Vector<const ValueOrFieldCPPType *, 16> input_types; - for (const int i : node->input_sockets().index_range()) { - const bNodeSocket &bsocket = node->input_socket(i); - if (!bsocket.is_available()) { - continue; - } - BLI_assert(!bsocket.is_multi_input()); - InputState &input_state = node_state.inputs[i]; - BLI_assert(input_state.was_ready_for_execution); - SingleInputValue &single_value = *input_state.value.single; - BLI_assert(single_value.value != nullptr); - const ValueOrFieldCPPType &field_cpp_type = static_cast<const ValueOrFieldCPPType &>( - *input_state.type); - input_values.append(single_value.value); - input_types.append(&field_cpp_type); - if (field_cpp_type.is_field(single_value.value)) { - any_input_is_field = true; - } - } - - if 
(any_input_is_field) { - this->execute_multi_function_node__field( - node, fn_item, node_state, allocator, input_values, input_types, run_state); - } - else { - this->execute_multi_function_node__value( - node, *fn_item.fn, node_state, allocator, input_values, input_types, run_state); - } - } - - void execute_multi_function_node__field(const DNode node, - const nodes::NodeMultiFunctions::Item &fn_item, - NodeState &node_state, - LinearAllocator<> &allocator, - Span<const void *> input_values, - Span<const ValueOrFieldCPPType *> input_types, - NodeTaskRunState *run_state) - { - Vector<GField> input_fields; - for (const int i : input_values.index_range()) { - const void *input_value_or_field = input_values[i]; - const ValueOrFieldCPPType &field_cpp_type = *input_types[i]; - input_fields.append(field_cpp_type.as_field(input_value_or_field)); - } - - std::shared_ptr<fn::FieldOperation> operation; - if (fn_item.owned_fn) { - operation = std::make_shared<fn::FieldOperation>(fn_item.owned_fn, std::move(input_fields)); - } - else { - operation = std::make_shared<fn::FieldOperation>(*fn_item.fn, std::move(input_fields)); - } - - int output_index = 0; - for (const int i : node->output_sockets().index_range()) { - const bNodeSocket &bsocket = node->output_socket(i); - if (!bsocket.is_available()) { - continue; - } - OutputState &output_state = node_state.outputs[i]; - const DOutputSocket socket{node.context(), &bsocket}; - const ValueOrFieldCPPType *cpp_type = static_cast<const ValueOrFieldCPPType *>( - get_socket_cpp_type(bsocket)); - GField new_field{operation, output_index}; - void *buffer = allocator.allocate(cpp_type->size(), cpp_type->alignment()); - cpp_type->construct_from_field(buffer, std::move(new_field)); - this->forward_output(socket, {cpp_type, buffer}, run_state); - output_state.has_been_computed = true; - output_index++; - } - } - - void execute_multi_function_node__value(const DNode node, - const MultiFunction &fn, - NodeState &node_state, - LinearAllocator<> 
&allocator, - Span<const void *> input_values, - Span<const ValueOrFieldCPPType *> input_types, - NodeTaskRunState *run_state) - { - MFParamsBuilder params{fn, 1}; - for (const int i : input_values.index_range()) { - const void *input_value_or_field = input_values[i]; - const ValueOrFieldCPPType &field_cpp_type = *input_types[i]; - const CPPType &base_type = field_cpp_type.base_type(); - const void *input_value = field_cpp_type.get_value_ptr(input_value_or_field); - params.add_readonly_single_input(GVArray::ForSingleRef(base_type, 1, input_value)); - } - - Vector<GMutablePointer, 16> output_buffers; - for (const int i : node->output_sockets().index_range()) { - const DOutputSocket socket = node.output(i); - if (!socket->is_available()) { - output_buffers.append({}); - continue; - } - const ValueOrFieldCPPType *value_or_field_type = static_cast<const ValueOrFieldCPPType *>( - get_socket_cpp_type(socket)); - const CPPType &base_type = value_or_field_type->base_type(); - void *value_or_field_buffer = allocator.allocate(value_or_field_type->size(), - value_or_field_type->alignment()); - value_or_field_type->default_construct(value_or_field_buffer); - void *value_buffer = value_or_field_type->get_value_ptr(value_or_field_buffer); - base_type.destruct(value_buffer); - params.add_uninitialized_single_output(GMutableSpan{base_type, value_buffer, 1}); - output_buffers.append({value_or_field_type, value_or_field_buffer}); - } - - MFContextBuilder context; - fn.call(IndexRange(1), params, context); - - for (const int i : output_buffers.index_range()) { - GMutablePointer buffer = output_buffers[i]; - if (buffer.get() == nullptr) { - continue; - } - const DOutputSocket socket = node.output(i); - this->forward_output(socket, buffer, run_state); - - OutputState &output_state = node_state.outputs[i]; - output_state.has_been_computed = true; - } - } - - void execute_unknown_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) - { - LinearAllocator<> &allocator 
= local_allocators_.local(); - for (const bNodeSocket *socket : node->output_sockets()) { - if (!socket->is_available()) { - continue; - } - const CPPType *type = get_socket_cpp_type(*socket); - if (type == nullptr) { - continue; - } - /* Just forward the default value of the type as a fallback. That's typically better than - * crashing or doing nothing. */ - OutputState &output_state = node_state.outputs[socket->index()]; - output_state.has_been_computed = true; - void *buffer = allocator.allocate(type->size(), type->alignment()); - this->construct_default_value(*type, buffer); - this->forward_output({node.context(), socket}, {*type, buffer}, run_state); - } - } - - void node_task_postprocessing(const DNode node, - NodeState &node_state, - bool was_executed, - NodeTaskRunState *run_state) - { - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - const bool node_has_finished = this->finish_node_if_possible(locked_node); - const bool reschedule_requested = node_state.schedule_state == - NodeScheduleState::RunningAndRescheduled; - node_state.schedule_state = NodeScheduleState::NotScheduled; - if (reschedule_requested && !node_has_finished) { - /* Either the node rescheduled itself or another node tried to schedule it while it ran. */ - this->schedule_node(locked_node); - } - if (was_executed) { - this->assert_expected_outputs_have_been_computed(locked_node); - } - }); - } - - void assert_expected_outputs_have_been_computed(LockedNode &locked_node) - { -#ifdef DEBUG - /* Outputs can only be computed when all required inputs have been provided. */ - if (locked_node.node_state.missing_required_inputs > 0) { - return; - } - /* If the node is still scheduled, it is not necessary that all its expected outputs are - * computed yet. 
*/ - if (locked_node.node_state.schedule_state == NodeScheduleState::Scheduled) { - return; - } - - const bool supports_laziness = node_supports_laziness(locked_node.node); - /* Iterating over sockets instead of the states directly, because that makes it easier to - * figure out which socket is missing when one of the asserts is hit. */ - for (const bNodeSocket *bsocket : locked_node.node->output_sockets()) { - OutputState &output_state = locked_node.node_state.outputs[bsocket->index()]; - if (supports_laziness) { - /* Expected that at least all required sockets have been computed. If more outputs become - * required later, the node will be executed again. */ - if (output_state.output_usage_for_execution == ValueUsage::Required) { - BLI_assert(output_state.has_been_computed); - } - } - else { - /* Expect that all outputs that may be used have been computed, because the node cannot - * be executed again. */ - if (output_state.output_usage_for_execution != ValueUsage::Unused) { - BLI_assert(output_state.has_been_computed); - } - } - } -#else - UNUSED_VARS(locked_node); -#endif - } - - void extract_group_outputs() - { - for (const DInputSocket &socket : params_.output_sockets) { - BLI_assert(socket->is_available()); - BLI_assert(!socket->is_multi_input()); - - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - InputState &input_state = node_state.inputs[socket->index()]; - - SingleInputValue &single_value = *input_state.value.single; - void *value = single_value.value; - - /* The value should have been computed by now. If this assert is hit, it means that there - * was some scheduling issue before. */ - BLI_assert(value != nullptr); - - /* Move value into memory owned by the outer allocator. 
*/ - const CPPType &type = *input_state.type; - void *buffer = outer_allocator_.allocate(type.size(), type.alignment()); - type.move_construct(value, buffer); - - params_.r_output_values.append({type, buffer}); - } - } - - /** - * Load the required input from the socket or trigger nodes to the left to compute the value. - * \return True when the node will be triggered by another node again when the value is computed. - */ - bool set_input_required(LockedNode &locked_node, const DInputSocket input_socket) - { - BLI_assert(locked_node.node == input_socket.node()); - InputState &input_state = locked_node.node_state.inputs[input_socket->index()]; - - /* Value set as unused cannot become used again. */ - BLI_assert(input_state.usage != ValueUsage::Unused); - - if (input_state.was_ready_for_execution) { - return false; - } - - if (input_state.usage == ValueUsage::Required) { - /* If the input was not ready for execution but is required, the node will be triggered again - * once the input has been computed. */ - return true; - } - input_state.usage = ValueUsage::Required; - - /* Count how many values still have to be added to this input until it is "complete". */ - int missing_values = 0; - if (input_socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - missing_values = multi_value.missing_values(); - } - else { - SingleInputValue &single_value = *input_state.value.single; - if (single_value.value == nullptr) { - missing_values = 1; - } - } - if (missing_values == 0) { - return false; - } - /* Increase the total number of missing required inputs. This ensures that the node will be - * scheduled correctly when all inputs have been provided. */ - locked_node.node_state.missing_required_inputs += missing_values; - - /* Get all origin sockets, because we have to tag those as required as well. 
*/ - Vector<DSocket> origin_sockets; - input_socket.foreach_origin_socket( - [&](const DSocket origin_socket) { origin_sockets.append(origin_socket); }); - - if (origin_sockets.is_empty()) { - /* If there are no origin sockets, just load the value from the socket directly. */ - this->load_unlinked_input_value(locked_node, input_socket, input_state, input_socket); - locked_node.node_state.missing_required_inputs -= 1; - return false; - } - bool requested_from_other_node = false; - for (const DSocket &origin_socket : origin_sockets) { - if (origin_socket->is_input()) { - /* Load the value directly from the origin socket. In most cases this is an unlinked - * group input. */ - this->load_unlinked_input_value(locked_node, input_socket, input_state, origin_socket); - locked_node.node_state.missing_required_inputs -= 1; - } - else { - /* The value has not been computed yet, so when it will be forwarded by another node, this - * node will be triggered. */ - requested_from_other_node = true; - locked_node.delayed_required_outputs.append(DOutputSocket(origin_socket)); - } - } - /* If this node will be triggered by another node, we don't have to schedule it now. */ - if (requested_from_other_node) { - return true; - } - return false; - } - - void set_input_unused(LockedNode &locked_node, const DInputSocket socket) - { - InputState &input_state = locked_node.node_state.inputs[socket->index()]; - - /* A required socket cannot become unused. */ - BLI_assert(input_state.usage != ValueUsage::Required); - - if (input_state.usage == ValueUsage::Unused) { - /* Nothing to do in this case. */ - return; - } - input_state.usage = ValueUsage::Unused; - - /* If the input is unused, its value can be destructed now. */ - this->destruct_input_value_if_exists(locked_node, socket); - - if (input_state.was_ready_for_execution) { - /* If the value was already computed, we don't need to notify origin nodes. 
*/ - return; - } - - /* Notify origin nodes that might want to set its inputs as unused as well. */ - socket.foreach_origin_socket([&](const DSocket origin_socket) { - if (origin_socket->is_input()) { - /* Values from these sockets are loaded directly from the sockets, so there is no node to - * notify. */ - return; - } - /* Delay notification of the other node until this node is not locked anymore. */ - locked_node.delayed_unused_outputs.append(DOutputSocket(origin_socket)); - }); - } - - void send_output_required_notification(const DOutputSocket socket, NodeTaskRunState *run_state) - { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - OutputState &output_state = node_state.outputs[socket->index()]; - - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - if (output_state.output_usage == ValueUsage::Required) { - /* Output is marked as required already. So the node is scheduled already. */ - return; - } - /* The origin node needs to be scheduled so that it provides the requested input - * eventually. */ - output_state.output_usage = ValueUsage::Required; - this->schedule_node(locked_node); - }); - } - - void send_output_unused_notification(const DOutputSocket socket, NodeTaskRunState *run_state) - { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - OutputState &output_state = node_state.outputs[socket->index()]; - - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - output_state.potential_users -= 1; - if (output_state.potential_users == 0) { - /* The socket might be required even though the output is not used by other sockets. That - * can happen when the socket is forced to be computed. */ - if (output_state.output_usage != ValueUsage::Required) { - /* The output socket has no users anymore. 
*/ - output_state.output_usage = ValueUsage::Unused; - /* Schedule the origin node in case it wants to set its inputs as unused as well. */ - this->schedule_node(locked_node); - } - } - }); - } - - void add_node_to_task_pool(const DNode node) - { - /* Push the task to the pool while it is not locked to avoid a deadlock in case when the task - * is executed immediately. */ - const NodeWithState *node_with_state = node_states_.lookup_key_ptr_as(node); - BLI_task_pool_push( - task_pool_, run_node_from_task_pool, (void *)node_with_state, false, nullptr); - } - - /** - * Moves a newly computed value from an output socket to all the inputs that might need it. - * Takes ownership of the value and destructs if it is unused. - */ - void forward_output(const DOutputSocket from_socket, - GMutablePointer value_to_forward, - NodeTaskRunState *run_state) - { - BLI_assert(value_to_forward.get() != nullptr); - - LinearAllocator<> &allocator = local_allocators_.local(); - - Vector<DSocket> log_original_value_sockets; - Vector<DInputSocket> forward_original_value_sockets; - log_original_value_sockets.append(from_socket); - - from_socket.foreach_target_socket([&](const DInputSocket to_socket, - const DOutputSocket::TargetSocketPathInfo &path_info) { - if (!this->should_forward_to_socket(to_socket)) { - return; - } - BLI_assert(to_socket == path_info.sockets.last()); - GMutablePointer current_value = value_to_forward; - for (const DSocket &next_socket : path_info.sockets) { - const DNode next_node = next_socket.node(); - const bool is_last_socket = to_socket == next_socket; - const bool do_conversion_if_necessary = is_last_socket || - next_node->type == NODE_GROUP_OUTPUT || - (next_node->is_group() && !next_node->is_muted()); - if (do_conversion_if_necessary) { - const CPPType &next_type = *get_socket_cpp_type(next_socket); - if (*current_value.type() != next_type) { - void *buffer = allocator.allocate(next_type.size(), next_type.alignment()); - 
this->convert_value(*current_value.type(), next_type, current_value.get(), buffer); - if (current_value.get() != value_to_forward.get()) { - current_value.destruct(); - } - current_value = {next_type, buffer}; - } - } - if (current_value.get() == value_to_forward.get()) { - /* Log the original value at the current socket. */ - log_original_value_sockets.append(next_socket); - } - else { - /* Multi-input sockets are logged when all values are available. */ - if (!(next_socket->is_input() && next_socket->is_multi_input())) { - /* Log the converted value at the socket. */ - this->log_socket_value({next_socket}, current_value); - } - } - } - if (current_value.get() == value_to_forward.get()) { - /* The value has not been converted, so forward the original value. */ - forward_original_value_sockets.append(to_socket); - } - else { - /* The value has been converted. */ - this->add_value_to_input_socket(to_socket, from_socket, current_value, run_state); - } - }); - this->log_socket_value(log_original_value_sockets, value_to_forward); - this->forward_to_sockets_with_same_type( - allocator, forward_original_value_sockets, value_to_forward, from_socket, run_state); - } - - bool should_forward_to_socket(const DInputSocket socket) - { - const DNode to_node = socket.node(); - const NodeWithState *target_node_with_state = node_states_.lookup_key_ptr_as(to_node); - if (target_node_with_state == nullptr) { - /* If the socket belongs to a node that has no state, the entire node is not used. */ - return false; - } - NodeState &target_node_state = *target_node_with_state->state; - InputState &target_input_state = target_node_state.inputs[socket->index()]; - - std::lock_guard lock{target_node_state.mutex}; - /* Do not forward to an input socket whose value won't be used. 
*/ - return target_input_state.usage != ValueUsage::Unused; - } - - void forward_to_sockets_with_same_type(LinearAllocator<> &allocator, - Span<DInputSocket> to_sockets, - GMutablePointer value_to_forward, - const DOutputSocket from_socket, - NodeTaskRunState *run_state) - { - if (to_sockets.is_empty()) { - /* Value is not used anymore, so it can be destructed. */ - value_to_forward.destruct(); - } - else if (to_sockets.size() == 1) { - /* Value is only used by one input socket, no need to copy it. */ - const DInputSocket to_socket = to_sockets[0]; - this->add_value_to_input_socket(to_socket, from_socket, value_to_forward, run_state); - } - else { - /* Multiple inputs use the value, make a copy for every input except for one. */ - /* First make the copies, so that the next node does not start modifying the value while we - * are still making copies. */ - const CPPType &type = *value_to_forward.type(); - for (const DInputSocket &to_socket : to_sockets.drop_front(1)) { - void *buffer = allocator.allocate(type.size(), type.alignment()); - type.copy_construct(value_to_forward.get(), buffer); - this->add_value_to_input_socket(to_socket, from_socket, {type, buffer}, run_state); - } - /* Forward the original value to one of the targets. */ - const DInputSocket to_socket = to_sockets[0]; - this->add_value_to_input_socket(to_socket, from_socket, value_to_forward, run_state); - } - } - - void add_value_to_input_socket(const DInputSocket socket, - const DOutputSocket origin, - GMutablePointer value, - NodeTaskRunState *run_state) - { - BLI_assert(socket->is_available()); - - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - InputState &input_state = node_state.inputs[socket->index()]; - - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - if (socket->is_multi_input()) { - /* Add a new value to the multi-input. 
*/ - MultiInputValue &multi_value = *input_state.value.multi; - multi_value.add_value(origin, value.get()); - - if (multi_value.all_values_available()) { - this->log_socket_value({socket}, input_state, multi_value.values); - } - } - else { - /* Assign the value to the input. */ - SingleInputValue &single_value = *input_state.value.single; - BLI_assert(single_value.value == nullptr); - single_value.value = value.get(); - } - - if (input_state.usage == ValueUsage::Required) { - node_state.missing_required_inputs--; - if (node_state.missing_required_inputs == 0) { - /* Schedule node if all the required inputs have been provided. */ - this->schedule_node(locked_node); - } - } - }); - } - - /** - * Loads the value of a socket that is not computed by another node. Note that the socket may - * still be linked to e.g. a Group Input node, but the socket on the outside is not connected to - * anything. - * - * \param input_socket: The socket of the node that wants to use the value. - * \param origin_socket: The socket that we want to load the value from. - */ - void load_unlinked_input_value(LockedNode &locked_node, - const DInputSocket input_socket, - InputState &input_state, - const DSocket origin_socket) - { - /* Only takes locked node as parameter, because the node needs to be locked. 
*/ - UNUSED_VARS(locked_node); - - GMutablePointer value = this->get_value_from_socket(origin_socket, *input_state.type); - if (input_socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - multi_value.add_value(origin_socket, value.get()); - if (multi_value.all_values_available()) { - this->log_socket_value({input_socket}, input_state, multi_value.values); - } - } - else { - SingleInputValue &single_value = *input_state.value.single; - single_value.value = value.get(); - Vector<DSocket> sockets_to_log_to = {input_socket}; - if (origin_socket != input_socket) { - /* This might log the socket value for the #origin_socket more than once, but this is - * handled by the logging system gracefully. */ - sockets_to_log_to.append(origin_socket); - } - /* TODO: Log to the intermediate sockets between the group input and where the value is - * actually used as well. */ - this->log_socket_value(sockets_to_log_to, value); - } - } - - void destruct_input_value_if_exists(LockedNode &locked_node, const DInputSocket socket) - { - InputState &input_state = locked_node.node_state.inputs[socket->index()]; - if (socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - for (void *&value : multi_value.values) { - if (value != nullptr) { - input_state.type->destruct(value); - value = nullptr; - } - } - multi_value.provided_value_count = 0; - } - else { - SingleInputValue &single_value = *input_state.value.single; - if (single_value.value != nullptr) { - input_state.type->destruct(single_value.value); - single_value.value = nullptr; - } - } - } - - GMutablePointer get_value_from_socket(const DSocket socket, const CPPType &required_type) - { - LinearAllocator<> &allocator = local_allocators_.local(); - - const CPPType &type = *get_socket_cpp_type(socket); - void *buffer = allocator.allocate(type.size(), type.alignment()); - get_socket_value(*socket.bsocket(), buffer); - - if (type == required_type) { - return {type, buffer}; 
- } - void *converted_buffer = allocator.allocate(required_type.size(), required_type.alignment()); - this->convert_value(type, required_type, buffer, converted_buffer); - type.destruct(buffer); - return {required_type, converted_buffer}; - } - - void convert_value(const CPPType &from_type, - const CPPType &to_type, - const void *from_value, - void *to_value) - { - if (from_type == to_type) { - from_type.copy_construct(from_value, to_value); - return; - } - const ValueOrFieldCPPType *from_field_type = dynamic_cast<const ValueOrFieldCPPType *>( - &from_type); - const ValueOrFieldCPPType *to_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&to_type); - - if (from_field_type != nullptr && to_field_type != nullptr) { - const CPPType &from_base_type = from_field_type->base_type(); - const CPPType &to_base_type = to_field_type->base_type(); - if (conversions_.is_convertible(from_base_type, to_base_type)) { - if (from_field_type->is_field(from_value)) { - const GField &from_field = *from_field_type->get_field_ptr(from_value); - to_field_type->construct_from_field(to_value, - conversions_.try_convert(from_field, to_base_type)); - } - else { - to_field_type->default_construct(to_value); - const void *from_value_ptr = from_field_type->get_value_ptr(from_value); - void *to_value_ptr = to_field_type->get_value_ptr(to_value); - conversions_.get_conversion_functions(from_base_type, to_base_type) - ->convert_single_to_initialized(from_value_ptr, to_value_ptr); - } - return; - } - } - if (conversions_.is_convertible(from_type, to_type)) { - /* Do the conversion if possible. */ - conversions_.convert_to_uninitialized(from_type, to_type, from_value, to_value); - } - else { - /* Cannot convert, use default value instead. 
*/ - this->construct_default_value(to_type, to_value); - } - } - - void construct_default_value(const CPPType &type, void *r_value) - { - type.value_initialize(r_value); - } - - NodeState &get_node_state(const DNode node) - { - return *node_states_.lookup_key_as(node).state; - } - - void log_socket_value(DSocket socket, InputState &input_state, Span<void *> values) - { - if (params_.geo_logger == nullptr) { - return; - } - - Vector<GPointer, 16> value_pointers; - value_pointers.reserve(values.size()); - const CPPType &type = *input_state.type; - for (const void *value : values) { - value_pointers.append({type, value}); - } - params_.geo_logger->local().log_multi_value_socket(socket, value_pointers); - } - - void log_socket_value(Span<DSocket> sockets, GPointer value) - { - if (params_.geo_logger == nullptr) { - return; - } - params_.geo_logger->local().log_value_for_sockets(sockets, value); - } - - void log_debug_message(DNode node, std::string message) - { - if (params_.geo_logger == nullptr) { - return; - } - params_.geo_logger->local().log_debug_message(node, std::move(message)); - } - - /* In most cases when `NodeState` is accessed, the node has to be locked first to avoid race - * conditions. */ - template<typename Function> - void with_locked_node(const DNode node, - NodeState &node_state, - NodeTaskRunState *run_state, - const Function &function) - { - LockedNode locked_node{node, node_state}; - - node_state.mutex.lock(); - /* Isolate this thread because we don't want it to start executing another node. This other - * node might want to lock the same mutex leading to a deadlock. */ - threading::isolate_task([&] { function(locked_node); }); - node_state.mutex.unlock(); - - /* Then send notifications to the other nodes after the node state is unlocked. This avoids - * locking two nodes at the same time on this thread and helps to prevent deadlocks. 
*/ - for (const DOutputSocket &socket : locked_node.delayed_required_outputs) { - this->send_output_required_notification(socket, run_state); - } - for (const DOutputSocket &socket : locked_node.delayed_unused_outputs) { - this->send_output_unused_notification(socket, run_state); - } - for (const DNode &node_to_schedule : locked_node.delayed_scheduled_nodes) { - if (run_state != nullptr && !run_state->next_node_to_run) { - /* Execute the node on the same thread after the current node finished. */ - /* Currently, this assumes that it is always best to run the first node that is scheduled - * on the same thread. That is usually correct, because the geometry socket which carries - * the most data usually comes first in nodes. */ - run_state->next_node_to_run = node_to_schedule; - } - else { - /* Push the node to the task pool so that another thread can start working on it. */ - this->add_node_to_task_pool(node_to_schedule); - } - } - } -}; - -NodeParamsProvider::NodeParamsProvider(GeometryNodesEvaluator &evaluator, - DNode dnode, - NodeState &node_state, - NodeTaskRunState *run_state) - : evaluator_(evaluator), node_state_(node_state), run_state_(run_state) -{ - this->dnode = dnode; - this->self_object = evaluator.params_.self_object; - this->modifier = &evaluator.params_.modifier_->modifier; - this->depsgraph = evaluator.params_.depsgraph; - this->logger = evaluator.params_.geo_logger; -} - -bool NodeParamsProvider::can_get_input(StringRef identifier) const -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - - InputState &input_state = node_state_.inputs[socket->index()]; - if (!input_state.was_ready_for_execution) { - return false; - } - - if (socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - return multi_value.all_values_available(); - } - SingleInputValue &single_value = *input_state.value.single; - return single_value.value != nullptr; -} - -bool 
NodeParamsProvider::can_set_output(StringRef identifier) const -{ - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - return !output_state.has_been_computed; -} - -GMutablePointer NodeParamsProvider::extract_input(StringRef identifier) -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - BLI_assert(!socket->is_multi_input()); - BLI_assert(this->can_get_input(identifier)); - - InputState &input_state = node_state_.inputs[socket->index()]; - SingleInputValue &single_value = *input_state.value.single; - void *value = single_value.value; - single_value.value = nullptr; - return {*input_state.type, value}; -} - -Vector<GMutablePointer> NodeParamsProvider::extract_multi_input(StringRef identifier) -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - BLI_assert(socket->is_multi_input()); - BLI_assert(this->can_get_input(identifier)); - - InputState &input_state = node_state_.inputs[socket->index()]; - MultiInputValue &multi_value = *input_state.value.multi; - - Vector<GMutablePointer> ret_values; - for (void *&value : multi_value.values) { - BLI_assert(value != nullptr); - ret_values.append({*input_state.type, value}); - value = nullptr; - } - return ret_values; -} - -GPointer NodeParamsProvider::get_input(StringRef identifier) const -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - BLI_assert(!socket->is_multi_input()); - BLI_assert(this->can_get_input(identifier)); - - InputState &input_state = node_state_.inputs[socket->index()]; - SingleInputValue &single_value = *input_state.value.single; - return {*input_state.type, single_value.value}; -} - -GMutablePointer NodeParamsProvider::alloc_output_value(const CPPType &type) -{ - LinearAllocator<> &allocator = evaluator_.local_allocators_.local(); - return 
{type, allocator.allocate(type.size(), type.alignment())}; -} - -void NodeParamsProvider::set_output(StringRef identifier, GMutablePointer value) -{ - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - BLI_assert(!output_state.has_been_computed); - evaluator_.forward_output(socket, value, run_state_); - output_state.has_been_computed = true; -} - -bool NodeParamsProvider::lazy_require_input(StringRef identifier) -{ - BLI_assert(node_supports_laziness(this->dnode)); - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - - InputState &input_state = node_state_.inputs[socket->index()]; - if (input_state.was_ready_for_execution) { - return false; - } - evaluator_.with_locked_node(this->dnode, node_state_, run_state_, [&](LockedNode &locked_node) { - if (!evaluator_.set_input_required(locked_node, socket)) { - /* Schedule the currently executed node again because the value is available now but was not - * ready for the current execution. 
*/ - evaluator_.schedule_node(locked_node); - } - }); - return true; -} - -void NodeParamsProvider::set_input_unused(StringRef identifier) -{ - BLI_assert(node_supports_laziness(this->dnode)); - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - - evaluator_.with_locked_node(this->dnode, node_state_, run_state_, [&](LockedNode &locked_node) { - evaluator_.set_input_unused(locked_node, socket); - }); -} - -bool NodeParamsProvider::output_is_required(StringRef identifier) const -{ - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - if (output_state.has_been_computed) { - return false; - } - return output_state.output_usage_for_execution != ValueUsage::Unused; -} - -bool NodeParamsProvider::lazy_output_is_required(StringRef identifier) const -{ - BLI_assert(node_supports_laziness(this->dnode)); - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - if (output_state.has_been_computed) { - return false; - } - return output_state.output_usage_for_execution == ValueUsage::Required; -} - -void NodeParamsProvider::set_default_remaining_outputs() -{ - LinearAllocator<> &allocator = evaluator_.local_allocators_.local(); - - for (const int i : this->dnode->output_sockets().index_range()) { - OutputState &output_state = node_state_.outputs[i]; - if (output_state.has_been_computed) { - continue; - } - if (output_state.output_usage_for_execution == ValueUsage::Unused) { - continue; - } - - const DOutputSocket socket = this->dnode.output(i); - const CPPType *type = get_socket_cpp_type(socket); - BLI_assert(type != nullptr); - void *buffer = allocator.allocate(type->size(), type->alignment()); - type->value_initialize(buffer); - evaluator_.forward_output(socket, {type, buffer}, run_state_); - 
output_state.has_been_computed = true; - } -} - -void evaluate_geometry_nodes(GeometryNodesEvaluationParams ¶ms) -{ - GeometryNodesEvaluator evaluator{params}; - evaluator.execute(); -} - -} // namespace blender::modifiers::geometry_nodes diff --git a/source/blender/modifiers/intern/MOD_nodes_evaluator.hh b/source/blender/modifiers/intern/MOD_nodes_evaluator.hh deleted file mode 100644 index cbcbcab5679..00000000000 --- a/source/blender/modifiers/intern/MOD_nodes_evaluator.hh +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#pragma once - -#include "BLI_generic_pointer.hh" -#include "BLI_map.hh" - -#include "NOD_derived_node_tree.hh" -#include "NOD_geometry_nodes_eval_log.hh" -#include "NOD_multi_function.hh" - -#include "DNA_modifier_types.h" - -#include "FN_multi_function.hh" - -namespace geo_log = blender::nodes::geometry_nodes_eval_log; - -namespace blender::modifiers::geometry_nodes { - -using namespace nodes::derived_node_tree_types; - -struct GeometryNodesEvaluationParams { - blender::LinearAllocator<> allocator; - - Map<DOutputSocket, GMutablePointer> input_values; - Vector<DInputSocket> output_sockets; - /* These sockets will be computed but are not part of the output. Their value can be retrieved in - * `log_socket_value_fn`. These sockets are not part of `output_sockets` because then the - * evaluator would have to keep the socket values in memory until the end, which might not be - * necessary in all cases. Sometimes `log_socket_value_fn` might just want to look at the value - * and then it can be freed. 
*/ - Vector<DSocket> force_compute_sockets; - nodes::NodeMultiFunctions *mf_by_node; - const NodesModifierData *modifier_; - Depsgraph *depsgraph; - Object *self_object; - geo_log::GeoLogger *geo_logger; - - Vector<GMutablePointer> r_output_values; -}; - -void evaluate_geometry_nodes(GeometryNodesEvaluationParams ¶ms); - -} // namespace blender::modifiers::geometry_nodes diff --git a/source/blender/nodes/CMakeLists.txt b/source/blender/nodes/CMakeLists.txt index ff8bd27f8d7..e042458ca19 100644 --- a/source/blender/nodes/CMakeLists.txt +++ b/source/blender/nodes/CMakeLists.txt @@ -40,7 +40,8 @@ set(INC set(SRC intern/derived_node_tree.cc - intern/geometry_nodes_eval_log.cc + intern/geometry_nodes_lazy_function.cc + intern/geometry_nodes_log.cc intern/math_functions.cc intern/node_common.cc intern/node_declaration.cc @@ -58,7 +59,7 @@ set(SRC NOD_function.h NOD_geometry.h NOD_geometry_exec.hh - NOD_geometry_nodes_eval_log.hh + NOD_geometry_nodes_lazy_function.hh NOD_math_functions.hh NOD_multi_function.hh NOD_node_declaration.hh diff --git a/source/blender/nodes/NOD_geometry_exec.hh b/source/blender/nodes/NOD_geometry_exec.hh index b5ffd3a317c..16669f7cfce 100644 --- a/source/blender/nodes/NOD_geometry_exec.hh +++ b/source/blender/nodes/NOD_geometry_exec.hh @@ -3,6 +3,7 @@ #pragma once #include "FN_field.hh" +#include "FN_lazy_function.hh" #include "FN_multi_function_builder.hh" #include "BKE_geometry_fields.hh" @@ -11,9 +12,8 @@ #include "DNA_node_types.h" #include "NOD_derived_node_tree.hh" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_lazy_function.hh" -struct Depsgraph; struct ModifierData; namespace blender::nodes { @@ -40,75 +40,18 @@ using fn::FieldInput; using fn::FieldOperation; using fn::GField; using fn::ValueOrField; -using geometry_nodes_eval_log::eNamedAttrUsage; -using geometry_nodes_eval_log::NodeWarningType; - -/** - * This class exists to separate the memory management details of the geometry nodes evaluator - * from the 
node execution functions and related utilities. - */ -class GeoNodeExecParamsProvider { - public: - DNode dnode; - const Object *self_object = nullptr; - const ModifierData *modifier = nullptr; - Depsgraph *depsgraph = nullptr; - geometry_nodes_eval_log::GeoLogger *logger = nullptr; - - /** - * Returns true when the node is allowed to get/extract the input value. The identifier is - * expected to be valid. This may return false if the input value has been consumed already. - */ - virtual bool can_get_input(StringRef identifier) const = 0; - - /** - * Returns true when the node is allowed to set the output value. The identifier is expected to - * be valid. This may return false if the output value has been set already. - */ - virtual bool can_set_output(StringRef identifier) const = 0; - - /** - * Take ownership of an input value. The caller is responsible for destructing the value. It does - * not have to be freed, because the memory is managed by the geometry nodes evaluator. - */ - virtual GMutablePointer extract_input(StringRef identifier) = 0; - - /** - * Similar to #extract_input, but has to be used for multi-input sockets. - */ - virtual Vector<GMutablePointer> extract_multi_input(StringRef identifier) = 0; - - /** - * Get the input value for the identifier without taking ownership of it. - */ - virtual GPointer get_input(StringRef identifier) const = 0; - - /** - * Prepare a memory buffer for an output value of the node. The returned memory has to be - * initialized by the caller. The identifier and type are expected to be correct. - */ - virtual GMutablePointer alloc_output_value(const CPPType &type) = 0; - - /** - * The value has been allocated with #alloc_output_value. - */ - virtual void set_output(StringRef identifier, GMutablePointer value) = 0; - - /* A description for these methods is provided in GeoNodeExecParams. 
*/ - virtual void set_input_unused(StringRef identifier) = 0; - virtual bool output_is_required(StringRef identifier) const = 0; - virtual bool lazy_require_input(StringRef identifier) = 0; - virtual bool lazy_output_is_required(StringRef identifier) const = 0; - - virtual void set_default_remaining_outputs() = 0; -}; +using geo_eval_log::NamedAttributeUsage; +using geo_eval_log::NodeWarningType; class GeoNodeExecParams { private: - GeoNodeExecParamsProvider *provider_; + const bNode &node_; + lf::Params ¶ms_; + const lf::Context &lf_context_; public: - GeoNodeExecParams(GeoNodeExecParamsProvider &provider) : provider_(&provider) + GeoNodeExecParams(const bNode &node, lf::Params ¶ms, const lf::Context &lf_context) + : node_(node), params_(params), lf_context_(lf_context) { } @@ -119,20 +62,6 @@ class GeoNodeExecParams { /** * Get the input value for the input socket with the given identifier. * - * The node calling becomes responsible for destructing the value before it is done - * executing. This method can only be called once for each identifier. - */ - GMutablePointer extract_input(StringRef identifier) - { -#ifdef DEBUG - this->check_input_access(identifier); -#endif - return provider_->extract_input(identifier); - } - - /** - * Get the input value for the input socket with the given identifier. - * * This method can only be called once for each identifier. 
*/ template<typename T> T extract_input(StringRef identifier) @@ -151,8 +80,8 @@ class GeoNodeExecParams { #ifdef DEBUG this->check_input_access(identifier, &CPPType::get<T>()); #endif - GMutablePointer gvalue = this->extract_input(identifier); - T value = gvalue.relocate_out<T>(); + const int index = this->get_input_index(identifier); + T value = params_.extract_input<T>(index); if constexpr (std::is_same_v<T, GeometrySet>) { this->check_input_geometry_set(identifier, value); } @@ -164,27 +93,6 @@ class GeoNodeExecParams { void check_output_geometry_set(const GeometrySet &geometry_set) const; /** - * Get input as vector for multi input socket with the given identifier. - * - * This method can only be called once for each identifier. - */ - template<typename T> Vector<T> extract_multi_input(StringRef identifier) - { - Vector<GMutablePointer> gvalues = provider_->extract_multi_input(identifier); - Vector<T> values; - for (GMutablePointer gvalue : gvalues) { - if constexpr (is_field_base_type_v<T>) { - const ValueOrField<T> value_or_field = gvalue.relocate_out<ValueOrField<T>>(); - values.append(value_or_field.as_value()); - } - else { - values.append(gvalue.relocate_out<T>()); - } - } - return values; - } - - /** * Get the input value for the input socket with the given identifier. 
*/ template<typename T> T get_input(StringRef identifier) const @@ -202,9 +110,8 @@ class GeoNodeExecParams { #ifdef DEBUG this->check_input_access(identifier, &CPPType::get<T>()); #endif - GPointer gvalue = provider_->get_input(identifier); - BLI_assert(gvalue.is_type<T>()); - const T &value = *(const T *)gvalue.get(); + const int index = this->get_input_index(identifier); + const T &value = params_.get_input<T>(index); if constexpr (std::is_same_v<T, GeometrySet>) { this->check_input_geometry_set(identifier, value); } @@ -226,17 +133,28 @@ class GeoNodeExecParams { this->set_output(identifier, ValueOrField<BaseType>(std::forward<T>(value))); } else { - const CPPType &type = CPPType::get<StoredT>(); #ifdef DEBUG + const CPPType &type = CPPType::get<StoredT>(); this->check_output_access(identifier, type); #endif if constexpr (std::is_same_v<StoredT, GeometrySet>) { this->check_output_geometry_set(value); } - GMutablePointer gvalue = provider_->alloc_output_value(type); - new (gvalue.get()) StoredT(std::forward<T>(value)); - provider_->set_output(identifier, gvalue); + const int index = this->get_output_index(identifier); + params_.set_output(index, std::forward<T>(value)); + } + } + + geo_eval_log::GeoTreeLogger *get_local_tree_logger() const + { + GeoNodesLFUserData *user_data = this->user_data(); + BLI_assert(user_data != nullptr); + const ComputeContext *compute_context = user_data->compute_context; + BLI_assert(compute_context != nullptr); + if (user_data->modifier_data->eval_log == nullptr) { + return nullptr; } + return &user_data->modifier_data->eval_log->get_local_tree_logger(*compute_context); } /** @@ -244,7 +162,8 @@ class GeoNodeExecParams { */ void set_input_unused(StringRef identifier) { - provider_->set_input_unused(identifier); + const int index = this->get_input_index(identifier); + params_.set_input_unused(index); } /** @@ -254,7 +173,8 @@ class GeoNodeExecParams { */ bool output_is_required(StringRef identifier) const { - return 
provider_->output_is_required(identifier); + const int index = this->get_output_index(identifier); + return params_.get_output_usage(index) != lf::ValueUsage::Unused; } /** @@ -265,7 +185,8 @@ class GeoNodeExecParams { */ bool lazy_require_input(StringRef identifier) { - return provider_->lazy_require_input(identifier); + const int index = this->get_input_index(identifier); + return params_.try_get_input_data_ptr_or_request(index) == nullptr; } /** @@ -275,7 +196,8 @@ class GeoNodeExecParams { */ bool lazy_output_is_required(StringRef identifier) { - return provider_->lazy_output_is_required(identifier); + const int index = this->get_output_index(identifier); + return params_.get_output_usage(index) == lf::ValueUsage::Used; } /** @@ -283,17 +205,32 @@ class GeoNodeExecParams { */ const bNode &node() const { - return *provider_->dnode; + return node_; } const Object *self_object() const { - return provider_->self_object; + if (const auto *data = this->user_data()) { + if (data->modifier_data) { + return data->modifier_data->self_object; + } + } + return nullptr; } Depsgraph *depsgraph() const { - return provider_->depsgraph; + if (const auto *data = this->user_data()) { + if (data->modifier_data) { + return data->modifier_data->depsgraph; + } + } + return nullptr; + } + + GeoNodesLFUserData *user_data() const + { + return dynamic_cast<GeoNodesLFUserData *>(lf_context_.user_data); } /** @@ -306,7 +243,7 @@ class GeoNodeExecParams { void set_default_remaining_outputs(); - void used_named_attribute(std::string attribute_name, eNamedAttrUsage usage); + void used_named_attribute(std::string attribute_name, NamedAttributeUsage usage); private: /* Utilities for detecting common errors at when using this class. */ @@ -315,6 +252,38 @@ class GeoNodeExecParams { /* Find the active socket with the input name (not the identifier). 
*/ const bNodeSocket *find_available_socket(const StringRef name) const; + + int get_input_index(const StringRef identifier) const + { + int counter = 0; + for (const bNodeSocket *socket : node_.input_sockets()) { + if (!socket->is_available()) { + continue; + } + if (socket->identifier == identifier) { + return counter; + } + counter++; + } + BLI_assert_unreachable(); + return -1; + } + + int get_output_index(const StringRef identifier) const + { + int counter = 0; + for (const bNodeSocket *socket : node_.output_sockets()) { + if (!socket->is_available()) { + continue; + } + if (socket->identifier == identifier) { + return counter; + } + counter++; + } + BLI_assert_unreachable(); + return -1; + } }; } // namespace blender::nodes diff --git a/source/blender/nodes/NOD_geometry_nodes_eval_log.hh b/source/blender/nodes/NOD_geometry_nodes_eval_log.hh deleted file mode 100644 index 46ba72d14d8..00000000000 --- a/source/blender/nodes/NOD_geometry_nodes_eval_log.hh +++ /dev/null @@ -1,411 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#pragma once - -/** - * Many geometry nodes related UI features need access to data produced during evaluation. Not only - * is the final output required but also the intermediate results. Those features include - * attribute search, node warnings, socket inspection and the viewer node. - * - * This file provides the framework for logging data during evaluation and accessing the data after - * evaluation. - * - * During logging every thread gets its own local logger to avoid too much locking (logging - * generally happens for every socket). After geometry nodes evaluation is done, the thread-local - * logging information is combined and post-processed to make it easier for the UI to lookup. - * necessary information. 
- */ - -#include "BLI_enumerable_thread_specific.hh" -#include "BLI_function_ref.hh" -#include "BLI_generic_pointer.hh" -#include "BLI_linear_allocator.hh" -#include "BLI_map.hh" - -#include "BKE_geometry_set.hh" - -#include "NOD_derived_node_tree.hh" - -#include "FN_field.hh" - -#include <chrono> - -struct SpaceNode; -struct SpaceSpreadsheet; - -namespace blender::nodes::geometry_nodes_eval_log { - -/** Contains information about a value that has been computed during geometry nodes evaluation. */ -class ValueLog { - public: - virtual ~ValueLog() = default; -}; - -/** Contains an owned copy of a value of a generic type. */ -class GenericValueLog : public ValueLog { - private: - GMutablePointer data_; - - public: - GenericValueLog(GMutablePointer data) : data_(data) - { - } - - ~GenericValueLog() - { - data_.destruct(); - } - - GPointer value() const - { - return data_; - } -}; - -class GFieldValueLog : public ValueLog { - private: - fn::GField field_; - const CPPType &type_; - Vector<std::string> input_tooltips_; - - public: - GFieldValueLog(fn::GField field, bool log_full_field); - - const fn::GField &field() const - { - return field_; - } - - Span<std::string> input_tooltips() const - { - return input_tooltips_; - } - - const CPPType &type() const - { - return type_; - } -}; - -struct GeometryAttributeInfo { - std::string name; - /** Can be empty when #name does not actually exist on a geometry yet. */ - std::optional<eAttrDomain> domain; - std::optional<eCustomDataType> data_type; -}; - -/** Contains information about a geometry set. In most cases this does not store the entire - * geometry set as this would require too much memory. 
*/ -class GeometryValueLog : public ValueLog { - private: - Vector<GeometryAttributeInfo> attributes_; - Vector<GeometryComponentType> component_types_; - std::unique_ptr<GeometrySet> full_geometry_; - - public: - struct MeshInfo { - int verts_num, edges_num, faces_num; - }; - struct CurveInfo { - int splines_num; - }; - struct PointCloudInfo { - int points_num; - }; - struct InstancesInfo { - int instances_num; - }; - struct EditDataInfo { - bool has_deformed_positions; - bool has_deform_matrices; - }; - - std::optional<MeshInfo> mesh_info; - std::optional<CurveInfo> curve_info; - std::optional<PointCloudInfo> pointcloud_info; - std::optional<InstancesInfo> instances_info; - std::optional<EditDataInfo> edit_data_info; - - GeometryValueLog(const GeometrySet &geometry_set, bool log_full_geometry = false); - - Span<GeometryAttributeInfo> attributes() const - { - return attributes_; - } - - Span<GeometryComponentType> component_types() const - { - return component_types_; - } - - const GeometrySet *full_geometry() const - { - return full_geometry_.get(); - } -}; - -enum class NodeWarningType { - Error, - Warning, - Info, -}; - -struct NodeWarning { - NodeWarningType type; - std::string message; -}; - -struct NodeWithWarning { - DNode node; - NodeWarning warning; -}; - -struct NodeWithExecutionTime { - DNode node; - std::chrono::microseconds exec_time; -}; - -struct NodeWithDebugMessage { - DNode node; - std::string message; -}; - -/** The same value can be referenced by multiple sockets when they are linked. 
*/ -struct ValueOfSockets { - Span<DSocket> sockets; - destruct_ptr<ValueLog> value; -}; - -enum class eNamedAttrUsage { - None = 0, - Read = 1 << 0, - Write = 1 << 1, - Remove = 1 << 2, -}; -ENUM_OPERATORS(eNamedAttrUsage, eNamedAttrUsage::Remove); - -struct UsedNamedAttribute { - std::string name; - eNamedAttrUsage usage; -}; - -struct NodeWithUsedNamedAttribute { - DNode node; - UsedNamedAttribute attribute; -}; - -class GeoLogger; -class ModifierLog; - -/** Every thread has its own local logger to avoid having to communicate between threads during - * evaluation. After evaluation the individual logs are combined. */ -class LocalGeoLogger { - private: - /* Back pointer to the owner of this local logger. */ - GeoLogger *main_logger_; - /* Allocator for the many small allocations during logging. This is in a `unique_ptr` so that - * ownership can be transferred later on. */ - std::unique_ptr<LinearAllocator<>> allocator_; - Vector<ValueOfSockets> values_; - Vector<NodeWithWarning> node_warnings_; - Vector<NodeWithExecutionTime> node_exec_times_; - Vector<NodeWithDebugMessage> node_debug_messages_; - Vector<NodeWithUsedNamedAttribute> used_named_attributes_; - - friend ModifierLog; - - public: - LocalGeoLogger(GeoLogger &main_logger) : main_logger_(&main_logger) - { - this->allocator_ = std::make_unique<LinearAllocator<>>(); - } - - void log_value_for_sockets(Span<DSocket> sockets, GPointer value); - void log_multi_value_socket(DSocket socket, Span<GPointer> values); - void log_node_warning(DNode node, NodeWarningType type, std::string message); - void log_execution_time(DNode node, std::chrono::microseconds exec_time); - void log_used_named_attribute(DNode node, std::string attribute_name, eNamedAttrUsage usage); - /** - * Log a message that will be displayed in the node editor next to the node. - * This should only be used for debugging purposes and not to display information to users. 
- */ - void log_debug_message(DNode node, std::string message); -}; - -/** The root logger class. */ -class GeoLogger { - private: - /** - * Log the entire value for these sockets, because they may be inspected afterwards. - * We don't log everything, because that would take up too much memory and cause significant - * slowdowns. - */ - Set<DSocket> log_full_sockets_; - threading::EnumerableThreadSpecific<LocalGeoLogger> threadlocals_; - - /* These are only optional since they don't have a default constructor. */ - std::unique_ptr<GeometryValueLog> input_geometry_log_; - std::unique_ptr<GeometryValueLog> output_geometry_log_; - - friend LocalGeoLogger; - friend ModifierLog; - - public: - GeoLogger(Set<DSocket> log_full_sockets) - : log_full_sockets_(std::move(log_full_sockets)), - threadlocals_([this]() { return LocalGeoLogger(*this); }) - { - } - - void log_input_geometry(const GeometrySet &geometry) - { - input_geometry_log_ = std::make_unique<GeometryValueLog>(geometry); - } - - void log_output_geometry(const GeometrySet &geometry) - { - output_geometry_log_ = std::make_unique<GeometryValueLog>(geometry); - } - - LocalGeoLogger &local() - { - return threadlocals_.local(); - } - - auto begin() - { - return threadlocals_.begin(); - } - - auto end() - { - return threadlocals_.end(); - } -}; - -/** Contains information that has been logged for one specific socket. */ -class SocketLog { - private: - ValueLog *value_ = nullptr; - - friend ModifierLog; - - public: - const ValueLog *value() const - { - return value_; - } -}; - -/** Contains information that has been logged for one specific node. 
*/ -class NodeLog { - private: - Vector<SocketLog> input_logs_; - Vector<SocketLog> output_logs_; - Vector<NodeWarning, 0> warnings_; - Vector<std::string, 0> debug_messages_; - Vector<UsedNamedAttribute, 0> used_named_attributes_; - std::chrono::microseconds exec_time_; - - friend ModifierLog; - - public: - const SocketLog *lookup_socket_log(eNodeSocketInOut in_out, int index) const; - const SocketLog *lookup_socket_log(const bNode &node, const bNodeSocket &socket) const; - void execution_time(std::chrono::microseconds exec_time); - - Span<SocketLog> input_logs() const - { - return input_logs_; - } - - Span<SocketLog> output_logs() const - { - return output_logs_; - } - - Span<NodeWarning> warnings() const - { - return warnings_; - } - - Span<std::string> debug_messages() const - { - return debug_messages_; - } - - Span<UsedNamedAttribute> used_named_attributes() const - { - return used_named_attributes_; - } - - std::chrono::microseconds execution_time() const - { - return exec_time_; - } - - Vector<const GeometryAttributeInfo *> lookup_available_attributes() const; -}; - -/** Contains information that has been logged for one specific tree. */ -class TreeLog { - private: - Map<std::string, destruct_ptr<NodeLog>> node_logs_; - Map<std::string, destruct_ptr<TreeLog>> child_logs_; - - friend ModifierLog; - - public: - const NodeLog *lookup_node_log(StringRef node_name) const; - const NodeLog *lookup_node_log(const bNode &node) const; - const TreeLog *lookup_child_log(StringRef node_name) const; - void foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const; -}; - -/** Contains information about an entire geometry nodes evaluation. */ -class ModifierLog { - private: - LinearAllocator<> allocator_; - /* Allocators of the individual loggers. 
*/ - Vector<std::unique_ptr<LinearAllocator<>>> logger_allocators_; - destruct_ptr<TreeLog> root_tree_logs_; - Vector<destruct_ptr<ValueLog>> logged_values_; - - std::unique_ptr<GeometryValueLog> input_geometry_log_; - std::unique_ptr<GeometryValueLog> output_geometry_log_; - - public: - ModifierLog(GeoLogger &logger); - - const TreeLog &root_tree() const - { - return *root_tree_logs_; - } - - /* Utilities to find logged information for a specific context. */ - static const ModifierLog *find_root_by_node_editor_context(const SpaceNode &snode); - static const TreeLog *find_tree_by_node_editor_context(const SpaceNode &snode); - static const NodeLog *find_node_by_node_editor_context(const SpaceNode &snode, - const bNode &node); - static const NodeLog *find_node_by_node_editor_context(const SpaceNode &snode, - const StringRef node_name); - static const SocketLog *find_socket_by_node_editor_context(const SpaceNode &snode, - const bNode &node, - const bNodeSocket &socket); - static const NodeLog *find_node_by_spreadsheet_editor_context( - const SpaceSpreadsheet &sspreadsheet); - void foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const; - - const GeometryValueLog *input_geometry_log() const; - const GeometryValueLog *output_geometry_log() const; - - private: - using LogByTreeContext = Map<const DTreeContext *, TreeLog *>; - - TreeLog &lookup_or_add_tree_log(LogByTreeContext &log_by_tree_context, - const DTreeContext &tree_context); - NodeLog &lookup_or_add_node_log(LogByTreeContext &log_by_tree_context, DNode node); - SocketLog &lookup_or_add_socket_log(LogByTreeContext &log_by_tree_context, DSocket socket); -}; - -} // namespace blender::nodes::geometry_nodes_eval_log diff --git a/source/blender/nodes/NOD_geometry_nodes_lazy_function.hh b/source/blender/nodes/NOD_geometry_nodes_lazy_function.hh new file mode 100644 index 00000000000..3137dc41857 --- /dev/null +++ b/source/blender/nodes/NOD_geometry_nodes_lazy_function.hh @@ -0,0 +1,178 @@ +/* 
SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** + * For evaluation, geometry node groups are converted to a lazy-function graph. The generated graph + * is cached per node group, so it only has to be generated once after a change. + * + * Node groups are *not* inlined into the lazy-function graph. This could be added in the future as + * it might improve performance in some cases, but generally does not seem necessary. Inlining node + * groups also has disadvantages like making per-node-group caches less useful, resulting in more + * overhead. + * + * Instead, group nodes are just like all other nodes in the lazy-function graph. What makes them + * special is that they reference the lazy-function graph of the group they reference. + * + * During lazy-function graph generation, a mapping between the #bNodeTree and + * #lazy_function::Graph is build that can be used when evaluating the graph (e.g. for logging). + */ + +#include "FN_lazy_function_graph.hh" +#include "FN_lazy_function_graph_executor.hh" + +#include "NOD_geometry_nodes_log.hh" +#include "NOD_multi_function.hh" + +#include "BLI_compute_context.hh" + +struct Object; +struct Depsgraph; + +namespace blender::nodes { + +namespace lf = fn::lazy_function; +using lf::LazyFunction; + +/** + * Data that is passed into geometry nodes evaluation from the modifier. + */ +struct GeoNodesModifierData { + /** Object that is currently evaluated. */ + const Object *self_object = nullptr; + /** Depsgraph that is evaluating the modifier. */ + Depsgraph *depsgraph = nullptr; + /** Optional logger. */ + geo_eval_log::GeoModifierLog *eval_log = nullptr; + /** + * Some nodes should be executed even when their output is not used (e.g. active viewer nodes and + * the node groups they are contained in). + */ + const MultiValueMap<ComputeContextHash, const lf::FunctionNode *> *side_effect_nodes; +}; + +/** + * Custom user data that is passed to every geometry nodes related lazy-function evaluation. 
+ */ +struct GeoNodesLFUserData : public lf::UserData { + /** + * Data from the modifier that is being evaluated. + */ + GeoNodesModifierData *modifier_data = nullptr; + /** + * Current compute context. This is different depending in the (nested) node group that is being + * evaluated. + */ + const ComputeContext *compute_context = nullptr; +}; + +/** + * Contains the mapping between the #bNodeTree and the corresponding lazy-function graph. + * This is *not* a one-to-one mapping. + */ +struct GeometryNodeLazyFunctionGraphMapping { + /** + * Contains mapping of sockets for special nodes like group input and group output. + */ + Map<const bNodeSocket *, lf::Socket *> dummy_socket_map; + /** + * The inputs sockets in the graph. Multiple group input nodes are combined into one in the + * lazy-function graph. + */ + Vector<lf::OutputSocket *> group_input_sockets; + /** + * A mapping used for logging intermediate values. + */ + MultiValueMap<const lf::Socket *, const bNodeSocket *> bsockets_by_lf_socket_map; + /** + * Mappings for some special node types. Generally, this mapping does not exist for all node + * types, so better have more specialized mappings for now. + */ + Map<const bNode *, const lf::FunctionNode *> group_node_map; + Map<const bNode *, const lf::FunctionNode *> viewer_node_map; +}; + +/** + * Data that is cached for every #bNodeTree. + */ +struct GeometryNodesLazyFunctionGraphInfo { + /** + * Allocator used for many things contained in this struct. + */ + LinearAllocator<> allocator; + /** + * Many nodes are implemented as multi-functions. So this contains a mapping from nodes to their + * corresponding multi-functions. + */ + std::unique_ptr<NodeMultiFunctions> node_multi_functions; + /** + * Many lazy-functions are build for the lazy-function graph. Since the graph does not own them, + * we have to keep track of them separately. + */ + Vector<std::unique_ptr<LazyFunction>> functions; + /** + * Many sockets have default values. 
Since those are not owned by the lazy-function graph, we + * have to keep track of them separately. This only owns the values, the memory is owned by the + * allocator above. + */ + Vector<GMutablePointer> values_to_destruct; + /** + * The actual lazy-function graph. + */ + lf::Graph graph; + /** + * Mappings between the lazy-function graph and the #bNodeTree. + */ + GeometryNodeLazyFunctionGraphMapping mapping; + + GeometryNodesLazyFunctionGraphInfo(); + ~GeometryNodesLazyFunctionGraphInfo(); +}; + +/** + * Logs intermediate values from the lazy-function graph evaluation into #GeoModifierLog based on + * the mapping between the lazy-function graph and the corresponding #bNodeTree. + */ +class GeometryNodesLazyFunctionLogger : public fn::lazy_function::GraphExecutor::Logger { + private: + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info_; + + public: + GeometryNodesLazyFunctionLogger(const GeometryNodesLazyFunctionGraphInfo &lf_graph_info); + void log_socket_value(const fn::lazy_function::Socket &lf_socket, + GPointer value, + const fn::lazy_function::Context &context) const override; + void dump_when_outputs_are_missing(const lf::FunctionNode &node, + Span<const lf::OutputSocket *> missing_sockets, + const lf::Context &context) const override; + void dump_when_input_is_set_twice(const lf::InputSocket &target_socket, + const lf::OutputSocket &from_socket, + const lf::Context &context) const override; +}; + +/** + * Tells the lazy-function graph evaluator which nodes have side effects based on the current + * context. For example, the same viewer node can have side effects in one context, but not in + * another (depending on e.g. which tree path is currently viewed in the node editor). 
+ */ +class GeometryNodesLazyFunctionSideEffectProvider + : public fn::lazy_function::GraphExecutor::SideEffectProvider { + private: + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info_; + + public: + GeometryNodesLazyFunctionSideEffectProvider( + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info); + Vector<const lf::FunctionNode *> get_nodes_with_side_effects( + const lf::Context &context) const override; +}; + +/** + * Main function that converts a #bNodeTree into a lazy-function graph. If the graph has been + * generated already, nothing is done. Under some circumstances a valid graph cannot be created. In + * those cases null is returned. + */ +const GeometryNodesLazyFunctionGraphInfo *ensure_geometry_nodes_lazy_function_graph( + const bNodeTree &btree); + +} // namespace blender::nodes diff --git a/source/blender/nodes/NOD_geometry_nodes_log.hh b/source/blender/nodes/NOD_geometry_nodes_log.hh new file mode 100644 index 00000000000..dd4868b6ba0 --- /dev/null +++ b/source/blender/nodes/NOD_geometry_nodes_log.hh @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** + * Many geometry nodes related UI features need access to data produced during evaluation. Not only + * is the final output required but also the intermediate results. Those features include attribute + * search, node warnings, socket inspection and the viewer node. + * + * This file provides the system for logging data during evaluation and accessing the data after + * evaluation. Geometry nodes is executed by a modifier, therefore the "root" of logging is + * #GeoModifierLog which will contain all data generated in a modifier. + * + * The system makes a distinction between "loggers" and the "log": + * - Logger (#GeoTreeLogger): Is used during geometry nodes evaluation. Each thread logs data + * independently to avoid communication between threads. Logging should generally be fast. + * Generally, the logged data is just dumped into simple containers. 
Any processing of the data + * happens later if necessary. This is important for performance, because in practice, most of + * the logged data is never used again. So any processing of the data is likely to be a waste of + * resources. + * - Log (#GeoTreeLog, #GeoNodeLog): Those are used when accessing logged data in UI code. They + * contain and cache preprocessed data produced during logging. The log combines data from all + * thread-local loggers to provide simple access. Importantly, the (preprocessed) log is only + * created when it is actually used by UI code. + */ + +#include <chrono> + +#include "BLI_compute_context.hh" +#include "BLI_enumerable_thread_specific.hh" +#include "BLI_generic_pointer.hh" +#include "BLI_multi_value_map.hh" + +#include "BKE_attribute.h" +#include "BKE_geometry_set.hh" + +#include "FN_field.hh" + +#include "DNA_node_types.h" + +struct SpaceNode; +struct SpaceSpreadsheet; +struct NodesModifierData; + +namespace blender::nodes::geo_eval_log { + +using fn::GField; + +enum class NodeWarningType { + Error, + Warning, + Info, +}; + +struct NodeWarning { + NodeWarningType type; + std::string message; +}; + +enum class NamedAttributeUsage { + None = 0, + Read = 1 << 0, + Write = 1 << 1, + Remove = 1 << 2, +}; +ENUM_OPERATORS(NamedAttributeUsage, NamedAttributeUsage::Remove); + +/** + * Values of different types are logged differently. This is necessary because some types are so + * simple that we can log them entirely (e.g. `int`), while we don't want to log all intermediate + * geometries in their entirety. + * + * #ValueLog is a base class for the different ways we log values. + */ +class ValueLog { + public: + virtual ~ValueLog() = default; +}; + +/** + * Simplest logger. It just stores a copy of the entire value. This is used for most simple types + * like `int`. + */ +class GenericValueLog : public ValueLog { + public: + /** + * This is owning the value, but not the memory. 
+ */ + GMutablePointer value; + + GenericValueLog(const GMutablePointer value) : value(value) + { + } + + ~GenericValueLog(); +}; + +/** + * Fields are not logged entirely, because they might contain arbitrarily large data (e.g. + * geometries that are sampled). Instead, only the data needed for UI features is logged. + */ +class FieldInfoLog : public ValueLog { + public: + const CPPType &type; + Vector<std::string> input_tooltips; + + FieldInfoLog(const GField &field); +}; + +struct GeometryAttributeInfo { + std::string name; + /** Can be empty when #name does not actually exist on a geometry yet. */ + std::optional<eAttrDomain> domain; + std::optional<eCustomDataType> data_type; +}; + +/** + * Geometries are not logged entirely, because that would result in a lot of time and memory + * overhead. Instead, only the data needed for UI features is logged. + */ +class GeometryInfoLog : public ValueLog { + public: + Vector<GeometryAttributeInfo> attributes; + Vector<GeometryComponentType> component_types; + + struct MeshInfo { + int verts_num, edges_num, faces_num; + }; + struct CurveInfo { + int splines_num; + }; + struct PointCloudInfo { + int points_num; + }; + struct InstancesInfo { + int instances_num; + }; + struct EditDataInfo { + bool has_deformed_positions; + bool has_deform_matrices; + }; + + std::optional<MeshInfo> mesh_info; + std::optional<CurveInfo> curve_info; + std::optional<PointCloudInfo> pointcloud_info; + std::optional<InstancesInfo> instances_info; + std::optional<EditDataInfo> edit_data_info; + + GeometryInfoLog(const GeometrySet &geometry_set); +}; + +/** + * Data logged by a viewer node when it is executed. In this case, we do want to log the entire + * geometry. + */ +class ViewerNodeLog { + public: + GeometrySet geometry; + GField field; +}; + +using Clock = std::chrono::steady_clock; +using TimePoint = Clock::time_point; + +/** + * Logs all data for a specific geometry node tree in a specific context. 
When the same node group + * is used in multiple times each instantiation will have a separate logger. + */ +class GeoTreeLogger { + public: + std::optional<ComputeContextHash> parent_hash; + std::optional<std::string> group_node_name; + Vector<ComputeContextHash> children_hashes; + + LinearAllocator<> *allocator = nullptr; + + struct WarningWithNode { + std::string node_name; + NodeWarning warning; + }; + struct SocketValueLog { + std::string node_name; + std::string socket_identifier; + destruct_ptr<ValueLog> value; + }; + struct NodeExecutionTime { + std::string node_name; + TimePoint start; + TimePoint end; + }; + struct ViewerNodeLogWithNode { + std::string node_name; + destruct_ptr<ViewerNodeLog> viewer_log; + }; + struct AttributeUsageWithNode { + std::string node_name; + std::string attribute_name; + NamedAttributeUsage usage; + }; + struct DebugMessage { + std::string node_name; + std::string message; + }; + + Vector<WarningWithNode> node_warnings; + Vector<SocketValueLog> input_socket_values; + Vector<SocketValueLog> output_socket_values; + Vector<NodeExecutionTime> node_execution_times; + Vector<ViewerNodeLogWithNode, 0> viewer_node_logs; + Vector<AttributeUsageWithNode, 0> used_named_attributes; + Vector<DebugMessage, 0> debug_messages; + + GeoTreeLogger(); + ~GeoTreeLogger(); + + void log_value(const bNode &node, const bNodeSocket &socket, GPointer value); + void log_viewer_node(const bNode &viewer_node, const GeometrySet &geometry, const GField &field); +}; + +/** + * Contains data that has been logged for a specific node in a context. So when the node is in a + * node group that is used multiple times, there will be a different #GeoNodeLog for every + * instance. + * + * By default, not all of the info below is valid. A #GeoTreeLog::ensure_* method has to be called + * first. + */ +class GeoNodeLog { + public: + /** Warnings generated for that node. */ + Vector<NodeWarning> warnings; + /** + * Time spend in that node. 
For node groups this is the sum of the run times of the nodes + * inside. + */ + std::chrono::nanoseconds run_time{0}; + /** Maps from socket identifiers to their values. */ + Map<std::string, ValueLog *> input_values_; + Map<std::string, ValueLog *> output_values_; + /** Maps from attribute name to their usage flags. */ + Map<std::string, NamedAttributeUsage> used_named_attributes; + /** Messages that are used for debugging purposes during development. */ + Vector<std::string> debug_messages; + + GeoNodeLog(); + ~GeoNodeLog(); +}; + +class GeoModifierLog; + +/** + * Contains data that has been logged for a specific node group in a context. If the same node + * group is used multiple times, there will be a different #GeoTreeLog for every instance. + * + * This contains lazily evaluated data. Call the corresponding `ensure_*` methods before accessing + * data. + */ +class GeoTreeLog { + private: + GeoModifierLog *modifier_log_; + Vector<GeoTreeLogger *> tree_loggers_; + VectorSet<ComputeContextHash> children_hashes_; + bool reduced_node_warnings_ = false; + bool reduced_node_run_times_ = false; + bool reduced_socket_values_ = false; + bool reduced_viewer_node_logs_ = false; + bool reduced_existing_attributes_ = false; + bool reduced_used_named_attributes_ = false; + bool reduced_debug_messages_ = false; + + public: + Map<std::string, GeoNodeLog> nodes; + Map<std::string, ViewerNodeLog *, 0> viewer_node_logs; + Vector<NodeWarning> all_warnings; + std::chrono::nanoseconds run_time_sum{0}; + Vector<const GeometryAttributeInfo *> existing_attributes; + Map<std::string, NamedAttributeUsage> used_named_attributes; + + GeoTreeLog(GeoModifierLog *modifier_log, Vector<GeoTreeLogger *> tree_loggers); + ~GeoTreeLog(); + + void ensure_node_warnings(); + void ensure_node_run_time(); + void ensure_socket_values(); + void ensure_viewer_node_logs(); + void ensure_existing_attributes(); + void ensure_used_named_attributes(); + void ensure_debug_messages(); + + ValueLog 
*find_socket_value_log(const bNodeSocket &query_socket); +}; + +/** + * There is one #GeoModifierLog for every modifier that evaluates geometry nodes. It contains all + * the loggers that are used during evaluation as well as the preprocessed logs that are used by UI + * code. + */ +class GeoModifierLog { + private: + /** Data that is stored for each thread. */ + struct LocalData { + /** Each thread has its own allocator. */ + LinearAllocator<> allocator; + /** + * Store a separate #GeoTreeLogger for each instance of the corresponding node group (e.g. + * when the same node group is used multiple times). + */ + Map<ComputeContextHash, destruct_ptr<GeoTreeLogger>> tree_logger_by_context; + }; + + /** Container for all thread-local data. */ + threading::EnumerableThreadSpecific<LocalData> data_per_thread_; + /** + * A #GeoTreeLog for every compute context. Those are created lazily when requested by UI code. + */ + Map<ComputeContextHash, std::unique_ptr<GeoTreeLog>> tree_logs_; + + public: + GeoModifierLog(); + ~GeoModifierLog(); + + /** + * Get a thread-local logger for the current node tree. + */ + GeoTreeLogger &get_local_tree_logger(const ComputeContext &compute_context); + + /** + * Get a log a specific node tree instance. + */ + GeoTreeLog &get_tree_log(const ComputeContextHash &compute_context_hash); + + /** + * Utility accessor to logged data. 
+ */ + static GeoTreeLog *get_tree_log_for_node_editor(const SpaceNode &snode); + static const ViewerNodeLog *find_viewer_node_log_for_spreadsheet( + const SpaceSpreadsheet &sspreadsheet); +}; + +} // namespace blender::nodes::geo_eval_log diff --git a/source/blender/nodes/NOD_multi_function.hh b/source/blender/nodes/NOD_multi_function.hh index 21a94d9192b..676bf03927e 100644 --- a/source/blender/nodes/NOD_multi_function.hh +++ b/source/blender/nodes/NOD_multi_function.hh @@ -6,8 +6,6 @@ #include "DNA_node_types.h" -#include "NOD_derived_node_tree.hh" - namespace blender::nodes { using namespace fn::multi_function_types; @@ -60,9 +58,9 @@ class NodeMultiFunctions { Map<const bNode *, Item> map_; public: - NodeMultiFunctions(const DerivedNodeTree &tree); + NodeMultiFunctions(const bNodeTree &tree); - const Item &try_get(const DNode &node) const; + const Item &try_get(const bNode &node) const; }; /* -------------------------------------------------------------------- */ @@ -107,10 +105,10 @@ inline void NodeMultiFunctionBuilder::construct_and_set_matching_fn(Args &&...ar /** \name #NodeMultiFunctions Inline Methods * \{ */ -inline const NodeMultiFunctions::Item &NodeMultiFunctions::try_get(const DNode &node) const +inline const NodeMultiFunctions::Item &NodeMultiFunctions::try_get(const bNode &node) const { static Item empty_item; - const Item *item = map_.lookup_ptr(node.bnode()); + const Item *item = map_.lookup_ptr(&node); if (item == nullptr) { return empty_item; } diff --git a/source/blender/nodes/geometry/node_geometry_exec.cc b/source/blender/nodes/geometry/node_geometry_exec.cc index 58ded7aadd2..ef4daf94bbe 100644 --- a/source/blender/nodes/geometry/node_geometry_exec.cc +++ b/source/blender/nodes/geometry/node_geometry_exec.cc @@ -4,3 +4,4 @@ #include "NOD_geometry_exec.hh" BLI_CPP_TYPE_MAKE(GeometrySet, GeometrySet, CPPTypeFlags::Printable); +BLI_CPP_TYPE_MAKE(GeometrySetVector, blender::Vector<GeometrySet>, CPPTypeFlags::None); diff --git 
a/source/blender/nodes/geometry/nodes/node_geo_boolean.cc b/source/blender/nodes/geometry/nodes/node_geo_boolean.cc index a6af74645b6..c8c58945bce 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_boolean.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_boolean.cc @@ -93,7 +93,7 @@ static void node_geo_exec(GeoNodeExecParams params) /* The instance transform matrices are owned by the instance group, so we have to * keep all of them around for use during the boolean operation. */ Vector<bke::GeometryInstanceGroup> set_groups; - Vector<GeometrySet> geometry_sets = params.extract_multi_input<GeometrySet>("Mesh 2"); + Vector<GeometrySet> geometry_sets = params.extract_input<Vector<GeometrySet>>("Mesh 2"); for (const GeometrySet &geometry_set : geometry_sets) { bke::geometry_set_gather_instances(geometry_set, set_groups); } diff --git a/source/blender/nodes/geometry/nodes/node_geo_curve_trim.cc b/source/blender/nodes/geometry/nodes/node_geo_curve_trim.cc index 443f67be421..0d3ae47e712 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_curve_trim.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_curve_trim.cc @@ -9,12 +9,12 @@ #include "NOD_socket_search_link.hh" +#include "GEO_trim_curves.hh" + #include "node_geometry_util.hh" namespace blender::nodes::node_geo_curve_trim_cc { -using blender::attribute_math::mix2; - NODE_STORAGE_FUNCS(NodeGeometryCurveTrim) static void node_declare(NodeDeclarationBuilder &b) @@ -108,394 +108,6 @@ static void node_gather_link_searches(GatherLinkSearchOpParams ¶ms) } } -struct TrimLocation { - /* Control point index at the start side of the trim location. */ - int left_index; - /* Control point index at the end of the trim location's segment. */ - int right_index; - /* The factor between the left and right indices. 
*/ - float factor; -}; - -template<typename T> -static void shift_slice_to_start(MutableSpan<T> data, const int start_index, const int num) -{ - BLI_assert(start_index + num - 1 <= data.size()); - memmove(data.data(), &data[start_index], sizeof(T) * num); -} - -/* Shift slice to start of span and modifies start and end data. */ -template<typename T> -static void linear_trim_data(const TrimLocation &start, - const TrimLocation &end, - MutableSpan<T> data) -{ - const int num = end.right_index - start.left_index + 1; - - if (start.left_index > 0) { - shift_slice_to_start<T>(data, start.left_index, num); - } - - const T start_data = mix2<T>(start.factor, data.first(), data[1]); - const T end_data = mix2<T>(end.factor, data[num - 2], data[num - 1]); - - data.first() = start_data; - data[num - 1] = end_data; -} - -/** - * Identical operation as #linear_trim_data, but copy data to a new #MutableSpan rather than - * modifying the original data. - */ -template<typename T> -static void linear_trim_to_output_data(const TrimLocation &start, - const TrimLocation &end, - Span<T> src, - MutableSpan<T> dst) -{ - const int num = end.right_index - start.left_index + 1; - - const T start_data = mix2<T>(start.factor, src[start.left_index], src[start.right_index]); - const T end_data = mix2<T>(end.factor, src[end.left_index], src[end.right_index]); - - dst.copy_from(src.slice(start.left_index, num)); - dst.first() = start_data; - dst.last() = end_data; -} - -/* Look up the control points to the left and right of factor, and get the factor between them. */ -static TrimLocation lookup_control_point_position(const Spline::LookupResult &lookup, - const BezierSpline &spline) -{ - Span<int> offsets = spline.control_point_offsets(); - - const int *offset = std::lower_bound(offsets.begin(), offsets.end(), lookup.evaluated_index); - const int index = offset - offsets.begin(); - - const int left = offsets[index] > lookup.evaluated_index ? 
index - 1 : index; - const int right = left == (spline.size() - 1) ? 0 : left + 1; - - const float offset_in_segment = lookup.evaluated_index + lookup.factor - offsets[left]; - const int segment_eval_num = offsets[left + 1] - offsets[left]; - const float factor = std::clamp(offset_in_segment / segment_eval_num, 0.0f, 1.0f); - - return {left, right, factor}; -} - -static void trim_poly_spline(Spline &spline, - const Spline::LookupResult &start_lookup, - const Spline::LookupResult &end_lookup) -{ - /* Poly splines have a 1 to 1 mapping between control points and evaluated points. */ - const TrimLocation start = { - start_lookup.evaluated_index, start_lookup.next_evaluated_index, start_lookup.factor}; - const TrimLocation end = { - end_lookup.evaluated_index, end_lookup.next_evaluated_index, end_lookup.factor}; - - const int num = end.right_index - start.left_index + 1; - - linear_trim_data<float3>(start, end, spline.positions()); - linear_trim_data<float>(start, end, spline.radii()); - linear_trim_data<float>(start, end, spline.tilts()); - - spline.attributes.foreach_attribute( - [&](const AttributeIDRef &attribute_id, const AttributeMetaData &UNUSED(meta_data)) { - std::optional<GMutableSpan> src = spline.attributes.get_for_write(attribute_id); - BLI_assert(src); - attribute_math::convert_to_static_type(src->type(), [&](auto dummy) { - using T = decltype(dummy); - linear_trim_data<T>(start, end, src->typed<T>()); - }); - return true; - }, - ATTR_DOMAIN_POINT); - - spline.resize(num); -} - -/** - * Trim NURB splines by converting to a poly spline. - */ -static PolySpline trim_nurbs_spline(const Spline &spline, - const Spline::LookupResult &start_lookup, - const Spline::LookupResult &end_lookup) -{ - /* Since this outputs a poly spline, the evaluated indices are the control point indices. 
*/ - const TrimLocation start = { - start_lookup.evaluated_index, start_lookup.next_evaluated_index, start_lookup.factor}; - const TrimLocation end = { - end_lookup.evaluated_index, end_lookup.next_evaluated_index, end_lookup.factor}; - - const int num = end.right_index - start.left_index + 1; - - /* Create poly spline and copy trimmed data to it. */ - PolySpline new_spline; - new_spline.resize(num); - - /* Copy generic attribute data. */ - spline.attributes.foreach_attribute( - [&](const AttributeIDRef &attribute_id, const AttributeMetaData &meta_data) { - std::optional<GSpan> src = spline.attributes.get_for_read(attribute_id); - BLI_assert(src); - if (!new_spline.attributes.create(attribute_id, meta_data.data_type)) { - BLI_assert_unreachable(); - return false; - } - std::optional<GMutableSpan> dst = new_spline.attributes.get_for_write(attribute_id); - BLI_assert(dst); - - attribute_math::convert_to_static_type(src->type(), [&](auto dummy) { - using T = decltype(dummy); - VArray<T> eval_data = spline.interpolate_to_evaluated<T>(src->typed<T>()); - linear_trim_to_output_data<T>( - start, end, eval_data.get_internal_span(), dst->typed<T>()); - }); - return true; - }, - ATTR_DOMAIN_POINT); - - linear_trim_to_output_data<float3>( - start, end, spline.evaluated_positions(), new_spline.positions()); - - VArray<float> evaluated_radii = spline.interpolate_to_evaluated(spline.radii()); - linear_trim_to_output_data<float>( - start, end, evaluated_radii.get_internal_span(), new_spline.radii()); - - VArray<float> evaluated_tilts = spline.interpolate_to_evaluated(spline.tilts()); - linear_trim_to_output_data<float>( - start, end, evaluated_tilts.get_internal_span(), new_spline.tilts()); - - return new_spline; -} - -/** - * Trim Bezier splines by adjusting the first and last handles - * and control points to maintain the original shape. 
- */ -static void trim_bezier_spline(Spline &spline, - const Spline::LookupResult &start_lookup, - const Spline::LookupResult &end_lookup) -{ - BezierSpline &bezier_spline = static_cast<BezierSpline &>(spline); - - const TrimLocation start = lookup_control_point_position(start_lookup, bezier_spline); - TrimLocation end = lookup_control_point_position(end_lookup, bezier_spline); - - const Span<int> control_offsets = bezier_spline.control_point_offsets(); - - /* The number of control points in the resulting spline. */ - const int num = end.right_index - start.left_index + 1; - - /* Trim the spline attributes. Done before end.factor recalculation as it needs - * the original end.factor value. */ - linear_trim_data<float>(start, end, bezier_spline.radii()); - linear_trim_data<float>(start, end, bezier_spline.tilts()); - spline.attributes.foreach_attribute( - [&](const AttributeIDRef &attribute_id, const AttributeMetaData &UNUSED(meta_data)) { - std::optional<GMutableSpan> src = spline.attributes.get_for_write(attribute_id); - BLI_assert(src); - attribute_math::convert_to_static_type(src->type(), [&](auto dummy) { - using T = decltype(dummy); - linear_trim_data<T>(start, end, src->typed<T>()); - }); - return true; - }, - ATTR_DOMAIN_POINT); - - /* Recalculate end.factor if the `num` is two, because the adjustment in the - * position of the control point of the spline to the left of the new end point will change the - * factor between them. 
*/ - if (num == 2) { - if (start_lookup.factor == 1.0f) { - end.factor = 0.0f; - } - else { - end.factor = (end_lookup.evaluated_index + end_lookup.factor - - (start_lookup.evaluated_index + start_lookup.factor)) / - (control_offsets[end.right_index] - - (start_lookup.evaluated_index + start_lookup.factor)); - end.factor = std::clamp(end.factor, 0.0f, 1.0f); - } - } - - BezierSpline::InsertResult start_point = bezier_spline.calculate_segment_insertion( - start.left_index, start.right_index, start.factor); - - /* Update the start control point parameters so they are used calculating the new end point. */ - bezier_spline.positions()[start.left_index] = start_point.position; - bezier_spline.handle_positions_right()[start.left_index] = start_point.right_handle; - bezier_spline.handle_positions_left()[start.right_index] = start_point.handle_next; - - const BezierSpline::InsertResult end_point = bezier_spline.calculate_segment_insertion( - end.left_index, end.right_index, end.factor); - - /* If `num` is two, then the start point right handle needs to change to reflect the end point - * previous handle update. */ - if (num == 2) { - start_point.right_handle = end_point.handle_prev; - } - - /* Shift control point position data to start at beginning of array. 
*/ - if (start.left_index > 0) { - shift_slice_to_start(bezier_spline.positions(), start.left_index, num); - shift_slice_to_start(bezier_spline.handle_positions_left(), start.left_index, num); - shift_slice_to_start(bezier_spline.handle_positions_right(), start.left_index, num); - } - - bezier_spline.positions().first() = start_point.position; - bezier_spline.positions()[num - 1] = end_point.position; - - bezier_spline.handle_positions_left().first() = start_point.left_handle; - bezier_spline.handle_positions_left()[num - 1] = end_point.left_handle; - - bezier_spline.handle_positions_right().first() = start_point.right_handle; - bezier_spline.handle_positions_right()[num - 1] = end_point.right_handle; - - /* If there is at least one control point between the endpoints, update the control - * point handle to the right of the start point and to the left of the end point. */ - if (num > 2) { - bezier_spline.handle_positions_left()[start.right_index - start.left_index] = - start_point.handle_next; - bezier_spline.handle_positions_right()[end.left_index - start.left_index] = - end_point.handle_prev; - } - - bezier_spline.resize(num); -} - -static void trim_spline(SplinePtr &spline, - const Spline::LookupResult start, - const Spline::LookupResult end) -{ - switch (spline->type()) { - case CURVE_TYPE_BEZIER: - trim_bezier_spline(*spline, start, end); - break; - case CURVE_TYPE_POLY: - trim_poly_spline(*spline, start, end); - break; - case CURVE_TYPE_NURBS: - spline = std::make_unique<PolySpline>(trim_nurbs_spline(*spline, start, end)); - break; - case CURVE_TYPE_CATMULL_ROM: - BLI_assert_unreachable(); - spline = {}; - } - spline->mark_cache_invalid(); -} - -template<typename T> -static void to_single_point_data(const TrimLocation &trim, MutableSpan<T> data) -{ - data.first() = mix2<T>(trim.factor, data[trim.left_index], data[trim.right_index]); -} -template<typename T> -static void to_single_point_data(const TrimLocation &trim, Span<T> src, MutableSpan<T> dst) -{ - 
dst.first() = mix2<T>(trim.factor, src[trim.left_index], src[trim.right_index]); -} - -static void to_single_point_bezier(Spline &spline, const Spline::LookupResult &lookup) -{ - BezierSpline &bezier = static_cast<BezierSpline &>(spline); - - const TrimLocation trim = lookup_control_point_position(lookup, bezier); - - const BezierSpline::InsertResult new_point = bezier.calculate_segment_insertion( - trim.left_index, trim.right_index, trim.factor); - bezier.positions().first() = new_point.position; - bezier.handle_types_left().first() = BEZIER_HANDLE_FREE; - bezier.handle_types_right().first() = BEZIER_HANDLE_FREE; - bezier.handle_positions_left().first() = new_point.left_handle; - bezier.handle_positions_right().first() = new_point.right_handle; - - to_single_point_data<float>(trim, bezier.radii()); - to_single_point_data<float>(trim, bezier.tilts()); - spline.attributes.foreach_attribute( - [&](const AttributeIDRef &attribute_id, const AttributeMetaData &UNUSED(meta_data)) { - std::optional<GMutableSpan> data = spline.attributes.get_for_write(attribute_id); - attribute_math::convert_to_static_type(data->type(), [&](auto dummy) { - using T = decltype(dummy); - to_single_point_data<T>(trim, data->typed<T>()); - }); - return true; - }, - ATTR_DOMAIN_POINT); - spline.resize(1); -} - -static void to_single_point_poly(Spline &spline, const Spline::LookupResult &lookup) -{ - const TrimLocation trim{lookup.evaluated_index, lookup.next_evaluated_index, lookup.factor}; - - to_single_point_data<float3>(trim, spline.positions()); - to_single_point_data<float>(trim, spline.radii()); - to_single_point_data<float>(trim, spline.tilts()); - spline.attributes.foreach_attribute( - [&](const AttributeIDRef &attribute_id, const AttributeMetaData &UNUSED(meta_data)) { - std::optional<GMutableSpan> data = spline.attributes.get_for_write(attribute_id); - attribute_math::convert_to_static_type(data->type(), [&](auto dummy) { - using T = decltype(dummy); - to_single_point_data<T>(trim, 
data->typed<T>()); - }); - return true; - }, - ATTR_DOMAIN_POINT); - spline.resize(1); -} - -static PolySpline to_single_point_nurbs(const Spline &spline, const Spline::LookupResult &lookup) -{ - /* Since this outputs a poly spline, the evaluated indices are the control point indices. */ - const TrimLocation trim{lookup.evaluated_index, lookup.next_evaluated_index, lookup.factor}; - - /* Create poly spline and copy trimmed data to it. */ - PolySpline new_spline; - new_spline.resize(1); - - spline.attributes.foreach_attribute( - [&](const AttributeIDRef &attribute_id, const AttributeMetaData &meta_data) { - new_spline.attributes.create(attribute_id, meta_data.data_type); - std::optional<GSpan> src = spline.attributes.get_for_read(attribute_id); - std::optional<GMutableSpan> dst = new_spline.attributes.get_for_write(attribute_id); - attribute_math::convert_to_static_type(src->type(), [&](auto dummy) { - using T = decltype(dummy); - VArray<T> eval_data = spline.interpolate_to_evaluated<T>(src->typed<T>()); - to_single_point_data<T>(trim, eval_data.get_internal_span(), dst->typed<T>()); - }); - return true; - }, - ATTR_DOMAIN_POINT); - - to_single_point_data<float3>(trim, spline.evaluated_positions(), new_spline.positions()); - - VArray<float> evaluated_radii = spline.interpolate_to_evaluated(spline.radii()); - to_single_point_data<float>(trim, evaluated_radii.get_internal_span(), new_spline.radii()); - - VArray<float> evaluated_tilts = spline.interpolate_to_evaluated(spline.tilts()); - to_single_point_data<float>(trim, evaluated_tilts.get_internal_span(), new_spline.tilts()); - - return new_spline; -} - -static void to_single_point_spline(SplinePtr &spline, const Spline::LookupResult &lookup) -{ - switch (spline->type()) { - case CURVE_TYPE_BEZIER: - to_single_point_bezier(*spline, lookup); - break; - case CURVE_TYPE_POLY: - to_single_point_poly(*spline, lookup); - break; - case CURVE_TYPE_NURBS: - spline = std::make_unique<PolySpline>(to_single_point_nurbs(*spline, 
lookup)); - break; - case CURVE_TYPE_CATMULL_ROM: - BLI_assert_unreachable(); - spline = {}; - } -} - static void geometry_set_curve_trim(GeometrySet &geometry_set, const GeometryNodeCurveSampleMode mode, Field<float> &start_field, @@ -505,68 +117,49 @@ static void geometry_set_curve_trim(GeometrySet &geometry_set, return; } const Curves &src_curves_id = *geometry_set.get_curves_for_read(); - const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(src_curves_id.geometry); + const bke::CurvesGeometry &src_curves = bke::CurvesGeometry::wrap(src_curves_id.geometry); + if (src_curves.curves_num() == 0) { + return; + } - bke::CurvesFieldContext field_context{curves, ATTR_DOMAIN_CURVE}; - fn::FieldEvaluator evaluator{field_context, curves.curves_num()}; + bke::CurvesFieldContext field_context{src_curves, ATTR_DOMAIN_CURVE}; + fn::FieldEvaluator evaluator{field_context, src_curves.curves_num()}; evaluator.add(start_field); evaluator.add(end_field); evaluator.evaluate(); const VArray<float> starts = evaluator.get_evaluated<float>(0); const VArray<float> ends = evaluator.get_evaluated<float>(1); - std::unique_ptr<CurveEval> curve = curves_to_curve_eval(src_curves_id); - MutableSpan<SplinePtr> splines = curve->splines(); - - threading::parallel_for(splines.index_range(), 128, [&](IndexRange range) { - for (const int i : range) { - SplinePtr &spline = splines[i]; - - /* Currently trimming cyclic splines is not supported. It could be in the future though. */ - if (spline->is_cyclic()) { - continue; - } - - if (spline->evaluated_edges_num() == 0) { - continue; - } - - const float length = spline->length(); - if (length == 0.0f) { - continue; - } - - const float start = starts[i]; - const float end = ends[i]; - - /* When the start and end samples are reversed, instead of implicitly reversing the spline - * or switching the parameters, create a single point spline with the end sample point. 
*/ - if (end <= start) { - if (mode == GEO_NODE_CURVE_SAMPLE_LENGTH) { - to_single_point_spline(spline, - spline->lookup_evaluated_length(std::clamp(start, 0.0f, length))); - } - else { - to_single_point_spline(spline, - spline->lookup_evaluated_factor(std::clamp(start, 0.0f, 1.0f))); - } - continue; - } - - if (mode == GEO_NODE_CURVE_SAMPLE_LENGTH) { - trim_spline(spline, - spline->lookup_evaluated_length(std::clamp(start, 0.0f, length)), - spline->lookup_evaluated_length(std::clamp(end, 0.0f, length))); - } - else { - trim_spline(spline, - spline->lookup_evaluated_factor(std::clamp(start, 0.0f, 1.0f)), - spline->lookup_evaluated_factor(std::clamp(end, 0.0f, 1.0f))); - } + const VArray<bool> cyclic = src_curves.cyclic(); + + /* If node length input is on form [0, 1] instead of [0, length]*/ + const bool normalized_length_lookup = mode == GEO_NODE_CURVE_SAMPLE_FACTOR; + + /* Stack start + end field. */ + Vector<float> length_factors(src_curves.curves_num() * 2); + Vector<int64_t> lookup_indices(src_curves.curves_num() * 2); + threading::parallel_for(src_curves.curves_range(), 512, [&](IndexRange curve_range) { + for (const int64_t curve_i : curve_range) { + const bool negative_trim = !cyclic[curve_i] && starts[curve_i] > ends[curve_i]; + length_factors[curve_i] = starts[curve_i]; + length_factors[curve_i + src_curves.curves_num()] = negative_trim ? starts[curve_i] : + ends[curve_i]; + lookup_indices[curve_i] = curve_i; + lookup_indices[curve_i + src_curves.curves_num()] = curve_i; } }); - Curves *dst_curves_id = curve_eval_to_curves(*curve); + /* Create curve trim lookup table. 
*/ + Array<bke::curves::CurvePoint, 12> point_lookups = geometry::lookup_curve_points( + src_curves, length_factors, lookup_indices, normalized_length_lookup); + + bke::CurvesGeometry dst_curves = geometry::trim_curves( + src_curves, + src_curves.curves_range().as_span(), + point_lookups.as_span().slice(0, src_curves.curves_num()), + point_lookups.as_span().slice(src_curves.curves_num(), src_curves.curves_num())); + + Curves *dst_curves_id = bke::curves_new_nomain(std::move(dst_curves)); bke::curves_copy_parameters(src_curves_id, *dst_curves_id); geometry_set.replace_curves(dst_curves_id); } diff --git a/source/blender/nodes/geometry/nodes/node_geo_extrude_mesh.cc b/source/blender/nodes/geometry/nodes/node_geo_extrude_mesh.cc index 64779494e3e..c7f4b78946d 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_extrude_mesh.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_extrude_mesh.cc @@ -94,24 +94,24 @@ static void expand_mesh(Mesh &mesh, const int loop_expand) { if (vert_expand != 0) { - CustomData_duplicate_referenced_layers(&mesh.vdata, mesh.totvert); + const int old_verts_num = mesh.totvert; mesh.totvert += vert_expand; - CustomData_realloc(&mesh.vdata, mesh.totvert); + CustomData_realloc(&mesh.vdata, old_verts_num, mesh.totvert); } if (edge_expand != 0) { - CustomData_duplicate_referenced_layers(&mesh.edata, mesh.totedge); + const int old_edges_num = mesh.totedge; mesh.totedge += edge_expand; - CustomData_realloc(&mesh.edata, mesh.totedge); + CustomData_realloc(&mesh.edata, old_edges_num, mesh.totedge); } if (poly_expand != 0) { - CustomData_duplicate_referenced_layers(&mesh.pdata, mesh.totpoly); + const int old_polys_num = mesh.totpoly; mesh.totpoly += poly_expand; - CustomData_realloc(&mesh.pdata, mesh.totpoly); + CustomData_realloc(&mesh.pdata, old_polys_num, mesh.totpoly); } if (loop_expand != 0) { - CustomData_duplicate_referenced_layers(&mesh.ldata, mesh.totloop); + const int old_loops_num = mesh.totloop; mesh.totloop += loop_expand; - 
CustomData_realloc(&mesh.ldata, mesh.totloop); + CustomData_realloc(&mesh.ldata, old_loops_num, mesh.totloop); } } @@ -147,6 +147,7 @@ static MEdge new_edge(const int v1, const int v2) MEdge edge; edge.v1 = v1; edge.v2 = v2; + edge.crease = 0; edge.flag = (ME_EDGEDRAW | ME_EDGERENDER); return edge; } @@ -156,6 +157,7 @@ static MEdge new_loose_edge(const int v1, const int v2) MEdge edge; edge.v1 = v1; edge.v2 = v2; + edge.crease = 0; edge.flag = ME_LOOSEEDGE; return edge; } @@ -286,6 +288,7 @@ static void extrude_mesh_vertices(Mesh &mesh, for (const int i : range) { const float3 offset = offsets[selection[i]]; add_v3_v3(new_verts[i].co, offset); + new_verts[i].flag = 0; } }); }); @@ -608,6 +611,7 @@ static void extrude_mesh_edges(Mesh &mesh, threading::parallel_for(new_verts.index_range(), 1024, [&](const IndexRange range) { for (const int i : range) { add_v3_v3(new_verts[i].co, offset); + new_verts[i].flag = 0; } }); } @@ -615,6 +619,7 @@ static void extrude_mesh_edges(Mesh &mesh, threading::parallel_for(new_verts.index_range(), 1024, [&](const IndexRange range) { for (const int i : range) { add_v3_v3(new_verts[i].co, vert_offsets[new_vert_indices[i]]); + new_verts[i].flag = 0; } }); } @@ -996,6 +1001,10 @@ static void extrude_mesh_face_regions(Mesh &mesh, }); } + for (MVert &vert : verts.slice(new_vert_range)) { + vert.flag = 0; + } + MutableSpan<int> vert_orig_indices = get_orig_index_layer(mesh, ATTR_DOMAIN_POINT); vert_orig_indices.slice(new_vert_range).fill(ORIGINDEX_NONE); @@ -1253,6 +1262,7 @@ static void extrude_individual_mesh_faces(Mesh &mesh, const IndexRange poly_corner_range = selected_corner_range(index_offsets, i_selection); for (MVert &vert : new_verts.slice(poly_corner_range)) { add_v3_v3(vert.co, poly_offset[poly_selection[i_selection]]); + vert.flag = 0; } } }); diff --git a/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc b/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc index 1f84f8f288d..8e64209a418 
100644 --- a/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc @@ -12,7 +12,7 @@ static void node_declare(NodeDeclarationBuilder &b) static void node_geo_exec(GeoNodeExecParams params) { - Vector<GeometrySet> geometries = params.extract_multi_input<GeometrySet>("Geometry"); + Vector<GeometrySet> geometries = params.extract_input<Vector<GeometrySet>>("Geometry"); GeometrySet instances_geometry; InstancesComponent &instances_component = instances_geometry.get_component_for_write<InstancesComponent>(); diff --git a/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc b/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc index 122c7b352c7..da09d3650e3 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc @@ -88,7 +88,7 @@ static void node_geo_exec(GeoNodeExecParams params) return; } - params.used_named_attribute(name, eNamedAttrUsage::Read); + params.used_named_attribute(name, NamedAttributeUsage::Read); switch (data_type) { case CD_PROP_FLOAT: diff --git a/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc b/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc index 023d7a32a61..9fdf7fe7d31 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc @@ -177,7 +177,7 @@ static void join_component_type(Span<GeometrySet> src_geometry_sets, GeometrySet static void node_geo_exec(GeoNodeExecParams params) { - Vector<GeometrySet> geometry_sets = params.extract_multi_input<GeometrySet>("Geometry"); + Vector<GeometrySet> geometry_sets = params.extract_input<Vector<GeometrySet>>("Geometry"); GeometrySet geometry_set_result; join_component_type<MeshComponent>(geometry_sets, geometry_set_result); diff --git 
a/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc b/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc index ee279ba58f9..1b398f63691 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc @@ -55,7 +55,7 @@ static void node_geo_exec(GeoNodeExecParams params) }); if (attribute_exists && !cannot_delete) { - params.used_named_attribute(name, eNamedAttrUsage::Remove); + params.used_named_attribute(name, NamedAttributeUsage::Remove); } if (!attribute_exists) { diff --git a/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc b/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc index c2d6f57ce8a..2a590f5bf4a 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc @@ -149,7 +149,7 @@ static void node_geo_exec(GeoNodeExecParams params) return; } - params.used_named_attribute(name, eNamedAttrUsage::Write); + params.used_named_attribute(name, NamedAttributeUsage::Write); const NodeGeometryStoreNamedAttribute &storage = node_storage(params.node()); const eCustomDataType data_type = static_cast<eCustomDataType>(storage.data_type); diff --git a/source/blender/nodes/geometry/nodes/node_geo_string_join.cc b/source/blender/nodes/geometry/nodes/node_geo_string_join.cc index bb33430a02f..09c01b8c627 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_string_join.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_string_join.cc @@ -13,12 +13,13 @@ static void node_declare(NodeDeclarationBuilder &b) static void node_geo_exec(GeoNodeExecParams params) { - Vector<std::string> strings = params.extract_multi_input<std::string>("Strings"); + Vector<fn::ValueOrField<std::string>> strings = + params.extract_input<Vector<fn::ValueOrField<std::string>>>("Strings"); const std::string delim = 
params.extract_input<std::string>("Delimiter"); std::string output; for (const int i : strings.index_range()) { - output += strings[i]; + output += strings[i].as_value(); if (i < (strings.size() - 1)) { output += delim; } diff --git a/source/blender/nodes/intern/geometry_nodes_eval_log.cc b/source/blender/nodes/intern/geometry_nodes_eval_log.cc deleted file mode 100644 index 89bfa5834e8..00000000000 --- a/source/blender/nodes/intern/geometry_nodes_eval_log.cc +++ /dev/null @@ -1,520 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#include "NOD_geometry_nodes_eval_log.hh" - -#include "BKE_curves.hh" -#include "BKE_geometry_set_instances.hh" - -#include "DNA_modifier_types.h" -#include "DNA_space_types.h" - -#include "FN_field_cpp_type.hh" - -#include "BLT_translation.h" - -#include <chrono> - -namespace blender::nodes::geometry_nodes_eval_log { - -using fn::FieldCPPType; -using fn::FieldInput; -using fn::GField; -using fn::ValueOrFieldCPPType; - -ModifierLog::ModifierLog(GeoLogger &logger) - : input_geometry_log_(std::move(logger.input_geometry_log_)), - output_geometry_log_(std::move(logger.output_geometry_log_)) -{ - root_tree_logs_ = allocator_.construct<TreeLog>(); - - LogByTreeContext log_by_tree_context; - - /* Combine all the local loggers that have been used by separate threads. */ - for (LocalGeoLogger &local_logger : logger) { - /* Take ownership of the allocator. */ - logger_allocators_.append(std::move(local_logger.allocator_)); - - for (ValueOfSockets &value_of_sockets : local_logger.values_) { - ValueLog *value_log = value_of_sockets.value.get(); - - /* Take centralized ownership of the logged value. It might be referenced by multiple - * sockets. 
*/ - logged_values_.append(std::move(value_of_sockets.value)); - - for (const DSocket &socket : value_of_sockets.sockets) { - SocketLog &socket_log = this->lookup_or_add_socket_log(log_by_tree_context, socket); - socket_log.value_ = value_log; - } - } - - for (NodeWithWarning &node_with_warning : local_logger.node_warnings_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, - node_with_warning.node); - node_log.warnings_.append(node_with_warning.warning); - } - - for (NodeWithExecutionTime &node_with_exec_time : local_logger.node_exec_times_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, - node_with_exec_time.node); - node_log.exec_time_ = node_with_exec_time.exec_time; - } - - for (NodeWithDebugMessage &debug_message : local_logger.node_debug_messages_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, debug_message.node); - node_log.debug_messages_.append(debug_message.message); - } - - for (NodeWithUsedNamedAttribute &node_with_attribute_name : - local_logger.used_named_attributes_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, - node_with_attribute_name.node); - node_log.used_named_attributes_.append(std::move(node_with_attribute_name.attribute)); - } - } -} - -TreeLog &ModifierLog::lookup_or_add_tree_log(LogByTreeContext &log_by_tree_context, - const DTreeContext &tree_context) -{ - TreeLog *tree_log = log_by_tree_context.lookup_default(&tree_context, nullptr); - if (tree_log != nullptr) { - return *tree_log; - } - - const DTreeContext *parent_context = tree_context.parent_context(); - if (parent_context == nullptr) { - return *root_tree_logs_.get(); - } - TreeLog &parent_log = this->lookup_or_add_tree_log(log_by_tree_context, *parent_context); - destruct_ptr<TreeLog> owned_tree_log = allocator_.construct<TreeLog>(); - tree_log = owned_tree_log.get(); - log_by_tree_context.add_new(&tree_context, tree_log); - 
parent_log.child_logs_.add_new(tree_context.parent_node()->name, std::move(owned_tree_log)); - return *tree_log; -} - -NodeLog &ModifierLog::lookup_or_add_node_log(LogByTreeContext &log_by_tree_context, DNode node) -{ - TreeLog &tree_log = this->lookup_or_add_tree_log(log_by_tree_context, *node.context()); - NodeLog &node_log = *tree_log.node_logs_.lookup_or_add_cb(node->name, [&]() { - destruct_ptr<NodeLog> node_log = allocator_.construct<NodeLog>(); - node_log->input_logs_.resize(node->input_sockets().size()); - node_log->output_logs_.resize(node->output_sockets().size()); - return node_log; - }); - return node_log; -} - -SocketLog &ModifierLog::lookup_or_add_socket_log(LogByTreeContext &log_by_tree_context, - DSocket socket) -{ - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, socket.node()); - MutableSpan<SocketLog> socket_logs = socket->is_input() ? node_log.input_logs_ : - node_log.output_logs_; - SocketLog &socket_log = socket_logs[socket->index()]; - return socket_log; -} - -void ModifierLog::foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const -{ - if (root_tree_logs_) { - root_tree_logs_->foreach_node_log(fn); - } -} - -const GeometryValueLog *ModifierLog::input_geometry_log() const -{ - return input_geometry_log_.get(); -} -const GeometryValueLog *ModifierLog::output_geometry_log() const -{ - return output_geometry_log_.get(); -} - -const NodeLog *TreeLog::lookup_node_log(StringRef node_name) const -{ - const destruct_ptr<NodeLog> *node_log = node_logs_.lookup_ptr_as(node_name); - if (node_log == nullptr) { - return nullptr; - } - return node_log->get(); -} - -const NodeLog *TreeLog::lookup_node_log(const bNode &node) const -{ - return this->lookup_node_log(node.name); -} - -const TreeLog *TreeLog::lookup_child_log(StringRef node_name) const -{ - const destruct_ptr<TreeLog> *tree_log = child_logs_.lookup_ptr_as(node_name); - if (tree_log == nullptr) { - return nullptr; - } - return tree_log->get(); -} - -void 
TreeLog::foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const -{ - for (auto node_log : node_logs_.items()) { - fn(*node_log.value); - } - - for (auto child : child_logs_.items()) { - child.value->foreach_node_log(fn); - } -} - -const SocketLog *NodeLog::lookup_socket_log(eNodeSocketInOut in_out, int index) const -{ - BLI_assert(index >= 0); - Span<SocketLog> socket_logs = (in_out == SOCK_IN) ? input_logs_ : output_logs_; - if (index >= socket_logs.size()) { - return nullptr; - } - return &socket_logs[index]; -} - -const SocketLog *NodeLog::lookup_socket_log(const bNode &node, const bNodeSocket &socket) const -{ - ListBase sockets = socket.in_out == SOCK_IN ? node.inputs : node.outputs; - int index = BLI_findindex(&sockets, &socket); - return this->lookup_socket_log((eNodeSocketInOut)socket.in_out, index); -} - -GFieldValueLog::GFieldValueLog(fn::GField field, bool log_full_field) : type_(field.cpp_type()) -{ - const std::shared_ptr<const fn::FieldInputs> &field_input_nodes = field.node().field_inputs(); - - /* Put the deduplicated field inputs into a vector so that they can be sorted below. 
*/ - Vector<std::reference_wrapper<const FieldInput>> field_inputs; - if (field_input_nodes) { - field_inputs.extend(field_input_nodes->deduplicated_nodes.begin(), - field_input_nodes->deduplicated_nodes.end()); - } - - std::sort( - field_inputs.begin(), field_inputs.end(), [](const FieldInput &a, const FieldInput &b) { - const int index_a = (int)a.category(); - const int index_b = (int)b.category(); - if (index_a == index_b) { - return a.socket_inspection_name().size() < b.socket_inspection_name().size(); - } - return index_a < index_b; - }); - - for (const FieldInput &field_input : field_inputs) { - input_tooltips_.append(field_input.socket_inspection_name()); - } - - if (log_full_field) { - field_ = std::move(field); - } -} - -GeometryValueLog::GeometryValueLog(const GeometrySet &geometry_set, bool log_full_geometry) -{ - static std::array all_component_types = {GEO_COMPONENT_TYPE_CURVE, - GEO_COMPONENT_TYPE_INSTANCES, - GEO_COMPONENT_TYPE_MESH, - GEO_COMPONENT_TYPE_POINT_CLOUD, - GEO_COMPONENT_TYPE_VOLUME}; - - /* Keep track handled attribute names to make sure that we do not return the same name twice. - * Currently #GeometrySet::attribute_foreach does not do that. Note that this will merge - * attributes with the same name but different domains or data types on separate components. 
*/ - Set<StringRef> names; - - geometry_set.attribute_foreach( - all_component_types, - true, - [&](const bke::AttributeIDRef &attribute_id, - const bke::AttributeMetaData &meta_data, - const GeometryComponent &UNUSED(component)) { - if (attribute_id.is_named() && names.add(attribute_id.name())) { - this->attributes_.append({attribute_id.name(), meta_data.domain, meta_data.data_type}); - } - }); - - for (const GeometryComponent *component : geometry_set.get_components_for_read()) { - component_types_.append(component->type()); - switch (component->type()) { - case GEO_COMPONENT_TYPE_MESH: { - const MeshComponent &mesh_component = *(const MeshComponent *)component; - MeshInfo &info = this->mesh_info.emplace(); - info.verts_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_POINT); - info.edges_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_EDGE); - info.faces_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_FACE); - break; - } - case GEO_COMPONENT_TYPE_CURVE: { - const CurveComponent &curve_component = *(const CurveComponent *)component; - CurveInfo &info = this->curve_info.emplace(); - info.splines_num = curve_component.attribute_domain_size(ATTR_DOMAIN_CURVE); - break; - } - case GEO_COMPONENT_TYPE_POINT_CLOUD: { - const PointCloudComponent &pointcloud_component = *(const PointCloudComponent *)component; - PointCloudInfo &info = this->pointcloud_info.emplace(); - info.points_num = pointcloud_component.attribute_domain_size(ATTR_DOMAIN_POINT); - break; - } - case GEO_COMPONENT_TYPE_INSTANCES: { - const InstancesComponent &instances_component = *(const InstancesComponent *)component; - InstancesInfo &info = this->instances_info.emplace(); - info.instances_num = instances_component.instances_num(); - break; - } - case GEO_COMPONENT_TYPE_EDIT: { - const GeometryComponentEditData &edit_component = *( - const GeometryComponentEditData *)component; - if (const bke::CurvesEditHints *curve_edit_hints = - edit_component.curves_edit_hints_.get()) { - 
EditDataInfo &info = this->edit_data_info.emplace(); - info.has_deform_matrices = curve_edit_hints->deform_mats.has_value(); - info.has_deformed_positions = curve_edit_hints->positions.has_value(); - } - break; - } - case GEO_COMPONENT_TYPE_VOLUME: { - break; - } - } - } - if (log_full_geometry) { - full_geometry_ = std::make_unique<GeometrySet>(geometry_set); - full_geometry_->ensure_owns_direct_data(); - } -} - -Vector<const GeometryAttributeInfo *> NodeLog::lookup_available_attributes() const -{ - Vector<const GeometryAttributeInfo *> attributes; - Set<StringRef> names; - for (const SocketLog &socket_log : input_logs_) { - const ValueLog *value_log = socket_log.value(); - if (const GeometryValueLog *geo_value_log = dynamic_cast<const GeometryValueLog *>( - value_log)) { - for (const GeometryAttributeInfo &attribute : geo_value_log->attributes()) { - if (names.add(attribute.name)) { - attributes.append(&attribute); - } - } - } - } - return attributes; -} - -const ModifierLog *ModifierLog::find_root_by_node_editor_context(const SpaceNode &snode) -{ - if (snode.id == nullptr) { - return nullptr; - } - if (GS(snode.id->name) != ID_OB) { - return nullptr; - } - Object *object = (Object *)snode.id; - LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) { - if (md->type == eModifierType_Nodes) { - NodesModifierData *nmd = (NodesModifierData *)md; - if (nmd->node_group == snode.nodetree) { - return (ModifierLog *)nmd->runtime_eval_log; - } - } - } - return nullptr; -} - -const TreeLog *ModifierLog::find_tree_by_node_editor_context(const SpaceNode &snode) -{ - const ModifierLog *eval_log = ModifierLog::find_root_by_node_editor_context(snode); - if (eval_log == nullptr) { - return nullptr; - } - Vector<bNodeTreePath *> tree_path_vec = snode.treepath; - if (tree_path_vec.is_empty()) { - return nullptr; - } - TreeLog *current = eval_log->root_tree_logs_.get(); - for (bNodeTreePath *path : tree_path_vec.as_span().drop_front(1)) { - destruct_ptr<TreeLog> *tree_log = 
current->child_logs_.lookup_ptr_as(path->node_name); - if (tree_log == nullptr) { - return nullptr; - } - current = tree_log->get(); - } - return current; -} - -const NodeLog *ModifierLog::find_node_by_node_editor_context(const SpaceNode &snode, - const bNode &node) -{ - const TreeLog *tree_log = ModifierLog::find_tree_by_node_editor_context(snode); - if (tree_log == nullptr) { - return nullptr; - } - return tree_log->lookup_node_log(node); -} - -const NodeLog *ModifierLog::find_node_by_node_editor_context(const SpaceNode &snode, - const StringRef node_name) -{ - const TreeLog *tree_log = ModifierLog::find_tree_by_node_editor_context(snode); - if (tree_log == nullptr) { - return nullptr; - } - return tree_log->lookup_node_log(node_name); -} - -const SocketLog *ModifierLog::find_socket_by_node_editor_context(const SpaceNode &snode, - const bNode &node, - const bNodeSocket &socket) -{ - const NodeLog *node_log = ModifierLog::find_node_by_node_editor_context(snode, node); - if (node_log == nullptr) { - return nullptr; - } - return node_log->lookup_socket_log(node, socket); -} - -const NodeLog *ModifierLog::find_node_by_spreadsheet_editor_context( - const SpaceSpreadsheet &sspreadsheet) -{ - Vector<SpreadsheetContext *> context_path = sspreadsheet.context_path; - if (context_path.size() <= 2) { - return nullptr; - } - if (context_path[0]->type != SPREADSHEET_CONTEXT_OBJECT) { - return nullptr; - } - if (context_path[1]->type != SPREADSHEET_CONTEXT_MODIFIER) { - return nullptr; - } - for (SpreadsheetContext *context : context_path.as_span().drop_front(2)) { - if (context->type != SPREADSHEET_CONTEXT_NODE) { - return nullptr; - } - } - Span<SpreadsheetContextNode *> node_contexts = - context_path.as_span().drop_front(2).cast<SpreadsheetContextNode *>(); - - Object *object = ((SpreadsheetContextObject *)context_path[0])->object; - StringRefNull modifier_name = ((SpreadsheetContextModifier *)context_path[1])->modifier_name; - if (object == nullptr) { - return nullptr; - } 
- - const ModifierLog *eval_log = nullptr; - LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) { - if (md->type == eModifierType_Nodes) { - if (md->name == modifier_name) { - NodesModifierData *nmd = (NodesModifierData *)md; - eval_log = (const ModifierLog *)nmd->runtime_eval_log; - break; - } - } - } - if (eval_log == nullptr) { - return nullptr; - } - - const TreeLog *tree_log = &eval_log->root_tree(); - for (SpreadsheetContextNode *context : node_contexts.drop_back(1)) { - tree_log = tree_log->lookup_child_log(context->node_name); - if (tree_log == nullptr) { - return nullptr; - } - } - const NodeLog *node_log = tree_log->lookup_node_log(node_contexts.last()->node_name); - return node_log; -} - -void LocalGeoLogger::log_value_for_sockets(Span<DSocket> sockets, GPointer value) -{ - const CPPType &type = *value.type(); - Span<DSocket> copied_sockets = allocator_->construct_array_copy(sockets); - if (type.is<GeometrySet>()) { - bool log_full_geometry = false; - for (const DSocket &socket : sockets) { - if (main_logger_->log_full_sockets_.contains(socket)) { - log_full_geometry = true; - break; - } - } - - const GeometrySet &geometry_set = *value.get<GeometrySet>(); - destruct_ptr<GeometryValueLog> value_log = allocator_->construct<GeometryValueLog>( - geometry_set, log_full_geometry); - values_.append({copied_sockets, std::move(value_log)}); - } - else if (const ValueOrFieldCPPType *value_or_field_type = - dynamic_cast<const ValueOrFieldCPPType *>(&type)) { - const void *value_or_field = value.get(); - if (value_or_field_type->is_field(value_or_field)) { - GField field = *value_or_field_type->get_field_ptr(value_or_field); - bool log_full_field = false; - if (!field.node().depends_on_input()) { - /* Always log constant fields so that their value can be shown in socket inspection. - * In the future we can also evaluate the field here and only store the value. 
*/ - log_full_field = true; - } - if (!log_full_field) { - for (const DSocket &socket : sockets) { - if (main_logger_->log_full_sockets_.contains(socket)) { - log_full_field = true; - break; - } - } - } - destruct_ptr<GFieldValueLog> value_log = allocator_->construct<GFieldValueLog>( - std::move(field), log_full_field); - values_.append({copied_sockets, std::move(value_log)}); - } - else { - const CPPType &base_type = value_or_field_type->base_type(); - const void *value = value_or_field_type->get_value_ptr(value_or_field); - void *buffer = allocator_->allocate(base_type.size(), base_type.alignment()); - base_type.copy_construct(value, buffer); - destruct_ptr<GenericValueLog> value_log = allocator_->construct<GenericValueLog>( - GMutablePointer{base_type, buffer}); - values_.append({copied_sockets, std::move(value_log)}); - } - } - else { - void *buffer = allocator_->allocate(type.size(), type.alignment()); - type.copy_construct(value.get(), buffer); - destruct_ptr<GenericValueLog> value_log = allocator_->construct<GenericValueLog>( - GMutablePointer{type, buffer}); - values_.append({copied_sockets, std::move(value_log)}); - } -} - -void LocalGeoLogger::log_multi_value_socket(DSocket socket, Span<GPointer> values) -{ - /* Doesn't have to be logged currently. 
*/ - UNUSED_VARS(socket, values); -} - -void LocalGeoLogger::log_node_warning(DNode node, NodeWarningType type, std::string message) -{ - node_warnings_.append({node, {type, std::move(message)}}); -} - -void LocalGeoLogger::log_execution_time(DNode node, std::chrono::microseconds exec_time) -{ - node_exec_times_.append({node, exec_time}); -} - -void LocalGeoLogger::log_used_named_attribute(DNode node, - std::string attribute_name, - eNamedAttrUsage usage) -{ - used_named_attributes_.append({node, {std::move(attribute_name), usage}}); -} - -void LocalGeoLogger::log_debug_message(DNode node, std::string message) -{ - node_debug_messages_.append({node, std::move(message)}); -} - -} // namespace blender::nodes::geometry_nodes_eval_log diff --git a/source/blender/nodes/intern/geometry_nodes_lazy_function.cc b/source/blender/nodes/intern/geometry_nodes_lazy_function.cc new file mode 100644 index 00000000000..e4d476e6374 --- /dev/null +++ b/source/blender/nodes/intern/geometry_nodes_lazy_function.cc @@ -0,0 +1,1327 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/** + * This file mainly converts a #bNodeTree into a lazy-function graph. This generally works by + * creating a lazy-function for every node, which is then put into the lazy-function graph. Then + * the nodes in the new graph are linked based on links in the original #bNodeTree. Some additional + * nodes are inserted for things like type conversions and multi-input sockets. + * + * Currently, lazy-functions are even created for nodes that don't strictly require it, like + * reroutes or muted nodes. In the future we could avoid that at the cost of additional code + * complexity. So far, this does not seem to be a performance issue. 
+ */ + +#include "NOD_geometry_exec.hh" +#include "NOD_geometry_nodes_lazy_function.hh" +#include "NOD_multi_function.hh" +#include "NOD_node_declaration.hh" + +#include "BLI_map.hh" + +#include "DNA_ID.h" + +#include "BKE_compute_contexts.hh" +#include "BKE_geometry_set.hh" +#include "BKE_type_conversions.hh" + +#include "FN_field_cpp_type.hh" +#include "FN_lazy_function_graph_executor.hh" + +namespace blender::nodes { + +using fn::ValueOrField; +using fn::ValueOrFieldCPPType; +using namespace fn::multi_function_types; + +static const CPPType *get_socket_cpp_type(const bNodeSocketType &typeinfo) +{ + const CPPType *type = typeinfo.geometry_nodes_cpp_type; + if (type == nullptr) { + return nullptr; + } + BLI_assert(type->has_special_member_functions()); + return type; +} + +static const CPPType *get_socket_cpp_type(const bNodeSocket &socket) +{ + return get_socket_cpp_type(*socket.typeinfo); +} + +static const CPPType *get_vector_type(const CPPType &type) +{ + /* This could be generalized in the future. For now we only support a small set of vectors. */ + if (type.is<GeometrySet>()) { + return &CPPType::get<Vector<GeometrySet>>(); + } + if (type.is<ValueOrField<std::string>>()) { + return &CPPType::get<Vector<ValueOrField<std::string>>>(); + } + return nullptr; +} + +/** + * Checks which sockets of the node are available and creates corresponding inputs/outputs on the + * lazy-function. + */ +static void lazy_function_interface_from_node(const bNode &node, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs, + Vector<lf::Input> &r_inputs, + Vector<lf::Output> &r_outputs) +{ + const bool is_muted = node.is_muted(); + const bool supports_laziness = node.typeinfo->geometry_node_execute_supports_laziness || + node.is_group(); + const lf::ValueUsage input_usage = supports_laziness ? 
lf::ValueUsage::Maybe : + lf::ValueUsage::Used; + for (const bNodeSocket *socket : node.input_sockets()) { + if (!socket->is_available()) { + continue; + } + const CPPType *type = get_socket_cpp_type(*socket); + if (type == nullptr) { + continue; + } + if (socket->is_multi_input() && !is_muted) { + type = get_vector_type(*type); + } + r_inputs.append({socket->identifier, *type, input_usage}); + r_used_inputs.append(socket); + } + for (const bNodeSocket *socket : node.output_sockets()) { + if (!socket->is_available()) { + continue; + } + const CPPType *type = get_socket_cpp_type(*socket); + if (type == nullptr) { + continue; + } + r_outputs.append({socket->identifier, *type}); + r_used_outputs.append(socket); + } +} + +/** + * Used for most normal geometry nodes like Subdivision Surface and Set Position. + */ +class LazyFunctionForGeometryNode : public LazyFunction { + private: + const bNode &node_; + + public: + LazyFunctionForGeometryNode(const bNode &node, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + : node_(node) + { + BLI_assert(node.typeinfo->geometry_node_execute != nullptr); + debug_name_ = node.name; + lazy_function_interface_from_node(node, r_used_inputs, r_used_outputs, inputs_, outputs_); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &context) const override + { + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + + GeoNodeExecParams geo_params{node_, params, context}; + + geo_eval_log::TimePoint start_time = geo_eval_log::Clock::now(); + node_.typeinfo->geometry_node_execute(geo_params); + geo_eval_log::TimePoint end_time = geo_eval_log::Clock::now(); + + if (geo_eval_log::GeoModifierLog *modifier_log = user_data->modifier_data->eval_log) { + geo_eval_log::GeoTreeLogger &tree_logger = modifier_log->get_local_tree_logger( + *user_data->compute_context); + tree_logger.node_execution_times.append({node_.name, start_time, 
end_time}); + } + } +}; + +/** + * Used to gather all inputs of a multi-input socket. A separate node is necessary, because + * multi-inputs are not supported in lazy-function graphs. + */ +class LazyFunctionForMultiInput : public LazyFunction { + private: + const CPPType *base_type_; + + public: + LazyFunctionForMultiInput(const bNodeSocket &socket) + { + debug_name_ = "Multi Input"; + base_type_ = get_socket_cpp_type(socket); + BLI_assert(base_type_ != nullptr); + BLI_assert(socket.is_multi_input()); + for (const bNodeLink *link : socket.directly_linked_links()) { + if (!link->is_muted()) { + inputs_.append({"Input", *base_type_}); + } + } + const CPPType *vector_type = get_vector_type(*base_type_); + BLI_assert(vector_type != nullptr); + outputs_.append({"Output", *vector_type}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + /* Currently we only have multi-inputs for geometry and string sockets. This could be + * generalized in the future. */ + base_type_->to_static_type_tag<GeometrySet, ValueOrField<std::string>>([&](auto type_tag) { + using T = typename decltype(type_tag)::type; + if constexpr (std::is_void_v<T>) { + /* This type is not support in this node for now. */ + BLI_assert_unreachable(); + } + else { + void *output_ptr = params.get_output_data_ptr(0); + Vector<T> &values = *new (output_ptr) Vector<T>(); + for (const int i : inputs_.index_range()) { + values.append(params.extract_input<T>(i)); + } + params.output_set(0); + } + }); + } +}; + +/** + * Simple lazy-function that just forwards the input. 
+ */ +class LazyFunctionForRerouteNode : public LazyFunction { + public: + LazyFunctionForRerouteNode(const CPPType &type) + { + debug_name_ = "Reroute"; + inputs_.append({"Input", type}); + outputs_.append({"Output", type}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + void *input_value = params.try_get_input_data_ptr(0); + void *output_value = params.get_output_data_ptr(0); + BLI_assert(input_value != nullptr); + BLI_assert(output_value != nullptr); + const CPPType &type = *inputs_[0].type; + type.move_construct(input_value, output_value); + params.output_set(0); + } +}; + +/** + * Executes a multi-function. If all inputs are single values, the results will also be single + * values. If any input is a field, the outputs will also be fields. + */ +static void execute_multi_function_on_value_or_field( + const MultiFunction &fn, + const std::shared_ptr<MultiFunction> &owned_fn, + const Span<const ValueOrFieldCPPType *> input_types, + const Span<const ValueOrFieldCPPType *> output_types, + const Span<const void *> input_values, + const Span<void *> output_values) +{ + BLI_assert(fn.param_amount() == input_types.size() + output_types.size()); + BLI_assert(input_types.size() == input_values.size()); + BLI_assert(output_types.size() == output_values.size()); + + /* Check if any input is a field. */ + bool any_input_is_field = false; + for (const int i : input_types.index_range()) { + const ValueOrFieldCPPType &type = *input_types[i]; + const void *value_or_field = input_values[i]; + if (type.is_field(value_or_field)) { + any_input_is_field = true; + break; + } + } + + if (any_input_is_field) { + /* Convert all inputs into fields, so that they can be used as input in the new field. 
*/ + Vector<GField> input_fields; + for (const int i : input_types.index_range()) { + const ValueOrFieldCPPType &type = *input_types[i]; + const void *value_or_field = input_values[i]; + input_fields.append(type.as_field(value_or_field)); + } + + /* Construct the new field node. */ + std::shared_ptr<fn::FieldOperation> operation; + if (owned_fn) { + operation = std::make_shared<fn::FieldOperation>(owned_fn, std::move(input_fields)); + } + else { + operation = std::make_shared<fn::FieldOperation>(fn, std::move(input_fields)); + } + + /* Store the new fields in the output. */ + for (const int i : output_types.index_range()) { + const ValueOrFieldCPPType &type = *output_types[i]; + void *value_or_field = output_values[i]; + type.construct_from_field(value_or_field, GField{operation, i}); + } + } + else { + /* In this case, the multi-function is evaluated directly. */ + MFParamsBuilder params{fn, 1}; + MFContextBuilder context; + + for (const int i : input_types.index_range()) { + const ValueOrFieldCPPType &type = *input_types[i]; + const CPPType &base_type = type.base_type(); + const void *value_or_field = input_values[i]; + const void *value = type.get_value_ptr(value_or_field); + params.add_readonly_single_input(GVArray::ForSingleRef(base_type, 1, value)); + } + for (const int i : output_types.index_range()) { + const ValueOrFieldCPPType &type = *output_types[i]; + const CPPType &base_type = type.base_type(); + void *value_or_field = output_values[i]; + type.default_construct(value_or_field); + void *value = type.get_value_ptr(value_or_field); + base_type.destruct(value); + params.add_uninitialized_single_output(GMutableSpan{base_type, value, 1}); + } + fn.call(IndexRange(1), params, context); + } +} + +/** + * Behavior of muted nodes: + * - Some inputs are forwarded to outputs without changes. + * - Some inputs are converted to a different type which becomes the output. + * - Some outputs are value initialized because they don't have a corresponding input. 
+ */ +class LazyFunctionForMutedNode : public LazyFunction { + private: + Array<int> input_by_output_index_; + + public: + LazyFunctionForMutedNode(const bNode &node, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + { + debug_name_ = "Muted"; + lazy_function_interface_from_node(node, r_used_inputs, r_used_outputs, inputs_, outputs_); + for (lf::Input &fn_input : inputs_) { + fn_input.usage = lf::ValueUsage::Maybe; + } + + for (lf::Input &fn_input : inputs_) { + fn_input.usage = lf::ValueUsage::Unused; + } + + input_by_output_index_.reinitialize(outputs_.size()); + input_by_output_index_.fill(-1); + for (const bNodeLink *internal_link : node.internal_links_span()) { + const int input_i = r_used_inputs.first_index_of_try(internal_link->fromsock); + const int output_i = r_used_outputs.first_index_of_try(internal_link->tosock); + if (ELEM(-1, input_i, output_i)) { + continue; + } + input_by_output_index_[output_i] = input_i; + inputs_[input_i].usage = lf::ValueUsage::Maybe; + } + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + for (const int output_i : outputs_.index_range()) { + if (params.output_was_set(output_i)) { + continue; + } + const CPPType &output_type = *outputs_[output_i].type; + void *output_value = params.get_output_data_ptr(output_i); + const int input_i = input_by_output_index_[output_i]; + if (input_i == -1) { + /* The output does not have a corresponding input. */ + output_type.value_initialize(output_value); + params.output_set(output_i); + continue; + } + const void *input_value = params.try_get_input_data_ptr_or_request(input_i); + if (input_value == nullptr) { + continue; + } + const CPPType &input_type = *inputs_[input_i].type; + if (input_type == output_type) { + /* Forward the value as is. */ + input_type.copy_construct(input_value, output_value); + params.output_set(output_i); + continue; + } + /* Perform a type conversion and then format the value. 
*/ + const bke::DataTypeConversions &conversions = bke::get_implicit_type_conversions(); + const auto *from_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&input_type); + const auto *to_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&output_type); + if (from_field_type != nullptr && to_field_type != nullptr) { + const CPPType &from_base_type = from_field_type->base_type(); + const CPPType &to_base_type = to_field_type->base_type(); + if (conversions.is_convertible(from_base_type, to_base_type)) { + const MultiFunction &multi_fn = *conversions.get_conversion_multi_function( + MFDataType::ForSingle(from_base_type), MFDataType::ForSingle(to_base_type)); + execute_multi_function_on_value_or_field( + multi_fn, {}, {from_field_type}, {to_field_type}, {input_value}, {output_value}); + } + params.output_set(output_i); + continue; + } + /* Use a value initialization if the conversion does not work. */ + output_type.value_initialize(output_value); + params.output_set(output_i); + } + } +}; + +/** + * Type conversions are generally implemented as multi-functions. This node checks if the input is + * a field or single value and outputs a field or single value respectively. 
+ */ +class LazyFunctionForMultiFunctionConversion : public LazyFunction { + private: + const MultiFunction &fn_; + const ValueOrFieldCPPType &from_type_; + const ValueOrFieldCPPType &to_type_; + const Vector<const bNodeSocket *> target_sockets_; + + public: + LazyFunctionForMultiFunctionConversion(const MultiFunction &fn, + const ValueOrFieldCPPType &from, + const ValueOrFieldCPPType &to, + Vector<const bNodeSocket *> &&target_sockets) + : fn_(fn), from_type_(from), to_type_(to), target_sockets_(std::move(target_sockets)) + { + debug_name_ = "Convert"; + inputs_.append({"From", from}); + outputs_.append({"To", to}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + const void *from_value = params.try_get_input_data_ptr(0); + void *to_value = params.get_output_data_ptr(0); + BLI_assert(from_value != nullptr); + BLI_assert(to_value != nullptr); + + execute_multi_function_on_value_or_field( + fn_, {}, {&from_type_}, {&to_type_}, {from_value}, {to_value}); + + params.output_set(0); + } +}; + +/** + * This lazy-function wraps nodes that are implemented as multi-function (mostly math nodes). 
+ */ +class LazyFunctionForMultiFunctionNode : public LazyFunction { + private: + const bNode &node_; + const NodeMultiFunctions::Item fn_item_; + Vector<const ValueOrFieldCPPType *> input_types_; + Vector<const ValueOrFieldCPPType *> output_types_; + Vector<const bNodeSocket *> output_sockets_; + + public: + LazyFunctionForMultiFunctionNode(const bNode &node, + NodeMultiFunctions::Item fn_item, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + : node_(node), fn_item_(std::move(fn_item)) + { + BLI_assert(fn_item_.fn != nullptr); + debug_name_ = node.name; + lazy_function_interface_from_node(node, r_used_inputs, r_used_outputs, inputs_, outputs_); + for (const lf::Input &fn_input : inputs_) { + input_types_.append(dynamic_cast<const ValueOrFieldCPPType *>(fn_input.type)); + } + for (const lf::Output &fn_output : outputs_) { + output_types_.append(dynamic_cast<const ValueOrFieldCPPType *>(fn_output.type)); + } + output_sockets_ = r_used_outputs; + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + Vector<const void *> input_values(inputs_.size()); + Vector<void *> output_values(outputs_.size()); + for (const int i : inputs_.index_range()) { + input_values[i] = params.try_get_input_data_ptr(i); + } + for (const int i : outputs_.index_range()) { + output_values[i] = params.get_output_data_ptr(i); + } + execute_multi_function_on_value_or_field( + *fn_item_.fn, fn_item_.owned_fn, input_types_, output_types_, input_values, output_values); + for (const int i : outputs_.index_range()) { + params.output_set(i); + } + } +}; + +/** + * Some sockets have non-trivial implicit inputs (e.g. the Position input of the Set Position + * node). Those are implemented as a separate node that outputs the value. + */ +class LazyFunctionForImplicitInput : public LazyFunction { + private: + /** + * The function that generates the implicit input. The passed in memory is uninitialized. 
+ */ + std::function<void(void *)> init_fn_; + + public: + LazyFunctionForImplicitInput(const CPPType &type, std::function<void(void *)> init_fn) + : init_fn_(std::move(init_fn)) + { + debug_name_ = "Input"; + outputs_.append({"Output", type}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + void *value = params.get_output_data_ptr(0); + init_fn_(value); + params.output_set(0); + } +}; + +/** + * The viewer node does not have outputs. Instead it is executed because the executor knows that it + * has side effects. The side effect is that the inputs to the viewer are logged. + */ +class LazyFunctionForViewerNode : public LazyFunction { + private: + const bNode &bnode_; + /** The field is only logged when it is linked. */ + bool use_field_input_ = true; + + public: + LazyFunctionForViewerNode(const bNode &bnode, Vector<const bNodeSocket *> &r_used_inputs) + : bnode_(bnode) + { + debug_name_ = "Viewer"; + Vector<const bNodeSocket *> dummy_used_outputs; + lazy_function_interface_from_node(bnode, r_used_inputs, dummy_used_outputs, inputs_, outputs_); + if (!r_used_inputs[1]->is_directly_linked()) { + use_field_input_ = false; + r_used_inputs.pop_last(); + inputs_.pop_last(); + } + } + + void execute_impl(lf::Params ¶ms, const lf::Context &context) const override + { + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + + GeometrySet geometry = params.extract_input<GeometrySet>(0); + + GField field; + if (use_field_input_) { + const void *value_or_field = params.try_get_input_data_ptr(1); + BLI_assert(value_or_field != nullptr); + const ValueOrFieldCPPType &value_or_field_type = static_cast<const ValueOrFieldCPPType &>( + *inputs_[1].type); + field = value_or_field_type.as_field(value_or_field); + } + + geo_eval_log::GeoTreeLogger &tree_logger = + user_data->modifier_data->eval_log->get_local_tree_logger(*user_data->compute_context); + 
tree_logger.log_viewer_node(bnode_, geometry, field); + } +}; + +/** + * This lazy-function wraps a group node. Internally it just executes the lazy-function graph of + * the referenced group. + */ +class LazyFunctionForGroupNode : public LazyFunction { + private: + const bNode &group_node_; + std::optional<GeometryNodesLazyFunctionLogger> lf_logger_; + std::optional<GeometryNodesLazyFunctionSideEffectProvider> lf_side_effect_provider_; + std::optional<lf::GraphExecutor> graph_executor_; + + public: + LazyFunctionForGroupNode(const bNode &group_node, + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + : group_node_(group_node) + { + debug_name_ = group_node.name; + lazy_function_interface_from_node( + group_node, r_used_inputs, r_used_outputs, inputs_, outputs_); + + bNodeTree *group_btree = reinterpret_cast<bNodeTree *>(group_node_.id); + BLI_assert(group_btree != nullptr); + + Vector<const lf::OutputSocket *> graph_inputs; + for (const lf::OutputSocket *socket : lf_graph_info.mapping.group_input_sockets) { + if (socket != nullptr) { + graph_inputs.append(socket); + } + } + Vector<const lf::InputSocket *> graph_outputs; + if (const bNode *group_output_bnode = group_btree->group_output_node()) { + for (const bNodeSocket *bsocket : group_output_bnode->input_sockets().drop_back(1)) { + const lf::Socket *socket = lf_graph_info.mapping.dummy_socket_map.lookup_default(bsocket, + nullptr); + if (socket != nullptr) { + graph_outputs.append(&socket->as_input()); + } + } + } + + lf_logger_.emplace(lf_graph_info); + lf_side_effect_provider_.emplace(lf_graph_info); + graph_executor_.emplace(lf_graph_info.graph, + std::move(graph_inputs), + std::move(graph_outputs), + &*lf_logger_, + &*lf_side_effect_provider_); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &context) const override + { + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData 
*>(context.user_data); + BLI_assert(user_data != nullptr); + + /* The compute context changes when entering a node group. */ + bke::NodeGroupComputeContext compute_context{user_data->compute_context, group_node_.name}; + GeoNodesLFUserData group_user_data = *user_data; + group_user_data.compute_context = &compute_context; + + lf::Context group_context = context; + group_context.user_data = &group_user_data; + + graph_executor_->execute(params, group_context); + } + + void *init_storage(LinearAllocator<> &allocator) const + { + return graph_executor_->init_storage(allocator); + } + + void destruct_storage(void *storage) const + { + graph_executor_->destruct_storage(storage); + } +}; + +static GMutablePointer get_socket_default_value(LinearAllocator<> &allocator, + const bNodeSocket &bsocket) +{ + const bNodeSocketType &typeinfo = *bsocket.typeinfo; + const CPPType *type = get_socket_cpp_type(typeinfo); + if (type == nullptr) { + return {}; + } + void *buffer = allocator.allocate(type->size(), type->alignment()); + typeinfo.get_geometry_nodes_cpp_value(bsocket, buffer); + return {type, buffer}; +} + +/** + * Utility class to build a lazy-function graph based on a geometry nodes tree. + * This is mainly a separate class because it makes it easier to have variables that can be + * accessed by many functions. + */ +struct GeometryNodesLazyFunctionGraphBuilder { + private: + const bNodeTree &btree_; + GeometryNodesLazyFunctionGraphInfo *lf_graph_info_; + lf::Graph *lf_graph_; + GeometryNodeLazyFunctionGraphMapping *mapping_; + MultiValueMap<const bNodeSocket *, lf::InputSocket *> input_socket_map_; + Map<const bNodeSocket *, lf::OutputSocket *> output_socket_map_; + Map<const bNodeSocket *, lf::Node *> multi_input_socket_nodes_; + const bke::DataTypeConversions *conversions_; + + /** + * All group input nodes are combined into one dummy node in the lazy-function graph. + * If some input has an invalid type, it is ignored in the new graph. 
In this case null and -1 is + * used in the vectors below. + */ + Vector<const CPPType *> group_input_types_; + Vector<int> group_input_indices_; + lf::DummyNode *group_input_lf_node_; + + /** + * The output types or null if an output is invalid. Each group output node gets a separate + * corresponding dummy node in the new graph. + */ + Vector<const CPPType *> group_output_types_; + Vector<int> group_output_indices_; + + public: + GeometryNodesLazyFunctionGraphBuilder(const bNodeTree &btree, + GeometryNodesLazyFunctionGraphInfo &lf_graph_info) + : btree_(btree), lf_graph_info_(&lf_graph_info) + { + } + + void build() + { + btree_.ensure_topology_cache(); + + lf_graph_ = &lf_graph_info_->graph; + mapping_ = &lf_graph_info_->mapping; + conversions_ = &bke::get_implicit_type_conversions(); + + this->prepare_node_multi_functions(); + this->prepare_group_inputs(); + this->prepare_group_outputs(); + this->build_group_input_node(); + this->handle_nodes(); + this->handle_links(); + this->add_default_inputs(); + + lf_graph_->update_node_indices(); + } + + private: + void prepare_node_multi_functions() + { + lf_graph_info_->node_multi_functions = std::make_unique<NodeMultiFunctions>(btree_); + } + + void prepare_group_inputs() + { + LISTBASE_FOREACH (const bNodeSocket *, interface_bsocket, &btree_.inputs) { + const CPPType *type = get_socket_cpp_type(*interface_bsocket->typeinfo); + if (type != nullptr) { + const int index = group_input_types_.append_and_get_index(type); + group_input_indices_.append(index); + } + else { + group_input_indices_.append(-1); + } + } + } + + void prepare_group_outputs() + { + LISTBASE_FOREACH (const bNodeSocket *, interface_bsocket, &btree_.outputs) { + const CPPType *type = get_socket_cpp_type(*interface_bsocket->typeinfo); + if (type != nullptr) { + const int index = group_output_types_.append_and_get_index(type); + group_output_indices_.append(index); + } + else { + group_output_indices_.append(-1); + } + } + } + + void 
build_group_input_node() + { + /* Create a dummy node for the group inputs. */ + group_input_lf_node_ = &lf_graph_->add_dummy({}, group_input_types_); + for (const int group_input_index : group_input_indices_) { + if (group_input_index == -1) { + mapping_->group_input_sockets.append(nullptr); + } + else { + mapping_->group_input_sockets.append(&group_input_lf_node_->output(group_input_index)); + } + } + } + + void handle_nodes() + { + /* Insert all nodes into the lazy function graph. */ + for (const bNode *bnode : btree_.all_nodes()) { + const bNodeType *node_type = bnode->typeinfo; + if (node_type == nullptr) { + continue; + } + if (bnode->is_muted()) { + this->handle_muted_node(*bnode); + continue; + } + switch (node_type->type) { + case NODE_FRAME: { + /* Ignored. */ + break; + } + case NODE_REROUTE: { + this->handle_reroute_node(*bnode); + break; + } + case NODE_GROUP_INPUT: { + this->handle_group_input_node(*bnode); + break; + } + case NODE_GROUP_OUTPUT: { + this->handle_group_output_node(*bnode); + break; + } + case NODE_CUSTOM_GROUP: + case NODE_GROUP: { + this->handle_group_node(*bnode); + break; + } + case GEO_NODE_VIEWER: { + this->handle_viewer_node(*bnode); + break; + } + default: { + if (node_type->geometry_node_execute) { + this->handle_geometry_node(*bnode); + break; + } + const NodeMultiFunctions::Item &fn_item = lf_graph_info_->node_multi_functions->try_get( + *bnode); + if (fn_item.fn != nullptr) { + this->handle_multi_function_node(*bnode, fn_item); + } + /* Nodes that don't match any of the criteria above are just ignored. 
*/ + break; + } + } + } + } + + void handle_muted_node(const bNode &bnode) + { + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = std::make_unique<LazyFunctionForMutedNode>( + bnode, used_inputs, used_outputs); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + for (const int i : used_outputs.index_range()) { + const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_reroute_node(const bNode &bnode) + { + const bNodeSocket &input_bsocket = bnode.input_socket(0); + const bNodeSocket &output_bsocket = bnode.output_socket(0); + const CPPType *type = get_socket_cpp_type(input_bsocket); + if (type == nullptr) { + return; + } + + auto lazy_function = std::make_unique<LazyFunctionForRerouteNode>(*type); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + lf::InputSocket &lf_input = lf_node.input(0); + lf::OutputSocket &lf_output = lf_node.output(0); + input_socket_map_.add(&input_bsocket, &lf_input); + output_socket_map_.add_new(&output_bsocket, &lf_output); + mapping_->bsockets_by_lf_socket_map.add(&lf_input, &input_bsocket); + mapping_->bsockets_by_lf_socket_map.add(&lf_output, &output_bsocket); + } + + void handle_group_input_node(const bNode &bnode) + { + for (const int btree_index : group_input_indices_.index_range()) { + const int lf_index = group_input_indices_[btree_index]; + if (lf_index == -1) { + continue; + } + 
const bNodeSocket &bsocket = bnode.output_socket(btree_index); + lf::OutputSocket &lf_socket = group_input_lf_node_->output(lf_index); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->dummy_socket_map.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_group_output_node(const bNode &bnode) + { + lf::DummyNode &group_output_lf_node = lf_graph_->add_dummy(group_output_types_, {}); + for (const int btree_index : group_output_indices_.index_range()) { + const int lf_index = group_output_indices_[btree_index]; + if (lf_index == -1) { + continue; + } + const bNodeSocket &bsocket = bnode.input_socket(btree_index); + lf::InputSocket &lf_socket = group_output_lf_node.input(lf_index); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->dummy_socket_map.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_group_node(const bNode &bnode) + { + const bNodeTree *group_btree = reinterpret_cast<bNodeTree *>(bnode.id); + if (group_btree == nullptr) { + return; + } + const GeometryNodesLazyFunctionGraphInfo *group_lf_graph_info = + ensure_geometry_nodes_lazy_function_graph(*group_btree); + if (group_lf_graph_info == nullptr) { + return; + } + + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = std::make_unique<LazyFunctionForGroupNode>( + bnode, *group_lf_graph_info, used_inputs, used_outputs); + lf::FunctionNode &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + BLI_assert(!bsocket.is_multi_input()); + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + for (const int i : used_outputs.index_range()) { + 
const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + mapping_->group_node_map.add(&bnode, &lf_node); + } + + void handle_geometry_node(const bNode &bnode) + { + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = std::make_unique<LazyFunctionForGeometryNode>( + bnode, used_inputs, used_outputs); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + lf::InputSocket &lf_socket = lf_node.input(i); + + if (bsocket.is_multi_input()) { + auto multi_input_lazy_function = std::make_unique<LazyFunctionForMultiInput>(bsocket); + lf::Node &lf_multi_input_node = lf_graph_->add_function(*multi_input_lazy_function); + lf_graph_info_->functions.append(std::move(multi_input_lazy_function)); + lf_graph_->add_link(lf_multi_input_node.output(0), lf_socket); + multi_input_socket_nodes_.add_new(&bsocket, &lf_multi_input_node); + for (lf::InputSocket *lf_multi_input_socket : lf_multi_input_node.inputs()) { + mapping_->bsockets_by_lf_socket_map.add(lf_multi_input_socket, &bsocket); + } + } + else { + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + for (const int i : used_outputs.index_range()) { + const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_multi_function_node(const bNode &bnode, const NodeMultiFunctions::Item &fn_item) + { + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = 
std::make_unique<LazyFunctionForMultiFunctionNode>( + bnode, fn_item, used_inputs, used_outputs); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + BLI_assert(!bsocket.is_multi_input()); + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + for (const int i : used_outputs.index_range()) { + const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_viewer_node(const bNode &bnode) + { + Vector<const bNodeSocket *> used_inputs; + auto lazy_function = std::make_unique<LazyFunctionForViewerNode>(bnode, used_inputs); + lf::FunctionNode &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + + mapping_->viewer_node_map.add(&bnode, &lf_node); + } + + void handle_links() + { + for (const auto item : output_socket_map_.items()) { + this->insert_links_from_socket(*item.key, *item.value); + } + } + + void insert_links_from_socket(const bNodeSocket &from_bsocket, lf::OutputSocket &from_lf_socket) + { + const Span<const bNodeLink *> links_from_bsocket = from_bsocket.directly_linked_links(); + + struct TypeWithLinks { + const CPPType *type; + Vector<const bNodeLink *> links; + }; + + /* Group available target sockets by type so that they can be handled together. 
*/ + Vector<TypeWithLinks> types_with_links; + for (const bNodeLink *link : links_from_bsocket) { + if (link->is_muted()) { + continue; + } + const bNodeSocket &to_bsocket = *link->tosock; + if (!to_bsocket.is_available()) { + continue; + } + const CPPType *to_type = get_socket_cpp_type(to_bsocket); + if (to_type == nullptr) { + continue; + } + bool inserted = false; + for (TypeWithLinks &types_with_links : types_with_links) { + if (types_with_links.type == to_type) { + types_with_links.links.append(link); + inserted = true; + break; + } + } + if (inserted) { + continue; + } + types_with_links.append({to_type, {link}}); + } + + for (const TypeWithLinks &type_with_links : types_with_links) { + const CPPType &to_type = *type_with_links.type; + const Span<const bNodeLink *> links = type_with_links.links; + + Vector<const bNodeSocket *> target_bsockets; + for (const bNodeLink *link : links) { + target_bsockets.append(link->tosock); + } + + lf::OutputSocket *converted_from_lf_socket = this->insert_type_conversion_if_necessary( + from_lf_socket, to_type, std::move(target_bsockets)); + + auto make_input_link_or_set_default = [&](lf::InputSocket &to_lf_socket) { + if (converted_from_lf_socket == nullptr) { + const void *default_value = to_type.default_value(); + to_lf_socket.set_default_value(default_value); + } + else { + lf_graph_->add_link(*converted_from_lf_socket, to_lf_socket); + } + }; + + for (const bNodeLink *link : links) { + const bNodeSocket &to_bsocket = *link->tosock; + if (to_bsocket.is_multi_input()) { + /* TODO: Cache this index on the link. 
*/ + int link_index = 0; + for (const bNodeLink *multi_input_link : to_bsocket.directly_linked_links()) { + if (multi_input_link == link) { + break; + } + if (!multi_input_link->is_muted()) { + link_index++; + } + } + if (to_bsocket.owner_node().is_muted()) { + if (link_index == 0) { + for (lf::InputSocket *to_lf_socket : input_socket_map_.lookup(&to_bsocket)) { + make_input_link_or_set_default(*to_lf_socket); + } + } + } + else { + lf::Node *multi_input_lf_node = multi_input_socket_nodes_.lookup_default(&to_bsocket, + nullptr); + if (multi_input_lf_node == nullptr) { + continue; + } + make_input_link_or_set_default(multi_input_lf_node->input(link_index)); + } + } + else { + for (lf::InputSocket *to_lf_socket : input_socket_map_.lookup(&to_bsocket)) { + make_input_link_or_set_default(*to_lf_socket); + } + } + } + } + } + + lf::OutputSocket *insert_type_conversion_if_necessary( + lf::OutputSocket &from_socket, + const CPPType &to_type, + Vector<const bNodeSocket *> &&target_sockets) + { + const CPPType &from_type = from_socket.type(); + if (from_type == to_type) { + return &from_socket; + } + const auto *from_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&from_type); + const auto *to_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&to_type); + if (from_field_type != nullptr && to_field_type != nullptr) { + const CPPType &from_base_type = from_field_type->base_type(); + const CPPType &to_base_type = to_field_type->base_type(); + if (conversions_->is_convertible(from_base_type, to_base_type)) { + const MultiFunction &multi_fn = *conversions_->get_conversion_multi_function( + MFDataType::ForSingle(from_base_type), MFDataType::ForSingle(to_base_type)); + auto fn = std::make_unique<LazyFunctionForMultiFunctionConversion>( + multi_fn, *from_field_type, *to_field_type, std::move(target_sockets)); + lf::Node &conversion_node = lf_graph_->add_function(*fn); + lf_graph_info_->functions.append(std::move(fn)); + lf_graph_->add_link(from_socket, 
conversion_node.input(0)); + return &conversion_node.output(0); + } + } + return nullptr; + } + + void add_default_inputs() + { + for (auto item : input_socket_map_.items()) { + const bNodeSocket &bsocket = *item.key; + const Span<lf::InputSocket *> lf_sockets = item.value; + for (lf::InputSocket *lf_socket : lf_sockets) { + if (lf_socket->origin() != nullptr) { + /* Is linked already. */ + continue; + } + this->add_default_input(bsocket, *lf_socket); + } + } + } + + void add_default_input(const bNodeSocket &input_bsocket, lf::InputSocket &input_lf_socket) + { + if (this->try_add_implicit_input(input_bsocket, input_lf_socket)) { + return; + } + GMutablePointer value = get_socket_default_value(lf_graph_info_->allocator, input_bsocket); + if (value.get() == nullptr) { + /* Not possible to add a default value. */ + return; + } + input_lf_socket.set_default_value(value.get()); + if (!value.type()->is_trivially_destructible()) { + lf_graph_info_->values_to_destruct.append(value); + } + } + + bool try_add_implicit_input(const bNodeSocket &input_bsocket, lf::InputSocket &input_lf_socket) + { + const bNode &bnode = input_bsocket.owner_node(); + const NodeDeclaration *node_declaration = bnode.declaration(); + if (node_declaration == nullptr) { + return false; + } + const SocketDeclaration &socket_declaration = + *node_declaration->inputs()[input_bsocket.index()]; + if (socket_declaration.input_field_type() != InputSocketFieldType::Implicit) { + return false; + } + const CPPType &type = input_lf_socket.type(); + std::function<void(void *)> init_fn = this->get_implicit_input_init_function(bnode, + input_bsocket); + if (!init_fn) { + return false; + } + + auto lazy_function = std::make_unique<LazyFunctionForImplicitInput>(type, std::move(init_fn)); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + lf_graph_->add_link(lf_node.output(0), input_lf_socket); + return true; + } + + std::function<void(void 
*)> get_implicit_input_init_function(const bNode &bnode, + const bNodeSocket &bsocket) + { + const bNodeSocketType &socket_type = *bsocket.typeinfo; + if (socket_type.type == SOCK_VECTOR) { + if (bnode.type == GEO_NODE_SET_CURVE_HANDLES) { + StringRef side = ((NodeGeometrySetCurveHandlePositions *)bnode.storage)->mode == + GEO_NODE_CURVE_HANDLE_LEFT ? + "handle_left" : + "handle_right"; + return [side](void *r_value) { + new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>(side)); + }; + } + else if (bnode.type == GEO_NODE_EXTRUDE_MESH) { + return [](void *r_value) { + new (r_value) + ValueOrField<float3>(Field<float3>(std::make_shared<bke::NormalFieldInput>())); + }; + } + else { + return [](void *r_value) { + new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>("position")); + }; + } + } + else if (socket_type.type == SOCK_INT) { + if (ELEM(bnode.type, FN_NODE_RANDOM_VALUE, GEO_NODE_INSTANCE_ON_POINTS)) { + return [](void *r_value) { + new (r_value) + ValueOrField<int>(Field<int>(std::make_shared<bke::IDAttributeFieldInput>())); + }; + } + else { + return [](void *r_value) { + new (r_value) ValueOrField<int>(Field<int>(std::make_shared<fn::IndexFieldInput>())); + }; + } + } + return {}; + } +}; + +const GeometryNodesLazyFunctionGraphInfo *ensure_geometry_nodes_lazy_function_graph( + const bNodeTree &btree) +{ + btree.ensure_topology_cache(); + if (btree.has_link_cycle()) { + return nullptr; + } + + std::unique_ptr<GeometryNodesLazyFunctionGraphInfo> &lf_graph_info_ptr = + btree.runtime->geometry_nodes_lazy_function_graph_info; + + if (lf_graph_info_ptr) { + return lf_graph_info_ptr.get(); + } + std::lock_guard lock{btree.runtime->geometry_nodes_lazy_function_graph_info_mutex}; + if (lf_graph_info_ptr) { + return lf_graph_info_ptr.get(); + } + + auto lf_graph_info = std::make_unique<GeometryNodesLazyFunctionGraphInfo>(); + GeometryNodesLazyFunctionGraphBuilder builder{btree, *lf_graph_info}; + builder.build(); + + 
lf_graph_info_ptr = std::move(lf_graph_info); + return lf_graph_info_ptr.get(); +} + +GeometryNodesLazyFunctionLogger::GeometryNodesLazyFunctionLogger( + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info) + : lf_graph_info_(lf_graph_info) +{ +} + +void GeometryNodesLazyFunctionLogger::log_socket_value( + const fn::lazy_function::Socket &lf_socket, + const GPointer value, + const fn::lazy_function::Context &context) const +{ + const Span<const bNodeSocket *> bsockets = + lf_graph_info_.mapping.bsockets_by_lf_socket_map.lookup(&lf_socket); + if (bsockets.is_empty()) { + return; + } + + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + if (user_data->modifier_data->eval_log == nullptr) { + return; + } + geo_eval_log::GeoTreeLogger &tree_logger = + user_data->modifier_data->eval_log->get_local_tree_logger(*user_data->compute_context); + for (const bNodeSocket *bsocket : bsockets) { + /* Avoid logging to some sockets when the same value will also be logged to a linked socket. + * This reduces the number of logged values without losing information. 
*/ + if (bsocket->is_input() && bsocket->is_directly_linked()) { + continue; + } + const bNode &bnode = bsocket->owner_node(); + if (bnode.is_reroute()) { + continue; + } + tree_logger.log_value(bsocket->owner_node(), *bsocket, value); + } +} + +static std::mutex dump_error_context_mutex; + +void GeometryNodesLazyFunctionLogger::dump_when_outputs_are_missing( + const lf::FunctionNode &node, + Span<const lf::OutputSocket *> missing_sockets, + const lf::Context &context) const +{ + std::lock_guard lock{dump_error_context_mutex}; + + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + user_data->compute_context->print_stack(std::cout, node.name()); + std::cout << "Missing outputs:\n"; + for (const lf::OutputSocket *socket : missing_sockets) { + std::cout << " " << socket->name() << "\n"; + } +} + +void GeometryNodesLazyFunctionLogger::dump_when_input_is_set_twice( + const lf::InputSocket &target_socket, + const lf::OutputSocket &from_socket, + const lf::Context &context) const +{ + std::lock_guard lock{dump_error_context_mutex}; + + std::stringstream ss; + ss << from_socket.node().name() << ":" << from_socket.name() << " -> " + << target_socket.node().name() << ":" << target_socket.name(); + + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + user_data->compute_context->print_stack(std::cout, ss.str()); +} + +GeometryNodesLazyFunctionSideEffectProvider::GeometryNodesLazyFunctionSideEffectProvider( + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info) + : lf_graph_info_(lf_graph_info) +{ +} + +Vector<const lf::FunctionNode *> GeometryNodesLazyFunctionSideEffectProvider:: + get_nodes_with_side_effects(const lf::Context &context) const +{ + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + const ComputeContextHash &context_hash = 
user_data->compute_context->hash(); + const GeoNodesModifierData &modifier_data = *user_data->modifier_data; + return modifier_data.side_effect_nodes->lookup(context_hash); +} + +GeometryNodesLazyFunctionGraphInfo::GeometryNodesLazyFunctionGraphInfo() = default; +GeometryNodesLazyFunctionGraphInfo::~GeometryNodesLazyFunctionGraphInfo() +{ + for (GMutablePointer &p : this->values_to_destruct) { + p.destruct(); + } +} + +} // namespace blender::nodes diff --git a/source/blender/nodes/intern/geometry_nodes_log.cc b/source/blender/nodes/intern/geometry_nodes_log.cc new file mode 100644 index 00000000000..350b199cd60 --- /dev/null +++ b/source/blender/nodes/intern/geometry_nodes_log.cc @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "NOD_geometry_nodes_lazy_function.hh" +#include "NOD_geometry_nodes_log.hh" + +#include "BKE_compute_contexts.hh" +#include "BKE_curves.hh" +#include "BKE_node_runtime.hh" + +#include "FN_field_cpp_type.hh" + +#include "DNA_modifier_types.h" +#include "DNA_space_types.h" + +namespace blender::nodes::geo_eval_log { + +using fn::FieldInput; +using fn::FieldInputs; + +GenericValueLog::~GenericValueLog() +{ + this->value.destruct(); +} + +FieldInfoLog::FieldInfoLog(const GField &field) : type(field.cpp_type()) +{ + const std::shared_ptr<const fn::FieldInputs> &field_input_nodes = field.node().field_inputs(); + + /* Put the deduplicated field inputs into a vector so that they can be sorted below. 
*/ + Vector<std::reference_wrapper<const FieldInput>> field_inputs; + if (field_input_nodes) { + field_inputs.extend(field_input_nodes->deduplicated_nodes.begin(), + field_input_nodes->deduplicated_nodes.end()); + } + + std::sort( + field_inputs.begin(), field_inputs.end(), [](const FieldInput &a, const FieldInput &b) { + const int index_a = (int)a.category(); + const int index_b = (int)b.category(); + if (index_a == index_b) { + return a.socket_inspection_name().size() < b.socket_inspection_name().size(); + } + return index_a < index_b; + }); + + for (const FieldInput &field_input : field_inputs) { + this->input_tooltips.append(field_input.socket_inspection_name()); + } +} + +GeometryInfoLog::GeometryInfoLog(const GeometrySet &geometry_set) +{ + static std::array all_component_types = {GEO_COMPONENT_TYPE_CURVE, + GEO_COMPONENT_TYPE_INSTANCES, + GEO_COMPONENT_TYPE_MESH, + GEO_COMPONENT_TYPE_POINT_CLOUD, + GEO_COMPONENT_TYPE_VOLUME}; + + /* Keep track handled attribute names to make sure that we do not return the same name twice. + * Currently #GeometrySet::attribute_foreach does not do that. Note that this will merge + * attributes with the same name but different domains or data types on separate components. 
*/ + Set<StringRef> names; + + geometry_set.attribute_foreach( + all_component_types, + true, + [&](const bke::AttributeIDRef &attribute_id, + const bke::AttributeMetaData &meta_data, + const GeometryComponent &UNUSED(component)) { + if (attribute_id.is_named() && names.add(attribute_id.name())) { + this->attributes.append({attribute_id.name(), meta_data.domain, meta_data.data_type}); + } + }); + + for (const GeometryComponent *component : geometry_set.get_components_for_read()) { + this->component_types.append(component->type()); + switch (component->type()) { + case GEO_COMPONENT_TYPE_MESH: { + const MeshComponent &mesh_component = *(const MeshComponent *)component; + MeshInfo &info = this->mesh_info.emplace(); + info.verts_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_POINT); + info.edges_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_EDGE); + info.faces_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_FACE); + break; + } + case GEO_COMPONENT_TYPE_CURVE: { + const CurveComponent &curve_component = *(const CurveComponent *)component; + CurveInfo &info = this->curve_info.emplace(); + info.splines_num = curve_component.attribute_domain_size(ATTR_DOMAIN_CURVE); + break; + } + case GEO_COMPONENT_TYPE_POINT_CLOUD: { + const PointCloudComponent &pointcloud_component = *(const PointCloudComponent *)component; + PointCloudInfo &info = this->pointcloud_info.emplace(); + info.points_num = pointcloud_component.attribute_domain_size(ATTR_DOMAIN_POINT); + break; + } + case GEO_COMPONENT_TYPE_INSTANCES: { + const InstancesComponent &instances_component = *(const InstancesComponent *)component; + InstancesInfo &info = this->instances_info.emplace(); + info.instances_num = instances_component.instances_num(); + break; + } + case GEO_COMPONENT_TYPE_EDIT: { + const GeometryComponentEditData &edit_component = *( + const GeometryComponentEditData *)component; + if (const bke::CurvesEditHints *curve_edit_hints = + edit_component.curves_edit_hints_.get()) { + 
EditDataInfo &info = this->edit_data_info.emplace(); + info.has_deform_matrices = curve_edit_hints->deform_mats.has_value(); + info.has_deformed_positions = curve_edit_hints->positions.has_value(); + } + break; + } + case GEO_COMPONENT_TYPE_VOLUME: { + break; + } + } + } +} + +/* Avoid generating these in every translation unit. */ +GeoModifierLog::GeoModifierLog() = default; +GeoModifierLog::~GeoModifierLog() = default; + +GeoTreeLogger::GeoTreeLogger() = default; +GeoTreeLogger::~GeoTreeLogger() = default; + +GeoNodeLog::GeoNodeLog() = default; +GeoNodeLog::~GeoNodeLog() = default; + +GeoTreeLog::GeoTreeLog(GeoModifierLog *modifier_log, Vector<GeoTreeLogger *> tree_loggers) + : modifier_log_(modifier_log), tree_loggers_(std::move(tree_loggers)) +{ + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const ComputeContextHash &hash : tree_logger->children_hashes) { + children_hashes_.add(hash); + } + } +} + +GeoTreeLog::~GeoTreeLog() = default; + +void GeoTreeLogger::log_value(const bNode &node, const bNodeSocket &socket, const GPointer value) +{ + const CPPType &type = *value.type(); + + auto store_logged_value = [&](destruct_ptr<ValueLog> value_log) { + auto &socket_values = socket.in_out == SOCK_IN ? 
this->input_socket_values : + this->output_socket_values; + socket_values.append({node.name, socket.identifier, std::move(value_log)}); + }; + + auto log_generic_value = [&](const CPPType &type, const void *value) { + void *buffer = this->allocator->allocate(type.size(), type.alignment()); + type.copy_construct(value, buffer); + store_logged_value(this->allocator->construct<GenericValueLog>(GMutablePointer{type, buffer})); + }; + + if (type.is<GeometrySet>()) { + const GeometrySet &geometry = *value.get<GeometrySet>(); + store_logged_value(this->allocator->construct<GeometryInfoLog>(geometry)); + } + else if (const auto *value_or_field_type = dynamic_cast<const fn::ValueOrFieldCPPType *>( + &type)) { + const void *value_or_field = value.get(); + const CPPType &base_type = value_or_field_type->base_type(); + if (value_or_field_type->is_field(value_or_field)) { + const GField *field = value_or_field_type->get_field_ptr(value_or_field); + if (field->node().depends_on_input()) { + store_logged_value(this->allocator->construct<FieldInfoLog>(*field)); + } + else { + BUFFER_FOR_CPP_TYPE_VALUE(base_type, value); + fn::evaluate_constant_field(*field, value); + log_generic_value(base_type, value); + } + } + else { + const void *value = value_or_field_type->get_value_ptr(value_or_field); + log_generic_value(base_type, value); + } + } + else { + log_generic_value(type, value.get()); + } +} + +void GeoTreeLogger::log_viewer_node(const bNode &viewer_node, + const GeometrySet &geometry, + const GField &field) +{ + destruct_ptr<ViewerNodeLog> log = this->allocator->construct<ViewerNodeLog>(); + log->geometry = geometry; + log->field = field; + log->geometry.ensure_owns_direct_data(); + this->viewer_node_logs.append({viewer_node.name, std::move(log)}); +} + +void GeoTreeLog::ensure_node_warnings() +{ + if (reduced_node_warnings_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::WarningWithNode &warnings : tree_logger->node_warnings) { 
+ this->nodes.lookup_or_add_default(warnings.node_name).warnings.append(warnings.warning); + this->all_warnings.append(warnings.warning); + } + } + for (const ComputeContextHash &child_hash : children_hashes_) { + GeoTreeLog &child_log = modifier_log_->get_tree_log(child_hash); + child_log.ensure_node_warnings(); + const std::optional<std::string> &group_node_name = + child_log.tree_loggers_[0]->group_node_name; + if (group_node_name.has_value()) { + this->nodes.lookup_or_add_default(*group_node_name).warnings.extend(child_log.all_warnings); + } + this->all_warnings.extend(child_log.all_warnings); + } + reduced_node_warnings_ = true; +} + +void GeoTreeLog::ensure_node_run_time() +{ + if (reduced_node_run_times_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::NodeExecutionTime &timings : tree_logger->node_execution_times) { + const std::chrono::nanoseconds duration = timings.end - timings.start; + this->nodes.lookup_or_add_default_as(timings.node_name).run_time += duration; + this->run_time_sum += duration; + } + } + for (const ComputeContextHash &child_hash : children_hashes_) { + GeoTreeLog &child_log = modifier_log_->get_tree_log(child_hash); + child_log.ensure_node_run_time(); + const std::optional<std::string> &group_node_name = + child_log.tree_loggers_[0]->group_node_name; + if (group_node_name.has_value()) { + this->nodes.lookup_or_add_default(*group_node_name).run_time += child_log.run_time_sum; + } + this->run_time_sum += child_log.run_time_sum; + } + reduced_node_run_times_ = true; +} + +void GeoTreeLog::ensure_socket_values() +{ + if (reduced_socket_values_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::SocketValueLog &value_log_data : tree_logger->input_socket_values) { + this->nodes.lookup_or_add_as(value_log_data.node_name) + .input_values_.add(value_log_data.socket_identifier, value_log_data.value.get()); + } + for (const GeoTreeLogger::SocketValueLog 
&value_log_data : tree_logger->output_socket_values) { + this->nodes.lookup_or_add_as(value_log_data.node_name) + .output_values_.add(value_log_data.socket_identifier, value_log_data.value.get()); + } + } + reduced_socket_values_ = true; +} + +void GeoTreeLog::ensure_viewer_node_logs() +{ + if (reduced_viewer_node_logs_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::ViewerNodeLogWithNode &viewer_log : tree_logger->viewer_node_logs) { + this->viewer_node_logs.add(viewer_log.node_name, viewer_log.viewer_log.get()); + } + } + reduced_viewer_node_logs_ = true; +} + +void GeoTreeLog::ensure_existing_attributes() +{ + if (reduced_existing_attributes_) { + return; + } + this->ensure_socket_values(); + + Set<StringRef> names; + + auto handle_value_log = [&](const ValueLog &value_log) { + const GeometryInfoLog *geo_log = dynamic_cast<const GeometryInfoLog *>(&value_log); + if (geo_log == nullptr) { + return; + } + for (const GeometryAttributeInfo &attribute : geo_log->attributes) { + if (names.add(attribute.name)) { + this->existing_attributes.append(&attribute); + } + } + }; + + for (const GeoNodeLog &node_log : this->nodes.values()) { + for (const ValueLog *value_log : node_log.input_values_.values()) { + handle_value_log(*value_log); + } + for (const ValueLog *value_log : node_log.output_values_.values()) { + handle_value_log(*value_log); + } + } + reduced_existing_attributes_ = true; +} + +void GeoTreeLog::ensure_used_named_attributes() +{ + if (reduced_used_named_attributes_) { + return; + } + + auto add_attribute = [&](const StringRef node_name, + const StringRef attribute_name, + const NamedAttributeUsage &usage) { + this->nodes.lookup_or_add_as(node_name).used_named_attributes.lookup_or_add_as(attribute_name, + usage) |= usage; + this->used_named_attributes.lookup_or_add_as(attribute_name, usage) |= usage; + }; + + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::AttributeUsageWithNode 
&item : tree_logger->used_named_attributes) { + add_attribute(item.node_name, item.attribute_name, item.usage); + } + } + for (const ComputeContextHash &child_hash : children_hashes_) { + GeoTreeLog &child_log = modifier_log_->get_tree_log(child_hash); + child_log.ensure_used_named_attributes(); + if (const std::optional<std::string> &group_node_name = + child_log.tree_loggers_[0]->group_node_name) { + for (const auto &item : child_log.used_named_attributes.items()) { + add_attribute(*group_node_name, item.key, item.value); + } + } + } + reduced_used_named_attributes_ = true; +} + +void GeoTreeLog::ensure_debug_messages() +{ + if (reduced_debug_messages_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::DebugMessage &debug_message : tree_logger->debug_messages) { + this->nodes.lookup_or_add_as(debug_message.node_name) + .debug_messages.append(debug_message.message); + } + } + reduced_debug_messages_ = true; +} + +ValueLog *GeoTreeLog::find_socket_value_log(const bNodeSocket &query_socket) +{ + /** + * Geometry nodes does not log values for every socket. That would produce a lot of redundant + * data,because often many linked sockets have the same value. To find the logged value for a + * socket one might have to look at linked sockets as well. + */ + + BLI_assert(reduced_socket_values_); + if (query_socket.is_multi_input()) { + /* Not supported currently. */ + return nullptr; + } + + Set<const bNodeSocket *> added_sockets; + Stack<const bNodeSocket *> sockets_to_check; + sockets_to_check.push(&query_socket); + added_sockets.add(&query_socket); + + while (!sockets_to_check.is_empty()) { + const bNodeSocket &socket = *sockets_to_check.pop(); + const bNode &node = socket.owner_node(); + if (GeoNodeLog *node_log = this->nodes.lookup_ptr(node.name)) { + ValueLog *value_log = socket.is_input() ? 
+ node_log->input_values_.lookup_default(socket.identifier, + nullptr) : + node_log->output_values_.lookup_default(socket.identifier, + nullptr); + if (value_log != nullptr) { + return value_log; + } + } + + if (socket.is_input()) { + const Span<const bNodeLink *> links = socket.directly_linked_links(); + for (const bNodeLink *link : links) { + const bNodeSocket &from_socket = *link->fromsock; + if (added_sockets.add(&from_socket)) { + sockets_to_check.push(&from_socket); + } + } + } + else { + if (node.is_reroute()) { + const bNodeSocket &input_socket = node.input_socket(0); + if (added_sockets.add(&input_socket)) { + sockets_to_check.push(&input_socket); + } + const Span<const bNodeLink *> links = input_socket.directly_linked_links(); + for (const bNodeLink *link : links) { + const bNodeSocket &from_socket = *link->fromsock; + if (added_sockets.add(&from_socket)) { + sockets_to_check.push(&from_socket); + } + } + } + else if (node.is_muted()) { + if (const bNodeSocket *input_socket = socket.internal_link_input()) { + if (added_sockets.add(input_socket)) { + sockets_to_check.push(input_socket); + } + const Span<const bNodeLink *> links = input_socket->directly_linked_links(); + for (const bNodeLink *link : links) { + const bNodeSocket &from_socket = *link->fromsock; + if (added_sockets.add(&from_socket)) { + sockets_to_check.push(&from_socket); + } + } + } + } + } + } + + return nullptr; +} + +GeoTreeLogger &GeoModifierLog::get_local_tree_logger(const ComputeContext &compute_context) +{ + LocalData &local_data = data_per_thread_.local(); + Map<ComputeContextHash, destruct_ptr<GeoTreeLogger>> &local_tree_loggers = + local_data.tree_logger_by_context; + destruct_ptr<GeoTreeLogger> &tree_logger_ptr = local_tree_loggers.lookup_or_add_default( + compute_context.hash()); + if (tree_logger_ptr) { + return *tree_logger_ptr; + } + tree_logger_ptr = local_data.allocator.construct<GeoTreeLogger>(); + GeoTreeLogger &tree_logger = *tree_logger_ptr; + tree_logger.allocator = 
&local_data.allocator; + const ComputeContext *parent_compute_context = compute_context.parent(); + if (parent_compute_context != nullptr) { + tree_logger.parent_hash = parent_compute_context->hash(); + GeoTreeLogger &parent_logger = this->get_local_tree_logger(*parent_compute_context); + parent_logger.children_hashes.append(compute_context.hash()); + } + if (const bke::NodeGroupComputeContext *node_group_compute_context = + dynamic_cast<const bke::NodeGroupComputeContext *>(&compute_context)) { + tree_logger.group_node_name.emplace(node_group_compute_context->node_name()); + } + return tree_logger; +} + +GeoTreeLog &GeoModifierLog::get_tree_log(const ComputeContextHash &compute_context_hash) +{ + GeoTreeLog &reduced_tree_log = *tree_logs_.lookup_or_add_cb(compute_context_hash, [&]() { + Vector<GeoTreeLogger *> tree_logs; + for (LocalData &local_data : data_per_thread_) { + destruct_ptr<GeoTreeLogger> *tree_log = local_data.tree_logger_by_context.lookup_ptr( + compute_context_hash); + if (tree_log != nullptr) { + tree_logs.append(tree_log->get()); + } + } + return std::make_unique<GeoTreeLog>(this, std::move(tree_logs)); + }); + return reduced_tree_log; +} + +struct ObjectAndModifier { + const Object *object; + const NodesModifierData *nmd; +}; + +static std::optional<ObjectAndModifier> get_modifier_for_node_editor(const SpaceNode &snode) +{ + if (snode.id == nullptr) { + return std::nullopt; + } + if (GS(snode.id->name) != ID_OB) { + return std::nullopt; + } + const Object *object = reinterpret_cast<Object *>(snode.id); + const NodesModifierData *used_modifier = nullptr; + if (snode.flag & SNODE_PIN) { + LISTBASE_FOREACH (const ModifierData *, md, &object->modifiers) { + if (md->type == eModifierType_Nodes) { + const NodesModifierData *nmd = reinterpret_cast<const NodesModifierData *>(md); + /* Would be good to store the name of the pinned modifier in the node editor. 
*/ + if (nmd->node_group == snode.nodetree) { + used_modifier = nmd; + break; + } + } + } + } + else { + LISTBASE_FOREACH (const ModifierData *, md, &object->modifiers) { + if (md->type == eModifierType_Nodes) { + const NodesModifierData *nmd = reinterpret_cast<const NodesModifierData *>(md); + if (nmd->node_group == snode.nodetree) { + if (md->flag & eModifierFlag_Active) { + used_modifier = nmd; + break; + } + } + } + } + } + if (used_modifier == nullptr) { + return std::nullopt; + } + return ObjectAndModifier{object, used_modifier}; +} + +GeoTreeLog *GeoModifierLog::get_tree_log_for_node_editor(const SpaceNode &snode) +{ + std::optional<ObjectAndModifier> object_and_modifier = get_modifier_for_node_editor(snode); + if (!object_and_modifier) { + return nullptr; + } + GeoModifierLog *modifier_log = static_cast<GeoModifierLog *>( + object_and_modifier->nmd->runtime_eval_log); + if (modifier_log == nullptr) { + return nullptr; + } + Vector<const bNodeTreePath *> tree_path = snode.treepath; + if (tree_path.is_empty()) { + return nullptr; + } + ComputeContextBuilder compute_context_builder; + compute_context_builder.push<bke::ModifierComputeContext>( + object_and_modifier->nmd->modifier.name); + for (const bNodeTreePath *path_item : tree_path.as_span().drop_front(1)) { + compute_context_builder.push<bke::NodeGroupComputeContext>(path_item->node_name); + } + return &modifier_log->get_tree_log(compute_context_builder.hash()); +} + +const ViewerNodeLog *GeoModifierLog::find_viewer_node_log_for_spreadsheet( + const SpaceSpreadsheet &sspreadsheet) +{ + Vector<const SpreadsheetContext *> context_path = sspreadsheet.context_path; + if (context_path.size() < 3) { + return nullptr; + } + if (context_path[0]->type != SPREADSHEET_CONTEXT_OBJECT) { + return nullptr; + } + if (context_path[1]->type != SPREADSHEET_CONTEXT_MODIFIER) { + return nullptr; + } + const SpreadsheetContextObject *object_context = + reinterpret_cast<const SpreadsheetContextObject *>(context_path[0]); + 
const SpreadsheetContextModifier *modifier_context = + reinterpret_cast<const SpreadsheetContextModifier *>(context_path[1]); + if (object_context->object == nullptr) { + return nullptr; + } + NodesModifierData *nmd = nullptr; + LISTBASE_FOREACH (ModifierData *, md, &object_context->object->modifiers) { + if (STREQ(md->name, modifier_context->modifier_name)) { + if (md->type == eModifierType_Nodes) { + nmd = reinterpret_cast<NodesModifierData *>(md); + } + } + } + if (nmd == nullptr) { + return nullptr; + } + if (nmd->runtime_eval_log == nullptr) { + return nullptr; + } + nodes::geo_eval_log::GeoModifierLog *modifier_log = + static_cast<nodes::geo_eval_log::GeoModifierLog *>(nmd->runtime_eval_log); + + ComputeContextBuilder compute_context_builder; + compute_context_builder.push<bke::ModifierComputeContext>(modifier_context->modifier_name); + for (const SpreadsheetContext *context : context_path.as_span().drop_front(2).drop_back(1)) { + if (context->type != SPREADSHEET_CONTEXT_NODE) { + return nullptr; + } + const SpreadsheetContextNode &node_context = *reinterpret_cast<const SpreadsheetContextNode *>( + context); + compute_context_builder.push<bke::NodeGroupComputeContext>(node_context.node_name); + } + const ComputeContextHash context_hash = compute_context_builder.hash(); + nodes::geo_eval_log::GeoTreeLog &tree_log = modifier_log->get_tree_log(context_hash); + tree_log.ensure_viewer_node_logs(); + + const SpreadsheetContext *last_context = context_path.last(); + if (last_context->type != SPREADSHEET_CONTEXT_NODE) { + return nullptr; + } + const SpreadsheetContextNode &last_node_context = + *reinterpret_cast<const SpreadsheetContextNode *>(last_context); + const ViewerNodeLog *viewer_log = tree_log.viewer_node_logs.lookup_default( + last_node_context.node_name, nullptr); + return viewer_log; +} + +} // namespace blender::nodes::geo_eval_log diff --git a/source/blender/nodes/intern/node_geometry_exec.cc b/source/blender/nodes/intern/node_geometry_exec.cc index 
953dce035c2..1833774fe33 100644 --- a/source/blender/nodes/intern/node_geometry_exec.cc +++ b/source/blender/nodes/intern/node_geometry_exec.cc @@ -11,34 +11,27 @@ #include "node_geometry_util.hh" -using blender::nodes::geometry_nodes_eval_log::LocalGeoLogger; - namespace blender::nodes { void GeoNodeExecParams::error_message_add(const NodeWarningType type, std::string message) const { - if (provider_->logger == nullptr) { - return; + if (geo_eval_log::GeoTreeLogger *tree_logger = this->get_local_tree_logger()) { + tree_logger->node_warnings.append({node_.name, {type, std::move(message)}}); } - LocalGeoLogger &local_logger = provider_->logger->local(); - local_logger.log_node_warning(provider_->dnode, type, std::move(message)); } void GeoNodeExecParams::used_named_attribute(std::string attribute_name, - const eNamedAttrUsage usage) + const NamedAttributeUsage usage) { - if (provider_->logger == nullptr) { - return; + if (geo_eval_log::GeoTreeLogger *tree_logger = this->get_local_tree_logger()) { + tree_logger->used_named_attributes.append({node_.name, std::move(attribute_name), usage}); } - LocalGeoLogger &local_logger = provider_->logger->local(); - local_logger.log_used_named_attribute(provider_->dnode, std::move(attribute_name), usage); } void GeoNodeExecParams::check_input_geometry_set(StringRef identifier, const GeometrySet &geometry_set) const { - const SocketDeclaration &decl = - *provider_->dnode->input_by_identifier(identifier).runtime->declaration; + const SocketDeclaration &decl = *node_.input_by_identifier(identifier).runtime->declaration; const decl::Geometry *geo_decl = dynamic_cast<const decl::Geometry *>(&decl); if (geo_decl == nullptr) { return; @@ -118,7 +111,7 @@ void GeoNodeExecParams::check_output_geometry_set(const GeometrySet &geometry_se const bNodeSocket *GeoNodeExecParams::find_available_socket(const StringRef name) const { - for (const bNodeSocket *socket : provider_->dnode->runtime->inputs) { + for (const bNodeSocket *socket : 
node_.input_sockets()) { if (socket->is_available() && socket->name == name) { return socket; } @@ -129,19 +122,19 @@ const bNodeSocket *GeoNodeExecParams::find_available_socket(const StringRef name std::string GeoNodeExecParams::attribute_producer_name() const { - return provider_->dnode->label_or_name() + TIP_(" node"); + return node_.label_or_name() + TIP_(" node"); } void GeoNodeExecParams::set_default_remaining_outputs() { - provider_->set_default_remaining_outputs(); + params_.set_default_remaining_outputs(); } void GeoNodeExecParams::check_input_access(StringRef identifier, const CPPType *requested_type) const { const bNodeSocket *found_socket = nullptr; - for (const bNodeSocket *socket : provider_->dnode->input_sockets()) { + for (const bNodeSocket *socket : node_.input_sockets()) { if (socket->identifier == identifier) { found_socket = socket; break; @@ -151,7 +144,7 @@ void GeoNodeExecParams::check_input_access(StringRef identifier, if (found_socket == nullptr) { std::cout << "Did not find an input socket with the identifier '" << identifier << "'.\n"; std::cout << "Possible identifiers are: "; - for (const bNodeSocket *socket : provider_->dnode->input_sockets()) { + for (const bNodeSocket *socket : node_.input_sockets()) { if (socket->is_available()) { std::cout << "'" << socket->identifier << "', "; } @@ -164,13 +157,7 @@ void GeoNodeExecParams::check_input_access(StringRef identifier, << "' is disabled.\n"; BLI_assert_unreachable(); } - else if (!provider_->can_get_input(identifier)) { - std::cout << "The identifier '" << identifier - << "' is valid, but there is no value for it anymore.\n"; - std::cout << "Most likely it has been extracted before.\n"; - BLI_assert_unreachable(); - } - else if (requested_type != nullptr) { + else if (requested_type != nullptr && (found_socket->flag & SOCK_MULTI_INPUT) == 0) { const CPPType &expected_type = *found_socket->typeinfo->geometry_nodes_cpp_type; if (*requested_type != expected_type) { std::cout << "The 
requested type '" << requested_type->name() << "' is incorrect. Expected '" @@ -183,7 +170,7 @@ void GeoNodeExecParams::check_input_access(StringRef identifier, void GeoNodeExecParams::check_output_access(StringRef identifier, const CPPType &value_type) const { const bNodeSocket *found_socket = nullptr; - for (const bNodeSocket *socket : provider_->dnode->output_sockets()) { + for (const bNodeSocket *socket : node_.output_sockets()) { if (socket->identifier == identifier) { found_socket = socket; break; @@ -193,8 +180,8 @@ void GeoNodeExecParams::check_output_access(StringRef identifier, const CPPType if (found_socket == nullptr) { std::cout << "Did not find an output socket with the identifier '" << identifier << "'.\n"; std::cout << "Possible identifiers are: "; - for (const bNodeSocket *socket : provider_->dnode->output_sockets()) { - if (!(socket->flag & SOCK_UNAVAIL)) { + for (const bNodeSocket *socket : node_.output_sockets()) { + if (socket->is_available()) { std::cout << "'" << socket->identifier << "', "; } } @@ -206,7 +193,7 @@ void GeoNodeExecParams::check_output_access(StringRef identifier, const CPPType << "' is disabled.\n"; BLI_assert_unreachable(); } - else if (!provider_->can_set_output(identifier)) { + else if (params_.output_was_set(this->get_output_index(identifier))) { std::cout << "The identifier '" << identifier << "' has been set already.\n"; BLI_assert_unreachable(); } diff --git a/source/blender/nodes/intern/node_multi_function.cc b/source/blender/nodes/intern/node_multi_function.cc index 1f8397923e9..d731fe8f877 100644 --- a/source/blender/nodes/intern/node_multi_function.cc +++ b/source/blender/nodes/intern/node_multi_function.cc @@ -3,21 +3,21 @@ #include "NOD_multi_function.hh" #include "BKE_node.h" +#include "BKE_node_runtime.hh" namespace blender::nodes { -NodeMultiFunctions::NodeMultiFunctions(const DerivedNodeTree &tree) +NodeMultiFunctions::NodeMultiFunctions(const bNodeTree &tree) { - for (const bNodeTree *btree : 
tree.used_btrees()) { - for (const bNode *bnode : btree->all_nodes()) { - if (bnode->typeinfo->build_multi_function == nullptr) { - continue; - } - NodeMultiFunctionBuilder builder{*bnode, *btree}; - bnode->typeinfo->build_multi_function(builder); - if (builder.built_fn_ != nullptr) { - map_.add_new(bnode, {builder.built_fn_, std::move(builder.owned_built_fn_)}); - } + tree.ensure_topology_cache(); + for (const bNode *bnode : tree.all_nodes()) { + if (bnode->typeinfo->build_multi_function == nullptr) { + continue; + } + NodeMultiFunctionBuilder builder{*bnode, tree}; + bnode->typeinfo->build_multi_function(builder); + if (builder.built_fn_ != nullptr) { + map_.add_new(bnode, {builder.built_fn_, std::move(builder.owned_built_fn_)}); } } } diff --git a/source/blender/render/intern/engine.cc b/source/blender/render/intern/engine.cc index a440b34af78..0024ebe38f7 100644 --- a/source/blender/render/intern/engine.cc +++ b/source/blender/render/intern/engine.cc @@ -1276,8 +1276,6 @@ void RE_engine_gpu_context_destroy(RenderEngine *engine) return; } - BLI_assert(BLI_thread_is_main()); - const bool drw_state = DRW_opengl_context_release(); WM_opengl_context_activate(engine->gpu_context); diff --git a/source/blender/sequencer/intern/modifier.c b/source/blender/sequencer/intern/modifier.c index b0f2f53396b..b17db8f762e 100644 --- a/source/blender/sequencer/intern/modifier.c +++ b/source/blender/sequencer/intern/modifier.c @@ -598,7 +598,7 @@ static void modifier_color_balance_apply( ColorBalanceInitData init_data; if (!ibuf->rect_float && make_float) { - imb_addrectfloatImBuf(ibuf); + imb_addrectfloatImBuf(ibuf, 4); } init_data.cb = cb; diff --git a/source/blender/sequencer/intern/render.c b/source/blender/sequencer/intern/render.c index b7dc0e7035d..fd3b6103b94 100644 --- a/source/blender/sequencer/intern/render.c +++ b/source/blender/sequencer/intern/render.c @@ -134,7 +134,7 @@ void seq_imbuf_to_sequencer_space(Scene *scene, ImBuf *ibuf, bool make_float) /* We perform 
conversion to a float buffer so we don't worry about * precision loss. */ - imb_addrectfloatImBuf(ibuf); + imb_addrectfloatImBuf(ibuf, 4); IMB_colormanagement_transform_from_byte_threaded(ibuf->rect_float, (unsigned char *)ibuf->rect, ibuf->x, diff --git a/source/blender/windowmanager/intern/wm_init_exit.c b/source/blender/windowmanager/intern/wm_init_exit.c index 283b87f1a2f..7ab2e67e4b6 100644 --- a/source/blender/windowmanager/intern/wm_init_exit.c +++ b/source/blender/windowmanager/intern/wm_init_exit.c @@ -641,7 +641,8 @@ void WM_exit_ex(bContext *C, const bool do_python) BKE_tempdir_session_purge(); - /* Keep last (or near last) so logging can be used right up until everything is shut-down. */ + /* Logging cannot be called after exiting (#CLOG_INFO, #CLOG_WARN etc will crash). + * So postpone exiting until other sub-systems that may use logging have shut down. */ CLG_exit(); } diff --git a/source/creator/CMakeLists.txt b/source/creator/CMakeLists.txt index b228c8d9ac1..eee64b97e82 100644 --- a/source/creator/CMakeLists.txt +++ b/source/creator/CMakeLists.txt @@ -1173,7 +1173,7 @@ elseif(APPLE) if(WITH_PYTHON AND NOT WITH_PYTHON_MODULE AND NOT WITH_PYTHON_FRAMEWORK) # Copy the python libraries into the install directory. install_dir( - ${PYTHON_LIBPATH} + ${PYTHON_LIBPATH}/python${PYTHON_VERSION} ${TARGETDIR_VER}/python/lib ) diff --git a/source/creator/creator.c b/source/creator/creator.c index 7f236a39974..2d8b1e16098 100644 --- a/source/creator/creator.c +++ b/source/creator/creator.c @@ -286,12 +286,11 @@ int main(int argc, _putenv_s("OMP_WAIT_POLICY", "PASSIVE"); # endif - /* Win32 Unicode Arguments. */ - /* NOTE: cannot use `guardedalloc` allocation here, as it's not yet initialized - * (it depends on the arguments passed in, which is what we're getting here!) - */ # ifdef USE_WIN32_UNICODE_ARGS + /* Win32 Unicode Arguments. 
*/ { + /* NOTE: Can't use `guardedalloc` allocation here, as it's not yet initialized + * (it depends on the arguments passed in, which is what we're getting here!) */ wchar_t **argv_16 = CommandLineToArgvW(GetCommandLineW(), &argc); argv = malloc(argc * sizeof(char *)); for (argv_num = 0; argv_num < argc; argv_num++) { |