From 3e5a4d14124029dd3ccb111de2db299bb405d668 Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Thu, 23 Jun 2022 11:50:53 -0500 Subject: Geometry Nodes: Optimize selection for virtual array input This makes calculation of selected indices slightly faster when the input is a virtual array (the direct output of various nodes like Face Area, etc). The utility can be helpful for other areas that need to find selected indices besides field evaluation. With the face area node used as a selection with 4 million faces, the speedup is 3.51 ms to 3.39 ms, just a slight speedup. Differential Revision: https://developer.blender.org/D15127 --- source/blender/blenlib/intern/index_mask.cc | 49 +++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) (limited to 'source/blender/blenlib/intern') diff --git a/source/blender/blenlib/intern/index_mask.cc b/source/blender/blenlib/intern/index_mask.cc index 1e301bc5fb9..f3590e4a41c 100644 --- a/source/blender/blenlib/intern/index_mask.cc +++ b/source/blender/blenlib/intern/index_mask.cc @@ -128,7 +128,9 @@ Vector<IndexRange> IndexMask::extract_ranges_invert(const IndexRange full_range, } // namespace blender -namespace blender::index_mask_ops::detail { +namespace blender::index_mask_ops { + +namespace detail { IndexMask find_indices_based_on_predicate__merge( IndexMask indices_to_check, @@ -193,4 +195,47 @@ IndexMask find_indices_based_on_predicate__merge( return r_indices.as_span(); } -} // namespace blender::index_mask_ops::detail +} // namespace detail + +IndexMask find_indices_from_virtual_array(const IndexMask indices_to_check, + const VArray<bool> &virtual_array, + const int64_t parallel_grain_size, + Vector<int64_t> &r_indices) +{ + if (virtual_array.is_single()) { + return virtual_array.get_internal_single() ? 
indices_to_check : IndexMask(0); + } + if (virtual_array.is_span()) { + const Span<bool> span = virtual_array.get_internal_span(); + return find_indices_based_on_predicate( + indices_to_check, 4096, r_indices, [&](const int64_t i) { return span[i]; }); + } + + threading::EnumerableThreadSpecific<Vector<bool>> materialize_buffers; + threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> sub_masks; + + threading::parallel_for( + indices_to_check.index_range(), parallel_grain_size, [&](const IndexRange range) { + const IndexMask sliced_mask = indices_to_check.slice(range); + + /* To avoid virtual function call overhead from accessing the virtual array, + * materialize the necessary indices for this chunk into a reused buffer. */ + Vector<bool> &buffer = materialize_buffers.local(); + buffer.reinitialize(sliced_mask.size()); + virtual_array.materialize_compressed(sliced_mask, buffer); + + Vector<int64_t> masked_indices; + sliced_mask.to_best_mask_type([&](auto best_mask) { + for (const int64_t i : IndexRange(best_mask.size())) { + if (buffer[i]) { + masked_indices.append(best_mask[i]); + } + } + }); + sub_masks.local().append(std::move(masked_indices)); + }); + + return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices); +} + +} // namespace blender::index_mask_ops -- cgit v1.2.3