git.blender.org/blender.git
author    Hans Goudey <h.goudey@me.com>    2022-06-23 19:50:53 +0300
committer Hans Goudey <h.goudey@me.com>    2022-06-23 19:51:33 +0300
commit    3e5a4d14124029dd3ccb111de2db299bb405d668 (patch)
tree      a9f0af8c3ca47b00037e4d7a8bd5f989d70be088 /source/blender/blenlib
parent    633c2f07da2926e42f5bb5d43d16dd23767c498c (diff)
Geometry Nodes: Optimize selection for virtual array input
This makes calculation of selected indices slightly faster when the input is a
virtual array (the direct output of various nodes like Face Area, etc.). The
utility can also be helpful for other areas that need to find selected indices
besides field evaluation.

With the Face Area node used as a selection on 4 million faces, the time drops
from 3.51 ms to 3.39 ms, a slight speedup.

Differential Revision: https://developer.blender.org/D15127
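For illustration, a minimal sketch of how the new utility might be called outside of field evaluation. The `get_selection_varray` helper and the grain size of 2048 are hypothetical, not part of this commit:

    #include "BLI_index_mask_ops.hh"
    #include "BLI_vector.hh"
    #include "BLI_virtual_array.hh"

    using namespace blender;

    /* Hypothetical boolean selection, e.g. a node output evaluated per face. */
    const VArray<bool> selection = get_selection_varray();

    /* Storage for the selected indices; the returned mask may reference it,
     * so it must outlive the mask. */
    Vector<int64_t> index_storage;
    const IndexMask selected = index_mask_ops::find_indices_from_virtual_array(
        IndexMask(selection.size()), selection, 2048, index_storage);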
Diffstat (limited to 'source/blender/blenlib')
-rw-r--r--  source/blender/blenlib/BLI_index_mask_ops.hh | 14
-rw-r--r--  source/blender/blenlib/intern/index_mask.cc  | 49
2 files changed, 61 insertions, 2 deletions
diff --git a/source/blender/blenlib/BLI_index_mask_ops.hh b/source/blender/blenlib/BLI_index_mask_ops.hh
index 48a1f27a2fa..f67abb1d550 100644
--- a/source/blender/blenlib/BLI_index_mask_ops.hh
+++ b/source/blender/blenlib/BLI_index_mask_ops.hh
@@ -13,6 +13,7 @@
#include "BLI_index_mask.hh"
#include "BLI_task.hh"
#include "BLI_vector.hh"
+#include "BLI_virtual_array.hh"
namespace blender::index_mask_ops {
@@ -57,4 +58,17 @@ inline IndexMask find_indices_based_on_predicate(const IndexMask indices_to_chec
return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices);
}
+/**
+ * Find the true indices in a virtual array. This is a version of
+ * #find_indices_based_on_predicate optimised for a virtual array input.
+ *
+ * \param parallel_grain_size: The grain size for when the virtual array isn't a span or a single
+ * value internally. This should be adjusted based on the expected cost of evaluating the virtual
+ * array-- more expensive virtual arrays should have smaller grain sizes.
+ */
+IndexMask find_indices_from_virtual_array(IndexMask indices_to_check,
+ const VArray<bool> &virtual_array,
+ int64_t parallel_grain_size,
+ Vector<int64_t> &r_indices);
+
} // namespace blender::index_mask_ops
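As the doc comment above notes, `parallel_grain_size` only affects the fallback path, where the virtual array is neither a single value nor a span internally. A rough sketch of how a caller might tune it; the variable names and grain sizes are illustrative, not values from this patch:

    /* Cheap per-element evaluation: a larger grain keeps threading overhead low. */
    Vector<int64_t> cheap_storage;
    const IndexMask cheap_mask = index_mask_ops::find_indices_from_virtual_array(
        indices_to_check, cheap_varray, 4096, cheap_storage);

    /* Expensive per-element evaluation: a smaller grain keeps threads balanced. */
    Vector<int64_t> expensive_storage;
    const IndexMask expensive_mask = index_mask_ops::find_indices_from_virtual_array(
        indices_to_check, expensive_varray, 512, expensive_storage);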
diff --git a/source/blender/blenlib/intern/index_mask.cc b/source/blender/blenlib/intern/index_mask.cc
index 1e301bc5fb9..f3590e4a41c 100644
--- a/source/blender/blenlib/intern/index_mask.cc
+++ b/source/blender/blenlib/intern/index_mask.cc
@@ -128,7 +128,9 @@ Vector<IndexRange> IndexMask::extract_ranges_invert(const IndexRange full_range,
} // namespace blender
-namespace blender::index_mask_ops::detail {
+namespace blender::index_mask_ops {
+
+namespace detail {
IndexMask find_indices_based_on_predicate__merge(
IndexMask indices_to_check,
@@ -193,4 +195,47 @@ IndexMask find_indices_based_on_predicate__merge(
return r_indices.as_span();
}
-} // namespace blender::index_mask_ops::detail
+} // namespace detail
+
+IndexMask find_indices_from_virtual_array(const IndexMask indices_to_check,
+ const VArray<bool> &virtual_array,
+ const int64_t parallel_grain_size,
+ Vector<int64_t> &r_indices)
+{
+ if (virtual_array.is_single()) {
+ return virtual_array.get_internal_single() ? indices_to_check : IndexMask(0);
+ }
+ if (virtual_array.is_span()) {
+ const Span<bool> span = virtual_array.get_internal_span();
+ return find_indices_based_on_predicate(
+ indices_to_check, 4096, r_indices, [&](const int64_t i) { return span[i]; });
+ }
+
+ threading::EnumerableThreadSpecific<Vector<bool>> materialize_buffers;
+ threading::EnumerableThreadSpecific<Vector<Vector<int64_t>>> sub_masks;
+
+ threading::parallel_for(
+ indices_to_check.index_range(), parallel_grain_size, [&](const IndexRange range) {
+ const IndexMask sliced_mask = indices_to_check.slice(range);
+
+ /* To avoid virtual function call overhead from accessing the virtual array,
+ * materialize the necessary indices for this chunk into a reused buffer. */
+ Vector<bool> &buffer = materialize_buffers.local();
+ buffer.reinitialize(sliced_mask.size());
+ virtual_array.materialize_compressed(sliced_mask, buffer);
+
+ Vector<int64_t> masked_indices;
+ sliced_mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ if (buffer[i]) {
+ masked_indices.append(best_mask[i]);
+ }
+ }
+ });
+ sub_masks.local().append(std::move(masked_indices));
+ });
+
+ return detail::find_indices_based_on_predicate__merge(indices_to_check, sub_masks, r_indices);
+}
+
+} // namespace blender::index_mask_ops
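For reference on the fallback path: `VArray<bool>::materialize_compressed` gathers the values at the masked indices into a dense buffer, which is why `buffer[i]` lines up with `best_mask[i]` in the inner loop above. Conceptually it computes the following (simplified sketch; the actual implementation dispatches to the underlying array type, so it can avoid a virtual call per element where possible):

    /* Conceptual equivalent of virtual_array.materialize_compressed(mask, buffer). */
    for (const int64_t i : mask.index_range()) {
      buffer[i] = virtual_array[mask[i]];
    }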