diff options
Diffstat (limited to 'source/blender/blenlib/intern')
-rw-r--r-- | source/blender/blenlib/intern/BLI_kdopbvh.c | 22
-rw-r--r-- | source/blender/blenlib/intern/task.c | 10
2 files changed, 11 insertions, 21 deletions
diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c index d2fc5c7b72b..10e29e09827 100644 --- a/source/blender/blenlib/intern/BLI_kdopbvh.c +++ b/source/blender/blenlib/intern/BLI_kdopbvh.c @@ -874,14 +874,9 @@ static void non_recursive_bvh_div_nodes(BVHTree *tree, BVHNode *branches_array, cb_data.i = i; cb_data.depth = depth; - if (num_leafs > KDOPBVH_THREAD_LEAF_THRESHOLD) { - BLI_task_parallel_range_ex(i, end_j, &cb_data, NULL, 0, non_recursive_bvh_div_nodes_task_cb, 0, false); - } - else { - for (j = i; j < end_j; j++) { - non_recursive_bvh_div_nodes_task_cb(&cb_data, NULL, j); - } - } + BLI_task_parallel_range_ex( + i, end_j, &cb_data, NULL, 0, non_recursive_bvh_div_nodes_task_cb, + num_leafs > KDOPBVH_THREAD_LEAF_THRESHOLD, false); } } @@ -1266,14 +1261,9 @@ BVHTreeOverlap *BLI_bvhtree_overlap( data[j].thread = j; } - if (tree1->totleaf > KDOPBVH_THREAD_LEAF_THRESHOLD) { - BLI_task_parallel_range_ex(0, thread_num, data, NULL, 0, bvhtree_overlap_task_cb, 0, false); - } - else { - for (j = 0; j < thread_num; j++) { - bvhtree_overlap_task_cb(data, NULL, j); - } - } + BLI_task_parallel_range_ex( + 0, thread_num, data, NULL, 0, bvhtree_overlap_task_cb, + tree1->totleaf > KDOPBVH_THREAD_LEAF_THRESHOLD, false); for (j = 0; j < thread_num; j++) total += BLI_stack_count(data[j].overlap); diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c index 104ebcec26b..d3b11d8e6d0 100644 --- a/source/blender/blenlib/intern/task.c +++ b/source/blender/blenlib/intern/task.c @@ -643,8 +643,8 @@ static void parallel_range_func( * (similar to OpenMP's firstprivate). * \param userdata_chunk_size Memory size of \a userdata_chunk. * \param func Callback function. - * \param range_threshold Minimum size of processed range to start using tasks - * (below this, loop is done in main thread only). 
+ * \param use_threading If \a true, actually split-execute loop in threads, else just do a sequential forloop + * (allows caller to use any kind of test to switch on parallelization or not). * \param use_dynamic_scheduling If \a true, the whole range is divided in a lot of small chunks (of size 32 currently), * otherwise whole range is split in a few big chunks (num_threads * 2 chunks currently). */ @@ -654,7 +654,7 @@ void BLI_task_parallel_range_ex( void *userdata_chunk, const size_t userdata_chunk_size, TaskParallelRangeFunc func, - const int range_threshold, + const bool use_threading, const bool use_dynamic_scheduling) { TaskScheduler *task_scheduler; @@ -667,7 +667,7 @@ void BLI_task_parallel_range_ex( /* If it's not enough data to be crunched, don't bother with tasks at all, * do everything from the main thread. */ - if (stop - start < range_threshold) { + if (!use_threading) { const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL); void *userdata_chunk_local = NULL; @@ -733,7 +733,7 @@ void BLI_task_parallel_range( void *userdata, TaskParallelRangeFunc func) { - BLI_task_parallel_range_ex(start, stop, userdata, NULL, 0, func, 64, false); + BLI_task_parallel_range_ex(start, stop, userdata, NULL, 0, func, (stop - start) > 64, false); } #undef MALLOCA |