git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenlib/intern/task.c')
-rw-r--r--  source/blender/blenlib/intern/task.c  | 112
1 file changed, 112 insertions(+), 0 deletions(-)
diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c
index 8d867b9f295..3a49abc7060 100644
--- a/source/blender/blenlib/intern/task.c
+++ b/source/blender/blenlib/intern/task.c
@@ -428,3 +428,115 @@ size_t BLI_task_pool_tasks_done(TaskPool *pool)
return pool->done;
}
+/* Parallel range routines */
+
+/**
+ *
+ * Main functions:
+ * - #BLI_task_parallel_range
+ *
+ * TODO:
+ * - #BLI_task_parallel_foreach_listbase (#ListBase - double linked list)
+ * - #BLI_task_parallel_foreach_link (#Link - single linked list)
+ * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
+ * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
+ *
+ * Possible improvements:
+ *
+ * - Chunk iterations to reduce number of spin locks.
+ */
+
+typedef struct ParallelRangeState {
+    int start, stop;
+    void *userdata;
+    TaskParallelRangeFunc func;
+
+    int iter;
+    SpinLock lock;
+} ParallelRangeState;
+
+BLI_INLINE bool parallel_range_next_iter_get(
+        ParallelRangeState *state,
+        int *iter)
+{
+    bool result = false;
+    if (state->iter < state->stop) {
+        BLI_spin_lock(&state->lock);
+        if (state->iter < state->stop) {
+            *iter = state->iter++;
+            result = true;
+        }
+        BLI_spin_unlock(&state->lock);
+    }
+    return result;
+}
+
+static void parallel_range_func(
+        TaskPool *pool,
+        void *UNUSED(taskdata),
+        int UNUSED(threadid))
+{
+    ParallelRangeState *state = BLI_task_pool_userdata(pool);
+    int iter;
+    while (parallel_range_next_iter_get(state, &iter)) {
+        state->func(state->userdata, iter);
+    }
+}
+
+void BLI_task_parallel_range_ex(
+        int start, int stop,
+        void *userdata,
+        TaskParallelRangeFunc func,
+        const int range_threshold)
+{
+    TaskScheduler *task_scheduler;
+    TaskPool *task_pool;
+    ParallelRangeState state;
+    int i;
+
+    BLI_assert(start < stop);
+
+    /* If there is not enough data to be crunched, don't bother with tasks at all;
+     * do everything from the main thread.
+     */
+    if (stop - start < range_threshold) {
+        for (i = start; i < stop; ++i) {
+            func(userdata, i);
+        }
+        return;
+    }
+
+    BLI_spin_init(&state.lock);
+    state.start = start;
+    state.stop = stop;
+    state.userdata = userdata;
+    state.func = func;
+    state.iter = start;
+
+    task_scheduler = BLI_task_scheduler_get();
+    task_pool = BLI_task_pool_create(task_scheduler, &state);
+
+    /* The idea here is to avoid creating a task for each of the loop iterations
+     * and instead have tasks which are evenly distributed across CPU cores and
+     * pull the next iteration to be crunched from the shared state.
+     */
+    for (i = 0; i < 2 * BLI_task_scheduler_num_threads(task_scheduler); i++) {
+        BLI_task_pool_push(task_pool,
+                           parallel_range_func,
+                           NULL, false,
+                           TASK_PRIORITY_HIGH);
+    }
+
+    BLI_task_pool_work_and_wait(task_pool);
+    BLI_task_pool_free(task_pool);
+
+    BLI_spin_end(&state.lock);
+}
+
+void BLI_task_parallel_range(
+        int start, int stop,
+        void *userdata,
+        TaskParallelRangeFunc func)
+{
+    BLI_task_parallel_range_ex(start, stop, userdata, func, 64);
+}
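
Not part of the diff above: a minimal caller-side sketch of how the new API might be used. It assumes the TaskParallelRangeFunc callback is declared in BLI_task.h with the (userdata, iter) signature that parallel_range_func invokes; SquareData, square_cb and square_all are hypothetical names used only for illustration.

#include "BLI_task.h"

typedef struct SquareData {
    float *values;
} SquareData;

/* Runs once per index in [0, count), possibly from worker threads. */
static void square_cb(void *userdata, int iter)
{
    SquareData *data = userdata;
    data->values[iter] *= data->values[iter];
}

static void square_all(float *values, int count)
{
    SquareData data = {values};

    if (count < 1) {
        /* BLI_task_parallel_range_ex() asserts that start < stop. */
        return;
    }

    /* With fewer than 64 iterations (the default threshold) this runs
     * serially on the calling thread; otherwise iterations are pulled by
     * tasks spread across the scheduler's threads. A custom threshold can
     * be passed through BLI_task_parallel_range_ex(). */
    BLI_task_parallel_range(0, count, &data, square_cb);
}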