git.blender.org/blender.git

Diffstat (limited to 'source/blender/blenlib/intern/task_iterator.c')
 source/blender/blenlib/intern/task_iterator.c | 341
 1 file changed, 15 insertions(+), 326 deletions(-)
diff --git a/source/blender/blenlib/intern/task_iterator.c b/source/blender/blenlib/intern/task_iterator.c
index 1189ec0d0c0..ee459ac2548 100644
--- a/source/blender/blenlib/intern/task_iterator.c
+++ b/source/blender/blenlib/intern/task_iterator.c
@@ -17,7 +17,7 @@
/** \file
* \ingroup bli
*
- * A generic task system which can be used for any task based subsystem.
+ * Parallel tasks over all elements in a container.
*/
#include <stdlib.h>
@@ -34,82 +34,12 @@
#include "atomic_ops.h"
-/* Parallel range routines */
-
-/**
- *
- * Main functions:
- * - #BLI_task_parallel_range
- * - #BLI_task_parallel_listbase (#ListBase - double linked list)
- *
- * TODO:
- * - #BLI_task_parallel_foreach_link (#Link - single linked list)
- * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
- * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
- */
-
/* Allows avoiding the use of malloc for userdata_chunk in tasks, when small enough. */
#define MALLOCA(_size) (((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__))
#define MALLOCA_FREE(_mem, _size) \
if (((_mem) != NULL) && ((_size) > 8192)) \
MEM_freeN((_mem))
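
For illustration, a minimal sketch of how these macros might be used for a temporary scratch buffer (example_scratch is a hypothetical caller; alloca and MEM_mallocN/MEM_freeN come from the headers included above):

    static void example_scratch(const size_t size)
    {
      char *buf = MALLOCA(size); /* Stack allocation for sizes <= 8192, guarded heap otherwise. */
      if (size > 0) {
        buf[0] = 0; /* ... use the buffer ... */
      }
      MALLOCA_FREE(buf, size); /* No-op for stack allocations, MEM_freeN() for heap ones. */
    }
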
-/* Stores all the data needed to perform a parallelized iteration
- * with the same operation (callback function).
- * It can be chained with other tasks in a singly-linked list. */
-typedef struct TaskParallelRangeState {
- struct TaskParallelRangeState *next;
-
- /* Start and end point of integer value iteration. */
- int start, stop;
-
- /* User-defined data, shared between all worker threads. */
- void *userdata_shared;
- /* User-defined callback function called for each value in the specified range [start, stop). */
- TaskParallelRangeFunc func;
-
- /* Each worker task gets its own copy of this data
- * (similar to OpenMP's firstprivate).
- */
- void *initial_tls_memory; /* Pointer to actual user-defined 'tls' data. */
- size_t tls_data_size; /* Size of that data. */
-
- void *flatten_tls_storage; /* 'tls' copies of initial_tls_memory for each running task. */
- /* Number of 'tls' copies in the array, i.e. number of worker threads. */
- size_t num_elements_in_tls_storage;
-
- /* Function called to join user data chunk into another, to reduce
- * the result to the original userdata_chunk memory.
- * The reduce functions should have no side effects, so that they
- * can be run on any thread. */
- TaskParallelReduceFunc func_reduce;
- /* Function called to free data created by TaskParallelRangeFunc. */
- TaskParallelFreeFunc func_free;
-
- /* Current value of the iterator, shared between all threads (atomically updated). */
- int iter_value;
- int iter_chunk_num; /* Amount of iterations to process in a single step. */
-} TaskParallelRangeState;
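
As a sketch of how these fields map onto user code: a hypothetical TaskParallelRangeFunc that accumulates into its per-task 'tls' copy (SumData and sum_func are illustrative names; the callback signature is inferred from how state->func is invoked below):

    typedef struct SumData {
      const float *values; /* Shared, read-only input (userdata_shared). */
    } SumData;

    static void sum_func(void *__restrict userdata, const int i, const TaskParallelTLS *__restrict tls)
    {
      const SumData *data = userdata;
      float *accum = tls->userdata_chunk; /* This task's private copy of initial_tls_memory. */
      *accum += data->values[i];
    }
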
-
-/* Stores all the parallel tasks for a single pool. */
-typedef struct TaskParallelRangePool {
- /* The workers' task pool. */
- TaskPool *pool;
- /* The number of worker tasks we need to create. */
- int num_tasks;
- /* The total number of iterations in all the added ranges. */
- int num_total_iters;
- /* The size (number of items) processed at once by a worker task. */
- int chunk_size;
-
- /* Linked list of range tasks to process. */
- TaskParallelRangeState *parallel_range_states;
- /* Current range task being processed, swapped atomically. */
- TaskParallelRangeState *current_state;
- /* Scheduling settings common to all tasks. */
- TaskParallelSettings *settings;
-} TaskParallelRangePool;
-
BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
const int tot_items,
int num_tasks,
@@ -154,232 +84,7 @@ BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
}
BLI_assert(chunk_size > 0);
-
- if (tot_items > 0) {
- switch (settings->scheduling_mode) {
- case TASK_SCHEDULING_STATIC:
- *r_chunk_size = max_ii(chunk_size, tot_items / num_tasks);
- break;
- case TASK_SCHEDULING_DYNAMIC:
- *r_chunk_size = chunk_size;
- break;
- }
- }
- else {
- /* If the total number of items is unknown, we can only use dynamic scheduling. */
- *r_chunk_size = chunk_size;
- }
-}
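
A worked example of the two modes, with illustrative numbers: assume the heuristic above produced chunk_size = 64, with tot_items = 1000 and num_tasks = 8. TASK_SCHEDULING_STATIC sets *r_chunk_size = max_ii(64, 1000 / 8) = 125, so each task processes a single contiguous slice of the range; TASK_SCHEDULING_DYNAMIC keeps *r_chunk_size = 64, so tasks repeatedly pull smaller chunks from the shared iterator as they finish.
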
-
-BLI_INLINE void task_parallel_range_calc_chunk_size(TaskParallelRangePool *range_pool)
-{
- int num_iters = 0;
- int min_num_iters = INT_MAX;
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state->next) {
- const int ni = state->stop - state->start;
- num_iters += ni;
- if (min_num_iters > ni) {
- min_num_iters = ni;
- }
- }
- range_pool->num_total_iters = num_iters;
- /* Note: Passing min_num_iters here instead of num_iters partially breaks 'static'
- * scheduling, but a pooled range iterator is inherently non-static anyway, so adding a small
- * level of dynamic scheduling here should be fine. */
- task_parallel_calc_chunk_size(
- range_pool->settings, min_num_iters, range_pool->num_tasks, &range_pool->chunk_size);
-}
-
-BLI_INLINE bool parallel_range_next_iter_get(TaskParallelRangePool *__restrict range_pool,
- int *__restrict r_iter,
- int *__restrict r_count,
- TaskParallelRangeState **__restrict r_state)
-{
- /* We need an atomic op here as well to fetch the initial state, since some other thread might
- * have already updated it. */
- TaskParallelRangeState *current_state = atomic_cas_ptr(
- (void **)&range_pool->current_state, NULL, NULL);
-
- int previter = INT32_MAX;
-
- while (current_state != NULL && previter >= current_state->stop) {
- previter = atomic_fetch_and_add_int32(&current_state->iter_value, range_pool->chunk_size);
- *r_iter = previter;
- *r_count = max_ii(0, min_ii(range_pool->chunk_size, current_state->stop - previter));
-
- if (previter >= current_state->stop) {
- /* At this point the state we got is done and we need to go to the next one. In case some
- * other thread already did it, this does nothing, and we'll just get the current valid
- * state at the start of the next loop. */
- TaskParallelRangeState *current_state_from_atomic_cas = atomic_cas_ptr(
- (void **)&range_pool->current_state, current_state, current_state->next);
-
- if (current_state == current_state_from_atomic_cas) {
- /* The atomic CAS operation was successful: we did update range_pool->current_state, so we
- * can safely switch to the next state. */
- current_state = current_state->next;
- }
- else {
- /* The atomic CAS operation failed, but it still gave us the current value of
- * range_pool->current_state, so just use that as our new current state. */
- current_state = current_state_from_atomic_cas;
- }
- }
- }
-
- *r_state = current_state;
- return (current_state != NULL && previter < current_state->stop);
-}
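
The advance-to-next-state pattern above can be shown as a standalone C11 analogue (a sketch using <stdatomic.h>, not Blender's atomic_ops API):

    #include <stdatomic.h>

    typedef struct Node {
      struct Node *next;
    } Node;

    /* Try to advance `*current` from `seen` to `seen->next`. On failure, some other
     * thread already advanced it, and `expected` holds the value that thread installed. */
    static Node *advance_state(_Atomic(Node *) *current, Node *seen)
    {
      Node *expected = seen;
      if (atomic_compare_exchange_strong(current, &expected, seen->next)) {
        return seen->next; /* CAS succeeded: we performed the swap ourselves. */
      }
      return expected; /* CAS failed: adopt the other thread's value as our new state. */
    }
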
-
-static void parallel_range_func(TaskPool *__restrict pool, void *tls_data_idx, int thread_id)
-{
- TaskParallelRangePool *__restrict range_pool = BLI_task_pool_user_data(pool);
- TaskParallelTLS tls = {
- .thread_id = thread_id,
- .userdata_chunk = NULL,
- };
- TaskParallelRangeState *state;
- int iter, count;
- while (parallel_range_next_iter_get(range_pool, &iter, &count, &state)) {
- tls.userdata_chunk = (char *)state->flatten_tls_storage +
- (((size_t)POINTER_AS_INT(tls_data_idx)) * state->tls_data_size);
- for (int i = 0; i < count; i++) {
- state->func(state->userdata_shared, iter + i, &tls);
- }
- }
-}
-
-static void parallel_range_single_thread(TaskParallelRangePool *range_pool)
-{
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state->next) {
- const int start = state->start;
- const int stop = state->stop;
- void *userdata = state->userdata_shared;
- TaskParallelRangeFunc func = state->func;
-
- void *initial_tls_memory = state->initial_tls_memory;
- const size_t tls_data_size = state->tls_data_size;
- const bool use_tls_data = (tls_data_size != 0) && (initial_tls_memory != NULL);
- TaskParallelTLS tls = {
- .thread_id = 0,
- .userdata_chunk = initial_tls_memory,
- };
- for (int i = start; i < stop; i++) {
- func(userdata, i, &tls);
- }
- if (use_tls_data && state->func_free != NULL) {
- /* `func_free` should only free data that was created during execution of `func`. */
- state->func_free(userdata, initial_tls_memory);
- }
- }
-}
-
-/**
- * This function allows parallelizing for-loops in a way similar to OpenMP's
- * 'parallel for' statement.
- *
- * See the public API documentation of ParallelRangeSettings for a description of all settings.
- */
-void BLI_task_parallel_range(const int start,
- const int stop,
- void *userdata,
- TaskParallelRangeFunc func,
- TaskParallelSettings *settings)
-{
- if (start == stop) {
- return;
- }
-
- BLI_assert(start < stop);
-
- TaskParallelRangeState state = {
- .next = NULL,
- .start = start,
- .stop = stop,
- .userdata_shared = userdata,
- .func = func,
- .iter_value = start,
- .initial_tls_memory = settings->userdata_chunk,
- .tls_data_size = settings->userdata_chunk_size,
- .func_free = settings->func_free,
- };
- TaskParallelRangePool range_pool = {
- .pool = NULL, .parallel_range_states = &state, .current_state = NULL, .settings = settings};
- int i, num_threads, num_tasks;
-
- void *tls_data = settings->userdata_chunk;
- const size_t tls_data_size = settings->userdata_chunk_size;
- if (tls_data_size != 0) {
- BLI_assert(tls_data != NULL);
- }
- const bool use_tls_data = (tls_data_size != 0) && (tls_data != NULL);
- void *flatten_tls_storage = NULL;
-
- /* If there is not enough data to be crunched, don't bother with tasks at all:
- * do everything from the current thread.
- */
- if (!settings->use_threading) {
- parallel_range_single_thread(&range_pool);
- return;
- }
-
- TaskScheduler *task_scheduler = BLI_task_scheduler_get();
- num_threads = BLI_task_scheduler_num_threads(task_scheduler);
-
- /* The idea here is to prevent creating a task for each of the loop iterations,
- * and instead have tasks which are evenly distributed across CPU cores and
- * pull the next iteration to be crunched using the queue.
- */
- range_pool.num_tasks = num_tasks = num_threads + 2;
-
- task_parallel_range_calc_chunk_size(&range_pool);
- range_pool.num_tasks = num_tasks = min_ii(num_tasks,
- max_ii(1, (stop - start) / range_pool.chunk_size));
-
- if (num_tasks == 1) {
- parallel_range_single_thread(&range_pool);
- return;
- }
-
- TaskPool *task_pool = range_pool.pool = BLI_task_pool_create_suspended(
- task_scheduler, &range_pool, TASK_PRIORITY_HIGH);
-
- range_pool.current_state = &state;
-
- if (use_tls_data) {
- state.flatten_tls_storage = flatten_tls_storage = MALLOCA(tls_data_size * (size_t)num_tasks);
- state.tls_data_size = tls_data_size;
- }
-
- const int thread_id = BLI_task_pool_creator_thread_id(task_pool);
- for (i = 0; i < num_tasks; i++) {
- if (use_tls_data) {
- void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
- memcpy(userdata_chunk_local, tls_data, tls_data_size);
- }
- /* Use this pool's pre-allocated tasks. */
- BLI_task_pool_push_from_thread(
- task_pool, parallel_range_func, POINTER_FROM_INT(i), false, NULL, thread_id);
- }
-
- BLI_task_pool_work_and_wait(task_pool);
- BLI_task_pool_free(task_pool);
-
- if (use_tls_data && (settings->func_free != NULL || settings->func_reduce != NULL)) {
- for (i = 0; i < num_tasks; i++) {
- void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
- if (settings->func_reduce) {
- settings->func_reduce(userdata, tls_data, userdata_chunk_local);
- }
- if (settings->func_free) {
- /* `func_free` should only free data that was created during execution of `func`. */
- settings->func_free(userdata, userdata_chunk_local);
- }
- }
- MALLOCA_FREE(flatten_tls_storage, tls_data_size * (size_t)num_tasks);
- }
+ *r_chunk_size = chunk_size;
}
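
For context, a hedged sketch of how the public BLI_task_parallel_range API is typically driven, reusing the illustrative SumData/sum_func from above (BLI_parallel_range_settings_defaults is assumed from BLI_task.h; the reduce signature is inferred from how func_reduce is invoked in the removed code above):

    static void sum_reduce(const void *__restrict userdata, void *__restrict chunk_join, void *__restrict chunk)
    {
      (void)userdata;
      *(float *)chunk_join += *(float *)chunk; /* Fold one task's partial sum into another. */
    }

    static float sum_values(const float *values, const int n)
    {
      SumData data = {.values = values};
      float accum = 0.0f;

      TaskParallelSettings settings;
      BLI_parallel_range_settings_defaults(&settings);
      settings.userdata_chunk = &accum;             /* Initial 'tls' data, copied per task. */
      settings.userdata_chunk_size = sizeof(accum);
      settings.func_reduce = sum_reduce;            /* Joins per-task copies back into accum. */

      BLI_task_parallel_range(0, n, &data, sum_func, &settings);
      return accum;
    }
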
typedef struct TaskParallelIteratorState {
@@ -394,20 +99,10 @@ typedef struct TaskParallelIteratorState {
int tot_items;
} TaskParallelIteratorState;
-BLI_INLINE void task_parallel_iterator_calc_chunk_size(const TaskParallelSettings *settings,
- const int num_tasks,
- TaskParallelIteratorState *state)
-{
- task_parallel_calc_chunk_size(
- settings, state->tot_items, num_tasks, &state->iter_shared.chunk_size);
-}
-
static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
- void *userdata_chunk,
- int threadid)
+ void *userdata_chunk)
{
TaskParallelTLS tls = {
- .thread_id = threadid,
.userdata_chunk = userdata_chunk,
};
@@ -460,11 +155,11 @@ static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
MALLOCA_FREE(current_chunk_indices, indices_size);
}
-static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk, int threadid)
+static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk)
{
TaskParallelIteratorState *__restrict state = BLI_task_pool_user_data(pool);
- parallel_iterator_func_do(state, userdata_chunk, threadid);
+ parallel_iterator_func_do(state, userdata_chunk);
}
static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
@@ -483,7 +178,7 @@ static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
/* Also marking it as non-threaded for the iterator callback. */
state->iter_shared.spin_lock = NULL;
- parallel_iterator_func_do(state, userdata_chunk, 0);
+ parallel_iterator_func_do(state, userdata_chunk);
if (use_userdata_chunk && settings->func_free != NULL) {
/* `func_free` should only free data that was created during execution of `func`. */
@@ -494,10 +189,10 @@ static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
static void task_parallel_iterator_do(const TaskParallelSettings *settings,
TaskParallelIteratorState *state)
{
- TaskScheduler *task_scheduler = BLI_task_scheduler_get();
- const int num_threads = BLI_task_scheduler_num_threads(task_scheduler);
+ const int num_threads = BLI_task_scheduler_num_threads();
- task_parallel_iterator_calc_chunk_size(settings, num_threads, state);
+ task_parallel_calc_chunk_size(
+ settings, state->tot_items, num_threads, &state->iter_shared.chunk_size);
if (!settings->use_threading) {
task_parallel_iterator_no_threads(settings, state);
@@ -526,21 +221,19 @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
void *userdata_chunk_array = NULL;
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
- TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, state, TASK_PRIORITY_HIGH);
+ TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
if (use_userdata_chunk) {
userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
}
- const int thread_id = BLI_task_pool_creator_thread_id(task_pool);
for (size_t i = 0; i < num_tasks; i++) {
if (use_userdata_chunk) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
}
/* Use this pool's pre-allocated tasks. */
- BLI_task_pool_push_from_thread(
- task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL, thread_id);
+ BLI_task_pool_push(task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL);
}
BLI_task_pool_work_and_wait(task_pool);
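
The updated pool API used in this hunk can be summarized with a minimal standalone sketch (MyState, my_task and run_my_tasks are hypothetical; the create/push/work/free calls follow the lines above — note the callback no longer receives a thread id):

    typedef struct MyState {
      int value; /* Hypothetical shared state. */
    } MyState;

    static void my_task(TaskPool *__restrict pool, void *taskdata)
    {
      MyState *state = BLI_task_pool_user_data(pool); /* Shared user data for the whole pool. */
      (void)state;
      (void)taskdata; /* Per-task payload supplied at push time. */
    }

    static void run_my_tasks(MyState *state, const int num_tasks)
    {
      TaskPool *pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
      for (int i = 0; i < num_tasks; i++) {
        BLI_task_pool_push(pool, my_task, NULL, false, NULL);
      }
      BLI_task_pool_work_and_wait(pool);
      BLI_task_pool_free(pool);
    }
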
@@ -656,7 +349,7 @@ typedef struct ParallelMempoolState {
TaskParallelMempoolFunc func;
} ParallelMempoolState;
-static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata, int UNUSED(threadid))
+static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata)
{
ParallelMempoolState *__restrict state = BLI_task_pool_user_data(pool);
BLI_mempool_iter *iter = taskdata;
@@ -684,7 +377,6 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
TaskParallelMempoolFunc func,
const bool use_threading)
{
- TaskScheduler *task_scheduler;
TaskPool *task_pool;
ParallelMempoolState state;
int i, num_threads, num_tasks;
@@ -704,9 +396,8 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
return;
}
- task_scheduler = BLI_task_scheduler_get();
- task_pool = BLI_task_pool_create_suspended(task_scheduler, &state, TASK_PRIORITY_HIGH);
- num_threads = BLI_task_scheduler_num_threads(task_scheduler);
+ task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
+ num_threads = BLI_task_scheduler_num_threads();
/* The idea here is to prevent creating a task for each of the loop iterations
* and instead have tasks which are evenly distributed across CPU cores and
@@ -720,11 +411,9 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool,
(size_t)num_tasks);
- const int thread_id = BLI_task_pool_creator_thread_id(task_pool);
for (i = 0; i < num_tasks; i++) {
/* Use this pool's pre-allocated tasks. */
- BLI_task_pool_push_from_thread(
- task_pool, parallel_mempool_func, &mempool_iterators[i], false, NULL, thread_id);
+ BLI_task_pool_push(task_pool, parallel_mempool_func, &mempool_iterators[i], false, NULL);
}
BLI_task_pool_work_and_wait(task_pool);
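
Finally, a hedged usage sketch of the mempool entry point itself (count_item and count_mempool_items are illustrative; the TaskParallelMempoolFunc callback signature and atomic_add_and_fetch_int32 from atomic_ops.h are assumptions about this era of the API):

    static void count_item(void *userdata, MempoolIterData *item)
    {
      (void)item;
      atomic_add_and_fetch_int32((int32_t *)userdata, 1); /* Thread-safe increment. */
    }

    static int count_mempool_items(BLI_mempool *mempool)
    {
      int32_t count = 0;
      BLI_task_parallel_mempool(mempool, &count, count_item, true);
      return (int)count;
    }
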