git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenlib/BLI_task.h')
-rw-r--r--  source/blender/blenlib/BLI_task.h | 66
1 file changed, 14 insertions(+), 52 deletions(-)
diff --git a/source/blender/blenlib/BLI_task.h b/source/blender/blenlib/BLI_task.h
index 42dd47266dc..ee087600a31 100644
--- a/source/blender/blenlib/BLI_task.h
+++ b/source/blender/blenlib/BLI_task.h
@@ -43,12 +43,9 @@ struct BLI_mempool;
* must be called from the main thread. All other scheduler and pool functions
* are thread-safe. */
-typedef struct TaskScheduler TaskScheduler;
-
-TaskScheduler *BLI_task_scheduler_create(int num_threads);
-void BLI_task_scheduler_free(TaskScheduler *scheduler);
-
-int BLI_task_scheduler_num_threads(TaskScheduler *scheduler);
+void BLI_task_scheduler_init(void);
+void BLI_task_scheduler_exit(void);
+int BLI_task_scheduler_num_threads(void);
/* Task Pool
*
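With this change the task scheduler becomes a process-wide singleton instead of an object that callers create, pass around, and free. A minimal sketch of the new entry points (app_startup, app_shutdown and query_workers are illustrative call sites, not part of this patch):

#include "BLI_task.h"

void app_startup(void)
{
  /* Once at startup, from the main thread; replaces
   * BLI_task_scheduler_create(num_threads). */
  BLI_task_scheduler_init();
}

void app_shutdown(void)
{
  /* Once at shutdown; replaces BLI_task_scheduler_free(scheduler). */
  BLI_task_scheduler_exit();
}

int query_workers(void)
{
  /* No scheduler handle to thread through the code base anymore. */
  return BLI_task_scheduler_num_threads();
}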
@@ -70,16 +67,14 @@ typedef enum TaskPriority {
} TaskPriority;
typedef struct TaskPool TaskPool;
-typedef void (*TaskRunFunction)(TaskPool *__restrict pool, void *taskdata, int threadid);
+typedef void (*TaskRunFunction)(TaskPool *__restrict pool, void *taskdata);
typedef void (*TaskFreeFunction)(TaskPool *__restrict pool, void *taskdata);
-TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata, TaskPriority priority);
-TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler,
- void *userdata,
- TaskPriority priority);
-TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler,
- void *userdata,
- TaskPriority priority);
+TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority);
+TaskPool *BLI_task_pool_create_background(void *userdata, TaskPriority priority);
+TaskPool *BLI_task_pool_create_suspended(void *userdata, TaskPriority priority);
+TaskPool *BLI_task_pool_create_no_threads(void *userdata);
+TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority priority);
void BLI_task_pool_free(TaskPool *pool);
void BLI_task_pool_push(TaskPool *pool,
@@ -87,17 +82,9 @@ void BLI_task_pool_push(TaskPool *pool,
void *taskdata,
bool free_taskdata,
TaskFreeFunction freedata);
-void BLI_task_pool_push_from_thread(TaskPool *pool,
- TaskRunFunction run,
- void *taskdata,
- bool free_taskdata,
- TaskFreeFunction freedata,
- int thread_id);
/* work and wait until all tasks are done */
void BLI_task_pool_work_and_wait(TaskPool *pool);
-/* work and wait until all tasks are done, then reset to the initial suspended state */
-void BLI_task_pool_work_wait_and_reset(TaskPool *pool);
/* cancel all tasks, keep worker threads running */
void BLI_task_pool_cancel(TaskPool *pool);
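Taken with the previous hunk, a pool is now created without a scheduler handle and run functions no longer receive a thread id. A sketch of the updated flow, assuming the full BLI_task_pool_push signature (pool, run, taskdata, free_taskdata, freedata) and the TASK_PRIORITY_HIGH value of the TaskPriority enum elided above; MyTaskData, my_task_run and run_tasks are hypothetical:

#include <stdbool.h>

#include "BLI_task.h"

typedef struct MyTaskData {
  int value;
} MyTaskData;

/* New-style run function: the trailing `int threadid` parameter is gone. */
static void my_task_run(TaskPool *__restrict pool, void *taskdata)
{
  MyTaskData *data = taskdata;
  (void)pool;
  data->value *= 2;
}

static void run_tasks(MyTaskData *tasks, int num_tasks)
{
  /* No TaskScheduler argument anymore. */
  TaskPool *pool = BLI_task_pool_create(NULL, TASK_PRIORITY_HIGH);
  for (int i = 0; i < num_tasks; i++) {
    BLI_task_pool_push(pool, my_task_run, &tasks[i], false, NULL);
  }
  /* Blocks until all pushed tasks have run; the removed
   * work_wait_and_reset variant has no direct equivalent. */
  BLI_task_pool_work_and_wait(pool);
  BLI_task_pool_free(pool);
}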
@@ -110,36 +97,10 @@ void *BLI_task_pool_user_data(TaskPool *pool);
/* optional mutex to use from run function */
ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool);
-/* Thread ID of thread that created the task pool. */
-int BLI_task_pool_creator_thread_id(TaskPool *pool);
-
-/* Delayed push, use that to reduce thread overhead by accumulating
- * all new tasks into local queue first and pushing it to scheduler
- * from within a single mutex lock.
- */
-void BLI_task_pool_delayed_push_begin(TaskPool *pool, int thread_id);
-void BLI_task_pool_delayed_push_end(TaskPool *pool, int thread_id);
-
/* Parallel for routines */
-typedef enum eTaskSchedulingMode {
- /* Task scheduler will divide overall work into equal chunks, scheduling
- * even chunks to all worker threads.
- * Least run time benefit, ideal for cases when each task requires equal
- * amount of compute power.
- */
- TASK_SCHEDULING_STATIC,
- /* Task scheduler will schedule small amount of work to each worker thread.
- * Has more run time overhead, but deals much better with cases when each
- * part of the work requires totally different amount of compute power.
- */
- TASK_SCHEDULING_DYNAMIC,
-} eTaskSchedulingMode;
-
/* Per-thread specific data passed to the callback. */
typedef struct TaskParallelTLS {
- /* Identifier of the thread who this data belongs to. */
- int thread_id;
/* Copy of user-specified chunk, which is copied from original chunk to all
* worker threads. This is similar to OpenMP's firstprivate.
*/
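With thread_id gone from TaskParallelTLS, per-thread state is carried only in the userdata_chunk copy. A sketch of a range callback, assuming the TaskParallelRangeFunc signature (userdata, iter, tls) used by this header; ScaleParams and scale_range_func are hypothetical:

/* Parameters copied to every worker via the settings' userdata_chunk,
 * OpenMP-firstprivate style. */
typedef struct ScaleParams {
  float factor;
} ScaleParams;

static void scale_range_func(void *__restrict userdata,
                             const int iter,
                             const TaskParallelTLS *__restrict tls)
{
  float *values = userdata;
  const ScaleParams *params = tls->userdata_chunk;
  /* Nothing here can depend on which worker runs the iteration:
   * tls->thread_id no longer exists. */
  values[iter] *= params->factor;
}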
@@ -163,8 +124,6 @@ typedef struct TaskParallelSettings {
* is higher than a chunk size. As in, threading will always be performed.
*/
bool use_threading;
- /* Scheduling mode to use for this parallel range invocation. */
- eTaskSchedulingMode scheduling_mode;
/* Each instance of looping chunks will get a copy of this data
* (similar to OpenMP's firstprivate).
*/
@@ -199,7 +158,7 @@ void BLI_task_parallel_range(const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
- TaskParallelSettings *settings);
+ const TaskParallelSettings *settings);
/* This data is shared between all tasks; its access needs a thread lock or similar protection.
*/
@@ -254,11 +213,14 @@ BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *setti
{
memset(settings, 0, sizeof(*settings));
settings->use_threading = true;
- settings->scheduling_mode = TASK_SCHEDULING_STATIC;
/* Use default heuristic to define actual chunk size. */
settings->min_iter_per_thread = 0;
}
+/* Don't use this; store any thread-specific data in tls->userdata_chunk instead.
+ * Only here for code to be removed. */
+int BLI_task_parallel_thread_id(const TaskParallelTLS *tls);
+
#ifdef __cplusplus
}
#endif
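Continuing the sketch above, driving the callback through the slimmed-down settings struct; scale_values is hypothetical, and the userdata_chunk and userdata_chunk_size fields are assumed from the parts of TaskParallelSettings not shown in this diff:

static void scale_values(float *values, const int count, const float factor)
{
  ScaleParams params = {factor};

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  /* scheduling_mode is gone; chunking is now tuned only through
   * min_iter_per_thread (0 keeps the default heuristic). */
  settings.min_iter_per_thread = 1024;
  settings.userdata_chunk = &params;
  settings.userdata_chunk_size = sizeof(params);

  BLI_task_parallel_range(0, count, values, scale_range_func, &settings);
}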