git.blender.org/blender.git
author		Bastien Montagne <montagne29@wanadoo.fr>	2017-11-23 23:36:27 +0300
committer	Bastien Montagne <montagne29@wanadoo.fr>	2017-11-23 23:36:27 +0300
commit		a786baa193d28e2193a9f2953b61e659c7df92e0 (patch)
tree		0808fba080bdcdd9d362cd0797ca7fe7d521f933 /source/blender/blenlib
parent		c9477888305b647abb89702ab2316ba5f93033b1 (diff)
parent		43ddf0e9a7f0f9986ed24e05df0ce7eac5f944b6 (diff)
Merge branch 'master' into blender2.8
Diffstat (limited to 'source/blender/blenlib')
-rw-r--r--	source/blender/blenlib/BLI_mempool.h	5
-rw-r--r--	source/blender/blenlib/BLI_task.h	11
-rw-r--r--	source/blender/blenlib/intern/BLI_mempool.c	74
-rw-r--r--	source/blender/blenlib/intern/task.c	87
4 files changed, 173 insertions, 4 deletions
diff --git a/source/blender/blenlib/BLI_mempool.h b/source/blender/blenlib/BLI_mempool.h
index 0c754f551e0..b68ca6b1f2b 100644
--- a/source/blender/blenlib/BLI_mempool.h
+++ b/source/blender/blenlib/BLI_mempool.h
@@ -71,6 +71,8 @@ typedef struct BLI_mempool_iter {
BLI_mempool *pool;
struct BLI_mempool_chunk *curchunk;
unsigned int curindex;
+
+ struct BLI_mempool_chunk **curchunk_threaded_shared;
} BLI_mempool_iter;
/* flag */
@@ -87,6 +89,9 @@ enum {
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter) ATTR_NONNULL();
void *BLI_mempool_iterstep(BLI_mempool_iter *iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL();
+BLI_mempool_iter *BLI_mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL();
+void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr) ATTR_NONNULL();
+
#ifdef __cplusplus
}
#endif
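
For context, the intended usage pattern of the two new declarations above is: create one iterator per worker, let each worker drain its iterator, and let exhausted iterators pull whole chunks through the shared cursor. A minimal sketch of driving this by hand, assuming a pool created with BLI_MEMPOOL_ALLOW_ITER; the pthread plumbing and the worker/process_pool_in_parallel names are illustrative only, not part of this commit (the commit itself wires the iterators into the task system instead, see task.c below):

#include <pthread.h>
#include "BLI_mempool.h"

#define NUM_WORKERS 4

static void *worker(void *arg)
{
	BLI_mempool_iter *iter = arg;
	void *item;
	/* Each call returns one element; once this iterator's current chunk is
	 * exhausted, it atomically claims the next unclaimed chunk. */
	while ((item = BLI_mempool_iterstep(iter)) != NULL) {
		/* ... process item ... */
	}
	return NULL;
}

static void process_pool_in_parallel(BLI_mempool *pool)
{
	pthread_t threads[NUM_WORKERS];
	BLI_mempool_iter *iter_arr = BLI_mempool_iter_threadsafe_create(pool, NUM_WORKERS);

	for (int i = 0; i < NUM_WORKERS; i++) {
		pthread_create(&threads[i], NULL, worker, &iter_arr[i]);
	}
	for (int i = 0; i < NUM_WORKERS; i++) {
		pthread_join(threads[i], NULL);
	}

	BLI_mempool_iter_threadsafe_free(iter_arr);
}
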
diff --git a/source/blender/blenlib/BLI_task.h b/source/blender/blenlib/BLI_task.h
index 721327d26a8..ccfa2b6e2e7 100644
--- a/source/blender/blenlib/BLI_task.h
+++ b/source/blender/blenlib/BLI_task.h
@@ -35,6 +35,8 @@ extern "C" {
#include "BLI_threads.h"
#include "BLI_utildefines.h"
+struct BLI_mempool;
+
/* Task Scheduler
*
* Central scheduler that holds running threads ready to execute tasks. A single
@@ -150,6 +152,15 @@ void BLI_task_parallel_listbase(
TaskParallelListbaseFunc func,
const bool use_threading);
+typedef struct MempoolIterData MempoolIterData;
+typedef void (*TaskParallelMempoolFunc)(void *userdata,
+ MempoolIterData *iter);
+void BLI_task_parallel_mempool(
+ struct BLI_mempool *mempool,
+ void *userdata,
+ TaskParallelMempoolFunc func,
+ const bool use_threading);
+
#ifdef __cplusplus
}
#endif
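
The simplest way to consume this new entry point looks like the following sketch; only the typedef and declaration above come from this commit, while MyElem and the callback name are hypothetical:

/* Hypothetical element type and callback matching TaskParallelMempoolFunc. */
typedef struct MyElem { float value; } MyElem;

static void my_elem_scale(void *userdata, MempoolIterData *iter)
{
	/* The opaque MempoolIterData pointer is the mempool element itself. */
	MyElem *elem = (MyElem *)iter;
	const float scale = *(const float *)userdata;
	elem->value *= scale;
}

/* ... in calling code, with `pool` an iterable BLI_mempool of MyElem: */
float scale = 2.0f;
BLI_task_parallel_mempool(pool, &scale, my_elem_scale, true);
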
diff --git a/source/blender/blenlib/intern/BLI_mempool.c b/source/blender/blenlib/intern/BLI_mempool.c
index b02811616dd..c90f9e300b7 100644
--- a/source/blender/blenlib/intern/BLI_mempool.c
+++ b/source/blender/blenlib/intern/BLI_mempool.c
@@ -41,6 +41,8 @@
#include <string.h>
#include <stdlib.h>
+#include "atomic_ops.h"
+
#include "BLI_utildefines.h"
#include "BLI_mempool.h" /* own include */
@@ -553,7 +555,7 @@ void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
}
/**
- * Create a new mempool iterator, \a BLI_MEMPOOL_ALLOW_ITER flag must be set.
+ * Initialize a new mempool iterator, \a BLI_MEMPOOL_ALLOW_ITER flag must be set.
*/
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
@@ -562,6 +564,47 @@ void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
iter->pool = pool;
iter->curchunk = pool->chunks;
iter->curindex = 0;
+
+ iter->curchunk_threaded_shared = NULL;
+}
+
+/**
+ * Initialize an array of mempool iterators, \a BLI_MEMPOOL_ALLOW_ITER flag must be set.
+ *
+ * This is used in threaded code, to generate as many iterators as needed (each task should have its own),
+ * such that each iterator works on its own chunk at a time, and only fetching the next chunk to iterate over
+ * has to be protected against concurrent access (which can be done in a lockless way).
+ *
+ * To be used when creating a task for each single item in the pool would be overkill.
+ *
+ * See the BLI_task_parallel_mempool implementation for a detailed usage example.
+ */
+BLI_mempool_iter *BLI_mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter)
+{
+ BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
+
+ BLI_mempool_iter *iter_arr = MEM_mallocN(sizeof(*iter_arr) * num_iter, __func__);
+ BLI_mempool_chunk **curchunk_threaded_shared = MEM_mallocN(sizeof(void *), __func__);
+
+ BLI_mempool_iternew(pool, iter_arr);
+
+ *curchunk_threaded_shared = iter_arr->curchunk;
+ iter_arr->curchunk_threaded_shared = curchunk_threaded_shared;
+
+ for (size_t i = 1; i < num_iter; i++) {
+ iter_arr[i] = iter_arr[0];
+ *curchunk_threaded_shared = iter_arr[i].curchunk = (*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : NULL;
+ }
+
+ return iter_arr;
+}
+
+void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr)
+{
+ BLI_assert(iter_arr->curchunk_threaded_shared != NULL);
+
+ MEM_freeN(iter_arr->curchunk_threaded_shared);
+ MEM_freeN(iter_arr);
}
#if 0
@@ -571,15 +614,28 @@ static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
void *ret = NULL;
- if (!iter->curchunk || !iter->pool->totused) return NULL;
+ if (iter->curchunk == NULL || !iter->pool->totused) {
+ return ret;
+ }
ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);
iter->curindex++;
if (iter->curindex == iter->pool->pchunk) {
- iter->curchunk = iter->curchunk->next;
iter->curindex = 0;
+ if (iter->curchunk_threaded_shared) {
+ while (1) {
+ iter->curchunk = *iter->curchunk_threaded_shared;
+ if (iter->curchunk == NULL) {
+ break;
+ }
+ if (atomic_cas_ptr((void **)iter->curchunk_threaded_shared, iter->curchunk, iter->curchunk->next) == iter->curchunk) {
+ break;
+ }
+ }
+ if (UNLIKELY(iter->curchunk == NULL)) {
+ return ret;
+ }
+ }
+ iter->curchunk = iter->curchunk->next;
}
return ret;
@@ -620,8 +676,18 @@ void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
}
else {
iter->curindex = 0;
+ if (iter->curchunk_threaded_shared) {
+ for (iter->curchunk = *iter->curchunk_threaded_shared;
+ (iter->curchunk != NULL) &&
+ (atomic_cas_ptr((void **)iter->curchunk_threaded_shared, iter->curchunk, iter->curchunk->next) != iter->curchunk);
+ iter->curchunk = *iter->curchunk_threaded_shared);
+
+ if (UNLIKELY(iter->curchunk == NULL)) {
+ return (ret->freeword == FREEWORD) ? NULL : ret;
+ }
+ }
iter->curchunk = iter->curchunk->next;
- if (iter->curchunk == NULL) {
+ if (UNLIKELY(iter->curchunk == NULL)) {
return (ret->freeword == FREEWORD) ? NULL : ret;
}
curnode = CHUNK_DATA(iter->curchunk);
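
Both the #if 0 reference version and the optimized BLI_mempool_iterstep rely on the same lockless hand-off: *curchunk_threaded_shared always names the most recently claimed chunk, and a finished iterator claims the following one by CAS-ing the shared pointer one link forward, then stepping to ->next itself. A self-contained C11 model of just that step, sketched with stdatomic.h and a simplified Chunk type rather than Blender's atomic_ops.h:

#include <stdatomic.h>
#include <stddef.h>

typedef struct Chunk { struct Chunk *next; } Chunk;

/* Claim the chunk after the most recently claimed one. Several threads may
 * race here; exactly one CAS wins per link, so every chunk is handed out
 * exactly once. Chunks are never freed during iteration, so reading
 * last->next while racing is safe. Returns NULL when the chain is spent. */
static Chunk *claim_next_chunk(_Atomic(Chunk *) *shared)
{
	Chunk *last = atomic_load(shared);
	while (last != NULL &&
	       !atomic_compare_exchange_weak(shared, &last, last->next)) {
		/* CAS failure reloaded `last` with the current *shared; retry. */
	}
	return (last != NULL) ? last->next : NULL;
}
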
diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c
index d69241c3737..eb7f186702b 100644
--- a/source/blender/blenlib/intern/task.c
+++ b/source/blender/blenlib/intern/task.c
@@ -32,6 +32,7 @@
#include "BLI_listbase.h"
#include "BLI_math.h"
+#include "BLI_mempool.h"
#include "BLI_task.h"
#include "BLI_threads.h"
@@ -1354,3 +1355,89 @@ void BLI_task_parallel_listbase(
BLI_spin_end(&state.lock);
}
+
+
+typedef struct ParallelMempoolState {
+ void *userdata;
+ TaskParallelMempoolFunc func;
+} ParallelMempoolState;
+
+static void parallel_mempool_func(
+ TaskPool * __restrict pool,
+ void *taskdata,
+ int UNUSED(threadid))
+{
+ ParallelMempoolState * __restrict state = BLI_task_pool_userdata(pool);
+ BLI_mempool_iter *iter = taskdata;
+ MempoolIterData *item;
+
+ while ((item = BLI_mempool_iterstep(iter)) != NULL) {
+ state->func(state->userdata, item);
+ }
+}
+
+/**
+ * This function allows parallelizing loops over all items of a mempool.
+ *
+ * \param mempool The iterable BLI_mempool to loop over.
+ * \param userdata Common userdata passed to all instances of \a func.
+ * \param func Callback function.
+ * \param use_threading If \a true, actually split and execute the loop in threads, else just run a sequential for loop
+ * (allows the caller to use any kind of test to switch parallelization on or off).
+ *
+ * \note There is no static scheduling here.
+ */
+void BLI_task_parallel_mempool(
+ BLI_mempool *mempool,
+ void *userdata,
+ TaskParallelMempoolFunc func,
+ const bool use_threading)
+{
+ TaskScheduler *task_scheduler;
+ TaskPool *task_pool;
+ ParallelMempoolState state;
+ int i, num_threads, num_tasks;
+
+ if (BLI_mempool_count(mempool) == 0) {
+ return;
+ }
+
+ if (!use_threading) {
+ BLI_mempool_iter iter;
+ BLI_mempool_iternew(mempool, &iter);
+
+ for (void *item = BLI_mempool_iterstep(&iter); item != NULL; item = BLI_mempool_iterstep(&iter)) {
+ func(userdata, item);
+ }
+ return;
+ }
+
+ task_scheduler = BLI_task_scheduler_get();
+ task_pool = BLI_task_pool_create(task_scheduler, &state);
+ num_threads = BLI_task_scheduler_num_threads(task_scheduler);
+
+ /* The idea here is to avoid creating a task for each loop iteration,
+ * and instead have a fixed number of tasks evenly distributed across CPU cores,
+ * each pulling the next item to be crunched via the thread-aware BLI_mempool_iter.
+ */
+ num_tasks = num_threads * 2;
+
+ state.userdata = userdata;
+ state.func = func;
+
+ BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool, (size_t)num_tasks);
+
+ for (i = 0; i < num_tasks; i++) {
+ /* Use this pool's pre-allocated tasks. */
+ BLI_task_pool_push_from_thread(task_pool,
+ parallel_mempool_func,
+ &mempool_iterators[i], false,
+ TASK_PRIORITY_HIGH,
+ task_pool->thread_id);
+ }
+
+ BLI_task_pool_work_and_wait(task_pool);
+ BLI_task_pool_free(task_pool);
+
+ BLI_mempool_iter_threadsafe_free(mempool_iterators);
+}
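
Because use_threading is a plain boolean, the "any kind of test" mentioned in the doc-comment usually reduces to a size cutoff at the call site; for tiny pools the sequential branch above avoids the task-pool overhead entirely. A hedged sketch of such a call site, reusing the my_elem_scale callback sketched earlier; the 1024-element cutoff is an arbitrary example, not a tuned value:

float scale = 2.0f;
const bool do_threading = (BLI_mempool_count(mempool) > 1024);
BLI_task_parallel_mempool(mempool, &scale, my_elem_scale, do_threading);
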