git.blender.org/blender.git
author    Bastien Montagne <montagne29@wanadoo.fr>  2019-06-05 00:23:55 +0300
committer Bastien Montagne <montagne29@wanadoo.fr>  2019-06-05 00:51:03 +0300
commit    1324659dee4981bce37557febb646710547fb646 (patch)
tree      20b3f2339e11f00a023312e647f4b7e3692d6d1e /tests/gtests/blenlib/BLI_task_test.cc
parent    30d9366d17328a5f15a32e537c0a2969ea82bd3f (diff)
GTests: BLI_task: Add basic tests for BLI_task_parallel_listbase(), and some performance benchmarks.

Nothing special to mention about the regression test itself; it basically mimics the one for `BLI_task_parallel_mempool()`.

Basic performance benchmarks do not tell us much, besides the fact that for very light processing of a listbase, even with 100k items, single-threaded code remains an order of magnitude faster than threaded code. Synchronization is just way too expensive in that case with the current code. This should be partially solvable with much bigger (and configurable) chunk sizes, though (current ones are just ridiculous for such cases ;) )...
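
The benchmark code referred to above is not part of the diff shown below (the diffstat is limited to BLI_task_test.cc), so the following is only a minimal, hypothetical sketch of the kind of single-thread vs. threaded comparison the message describes. It sticks to APIs that actually appear in the diff (BLI_addtail(), BLI_task_parallel_listbase(), MEM_calloc_arrayN(), BLI_threadapi_init()/exit()) plus BLI_threads.h and std::chrono for timing; the names bench_iter_func(), bench_listbase() and BENCH_NUM_ITEMS are invented for illustration and do not come from the commit.

/* Hypothetical sketch only: times one pass over a 100k-item listbase with very
 * light per-item work, first single-threaded, then threaded, to illustrate the
 * synchronization overhead mentioned in the commit message. */

#include <chrono>
#include <cstdio>

extern "C" {
#include "BLI_utildefines.h"

#include "BLI_listbase.h"
#include "BLI_task.h"
#include "BLI_threads.h"

#include "MEM_guardedalloc.h"
}

#define BENCH_NUM_ITEMS 100000

/* Very light per-item work: the case where threading loses badly. */
static void bench_iter_func(void *userdata, Link *item, int index)
{
  LinkData *data = (LinkData *)item;
  (void)userdata;
  data->data = POINTER_FROM_INT(index);
}

static void bench_listbase(const bool use_threading)
{
  ListBase list = {NULL, NULL};
  LinkData *items = (LinkData *)MEM_calloc_arrayN(BENCH_NUM_ITEMS, sizeof(*items), __func__);

  for (int i = 0; i < BENCH_NUM_ITEMS; i++) {
    BLI_addtail(&list, &items[i]);
  }

  const auto start = std::chrono::steady_clock::now();
  BLI_task_parallel_listbase(&list, NULL, bench_iter_func, use_threading);
  const auto end = std::chrono::steady_clock::now();

  printf("%s pass over %d items: %.3f ms\n",
         use_threading ? "threaded" : "single-thread",
         BENCH_NUM_ITEMS,
         std::chrono::duration<double, std::milli>(end - start).count());

  MEM_freeN(items);
}

int main()
{
  BLI_threadapi_init();
  bench_listbase(false); /* Single-threaded baseline. */
  bench_listbase(true);  /* Threaded; expected to be slower here due to sync cost. */
  BLI_threadapi_exit();
  return 0;
}

On a build where this links against blenlib, the single-threaded pass is the one the commit message expects to win by roughly an order of magnitude for such trivial per-item work.
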
Diffstat (limited to 'tests/gtests/blenlib/BLI_task_test.cc')
-rw-r--r--  tests/gtests/blenlib/BLI_task_test.cc  50
1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/tests/gtests/blenlib/BLI_task_test.cc b/tests/gtests/blenlib/BLI_task_test.cc
index 96c6b572e19..0c1868380da 100644
--- a/tests/gtests/blenlib/BLI_task_test.cc
+++ b/tests/gtests/blenlib/BLI_task_test.cc
@@ -6,13 +6,19 @@
#include "atomic_ops.h"
extern "C" {
+#include "BLI_utildefines.h"
+
+#include "BLI_listbase.h"
#include "BLI_mempool.h"
#include "BLI_task.h"
-#include "BLI_utildefines.h"
+
+#include "MEM_guardedalloc.h"
};
#define NUM_ITEMS 10000
+/* *** Parallel iterations over mempool items. *** */
+
static void task_mempool_iter_func(void *userdata, MempoolIterData *item)
{
int *data = (int *)item;
@@ -79,3 +85,45 @@ TEST(task, MempoolIter)
BLI_mempool_destroy(mempool);
BLI_threadapi_exit();
}
+
+/* *** Parallel iterations over double-linked list items. *** */
+
+static void task_listbase_iter_func(void *userdata, Link *item, int index)
+{
+  LinkData *data = (LinkData *)item;
+  int *count = (int *)userdata;
+
+  data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
+  atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
+}
+
+TEST(task, ListBaseIter)
+{
+  ListBase list = {NULL, NULL};
+  LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(
+      NUM_ITEMS, sizeof(*items_buffer), __func__);
+  BLI_threadapi_init();
+
+  int i;
+
+  int num_items = 0;
+  for (i = 0; i < NUM_ITEMS; i++) {
+    BLI_addtail(&list, &items_buffer[i]);
+    num_items++;
+  }
+
+  BLI_task_parallel_listbase(&list, &num_items, task_listbase_iter_func, true);
+
+  /* These checks ensure that all items of the listbase were processed once, and only once, as
+   * expected. */
+  EXPECT_EQ(num_items, 0);
+  LinkData *item;
+  for (i = 0, item = (LinkData *)list.first; i < NUM_ITEMS && item != NULL;
+       i++, item = item->next) {
+    EXPECT_EQ(POINTER_AS_INT(item->data), i);
+  }
+  EXPECT_EQ(NUM_ITEMS, i);
+
+  MEM_freeN(items_buffer);
+  BLI_threadapi_exit();
+}