git.blender.org/blender.git

Diffstat (limited to 'source/blender/blenlib/intern')
-rw-r--r--  source/blender/blenlib/intern/BLI_filelist.c  2
-rw-r--r--  source/blender/blenlib/intern/BLI_ghash.c  2
-rw-r--r--  source/blender/blenlib/intern/BLI_index_range.cc  8
-rw-r--r--  source/blender/blenlib/intern/BLI_kdopbvh.c  4
-rw-r--r--  source/blender/blenlib/intern/BLI_memarena.c  21
-rw-r--r--  source/blender/blenlib/intern/BLI_memiter.c  21
-rw-r--r--  source/blender/blenlib/intern/array_store.c  18
-rw-r--r--  source/blender/blenlib/intern/boxpack_2d.c  2
-rw-r--r--  source/blender/blenlib/intern/delaunay_2d.c  15
-rw-r--r--  source/blender/blenlib/intern/dot_export.cc  305
-rw-r--r--  source/blender/blenlib/intern/fileops.c  26
-rw-r--r--  source/blender/blenlib/intern/fnmatch.c  3
-rw-r--r--  source/blender/blenlib/intern/lasso_2d.c  48
-rw-r--r--  source/blender/blenlib/intern/math_base_inline.c  25
-rw-r--r--  source/blender/blenlib/intern/math_color.c  3
-rw-r--r--  source/blender/blenlib/intern/math_geom.c  33
-rw-r--r--  source/blender/blenlib/intern/math_solvers.c  2
-rw-r--r--  source/blender/blenlib/intern/math_vector.c  31
-rw-r--r--  source/blender/blenlib/intern/math_vector_inline.c  21
-rw-r--r--  source/blender/blenlib/intern/noise.c  7
-rw-r--r--  source/blender/blenlib/intern/path_util.c  89
-rw-r--r--  source/blender/blenlib/intern/quadric.c  11
-rw-r--r--  source/blender/blenlib/intern/stack.c  2
-rw-r--r--  source/blender/blenlib/intern/storage.c  64
-rw-r--r--  source/blender/blenlib/intern/storage_apple.mm  6
-rw-r--r--  source/blender/blenlib/intern/system.c  52
-rw-r--r--  source/blender/blenlib/intern/system_win32.c  410
-rw-r--r--  source/blender/blenlib/intern/task.c  1930
-rw-r--r--  source/blender/blenlib/intern/task_graph.cc  166
-rw-r--r--  source/blender/blenlib/intern/task_iterator.c  423
-rw-r--r--  source/blender/blenlib/intern/task_pool.cc  546
-rw-r--r--  source/blender/blenlib/intern/task_range.cc  168
-rw-r--r--  source/blender/blenlib/intern/task_scheduler.cc  78
-rw-r--r--  source/blender/blenlib/intern/threads.c  71
-rw-r--r--  source/blender/blenlib/intern/timeit.cc  36
35 files changed, 2446 insertions, 2203 deletions
diff --git a/source/blender/blenlib/intern/BLI_filelist.c b/source/blender/blenlib/intern/BLI_filelist.c
index 2e402f0c063..26f1de33aa9 100644
--- a/source/blender/blenlib/intern/BLI_filelist.c
+++ b/source/blender/blenlib/intern/BLI_filelist.c
@@ -147,7 +147,7 @@ static void bli_builddir(struct BuildDirCtx *dir_ctx, const char *dirname)
char pardir[FILE_MAXDIR];
BLI_strncpy(pardir, dirname, sizeof(pardir));
- if (BLI_parent_dir(pardir) && (BLI_access(pardir, R_OK) == 0)) {
+ if (BLI_path_parent_dir(pardir) && (BLI_access(pardir, R_OK) == 0)) {
struct dirlink *const dlink = (struct dirlink *)malloc(sizeof(struct dirlink));
if (dlink != NULL) {
dlink->name = BLI_strdup(FILENAME_PARENT);
diff --git a/source/blender/blenlib/intern/BLI_ghash.c b/source/blender/blenlib/intern/BLI_ghash.c
index 0800027e520..09dbf18acd0 100644
--- a/source/blender/blenlib/intern/BLI_ghash.c
+++ b/source/blender/blenlib/intern/BLI_ghash.c
@@ -1056,7 +1056,7 @@ void BLI_ghash_flag_clear(GHash *gh, uint flag)
* #BLI_ghash_len(gh) times before becoming done.
*
* \param gh: The GHash to iterate over.
- * \return Pointer to a new DynStr.
+ * \return Pointer to a new iterator.
*/
GHashIterator *BLI_ghashIterator_new(GHash *gh)
{
diff --git a/source/blender/blenlib/intern/BLI_index_range.cc b/source/blender/blenlib/intern/BLI_index_range.cc
index 90eb4e4a89c..fefb6e6598e 100644
--- a/source/blender/blenlib/intern/BLI_index_range.cc
+++ b/source/blender/blenlib/intern/BLI_index_range.cc
@@ -17,10 +17,10 @@
#include <atomic>
#include <mutex>
-#include "BLI_array_cxx.h"
-#include "BLI_array_ref.h"
-#include "BLI_index_range.h"
-#include "BLI_vector.h"
+#include "BLI_array.hh"
+#include "BLI_array_ref.hh"
+#include "BLI_index_range.hh"
+#include "BLI_vector.hh"
namespace BLI {
diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c
index 3a07cef7cac..da67baf0ead 100644
--- a/source/blender/blenlib/intern/BLI_kdopbvh.c
+++ b/source/blender/blenlib/intern/BLI_kdopbvh.c
@@ -719,10 +719,10 @@ static void non_recursive_bvh_div_nodes_task_cb(void *__restrict userdata,
refit_kdop_hull(data->tree, parent, parent_leafs_begin, parent_leafs_end);
split_axis = get_largest_axis(parent->bv);
- /* Save split axis (this can be used on raytracing to speedup the query time) */
+ /* Save split axis (this can be used on ray-tracing to speed up the query time) */
parent->main_axis = split_axis / 2;
- /* Split the childs along the split_axis, note: its not needed to sort the whole leafs array
+ /* Split the children along the split_axis, note: it's not needed to sort the whole leafs array
* Only to assure that the elements are partitioned on a way that each child takes the elements
* it would take in case the whole array was sorted.
* Split_leafs takes care of that "sort" problem. */
diff --git a/source/blender/blenlib/intern/BLI_memarena.c b/source/blender/blenlib/intern/BLI_memarena.c
index fd2367aa992..fc381c22315 100644
--- a/source/blender/blenlib/intern/BLI_memarena.c
+++ b/source/blender/blenlib/intern/BLI_memarena.c
@@ -34,6 +34,7 @@
#include "MEM_guardedalloc.h"
+#include "BLI_asan.h"
#include "BLI_memarena.h"
#include "BLI_strict_flags.h"
#include "BLI_utildefines.h"
@@ -46,18 +47,6 @@
# define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) UNUSED_VARS(pool, addr, size)
#endif
-/* Clang defines this. */
-#ifndef __has_feature
-# define __has_feature(x) 0
-#endif
-#if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)
-# include "sanitizer/asan_interface.h"
-#else
-/* Ensure return value is used. */
-# define ASAN_POISON_MEMORY_REGION(addr, size) (void)(0 && ((size) != 0 && (addr) != NULL))
-# define ASAN_UNPOISON_MEMORY_REGION(addr, size) (void)(0 && ((size) != 0 && (addr) != NULL))
-#endif
-
struct MemBuf {
struct MemBuf *next;
uchar data[0];
@@ -80,7 +69,7 @@ static void memarena_buf_free_all(struct MemBuf *mb)
struct MemBuf *mb_next = mb->next;
/* Unpoison memory because MEM_freeN might overwrite it. */
- ASAN_UNPOISON_MEMORY_REGION(mb, (uint)MEM_allocN_len(mb));
+ BLI_asan_unpoison(mb, (uint)MEM_allocN_len(mb));
MEM_freeN(mb);
mb = mb_next;
@@ -160,7 +149,7 @@ void *BLI_memarena_alloc(MemArena *ma, size_t size)
mb->next = ma->bufs;
ma->bufs = mb;
- ASAN_POISON_MEMORY_REGION(ma->curbuf, ma->cursize);
+ BLI_asan_poison(ma->curbuf, ma->cursize);
memarena_curbuf_align(ma);
}
@@ -171,7 +160,7 @@ void *BLI_memarena_alloc(MemArena *ma, size_t size)
VALGRIND_MEMPOOL_ALLOC(ma, ptr, size);
- ASAN_UNPOISON_MEMORY_REGION(ptr, size);
+ BLI_asan_unpoison(ptr, size);
return ptr;
}
@@ -215,7 +204,7 @@ void BLI_memarena_clear(MemArena *ma)
if (ma->use_calloc) {
memset(ma->curbuf, 0, curbuf_used);
}
- ASAN_POISON_MEMORY_REGION(ma->curbuf, ma->cursize);
+ BLI_asan_poison(ma->curbuf, ma->cursize);
}
VALGRIND_DESTROY_MEMPOOL(ma);
diff --git a/source/blender/blenlib/intern/BLI_memiter.c b/source/blender/blenlib/intern/BLI_memiter.c
index 017f08f8d35..1b9509e36d8 100644
--- a/source/blender/blenlib/intern/BLI_memiter.c
+++ b/source/blender/blenlib/intern/BLI_memiter.c
@@ -40,6 +40,7 @@
#include <stdlib.h>
#include <string.h>
+#include "BLI_asan.h"
#include "BLI_utildefines.h"
#include "BLI_memiter.h" /* own include */
@@ -50,18 +51,6 @@
/* TODO: Valgrind. */
-/* Clang defines this. */
-#ifndef __has_feature
-# define __has_feature(x) 0
-#endif
-#if defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)
-# include "sanitizer/asan_interface.h"
-#else
-/* Ensure return value is used. */
-# define ASAN_POISON_MEMORY_REGION(addr, size) (void)(0 && ((size) != 0 && (addr) != NULL))
-# define ASAN_UNPOISON_MEMORY_REGION(addr, size) (void)(0 && ((size) != 0 && (addr) != NULL))
-#endif
-
typedef uintptr_t data_t;
typedef intptr_t offset_t;
@@ -114,7 +103,7 @@ static void memiter_set_rewind_offset(BLI_memiter *mi)
{
BLI_memiter_elem *elem = (BLI_memiter_elem *)mi->data_curr;
- ASAN_UNPOISON_MEMORY_REGION(elem, sizeof(BLI_memiter_elem));
+ BLI_asan_unpoison(elem, sizeof(BLI_memiter_elem));
elem->size = (offset_t)(((data_t *)mi->tail) - mi->data_curr);
BLI_assert(elem->size < 0);
@@ -197,14 +186,14 @@ void *BLI_memiter_alloc(BLI_memiter *mi, uint elem_size)
mi->data_last = chunk->data + (chunk_size - 1);
data_curr_next = mi->data_curr + (1 + data_offset);
- ASAN_POISON_MEMORY_REGION(chunk->data, chunk_size * sizeof(data_t));
+ BLI_asan_poison(chunk->data, chunk_size * sizeof(data_t));
}
BLI_assert(data_curr_next <= mi->data_last);
BLI_memiter_elem *elem = (BLI_memiter_elem *)mi->data_curr;
- ASAN_UNPOISON_MEMORY_REGION(elem, sizeof(BLI_memiter_elem) + elem_size);
+ BLI_asan_unpoison(elem, sizeof(BLI_memiter_elem) + elem_size);
elem->size = (offset_t)elem_size;
mi->data_curr = data_curr_next;
@@ -242,7 +231,7 @@ static void memiter_free_data(BLI_memiter *mi)
BLI_memiter_chunk *chunk_next = chunk->next;
/* Unpoison memory because MEM_freeN might overwrite it. */
- ASAN_UNPOISON_MEMORY_REGION(chunk, MEM_allocN_len(chunk));
+ BLI_asan_unpoison(chunk, MEM_allocN_len(chunk));
MEM_freeN(chunk);
chunk = chunk_next;
diff --git a/source/blender/blenlib/intern/array_store.c b/source/blender/blenlib/intern/array_store.c
index c87dbee0f0e..85fbe7ece0f 100644
--- a/source/blender/blenlib/intern/array_store.c
+++ b/source/blender/blenlib/intern/array_store.c
@@ -406,7 +406,7 @@ static void bchunk_list_decref(BArrayMemory *bs_mem, BChunkList *chunk_list)
static size_t bchunk_list_data_check(const BChunkList *chunk_list, const uchar *data)
{
size_t offset = 0;
- for (BChunkRef *cref = chunk_list->chunk_refs.first; cref; cref = cref->next) {
+ LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
if (memcmp(&data[offset], cref->link->data, cref->link->data_len) != 0) {
return false;
}
@@ -1511,7 +1511,7 @@ void BLI_array_store_clear(BArrayStore *bs)
size_t BLI_array_store_calc_size_expanded_get(const BArrayStore *bs)
{
size_t size_accum = 0;
- for (const BArrayState *state = bs->states.first; state; state = state->next) {
+ LISTBASE_FOREACH (const BArrayState *, state, &bs->states) {
size_accum += state->chunk_list->total_size;
}
return size_accum;
@@ -1632,14 +1632,14 @@ void BLI_array_store_state_data_get(BArrayState *state, void *data)
{
#ifdef USE_PARANOID_CHECKS
size_t data_test_len = 0;
- for (BChunkRef *cref = state->chunk_list->chunk_refs.first; cref; cref = cref->next) {
+ LISTBASE_FOREACH (BChunkRef *, cref, &state->chunk_list->chunk_refs) {
data_test_len += cref->link->data_len;
}
BLI_assert(data_test_len == state->chunk_list->total_size);
#endif
uchar *data_step = (uchar *)data;
- for (BChunkRef *cref = state->chunk_list->chunk_refs.first; cref; cref = cref->next) {
+ LISTBASE_FOREACH (BChunkRef *, cref, &state->chunk_list->chunk_refs) {
BLI_assert(cref->link->users > 0);
memcpy(data_step, cref->link->data, cref->link->data_len);
data_step += cref->link->data_len;
@@ -1666,7 +1666,7 @@ void *BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_le
static size_t bchunk_list_size(const BChunkList *chunk_list)
{
size_t total_size = 0;
- for (BChunkRef *cref = chunk_list->chunk_refs.first; cref; cref = cref->next) {
+ LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
total_size += cref->link->data_len;
}
return total_size;
@@ -1679,7 +1679,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
/* Check Length
* ------------ */
- for (BArrayState *state = bs->states.first; state; state = state->next) {
+ LISTBASE_FOREACH (BArrayState *, state, &bs->states) {
BChunkList *chunk_list = state->chunk_list;
if (!(bchunk_list_size(chunk_list) == chunk_list->total_size)) {
return false;
@@ -1692,7 +1692,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
#ifdef USE_MERGE_CHUNKS
/* ensure we merge all chunks that could be merged */
if (chunk_list->total_size > bs->info.chunk_byte_size_min) {
- for (BChunkRef *cref = chunk_list->chunk_refs.first; cref; cref = cref->next) {
+ LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
if (cref->link->data_len < bs->info.chunk_byte_size_min) {
return false;
}
@@ -1734,7 +1734,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
GHash *chunk_map = BLI_ghash_ptr_new(__func__);
int totrefs = 0;
- for (BArrayState *state = bs->states.first; state; state = state->next) {
+ LISTBASE_FOREACH (BArrayState *, state, &bs->states) {
GHASH_PTR_ADD_USER(chunk_list_map, state->chunk_list);
}
GHASH_ITER (gh_iter, chunk_list_map) {
@@ -1753,7 +1753,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
/* count chunk's */
GHASH_ITER (gh_iter, chunk_list_map) {
const struct BChunkList *chunk_list = BLI_ghashIterator_getKey(&gh_iter);
- for (const BChunkRef *cref = chunk_list->chunk_refs.first; cref; cref = cref->next) {
+ LISTBASE_FOREACH (const BChunkRef *, cref, &chunk_list->chunk_refs) {
GHASH_PTR_ADD_USER(chunk_map, cref->link);
totrefs += 1;
}
diff --git a/source/blender/blenlib/intern/boxpack_2d.c b/source/blender/blenlib/intern/boxpack_2d.c
index 6ecadeecec5..83866f766df 100644
--- a/source/blender/blenlib/intern/boxpack_2d.c
+++ b/source/blender/blenlib/intern/boxpack_2d.c
@@ -705,7 +705,7 @@ void BLI_box_pack_2d_fixedarea(ListBase *boxes, int width, int height, ListBase
LISTBASE_FOREACH_MUTABLE (FixedSizeBoxPack *, box, boxes) {
LISTBASE_FOREACH (FixedSizeBoxPack *, space, &spaces) {
/* Skip this space if it's too small. */
- if (box->w > space->w || box->h > space->w) {
+ if (box->w > space->w || box->h > space->h) {
continue;
}
diff --git a/source/blender/blenlib/intern/delaunay_2d.c b/source/blender/blenlib/intern/delaunay_2d.c
index 201c8a66d1a..4e0cd3a78dc 100644
--- a/source/blender/blenlib/intern/delaunay_2d.c
+++ b/source/blender/blenlib/intern/delaunay_2d.c
@@ -1264,6 +1264,7 @@ static void fill_crossdata_for_intersect(CDT_state *cdt,
se_vcva = t->next->next;
BLI_assert(se_vcva->vert == vc && se_vcva->next->vert == va);
BLI_assert(se_vcvb->vert == vc && se_vcvb->next->vert == vb);
+ UNUSED_VARS_NDEBUG(vc);
isect = isect_seg_seg_v2_lambda_mu_db(va->co, vb->co, curco, v2->co, &lambda, &mu);
#ifdef DEBUG_CDT
if (dbg_level > 0) {
@@ -1992,25 +1993,25 @@ typedef struct EdgeVertLambda {
/* For sorting first by edge id, then by lambda, then by vert id. */
static int evl_cmp(const void *a, const void *b)
{
- const EdgeVertLambda *sa = a;
+ const EdgeVertLambda *area = a;
const EdgeVertLambda *sb = b;
- if (sa->e_id < sb->e_id) {
+ if (area->e_id < sb->e_id) {
return -1;
}
- else if (sa->e_id > sb->e_id) {
+ else if (area->e_id > sb->e_id) {
return 1;
}
- else if (sa->lambda < sb->lambda) {
+ else if (area->lambda < sb->lambda) {
return -1;
}
- else if (sa->lambda > sb->lambda) {
+ else if (area->lambda > sb->lambda) {
return 1;
}
- else if (sa->v_id < sb->v_id) {
+ else if (area->v_id < sb->v_id) {
return -1;
}
- else if (sa->v_id > sb->v_id) {
+ else if (area->v_id > sb->v_id) {
return 1;
}
return 0;
diff --git a/source/blender/blenlib/intern/dot_export.cc b/source/blender/blenlib/intern/dot_export.cc
new file mode 100644
index 00000000000..96de4056fc5
--- /dev/null
+++ b/source/blender/blenlib/intern/dot_export.cc
@@ -0,0 +1,305 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <iomanip>
+
+#include "BLI_dot_export.hh"
+
+namespace BLI {
+namespace DotExport {
+
+/* Graph Building
+ ************************************************/
+
+Node &Graph::new_node(StringRef label)
+{
+ Node *node = new Node(*this);
+ m_nodes.append(std::unique_ptr<Node>(node));
+ m_top_level_nodes.add_new(node);
+ node->set_attribute("label", label);
+ return *node;
+}
+
+Cluster &Graph::new_cluster(StringRef label)
+{
+ Cluster *cluster = new Cluster(*this);
+ m_clusters.append(std::unique_ptr<Cluster>(cluster));
+ m_top_level_clusters.add_new(cluster);
+ cluster->set_attribute("label", label);
+ return *cluster;
+}
+
+UndirectedEdge &UndirectedGraph::new_edge(NodePort a, NodePort b)
+{
+ UndirectedEdge *edge = new UndirectedEdge(a, b);
+ m_edges.append(std::unique_ptr<UndirectedEdge>(edge));
+ return *edge;
+}
+
+DirectedEdge &DirectedGraph::new_edge(NodePort from, NodePort to)
+{
+ DirectedEdge *edge = new DirectedEdge(from, to);
+ m_edges.append(std::unique_ptr<DirectedEdge>(edge));
+ return *edge;
+}
+
+void Cluster::set_parent_cluster(Cluster *new_parent)
+{
+ if (m_parent == new_parent) {
+ return;
+ }
+ else if (m_parent == nullptr) {
+ m_graph.m_top_level_clusters.remove(this);
+ new_parent->m_children.add_new(this);
+ }
+ else if (new_parent == nullptr) {
+ m_parent->m_children.remove(this);
+ m_graph.m_top_level_clusters.add_new(this);
+ }
+ else {
+ m_parent->m_children.remove(this);
+ new_parent->m_children.add_new(this);
+ }
+ m_parent = new_parent;
+}
+
+void Node::set_parent_cluster(Cluster *cluster)
+{
+ if (m_cluster == cluster) {
+ return;
+ }
+ else if (m_cluster == nullptr) {
+ m_graph.m_top_level_nodes.remove(this);
+ cluster->m_nodes.add_new(this);
+ }
+ else if (cluster == nullptr) {
+ m_cluster->m_nodes.remove(this);
+ m_graph.m_top_level_nodes.add_new(this);
+ }
+ else {
+ m_cluster->m_nodes.remove(this);
+ cluster->m_nodes.add_new(this);
+ }
+ m_cluster = cluster;
+}
+
+/* Utility methods
+ **********************************************/
+
+void Graph::set_random_cluster_bgcolors()
+{
+ for (Cluster *cluster : m_top_level_clusters) {
+ cluster->set_random_cluster_bgcolors();
+ }
+}
+
+void Cluster::set_random_cluster_bgcolors()
+{
+ float hue = rand() / (float)RAND_MAX;
+ float saturation = 0.3f;
+ float value = 0.8f;
+ this->set_attribute("bgcolor", color_attr_from_hsv(hue, saturation, value));
+
+ for (Cluster *cluster : m_children) {
+ cluster->set_random_cluster_bgcolors();
+ }
+}
+
+/* Dot Generation
+ **********************************************/
+
+std::string DirectedGraph::to_dot_string() const
+{
+ std::stringstream ss;
+ ss << "digraph {\n";
+ this->export__declare_nodes_and_clusters(ss);
+ ss << "\n";
+
+ for (const std::unique_ptr<DirectedEdge> &edge : m_edges) {
+ edge->export__as_edge_statement(ss);
+ ss << "\n";
+ }
+
+ ss << "}\n";
+ return ss.str();
+}
+
+std::string UndirectedGraph::to_dot_string() const
+{
+ std::stringstream ss;
+ ss << "graph {\n";
+ this->export__declare_nodes_and_clusters(ss);
+ ss << "\n";
+
+ for (const std::unique_ptr<UndirectedEdge> &edge : m_edges) {
+ edge->export__as_edge_statement(ss);
+ ss << "\n";
+ }
+
+ ss << "}\n";
+ return ss.str();
+}
+
+void Graph::export__declare_nodes_and_clusters(std::stringstream &ss) const
+{
+ ss << "graph ";
+ m_attributes.export__as_bracket_list(ss);
+ ss << "\n\n";
+
+ for (Node *node : m_top_level_nodes) {
+ node->export__as_declaration(ss);
+ }
+
+ for (Cluster *cluster : m_top_level_clusters) {
+ cluster->export__declare_nodes_and_clusters(ss);
+ }
+}
+
+void Cluster::export__declare_nodes_and_clusters(std::stringstream &ss) const
+{
+ ss << "subgraph cluster_" << (uintptr_t)this << " {\n";
+
+ ss << "graph ";
+ m_attributes.export__as_bracket_list(ss);
+ ss << "\n\n";
+
+ for (Node *node : m_nodes) {
+ node->export__as_declaration(ss);
+ }
+
+ for (Cluster *cluster : m_children) {
+ cluster->export__declare_nodes_and_clusters(ss);
+ }
+
+ ss << "}\n";
+}
+
+void DirectedEdge::export__as_edge_statement(std::stringstream &ss) const
+{
+ m_a.to_dot_string(ss);
+ ss << " -> ";
+ m_b.to_dot_string(ss);
+ ss << " ";
+ m_attributes.export__as_bracket_list(ss);
+}
+
+void UndirectedEdge::export__as_edge_statement(std::stringstream &ss) const
+{
+ m_a.to_dot_string(ss);
+ ss << " -- ";
+ m_b.to_dot_string(ss);
+ ss << " ";
+ m_attributes.export__as_bracket_list(ss);
+}
+
+void AttributeList::export__as_bracket_list(std::stringstream &ss) const
+{
+ ss << "[";
+ m_attributes.foreach_item([&](StringRef key, StringRef value) {
+ if (StringRef(value).startswith("<")) {
+ /* Don't draw the quotes, this is an html-like value. */
+ ss << key << "=" << value << ", ";
+ }
+ else {
+ ss << key << "=\"" << value << "\", ";
+ }
+ });
+ ss << "]";
+}
+
+void Node::export__as_id(std::stringstream &ss) const
+{
+ ss << '"' << (uintptr_t)this << '"';
+}
+
+void Node::export__as_declaration(std::stringstream &ss) const
+{
+ this->export__as_id(ss);
+ ss << " ";
+ m_attributes.export__as_bracket_list(ss);
+ ss << "\n";
+}
+
+void NodePort::to_dot_string(std::stringstream &ss) const
+{
+ m_node->export__as_id(ss);
+ if (m_port_name.has_value()) {
+ ss << ":" << m_port_name.value();
+ }
+}
+
+std::string color_attr_from_hsv(float h, float s, float v)
+{
+ std::stringstream ss;
+ ss << std::setprecision(4) << h << ' ' << s << ' ' << v;
+ return ss.str();
+}
+
+NodeWithSocketsRef::NodeWithSocketsRef(Node &node,
+ StringRef name,
+ ArrayRef<std::string> input_names,
+ ArrayRef<std::string> output_names)
+ : m_node(&node)
+{
+ std::stringstream ss;
+
+ ss << "<<table border=\"0\" cellspacing=\"3\">";
+
+ /* Header */
+ ss << "<tr><td colspan=\"3\" align=\"center\"><b>";
+ ss << ((name.size() == 0) ? "No Name" : name);
+ ss << "</b></td></tr>";
+
+ /* Sockets */
+ uint socket_max_amount = std::max(input_names.size(), output_names.size());
+ for (uint i = 0; i < socket_max_amount; i++) {
+ ss << "<tr>";
+ if (i < input_names.size()) {
+ StringRef name = input_names[i];
+ if (name.size() == 0) {
+ name = "No Name";
+ }
+ ss << "<td align=\"left\" port=\"in" << i << "\">";
+ ss << name;
+ ss << "</td>";
+ }
+ else {
+ ss << "<td></td>";
+ }
+ ss << "<td></td>";
+ if (i < output_names.size()) {
+ StringRef name = output_names[i];
+ if (name.size() == 0) {
+ name = "No Name";
+ }
+ ss << "<td align=\"right\" port=\"out" << i << "\">";
+ ss << name;
+ ss << "</td>";
+ }
+ else {
+ ss << "<td></td>";
+ }
+ ss << "</tr>";
+ }
+
+ ss << "</table>>";
+
+ m_node->set_attribute("label", ss.str());
+ m_node->set_shape(Attr_shape::Rectangle);
+}
+
+} // namespace DotExport
+} // namespace BLI
diff --git a/source/blender/blenlib/intern/fileops.c b/source/blender/blenlib/intern/fileops.c
index da77046547c..b9133edcae3 100644
--- a/source/blender/blenlib/intern/fileops.c
+++ b/source/blender/blenlib/intern/fileops.c
@@ -492,7 +492,7 @@ static bool delete_recursive(const char *dir)
/* dir listing produces dir path without trailing slash... */
BLI_strncpy(path, fl->path, sizeof(path));
- BLI_add_slash(path);
+ BLI_path_slash_ensure(path);
if (delete_recursive(path)) {
err = true;
@@ -562,9 +562,9 @@ int BLI_move(const char *file, const char *to)
BLI_strncpy(str, to, sizeof(str));
/* points 'to' to a directory ? */
- if (BLI_last_slash(str) == (str + strlen(str) - 1)) {
- if (BLI_last_slash(file) != NULL) {
- strcat(str, BLI_last_slash(file) + 1);
+ if (BLI_path_slash_rfind(str) == (str + strlen(str) - 1)) {
+ if (BLI_path_slash_rfind(file) != NULL) {
+ strcat(str, BLI_path_slash_rfind(file) + 1);
}
}
@@ -594,9 +594,9 @@ int BLI_copy(const char *file, const char *to)
BLI_strncpy(str, to, sizeof(str));
/* points 'to' to a directory ? */
- if (BLI_last_slash(str) == (str + strlen(str) - 1)) {
- if (BLI_last_slash(file) != NULL) {
- strcat(str, BLI_last_slash(file) + 1);
+ if (BLI_path_slash_rfind(str) == (str + strlen(str) - 1)) {
+ if (BLI_path_slash_rfind(file) != NULL) {
+ strcat(str, BLI_path_slash_rfind(file) + 1);
}
}
@@ -638,7 +638,7 @@ bool BLI_dir_create_recursive(const char *dirname)
* blah1/blah2 (without slash) */
BLI_strncpy(tmp, dirname, sizeof(tmp));
- BLI_del_slash(tmp);
+ BLI_path_slash_rstrip(tmp);
/* check special case "c:\foo", don't try create "c:", harmless but prints an error below */
if (isalpha(tmp[0]) && (tmp[1] == ':') && tmp[2] == '\0') {
@@ -652,7 +652,7 @@ bool BLI_dir_create_recursive(const char *dirname)
return false;
}
- lslash = (char *)BLI_last_slash(tmp);
+ lslash = (char *)BLI_path_slash_rfind(tmp);
if (lslash) {
/* Split about the last slash and recurse */
@@ -723,7 +723,7 @@ static void join_dirfile_alloc(char **dst, size_t *alloc_len, const char *dir, c
static char *strip_last_slash(const char *dir)
{
char *result = BLI_strdup(dir);
- BLI_del_slash(result);
+ BLI_path_slash_rstrip(result);
return result;
}
@@ -1277,7 +1277,7 @@ static const char *check_destination(const char *file, const char *to)
size_t len = 0;
str = strip_last_slash(file);
- filename = BLI_last_slash(str);
+ filename = BLI_path_slash_rfind(str);
if (!filename) {
MEM_freeN(str);
@@ -1350,9 +1350,9 @@ bool BLI_dir_create_recursive(const char *dirname)
BLI_strncpy(tmp, dirname, size);
/* Avoids one useless recursion in case of '/foo/bar/' path... */
- BLI_del_slash(tmp);
+ BLI_path_slash_rstrip(tmp);
- lslash = (char *)BLI_last_slash(tmp);
+ lslash = (char *)BLI_path_slash_rfind(tmp);
if (lslash) {
/* Split about the last slash and recurse */
*lslash = 0;
diff --git a/source/blender/blenlib/intern/fnmatch.c b/source/blender/blenlib/intern/fnmatch.c
index 3df09976972..33fe34e7c24 100644
--- a/source/blender/blenlib/intern/fnmatch.c
+++ b/source/blender/blenlib/intern/fnmatch.c
@@ -27,8 +27,9 @@
# define _GNU_SOURCE 1
#endif
-#include <errno.h>
#include <ctype.h>
+#include <errno.h>
+
#include "BLI_fnmatch.h"
diff --git a/source/blender/blenlib/intern/lasso_2d.c b/source/blender/blenlib/intern/lasso_2d.c
index f1e9b1e655f..a01adf4fa6a 100644
--- a/source/blender/blenlib/intern/lasso_2d.c
+++ b/source/blender/blenlib/intern/lasso_2d.c
@@ -28,47 +28,47 @@
#include "BLI_lasso_2d.h" /* own include */
-void BLI_lasso_boundbox(rcti *rect, const int mcords[][2], const unsigned int moves)
+void BLI_lasso_boundbox(rcti *rect, const int mcoords[][2], const unsigned int mcoords_len)
{
unsigned int a;
- rect->xmin = rect->xmax = mcords[0][0];
- rect->ymin = rect->ymax = mcords[0][1];
+ rect->xmin = rect->xmax = mcoords[0][0];
+ rect->ymin = rect->ymax = mcoords[0][1];
- for (a = 1; a < moves; a++) {
- if (mcords[a][0] < rect->xmin) {
- rect->xmin = mcords[a][0];
+ for (a = 1; a < mcoords_len; a++) {
+ if (mcoords[a][0] < rect->xmin) {
+ rect->xmin = mcoords[a][0];
}
- else if (mcords[a][0] > rect->xmax) {
- rect->xmax = mcords[a][0];
+ else if (mcoords[a][0] > rect->xmax) {
+ rect->xmax = mcoords[a][0];
}
- if (mcords[a][1] < rect->ymin) {
- rect->ymin = mcords[a][1];
+ if (mcoords[a][1] < rect->ymin) {
+ rect->ymin = mcoords[a][1];
}
- else if (mcords[a][1] > rect->ymax) {
- rect->ymax = mcords[a][1];
+ else if (mcoords[a][1] > rect->ymax) {
+ rect->ymax = mcoords[a][1];
}
}
}
-bool BLI_lasso_is_point_inside(const int mcords[][2],
- const unsigned int moves,
+bool BLI_lasso_is_point_inside(const int mcoords[][2],
+ const unsigned int mcoords_len,
const int sx,
const int sy,
const int error_value)
{
- if (sx == error_value || moves == 0) {
+ if (sx == error_value || mcoords_len == 0) {
return false;
}
else {
int pt[2] = {sx, sy};
- return isect_point_poly_v2_int(pt, mcords, moves, true);
+ return isect_point_poly_v2_int(pt, mcoords, mcoords_len, true);
}
}
/* edge version for lasso select. we assume boundbox check was done */
-bool BLI_lasso_is_edge_inside(const int mcords[][2],
- const unsigned int moves,
+bool BLI_lasso_is_edge_inside(const int mcoords[][2],
+ const unsigned int mcoords_len,
int x0,
int y0,
int x1,
@@ -76,27 +76,27 @@ bool BLI_lasso_is_edge_inside(const int mcords[][2],
const int error_value)
{
- if (x0 == error_value || x1 == error_value || moves == 0) {
+ if (x0 == error_value || x1 == error_value || mcoords_len == 0) {
return false;
}
const int v1[2] = {x0, y0}, v2[2] = {x1, y1};
/* check points in lasso */
- if (BLI_lasso_is_point_inside(mcords, moves, v1[0], v1[1], error_value)) {
+ if (BLI_lasso_is_point_inside(mcoords, mcoords_len, v1[0], v1[1], error_value)) {
return true;
}
- if (BLI_lasso_is_point_inside(mcords, moves, v2[0], v2[1], error_value)) {
+ if (BLI_lasso_is_point_inside(mcoords, mcoords_len, v2[0], v2[1], error_value)) {
return true;
}
/* no points in lasso, so we have to intersect with lasso edge */
- if (isect_seg_seg_v2_int(mcords[0], mcords[moves - 1], v1, v2) > 0) {
+ if (isect_seg_seg_v2_int(mcoords[0], mcoords[mcoords_len - 1], v1, v2) > 0) {
return true;
}
- for (unsigned int a = 0; a < moves - 1; a++) {
- if (isect_seg_seg_v2_int(mcords[a], mcords[a + 1], v1, v2) > 0) {
+ for (unsigned int a = 0; a < mcoords_len - 1; a++) {
+ if (isect_seg_seg_v2_int(mcoords[a], mcoords[a + 1], v1, v2) > 0) {
return true;
}
}
diff --git a/source/blender/blenlib/intern/math_base_inline.c b/source/blender/blenlib/intern/math_base_inline.c
index c4b68e9164c..1b388dcf11f 100644
--- a/source/blender/blenlib/intern/math_base_inline.c
+++ b/source/blender/blenlib/intern/math_base_inline.c
@@ -38,6 +38,10 @@
#include "BLI_math_base.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* copied from BLI_utildefines.h */
#ifdef __GNUC__
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
@@ -356,6 +360,14 @@ MINLINE int divide_floor_i(int a, int b)
}
/**
+ * Integer division that returns the ceiling, instead of flooring like normal C division.
+ */
+MINLINE uint divide_ceil_u(uint a, uint b)
+{
+ return (a + b - 1) / b;
+}
+
+/**
* modulo that handles negative numbers, works the same as Python's.
*/
MINLINE int mod_i(int i, int n)
@@ -524,6 +536,15 @@ MINLINE size_t max_zz(size_t a, size_t b)
return (b < a) ? a : b;
}
+MINLINE char min_cc(char a, char b)
+{
+ return (a < b) ? a : b;
+}
+MINLINE char max_cc(char a, char b)
+{
+ return (b < a) ? a : b;
+}
+
MINLINE int clamp_i(int value, int min, int max)
{
return min_ii(max_ii(value, min), max);
@@ -784,4 +805,8 @@ MINLINE unsigned char unit_ushort_to_uchar(unsigned short val)
} \
((void)0)
+#ifdef __cplusplus
+}
+#endif
+
#endif /* __MATH_BASE_INLINE_C__ */
diff --git a/source/blender/blenlib/intern/math_color.c b/source/blender/blenlib/intern/math_color.c
index cc29ebe4f20..c1f7b0c2907 100644
--- a/source/blender/blenlib/intern/math_color.c
+++ b/source/blender/blenlib/intern/math_color.c
@@ -91,6 +91,7 @@ void rgb_to_yuv(float r, float g, float b, float *ly, float *lu, float *lv, int
break;
case BLI_YUV_ITU_BT709:
default:
+ BLI_assert(colorspace == BLI_YUV_ITU_BT709);
y = 0.2126f * r + 0.7152f * g + 0.0722f * b;
u = -0.09991f * r - 0.33609f * g + 0.436f * b;
v = 0.615f * r - 0.55861f * g - 0.05639f * b;
@@ -113,6 +114,8 @@ void yuv_to_rgb(float y, float u, float v, float *lr, float *lg, float *lb, int
b = y + 2.032f * u;
break;
case BLI_YUV_ITU_BT709:
+ default:
+ BLI_assert(colorspace == BLI_YUV_ITU_BT709);
r = y + 1.28033f * v;
g = y - 0.21482f * u - 0.38059f * v;
b = y + 2.12798f * u;
diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c
index 4c0d7e08a3c..e7c1fc8c2d9 100644
--- a/source/blender/blenlib/intern/math_geom.c
+++ b/source/blender/blenlib/intern/math_geom.c
@@ -2379,7 +2379,10 @@ bool isect_tri_tri_epsilon_v3(const float t_a0[3],
if (UNLIKELY(edge_fac == -1.0f)) {
/* pass */
}
- else if (edge_fac > 0.0f && edge_fac < 1.0f) {
+ /* Important to include 0.0f and 1.0f as one of the triangle's vertices may be placed
+ * exactly on the plane. In this case both its edges will have a factor of 0 or 1,
+ * but not be going through the plane. See T73566. */
+ else if (edge_fac >= 0.0f && edge_fac <= 1.0f) {
float ix_tri[3];
float span_fac;
@@ -4217,7 +4220,19 @@ static float mean_value_half_tan_v2(const struct Float2_Len *d_curr,
void interp_weights_poly_v3(float *w, float v[][3], const int n, const float co[3])
{
- const float eps = 1e-5f; /* take care, low values cause [#36105] */
+ /* Before starting to calculate the weight, we need to figure out the floating point precision we
+ * can expect from the supplied data. */
+ float max_value = 0;
+
+ for (int i = 0; i < n; i++) {
+ max_value = max_ff(max_value, fabsf(v[i][0] - co[0]));
+ max_value = max_ff(max_value, fabsf(v[i][1] - co[1]));
+ max_value = max_ff(max_value, fabsf(v[i][2] - co[2]));
+ }
+
+ /* These two values were derived by empirically testing different values that work for the test
+ * files in D7772. */
+ const float eps = 16.0f * FLT_EPSILON * max_value;
const float eps_sq = eps * eps;
const float *v_curr, *v_next;
float ht_prev, ht; /* half tangents */
@@ -4290,8 +4305,20 @@ void interp_weights_poly_v3(float *w, float v[][3], const int n, const float co[
void interp_weights_poly_v2(float *w, float v[][2], const int n, const float co[2])
{
- const float eps = 1e-5f; /* take care, low values cause [#36105] */
+ /* Before starting to calculate the weight, we need to figure out the floating point precision we
+ * can expect from the supplied data. */
+ float max_value = 0;
+
+ for (int i = 0; i < n; i++) {
+ max_value = max_ff(max_value, fabsf(v[i][0] - co[0]));
+ max_value = max_ff(max_value, fabsf(v[i][1] - co[1]));
+ }
+
+ /* These two values were derived by empirically testing different values that work for the test
+ * files in D7772. */
+ const float eps = 16.0f * FLT_EPSILON * max_value;
const float eps_sq = eps * eps;
+
const float *v_curr, *v_next;
float ht_prev, ht; /* half tangents */
float totweight = 0.0f;
diff --git a/source/blender/blenlib/intern/math_solvers.c b/source/blender/blenlib/intern/math_solvers.c
index 235589abdab..cda3d9b66a2 100644
--- a/source/blender/blenlib/intern/math_solvers.c
+++ b/source/blender/blenlib/intern/math_solvers.c
@@ -55,7 +55,7 @@ bool BLI_eigen_solve_selfadjoint_m3(const float m3[3][3],
}
/**
- * \brief Compute the SVD (Singular Values Decomposition) of given 3D matrix (m3 = USV*).
+ * \brief Compute the SVD (Singular Values Decomposition) of given 3D matrix (m3 = USV*).
*
* \param m3: the matrix to decompose.
* \return r_U the computed left singular vector of \a m3 (NULL if not needed).
diff --git a/source/blender/blenlib/intern/math_vector.c b/source/blender/blenlib/intern/math_vector.c
index 5919b7e1dd6..6ec7c960d6b 100644
--- a/source/blender/blenlib/intern/math_vector.c
+++ b/source/blender/blenlib/intern/math_vector.c
@@ -778,7 +778,7 @@ void bisect_v3_v3v3v3(float out[3], const float v1[3], const float v2[3], const
* <pre>
* v
* + ^
- * \ |
+ * \ |
* \|
* + normal: axis of reflection
* /
@@ -949,6 +949,35 @@ void print_vn(const char *str, const float v[], const int n)
printf("\n");
}
+void minmax_v4v4_v4(float min[4], float max[4], const float vec[4])
+{
+ if (min[0] > vec[0]) {
+ min[0] = vec[0];
+ }
+ if (min[1] > vec[1]) {
+ min[1] = vec[1];
+ }
+ if (min[2] > vec[2]) {
+ min[2] = vec[2];
+ }
+ if (min[3] > vec[3]) {
+ min[3] = vec[3];
+ }
+
+ if (max[0] < vec[0]) {
+ max[0] = vec[0];
+ }
+ if (max[1] < vec[1]) {
+ max[1] = vec[1];
+ }
+ if (max[2] < vec[2]) {
+ max[2] = vec[2];
+ }
+ if (max[3] < vec[3]) {
+ max[3] = vec[3];
+ }
+}
+
void minmax_v3v3_v3(float min[3], float max[3], const float vec[3])
{
if (min[0] > vec[0]) {
diff --git a/source/blender/blenlib/intern/math_vector_inline.c b/source/blender/blenlib/intern/math_vector_inline.c
index d2c55233653..ca405907bdd 100644
--- a/source/blender/blenlib/intern/math_vector_inline.c
+++ b/source/blender/blenlib/intern/math_vector_inline.c
@@ -123,6 +123,27 @@ MINLINE void copy_v4_v4_uchar(unsigned char r[4], const unsigned char a[4])
r[3] = a[3];
}
+MINLINE void copy_v2_uchar(unsigned char r[2], const unsigned char a)
+{
+ r[0] = a;
+ r[1] = a;
+}
+
+MINLINE void copy_v3_uchar(unsigned char r[3], const unsigned char a)
+{
+ r[0] = a;
+ r[1] = a;
+ r[2] = a;
+}
+
+MINLINE void copy_v4_uchar(unsigned char r[4], const unsigned char a)
+{
+ r[0] = a;
+ r[1] = a;
+ r[2] = a;
+ r[3] = a;
+}
+
/* char */
MINLINE void copy_v2_v2_char(char r[2], const char a[2])
{
diff --git a/source/blender/blenlib/intern/noise.c b/source/blender/blenlib/intern/noise.c
index 229a06948c2..42b5ba28f5a 100644
--- a/source/blender/blenlib/intern/noise.c
+++ b/source/blender/blenlib/intern/noise.c
@@ -23,6 +23,7 @@
#include <math.h>
+#include "BLI_compiler_compat.h"
#include "BLI_noise.h"
/* local */
@@ -264,17 +265,17 @@ static const float hashvectf[768] = {
/* IMPROVED PERLIN NOISE */
/**************************/
-static float lerp(float t, float a, float b)
+BLI_INLINE float lerp(float t, float a, float b)
{
return (a + t * (b - a));
}
-static float npfade(float t)
+BLI_INLINE float npfade(float t)
{
return (t * t * t * (t * (t * 6.0f - 15.0f) + 10.0f));
}
-static float grad(int hash_val, float x, float y, float z)
+BLI_INLINE float grad(int hash_val, float x, float y, float z)
{
int h = hash_val & 15; /* CONVERT LO 4 BITS OF HASH CODE */
float u = h < 8 ? x : y; /* INTO 12 GRADIENT DIRECTIONS. */
diff --git a/source/blender/blenlib/intern/path_util.c b/source/blender/blenlib/intern/path_util.c
index 0bb2ba5859b..2f51b66725b 100644
--- a/source/blender/blenlib/intern/path_util.c
+++ b/source/blender/blenlib/intern/path_util.c
@@ -80,12 +80,12 @@ static bool BLI_path_is_abs(const char *name);
* or from dot if no digits.
* \param r_num_len: Optional to return number of digits found.
*/
-int BLI_stringdec(const char *string, char *head, char *tail, ushort *r_num_len)
+int BLI_path_sequence_decode(const char *string, char *head, char *tail, ushort *r_num_len)
{
uint nums = 0, nume = 0;
int i;
bool found_digit = false;
- const char *const lslash = BLI_last_slash(string);
+ const char *const lslash = BLI_path_slash_rfind(string);
const uint string_len = strlen(string);
const uint lslash_len = lslash != NULL ? (int)(lslash - string) : 0;
uint name_end = string_len;
@@ -151,7 +151,7 @@ int BLI_stringdec(const char *string, char *head, char *tail, ushort *r_num_len)
* Returns in area pointed to by string a string of the form "<head><pic><tail>", where pic
* is formatted as numlen digits with leading zeroes.
*/
-void BLI_stringenc(
+void BLI_path_sequence_encode(
char *string, const char *head, const char *tail, unsigned short numlen, int pic)
{
sprintf(string, "%s%.*d%s", head, numlen, MAX2(0, pic), tail);
@@ -170,7 +170,7 @@ static int BLI_path_unc_prefix_len(const char *path); /* defined below in same f
*
* \note \a path isn't protected for max string names...
*/
-void BLI_cleanup_path(const char *relabase, char *path)
+void BLI_path_normalize(const char *relabase, char *path)
{
ptrdiff_t a;
char *start, *eind;
@@ -263,10 +263,10 @@ void BLI_cleanup_path(const char *relabase, char *path)
/**
* Cleanup filepath ensuring a trailing slash.
*/
-void BLI_cleanup_dir(const char *relabase, char *dir)
+void BLI_path_normalize_dir(const char *relabase, char *dir)
{
- BLI_cleanup_path(relabase, dir);
- BLI_add_slash(dir);
+ BLI_path_normalize(relabase, dir);
+ BLI_path_slash_ensure(dir);
}
/**
@@ -381,8 +381,8 @@ bool BLI_path_make_safe(char *path)
}
#endif
- for (curr_slash = (char *)BLI_first_slash(curr_path); curr_slash;
- curr_slash = (char *)BLI_first_slash(curr_path)) {
+ for (curr_slash = (char *)BLI_path_slash_find(curr_path); curr_slash;
+ curr_slash = (char *)BLI_path_slash_find(curr_path)) {
const char backup = *curr_slash;
*curr_slash = '\0';
if (!skip_first && (*curr_path != '\0') && BLI_filename_make_safe(curr_path)) {
@@ -494,14 +494,14 @@ static void BLI_path_unc_to_short(wchar_t *unc)
}
}
-void BLI_cleanup_unc(char *path, int maxlen)
+void BLI_path_normalize_unc(char *path, int maxlen)
{
wchar_t *tmp_16 = alloc_utf16_from_8(path, 1);
- BLI_cleanup_unc_16(tmp_16);
+ BLI_path_normalize_unc_16(tmp_16);
conv_utf_16_to_8(tmp_16, path, maxlen);
}
-void BLI_cleanup_unc_16(wchar_t *path_16)
+void BLI_path_normalize_unc_16(wchar_t *path_16)
{
BLI_path_unc_to_short(path_16);
BLI_path_add_slash_to_share(path_16);
@@ -578,11 +578,11 @@ void BLI_path_rel(char *file, const char *relfile)
BLI_str_replace_char(file + BLI_path_unc_prefix_len(file), '\\', '/');
/* remove /./ which confuse the following slash counting... */
- BLI_cleanup_path(NULL, file);
- BLI_cleanup_path(NULL, temp);
+ BLI_path_normalize(NULL, file);
+ BLI_path_normalize(NULL, temp);
/* the last slash in the file indicates where the path part ends */
- lslash = BLI_last_slash(temp);
+ lslash = BLI_path_slash_rfind(temp);
if (lslash) {
/* find the prefix of the filename that is equal for both filenames.
@@ -701,13 +701,13 @@ bool BLI_path_suffix(char *string, size_t maxlen, const char *suffix, const char
* Replaces path with the path of its parent directory, returning true if
* it was able to find a parent directory within the pathname.
*/
-bool BLI_parent_dir(char *path)
+bool BLI_path_parent_dir(char *path)
{
const char parent_dir[] = {'.', '.', SEP, '\0'}; /* "../" or "..\\" */
char tmp[FILE_MAX + 4];
BLI_join_dirfile(tmp, sizeof(tmp), path, parent_dir);
- BLI_cleanup_path(NULL, tmp); /* does all the work of normalizing the path for us */
+ BLI_path_normalize(NULL, tmp); /* does all the work of normalizing the path for us */
if (!BLI_path_extension_check(tmp, parent_dir)) {
strcpy(path, tmp); /* We assume pardir is always shorter... */
@@ -722,12 +722,12 @@ bool BLI_parent_dir(char *path)
* Strips off nonexistent (or non-accessible) subdirectories from the end of *dir,
* leaving the path of the lowest-level directory that does exist and we can read.
*/
-bool BLI_parent_dir_until_exists(char *dir)
+bool BLI_path_parent_dir_until_exists(char *dir)
{
bool valid_path = true;
/* Loop as long as cur path is not a dir, and we can get a parent path. */
- while ((BLI_access(dir, R_OK) != 0) && (valid_path = BLI_parent_dir(dir))) {
+ while ((BLI_access(dir, R_OK) != 0) && (valid_path = BLI_path_parent_dir(dir))) {
/* pass */
}
return (valid_path && dir[0]);
@@ -777,7 +777,7 @@ static bool stringframe_chars(const char *path, int *char_start, int *char_end)
*/
static void ensure_digits(char *path, int digits)
{
- char *file = (char *)BLI_last_slash(path);
+ char *file = (char *)BLI_path_slash_rfind(path);
if (file == NULL) {
file = path;
@@ -852,7 +852,7 @@ bool BLI_path_frame_range(char *path, int sta, int end, int digits)
bool BLI_path_frame_get(char *path, int *r_frame, int *r_numdigits)
{
if (*path) {
- char *file = (char *)BLI_last_slash(path);
+ char *file = (char *)BLI_path_slash_rfind(path);
char *c;
int len, numdigits;
@@ -908,7 +908,7 @@ void BLI_path_frame_strip(char *path, char *r_ext)
return;
}
- char *file = (char *)BLI_last_slash(path);
+ char *file = (char *)BLI_path_slash_rfind(path);
char *c, *suffix;
int len;
int numdigits = 0;
@@ -1075,8 +1075,8 @@ bool BLI_path_abs(char *path, const char *basepath)
BLI_strncpy(base, basepath, sizeof(base));
/* file component is ignored, so don't bother with the trailing slash */
- BLI_cleanup_path(NULL, base);
- lslash = BLI_last_slash(base);
+ BLI_path_normalize(NULL, base);
+ lslash = BLI_path_slash_rfind(base);
BLI_str_replace_char(base + BLI_path_unc_prefix_len(base), '\\', '/');
if (lslash) {
@@ -1110,19 +1110,20 @@ bool BLI_path_abs(char *path, const char *basepath)
#endif
/* ensure this is after correcting for path switch */
- BLI_cleanup_path(NULL, path);
+ BLI_path_normalize(NULL, path);
return wasrelative;
}
/**
- * Expands path relative to the current working directory, if it was relative.
- * Returns true if such expansion was done.
+ * Checks for a relative path, expanding it relative to the current working directory.
+ * Returns true if the expansion was performed.
*
- * \note Should only be done with command line paths.
- * this is _not_ something blenders internal paths support like the "//" prefix
+ * \note Should only be called with command line paths.
+ * This is _not_ something Blender's internal paths support, instead they use the "//" prefix.
+ * In most cases #BLI_path_abs should be used instead.
*/
-bool BLI_path_cwd(char *path, const size_t maxlen)
+bool BLI_path_abs_from_cwd(char *path, const size_t maxlen)
{
#ifdef DEBUG_STRSIZE
memset(path, 0xff, sizeof(*path) * maxlen);
@@ -1363,7 +1364,7 @@ void BLI_make_file_string(const char *relabase, char *string, const char *dir, c
/* Get the file name, chop everything past the last slash (ie. the filename) */
strcpy(string, relabase);
- lslash = (char *)BLI_last_slash(string);
+ lslash = (char *)BLI_path_slash_rfind(string);
if (lslash) {
*(lslash + 1) = 0;
}
@@ -1418,7 +1419,7 @@ void BLI_make_file_string(const char *relabase, char *string, const char *dir, c
strcat(string, file);
/* Push all slashes to the system preferred direction */
- BLI_path_native_slash(string);
+ BLI_path_slash_native(string);
}
static bool path_extension_check_ex(const char *str,
@@ -1608,12 +1609,12 @@ bool BLI_path_extension_ensure(char *path, size_t maxlen, const char *ext)
return true;
}
-bool BLI_ensure_filename(char *filepath, size_t maxlen, const char *filename)
+bool BLI_path_filename_ensure(char *filepath, size_t maxlen, const char *filename)
{
#ifdef DEBUG_STRSIZE
memset(filepath, 0xff, sizeof(*filepath) * maxlen);
#endif
- char *c = (char *)BLI_last_slash(filepath);
+ char *c = (char *)BLI_path_slash_rfind(filepath);
if (!c || ((c - filepath) < maxlen - (strlen(filename) + 1))) {
strcpy(c ? &c[1] : filepath, filename);
return true;
@@ -1636,7 +1637,7 @@ void BLI_split_dirfile(
memset(dir, 0xff, sizeof(*dir) * dirlen);
memset(file, 0xff, sizeof(*file) * filelen);
#endif
- const char *lslash_str = BLI_last_slash(string);
+ const char *lslash_str = BLI_path_slash_rfind(string);
const size_t lslash = lslash_str ? (size_t)(lslash_str - string) + 1 : 0;
if (dir) {
@@ -1680,7 +1681,7 @@ const char *BLI_path_extension(const char *filepath)
if (extension == NULL) {
return NULL;
}
- if (BLI_first_slash(extension) != NULL) {
+ if (BLI_path_slash_find(extension) != NULL) {
/* There is a path separator in the extension, so the '.' was found in a
* directory component and not in the filename. */
return NULL;
@@ -1695,7 +1696,7 @@ void BLI_path_append(char *__restrict dst, const size_t maxlen, const char *__re
{
size_t dirlen = BLI_strnlen(dst, maxlen);
- /* inline BLI_add_slash */
+ /* inline BLI_path_slash_ensure */
if ((dirlen > 0) && (dst[dirlen - 1] != SEP)) {
dst[dirlen++] = SEP;
dst[dirlen] = '\0';
@@ -1738,7 +1739,7 @@ void BLI_join_dirfile(char *__restrict dst,
return; /* fills the path */
}
- /* inline BLI_add_slash */
+ /* inline BLI_path_slash_ensure */
if ((dirlen > 0) && !ELEM(dst[dirlen - 1], SEP, ALTSEP)) {
dst[dirlen++] = SEP;
dst[dirlen] = '\0';
@@ -1846,7 +1847,7 @@ size_t BLI_path_join(char *__restrict dst, const size_t dst_len, const char *pat
*/
const char *BLI_path_basename(const char *path)
{
- const char *const filename = BLI_last_slash(path);
+ const char *const filename = BLI_path_slash_rfind(path);
return filename ? filename + 1 : path;
}
@@ -1921,7 +1922,7 @@ bool BLI_path_name_at_index(const char *__restrict path,
/**
* Returns pointer to the leftmost path separator in string. Not actually used anywhere.
*/
-const char *BLI_first_slash(const char *string)
+const char *BLI_path_slash_find(const char *string)
{
const char *const ffslash = strchr(string, '/');
const char *const fbslash = strchr(string, '\\');
@@ -1939,7 +1940,7 @@ const char *BLI_first_slash(const char *string)
/**
* Returns pointer to the rightmost path separator in string.
*/
-const char *BLI_last_slash(const char *string)
+const char *BLI_path_slash_rfind(const char *string)
{
const char *const lfslash = strrchr(string, '/');
const char *const lbslash = strrchr(string, '\\');
@@ -1958,7 +1959,7 @@ const char *BLI_last_slash(const char *string)
* Appends a slash to string if there isn't one there already.
* Returns the new length of the string.
*/
-int BLI_add_slash(char *string)
+int BLI_path_slash_ensure(char *string)
{
int len = strlen(string);
if (len == 0 || string[len - 1] != SEP) {
@@ -1972,7 +1973,7 @@ int BLI_add_slash(char *string)
/**
* Removes the last slash and everything after it to the end of string, if there is one.
*/
-void BLI_del_slash(char *string)
+void BLI_path_slash_rstrip(char *string)
{
int len = strlen(string);
while (len) {
@@ -1989,7 +1990,7 @@ void BLI_del_slash(char *string)
/**
* Changes to the path separators to the native ones for this OS.
*/
-void BLI_path_native_slash(char *path)
+void BLI_path_slash_native(char *path)
{
#ifdef WIN32
if (path && BLI_strnlen(path, 3) > 2) {
diff --git a/source/blender/blenlib/intern/quadric.c b/source/blender/blenlib/intern/quadric.c
index 0a1ff9f8116..3ad1844cfe1 100644
--- a/source/blender/blenlib/intern/quadric.c
+++ b/source/blender/blenlib/intern/quadric.c
@@ -141,9 +141,14 @@ void BLI_quadric_mul(Quadric *a, const double scalar)
double BLI_quadric_evaluate(const Quadric *q, const double v[3])
{
- return ((q->a2 * v[0] * v[0]) + (q->ab * 2 * v[0] * v[1]) + (q->ac * 2 * v[0] * v[2]) +
- (q->ad * 2 * v[0]) + (q->b2 * v[1] * v[1]) + (q->bc * 2 * v[1] * v[2]) +
- (q->bd * 2 * v[1]) + (q->c2 * v[2] * v[2]) + (q->cd * 2 * v[2]) + (q->d2));
+ const double v00 = v[0] * v[0], v01 = v[0] * v[1], v02 = v[0] * v[2];
+ const double v11 = v[1] * v[1], v12 = v[1] * v[2];
+ const double v22 = v[2] * v[2];
+ return ((q->a2 * v00) + (q->ab * 2 * v01) + (q->ac * 2 * v02) + (q->ad * 2 * v[0]) + /* a */
+ (q->b2 * v11) + (q->bc * 2 * v12) + (q->bd * 2 * v[1]) + /* b */
+ (q->c2 * v22) + (q->cd * 2 * v[2]) + /* c */
+ (q->d2) /* d */
+ );
}
bool BLI_quadric_optimize(const Quadric *q, double v[3], const double epsilon)
diff --git a/source/blender/blenlib/intern/stack.c b/source/blender/blenlib/intern/stack.c
index e75d944c764..f2e8b352aab 100644
--- a/source/blender/blenlib/intern/stack.c
+++ b/source/blender/blenlib/intern/stack.c
@@ -10,7 +10,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
+ * along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
diff --git a/source/blender/blenlib/intern/storage.c b/source/blender/blenlib/intern/storage.c
index 7274a15661a..fbfb258693b 100644
--- a/source/blender/blenlib/intern/storage.c
+++ b/source/blender/blenlib/intern/storage.c
@@ -55,6 +55,7 @@
# include "utfconv.h"
# include <direct.h>
# include <io.h>
+# include <shobjidl_core.h>
# include <stdbool.h>
#else
# include <pwd.h>
@@ -226,14 +227,22 @@ size_t BLI_file_size(const char *path)
return stats.st_size;
}
+/* Return file attributes. Apple version of this function is defined in storage_apple.mm */
#ifndef __APPLE__
eFileAttributes BLI_file_attributes(const char *path)
{
int ret = 0;
# ifdef WIN32
- wchar_t wline[FILE_MAXDIR];
- BLI_strncpy_wchar_from_utf8(wline, path, ARRAY_SIZE(wline));
+
+ if (BLI_path_extension_check(path, ".lnk")) {
+ return FILE_ATTR_ALIAS;
+ }
+
+ WCHAR wline[FILE_MAXDIR];
+ if (conv_utf_8_to_16(path, wline, ARRAY_SIZE(wline)) != 0) {
+ return ret;
+ }
DWORD attr = GetFileAttributesW(wline);
if (attr & FILE_ATTRIBUTE_READONLY) {
ret |= FILE_ATTR_READONLY;
@@ -282,15 +291,52 @@ eFileAttributes BLI_file_attributes(const char *path)
}
#endif
-/**
- * Returns the target path of a file-based redirection, like Mac Alias or Win32 Shortcut file.
- */
+/* Return alias/shortcut file target. Apple version is defined in storage_apple.mm */
#ifndef __APPLE__
-bool BLI_file_alias_target(char UNUSED(target[FILE_MAXDIR]), const char *UNUSED(filepath))
+bool BLI_file_alias_target(char target[FILE_MAXDIR], const char *filepath)
{
- /* TODO: Find target in Win32 Shortcut - Shell Link (.lnk) file.
- * Format: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-shllink/ */
+# ifdef WIN32
+ if (!BLI_path_extension_check(filepath, ".lnk")) {
+ return false;
+ }
+
+ IShellLinkW *Shortcut = NULL;
+ bool success = false;
+ CoInitializeEx(NULL, COINIT_MULTITHREADED);
+
+ HRESULT hr = CoCreateInstance(
+ &CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER, &IID_IShellLinkW, (LPVOID *)&Shortcut);
+ if (SUCCEEDED(hr)) {
+ IPersistFile *PersistFile;
+ hr = Shortcut->lpVtbl->QueryInterface(Shortcut, &IID_IPersistFile, (LPVOID *)&PersistFile);
+ if (SUCCEEDED(hr)) {
+ WCHAR path_utf16[FILE_MAXDIR] = {0};
+ if (conv_utf_8_to_16(filepath, path_utf16, ARRAY_SIZE(path_utf16)) == 0) {
+ hr = PersistFile->lpVtbl->Load(PersistFile, path_utf16, STGM_READ);
+ if (SUCCEEDED(hr)) {
+ hr = Shortcut->lpVtbl->Resolve(Shortcut, 0, SLR_NO_UI | SLR_UPDATE);
+ if (SUCCEEDED(hr)) {
+ wchar_t target_utf16[FILE_MAXDIR] = {0};
+ hr = Shortcut->lpVtbl->GetPath(Shortcut, target_utf16, FILE_MAXDIR, NULL, 0);
+ if (SUCCEEDED(hr)) {
+ success = (conv_utf_16_to_8(target_utf16, target, FILE_MAXDIR) == 0);
+ }
+ }
+ PersistFile->lpVtbl->Release(PersistFile);
+ }
+ }
+ }
+ Shortcut->lpVtbl->Release(Shortcut);
+ }
+
+ return (success && target[0]);
+# endif
+
+# ifdef __linux__
+ UNUSED_VARS(target, filepath);
+ /* File-based redirection not supported. */
return false;
+# endif
}
#endif
@@ -316,7 +362,7 @@ int BLI_exists(const char *name)
* 2. after the C:\ when the path is the volume only
*/
if ((len >= 3) && (tmp_16[0] == L'\\') && (tmp_16[1] == L'\\')) {
- BLI_cleanup_unc_16(tmp_16);
+ BLI_path_normalize_unc_16(tmp_16);
}
if ((tmp_16[1] == L':') && (tmp_16[2] == L'\0')) {
diff --git a/source/blender/blenlib/intern/storage_apple.mm b/source/blender/blenlib/intern/storage_apple.mm
index 7cb8ca28e24..08d2cfdf4a4 100644
--- a/source/blender/blenlib/intern/storage_apple.mm
+++ b/source/blender/blenlib/intern/storage_apple.mm
@@ -30,7 +30,9 @@
bool BLI_file_alias_target(char targetpath[FILE_MAXDIR], const char *filepath)
{
+ /* clang-format off */
@autoreleasepool {
+ /* clang-format on */
NSError *error = nil;
NSURL *shortcutURL = [[NSURL alloc] initFileURLWithFileSystemRepresentation:filepath
isDirectory:NO
@@ -64,7 +66,9 @@ eFileAttributes BLI_file_attributes(const char *path)
{
int ret = 0;
+ /* clang-format off */
@autoreleasepool {
+ /* clang-format on */
NSURL *fileURL = [[NSURL alloc] initFileURLWithFileSystemRepresentation:path
isDirectory:NO
relativeToURL:nil];
@@ -87,7 +91,7 @@ eFileAttributes BLI_file_attributes(const char *path)
if (is_readable && !is_writable) {
ret |= FILE_ATTR_READONLY;
}
- if (is_readable) {
+ if (!is_readable) {
ret |= FILE_ATTR_SYSTEM;
}
}
diff --git a/source/blender/blenlib/intern/system.c b/source/blender/blenlib/intern/system.c
index 7d9ed2598a6..53db49aa59c 100644
--- a/source/blender/blenlib/intern/system.c
+++ b/source/blender/blenlib/intern/system.c
@@ -32,11 +32,8 @@
/* for backtrace and gethostname/GetComputerName */
#if defined(WIN32)
# include <intrin.h>
-# include <windows.h>
-# pragma warning(push)
-# pragma warning(disable : 4091)
-# include <dbghelp.h>
-# pragma warning(pop)
+
+# include "BLI_winstuff.h"
#else
# include <execinfo.h>
# include <unistd.h>
@@ -74,6 +71,8 @@ int BLI_cpu_support_sse2(void)
#endif
}
+/* Windows stackwalk lives in system_win32.c */
+#if !defined(_MSC_VER)
/**
* Write a backtrace into a file for systems which support it.
*/
@@ -81,9 +80,9 @@ void BLI_system_backtrace(FILE *fp)
{
/* ------------- */
/* Linux / Apple */
-#if defined(__linux__) || defined(__APPLE__)
+# if defined(__linux__) || defined(__APPLE__)
-# define SIZE 100
+# define SIZE 100
void *buffer[SIZE];
int nptrs;
char **strings;
@@ -98,48 +97,15 @@ void BLI_system_backtrace(FILE *fp)
}
free(strings);
-# undef SIZE
-
- /* -------- */
- /* Windows */
-#elif defined(_MSC_VER)
-
-# ifndef NDEBUG
-# define MAXSYMBOL 256
-# define SIZE 100
- unsigned short i;
- void *stack[SIZE];
- unsigned short nframes;
- SYMBOL_INFO *symbolinfo;
- HANDLE process;
-
- process = GetCurrentProcess();
-
- SymInitialize(process, NULL, TRUE);
-
- nframes = CaptureStackBackTrace(0, SIZE, stack, NULL);
- symbolinfo = MEM_callocN(sizeof(SYMBOL_INFO) + MAXSYMBOL * sizeof(char), "crash Symbol table");
- symbolinfo->MaxNameLen = MAXSYMBOL - 1;
- symbolinfo->SizeOfStruct = sizeof(SYMBOL_INFO);
-
- for (i = 0; i < nframes; i++) {
- SymFromAddr(process, (DWORD64)(stack[i]), 0, symbolinfo);
-
- fprintf(fp, "%u: %s - 0x%0llX\n", nframes - i - 1, symbolinfo->Name, symbolinfo->Address);
- }
-
- MEM_freeN(symbolinfo);
-# undef MAXSYMBOL
# undef SIZE
+
# else
- fprintf(fp, "Crash backtrace not supported on release builds\n");
-# endif /* NDEBUG */
-#else /* _MSC_VER */
/* ------------------ */
/* non msvc/osx/linux */
(void)fp;
-#endif
+# endif
}
+#endif
/* end BLI_system_backtrace */
/* NOTE: The code for CPU brand string is adopted from Cycles. */
diff --git a/source/blender/blenlib/intern/system_win32.c b/source/blender/blenlib/intern/system_win32.c
new file mode 100644
index 00000000000..d60f54ebe67
--- /dev/null
+++ b/source/blender/blenlib/intern/system_win32.c
@@ -0,0 +1,410 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bli
+ */
+#include <Windows.h>
+#include <stdio.h>
+
+#include <dbghelp.h>
+#include <shlwapi.h>
+#include <tlhelp32.h>
+
+#include "BLI_string.h"
+
+#include "MEM_guardedalloc.h"
+
+static EXCEPTION_POINTERS *current_exception = NULL;
+
+static const char *bli_windows_get_exception_description(const DWORD exceptioncode)
+{
+ switch (exceptioncode) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ return "EXCEPTION_ACCESS_VIOLATION";
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ return "EXCEPTION_ARRAY_BOUNDS_EXCEEDED";
+ case EXCEPTION_BREAKPOINT:
+ return "EXCEPTION_BREAKPOINT";
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ return "EXCEPTION_DATATYPE_MISALIGNMENT";
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ return "EXCEPTION_FLT_DENORMAL_OPERAND";
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ return "EXCEPTION_FLT_DIVIDE_BY_ZERO";
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ return "EXCEPTION_FLT_INEXACT_RESULT";
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ return "EXCEPTION_FLT_INVALID_OPERATION";
+ case EXCEPTION_FLT_OVERFLOW:
+ return "EXCEPTION_FLT_OVERFLOW";
+ case EXCEPTION_FLT_STACK_CHECK:
+ return "EXCEPTION_FLT_STACK_CHECK";
+ case EXCEPTION_FLT_UNDERFLOW:
+ return "EXCEPTION_FLT_UNDERFLOW";
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ return "EXCEPTION_ILLEGAL_INSTRUCTION";
+ case EXCEPTION_IN_PAGE_ERROR:
+ return "EXCEPTION_IN_PAGE_ERROR";
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ return "EXCEPTION_INT_DIVIDE_BY_ZERO";
+ case EXCEPTION_INT_OVERFLOW:
+ return "EXCEPTION_INT_OVERFLOW";
+ case EXCEPTION_INVALID_DISPOSITION:
+ return "EXCEPTION_INVALID_DISPOSITION";
+ case EXCEPTION_NONCONTINUABLE_EXCEPTION:
+ return "EXCEPTION_NONCONTINUABLE_EXCEPTION";
+ case EXCEPTION_PRIV_INSTRUCTION:
+ return "EXCEPTION_PRIV_INSTRUCTION";
+ case EXCEPTION_SINGLE_STEP:
+ return "EXCEPTION_SINGLE_STEP";
+ case EXCEPTION_STACK_OVERFLOW:
+ return "EXCEPTION_STACK_OVERFLOW";
+ default:
+ return "UNKNOWN EXCEPTION";
+ }
+}
+
+static void bli_windows_get_module_name(LPVOID address, PCHAR buffer, size_t size)
+{
+ HMODULE mod;
+ buffer[0] = 0;
+ if (GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, address, &mod)) {
+ if (GetModuleFileName(mod, buffer, size)) {
+ PathStripPath(buffer);
+ }
+ }
+}
+
+static void bli_windows_get_module_version(const char *file, char *buffer, size_t buffersize)
+{
+ buffer[0] = 0;
+ DWORD verHandle = 0;
+ UINT size = 0;
+ LPBYTE lpBuffer = NULL;
+ DWORD verSize = GetFileVersionInfoSize(file, &verHandle);
+ if (verSize != 0) {
+ LPSTR verData = (LPSTR)MEM_callocN(verSize, "crash module version");
+
+ if (GetFileVersionInfo(file, verHandle, verSize, verData)) {
+ if (VerQueryValue(verData, "\\", (VOID FAR * FAR *)&lpBuffer, &size)) {
+ if (size) {
+ VS_FIXEDFILEINFO *verInfo = (VS_FIXEDFILEINFO *)lpBuffer;
+ /* Magic value from
+ * https://docs.microsoft.com/en-us/windows/win32/api/verrsrc/ns-verrsrc-vs_fixedfileinfo
+ */
+ if (verInfo->dwSignature == 0xfeef04bd) {
+ BLI_snprintf(buffer,
+ buffersize,
+ "%d.%d.%d.%d",
+ (verInfo->dwFileVersionMS >> 16) & 0xffff,
+ (verInfo->dwFileVersionMS >> 0) & 0xffff,
+ (verInfo->dwFileVersionLS >> 16) & 0xffff,
+ (verInfo->dwFileVersionLS >> 0) & 0xffff);
+ }
+ }
+ }
+ }
+ MEM_freeN(verData);
+ }
+}
+
+static void bli_windows_system_backtrace_exception_record(FILE *fp, PEXCEPTION_RECORD record)
+{
+ char module[MAX_PATH];
+ fprintf(fp, "Exception Record:\n\n");
+ fprintf(fp,
+ "ExceptionCode : %s\n",
+ bli_windows_get_exception_description(record->ExceptionCode));
+ fprintf(fp, "Exception Address : 0x%p\n", record->ExceptionAddress);
+ bli_windows_get_module_name(record->ExceptionAddress, module, sizeof(module));
+ fprintf(fp, "Exception Module : %s\n", module);
+ fprintf(fp, "Exception Flags : 0x%.8x\n", record->ExceptionFlags);
+ fprintf(fp, "Exception Parameters : 0x%x\n", record->NumberParameters);
+ for (DWORD idx = 0; idx < record->NumberParameters; idx++) {
+ fprintf(fp, "\tParameters[%d] : 0x%p\n", idx, (LPVOID *)record->ExceptionInformation[idx]);
+ }
+ if (record->ExceptionRecord) {
+ fprintf(fp, "Nested ");
+ bli_windows_system_backtrace_exception_record(fp, record->ExceptionRecord);
+ }
+ fprintf(fp, "\n\n");
+}
+
+static bool BLI_windows_system_backtrace_run_trace(FILE *fp, HANDLE hThread, PCONTEXT context)
+{
+ const int max_symbol_length = 100;
+
+ bool result = true;
+
+ PSYMBOL_INFO symbolinfo = MEM_callocN(sizeof(SYMBOL_INFO) + max_symbol_length * sizeof(char),
+ "crash Symbol table");
+ symbolinfo->MaxNameLen = max_symbol_length - 1;
+ symbolinfo->SizeOfStruct = sizeof(SYMBOL_INFO);
+
+ STACKFRAME frame = {0};
+ frame.AddrPC.Offset = context->Rip;
+ frame.AddrPC.Mode = AddrModeFlat;
+ frame.AddrFrame.Offset = context->Rsp;
+ frame.AddrFrame.Mode = AddrModeFlat;
+ frame.AddrStack.Offset = context->Rsp;
+ frame.AddrStack.Mode = AddrModeFlat;
+
+ while (true) {
+ if (StackWalk64(IMAGE_FILE_MACHINE_AMD64,
+ GetCurrentProcess(),
+ hThread,
+ &frame,
+ context,
+ NULL,
+ SymFunctionTableAccess64,
+ SymGetModuleBase64,
+ 0)) {
+ if (frame.AddrPC.Offset) {
+ char module[MAX_PATH];
+
+ bli_windows_get_module_name((LPVOID)frame.AddrPC.Offset, module, sizeof(module));
+
+ if (SymFromAddr(GetCurrentProcess(), (DWORD64)(frame.AddrPC.Offset), 0, symbolinfo)) {
+ fprintf(fp, "%-20s:0x%p %s", module, (LPVOID)symbolinfo->Address, symbolinfo->Name);
+ IMAGEHLP_LINE lineinfo;
+ lineinfo.SizeOfStruct = sizeof(lineinfo);
+ DWORD displacement = 0;
+ if (SymGetLineFromAddr(
+ GetCurrentProcess(), (DWORD64)(frame.AddrPC.Offset), &displacement, &lineinfo)) {
+ fprintf(fp, " %s:%d", lineinfo.FileName, lineinfo.LineNumber);
+ }
+ fprintf(fp, "\n");
+ }
+ else {
+ fprintf(fp,
+ "%-20s:0x%p %s\n",
+ module,
+ (LPVOID)frame.AddrPC.Offset,
+ "Symbols not available");
+ result = false;
+ break;
+ }
+ }
+ else {
+ break;
+ }
+ }
+ else {
+ break;
+ }
+ }
+ MEM_freeN(symbolinfo);
+ fprintf(fp, "\n\n");
+ return result;
+}
+
+static bool bli_windows_system_backtrace_stack_thread(FILE *fp, HANDLE hThread)
+{
+ CONTEXT context = {0};
+ context.ContextFlags = CONTEXT_ALL;
+  /* GetThreadContext requires the thread to be in a suspended state, which is problematic for the
+   * currently running thread, so RtlCaptureContext is used instead to sidestep this. */
+ if (hThread != GetCurrentThread()) {
+ SuspendThread(hThread);
+ bool success = GetThreadContext(hThread, &context);
+ ResumeThread(hThread);
+ if (!success) {
+ fprintf(fp, "Cannot get thread context : 0x0%.8x\n", GetLastError());
+ return false;
+ }
+ }
+ else {
+ RtlCaptureContext(&context);
+ }
+ return BLI_windows_system_backtrace_run_trace(fp, hThread, &context);
+}
+
+static void bli_windows_system_backtrace_modules(FILE *fp)
+{
+ fprintf(fp, "Loaded Modules :\n");
+ HANDLE hModuleSnap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0);
+ if (hModuleSnap == INVALID_HANDLE_VALUE)
+ return;
+
+ MODULEENTRY32 me32;
+ me32.dwSize = sizeof(MODULEENTRY32);
+
+ if (!Module32First(hModuleSnap, &me32)) {
+ CloseHandle(hModuleSnap); // Must clean up the snapshot object!
+ fprintf(fp, " Error getting module list.\n");
+ return;
+ }
+
+ do {
+ if (me32.th32ProcessID == GetCurrentProcessId()) {
+ char version[MAX_PATH];
+ bli_windows_get_module_version(me32.szExePath, version, sizeof(version));
+
+ IMAGEHLP_MODULE64 m64;
+ m64.SizeOfStruct = sizeof(m64);
+ if (SymGetModuleInfo64(GetCurrentProcess(), (DWORD64)me32.modBaseAddr, &m64)) {
+ fprintf(fp,
+ "0x%p %-20s %s %s %s\n",
+ me32.modBaseAddr,
+ version,
+ me32.szModule,
+ m64.LoadedPdbName,
+ m64.PdbUnmatched ? "[unmatched]" : "");
+ }
+ else {
+ fprintf(fp, "0x%p %-20s %s\n", me32.modBaseAddr, version, me32.szModule);
+ }
+ }
+  } while (Module32Next(hModuleSnap, &me32));
+  CloseHandle(hModuleSnap);
+}
+
+static void bli_windows_system_backtrace_threads(FILE *fp)
+{
+ fprintf(fp, "Threads:\n");
+ HANDLE hThreadSnap = INVALID_HANDLE_VALUE;
+ THREADENTRY32 te32;
+
+ hThreadSnap = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+ if (hThreadSnap == INVALID_HANDLE_VALUE) {
+ fprintf(fp, "Unable to retrieve threads list.\n");
+ return;
+ }
+
+ te32.dwSize = sizeof(THREADENTRY32);
+
+ if (!Thread32First(hThreadSnap, &te32)) {
+ CloseHandle(hThreadSnap);
+ return;
+ }
+ do {
+ if (te32.th32OwnerProcessID == GetCurrentProcessId()) {
+ if (GetCurrentThreadId() != te32.th32ThreadID) {
+ fprintf(fp, "Thread : %.8x\n", te32.th32ThreadID);
+ HANDLE ht = OpenThread(THREAD_ALL_ACCESS, FALSE, te32.th32ThreadID);
+ bli_windows_system_backtrace_stack_thread(fp, ht);
+ CloseHandle(ht);
+ }
+ }
+ } while (Thread32Next(hThreadSnap, &te32));
+ CloseHandle(hThreadSnap);
+}
+
+static bool BLI_windows_system_backtrace_stack(FILE *fp)
+{
+ fprintf(fp, "Stack trace:\n");
+ /* If we are handling an exception use the context record from that. */
+ if (current_exception && current_exception->ExceptionRecord->ExceptionAddress) {
+    /* The backtrace code will write to the context record; to protect the original record from
+     * modification, give the backtrace a copy to work on. */
+ CONTEXT TempContext = *current_exception->ContextRecord;
+ return BLI_windows_system_backtrace_run_trace(fp, GetCurrentThread(), &TempContext);
+ }
+ else {
+ /* If there is no current exception or the address is not set, walk the current stack. */
+ return bli_windows_system_backtrace_stack_thread(fp, GetCurrentThread());
+ }
+}
+
+static bool bli_private_symbols_loaded()
+{
+ IMAGEHLP_MODULE64 m64;
+ m64.SizeOfStruct = sizeof(m64);
+ if (SymGetModuleInfo64(GetCurrentProcess(), (DWORD64)GetModuleHandle(NULL), &m64)) {
+ return m64.GlobalSymbols;
+ }
+ return false;
+}
+
+static void bli_load_symbols()
+{
+  /* If this is a developer station and the private pdb is already loaded, leave it be. */
+ if (bli_private_symbols_loaded()) {
+ return;
+ }
+
+ char pdb_file[MAX_PATH] = {0};
+
+ /* get the currently executing image */
+ if (GetModuleFileNameA(NULL, pdb_file, sizeof(pdb_file))) {
+ /* remove the filename */
+ PathRemoveFileSpecA(pdb_file);
+ /* append blender.pdb */
+ PathAppendA(pdb_file, "blender.pdb");
+ if (PathFileExistsA(pdb_file)) {
+ HMODULE mod = GetModuleHandle(NULL);
+ if (mod) {
+ WIN32_FILE_ATTRIBUTE_DATA file_data;
+ if (GetFileAttributesExA(pdb_file, GetFileExInfoStandard, &file_data)) {
+ /* SymInitialize will try to load symbols on its own, so we first must unload whatever it
+ * did trying to help */
+ SymUnloadModule64(GetCurrentProcess(), (DWORD64)mod);
+
+ DWORD64 module_base = SymLoadModule(GetCurrentProcess(),
+ NULL,
+ pdb_file,
+ NULL,
+ (DWORD64)mod,
+ (DWORD)file_data.nFileSizeLow);
+ if (module_base == 0) {
+ fprintf(stderr,
+ "Error loading symbols %s\n\terror:0x%.8x\n\tsize = %d\n\tbase=0x%p\n",
+ pdb_file,
+ GetLastError(),
+ file_data.nFileSizeLow,
+ (LPVOID)mod);
+ }
+ }
+ }
+ }
+ }
+}
+
+void BLI_system_backtrace(FILE *fp)
+{
+ SymInitialize(GetCurrentProcess(), NULL, TRUE);
+ bli_load_symbols();
+ if (current_exception) {
+ bli_windows_system_backtrace_exception_record(fp, current_exception->ExceptionRecord);
+ }
+ if (BLI_windows_system_backtrace_stack(fp)) {
+    /* When the Blender symbols are missing, the stack traces will be unreliable,
+     * so only run this if the previous step completed successfully. */
+ bli_windows_system_backtrace_threads(fp);
+ }
+ bli_windows_system_backtrace_modules(fp);
+  fputc(0, fp); /* Give ourselves a nice zero terminator for later on. */
+}
+
+void BLI_windows_handle_exception(EXCEPTION_POINTERS *exception)
+{
+ current_exception = exception;
+ if (current_exception) {
+ fprintf(stderr,
+ "Error : %s\n",
+ bli_windows_get_exception_description(exception->ExceptionRecord->ExceptionCode));
+ fflush(stderr);
+
+ LPVOID address = exception->ExceptionRecord->ExceptionAddress;
+ fprintf(stderr, "Address : 0x%p\n", address);
+
+ CHAR modulename[MAX_PATH];
+ bli_windows_get_module_name(address, modulename, sizeof(modulename));
+ fprintf(stderr, "Module : %s\n", modulename);
+ fprintf(stderr, "Thread : %.8x\n", GetCurrentThreadId());
+ }
+ fflush(stderr);
+}
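The two entry points above are meant to work together: BLI_windows_handle_exception() records the exception and prints a short summary, and BLI_system_backtrace() then writes the full report (exception record, stack traces, threads, modules). As a hedged sketch only, assuming the declarations from the BLI headers and an illustrative log path (the real call sites are outside this diff), the wiring would look roughly like:

static LONG WINAPI blender_crash_filter(EXCEPTION_POINTERS *exception)
{
  /* Remember the exception so BLI_system_backtrace() can use its context record. */
  BLI_windows_handle_exception(exception);

  FILE *fp = fopen("blender_crash.txt", "w"); /* Hypothetical log path. */
  if (fp) {
    BLI_system_backtrace(fp);
    fclose(fp);
  }
  return EXCEPTION_EXECUTE_HANDLER;
}

/* Installed once at startup: SetUnhandledExceptionFilter(blender_crash_filter); */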
diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c
deleted file mode 100644
index 293275c402d..00000000000
--- a/source/blender/blenlib/intern/task.c
+++ /dev/null
@@ -1,1930 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-/** \file
- * \ingroup bli
- *
- * A generic task system which can be used for any task based subsystem.
- */
-
-#include <stdlib.h>
-
-#include "MEM_guardedalloc.h"
-
-#include "DNA_listBase.h"
-
-#include "BLI_listbase.h"
-#include "BLI_math.h"
-#include "BLI_mempool.h"
-#include "BLI_task.h"
-#include "BLI_threads.h"
-
-#include "atomic_ops.h"
-
-/* Define this to enable some detailed statistic print. */
-#undef DEBUG_STATS
-
-/* Types */
-
-/* Number of per-thread pre-allocated tasks.
- *
- * For more details see description of TaskMemPool.
- */
-#define MEMPOOL_SIZE 256
-
-/* Number of tasks which are pushed directly to local thread queue.
- *
- * This allows thread to fetch next task without locking the whole queue.
- */
-#define LOCAL_QUEUE_SIZE 1
-
-/* Number of tasks which are allowed to be scheduled in a delayed manner.
- *
- * This allows using fewer locks per graph node children schedule. More details
- * can be found at TaskThreadLocalStorage::do_delayed_push.
- */
-#define DELAYED_QUEUE_SIZE 4096
-
-#ifndef NDEBUG
-# define ASSERT_THREAD_ID(scheduler, thread_id) \
- do { \
- if (!BLI_thread_is_main()) { \
- TaskThread *thread = pthread_getspecific(scheduler->tls_id_key); \
- if (thread == NULL) { \
- BLI_assert(thread_id == 0); \
- } \
- else { \
- BLI_assert(thread_id == thread->id); \
- } \
- } \
- else { \
- BLI_assert(thread_id == 0); \
- } \
- } while (false)
-#else
-# define ASSERT_THREAD_ID(scheduler, thread_id)
-#endif
-
-typedef struct Task {
- struct Task *next, *prev;
-
- TaskRunFunction run;
- void *taskdata;
- bool free_taskdata;
- TaskFreeFunction freedata;
- TaskPool *pool;
-} Task;
-
-/* This is a per-thread storage of pre-allocated tasks.
- *
- * The idea behind this is simple: reduce amount of malloc() calls when pushing
- * new task to the pool. This is done by keeping memory from the tasks which
- * were finished already, so instead of freeing that memory we put it to the
- * pool for the later re-use.
- *
- * The tricky part here is to avoid any inter-thread synchronization, hence no
- * lock must exist around this pool. The pool will become an owner of the pointer
- * from freed task, and only corresponding thread will be able to use this pool
- * (no memory stealing and such).
- *
- * This leads to the following use of the pool:
- *
- * - task_push() should provide proper thread ID from which the task is being
- * pushed from.
- *
- * - Task allocation function which checks the corresponding memory pool and, if there
- *   is any memory in there, marks it as re-used, removes it from the pool
- *   and uses that memory for the new task.
- *
- * At this moment task queue owns the memory.
- *
- * - When task is done and task_free() is called the memory will be put to the
- * pool which corresponds to a thread which handled the task.
- */
-typedef struct TaskMemPool {
- /* Number of pre-allocated tasks in the pool. */
- int num_tasks;
- /* Pre-allocated task memory pointers. */
- Task *tasks[MEMPOOL_SIZE];
-} TaskMemPool;
-
-#ifdef DEBUG_STATS
-typedef struct TaskMemPoolStats {
- /* Number of allocations. */
- int num_alloc;
- /* Number of avoided allocations (pointer was re-used from the pool). */
- int num_reuse;
-  /* Number of allocations discarded due to pool saturation. */
- int num_discard;
-} TaskMemPoolStats;
-#endif
-
-typedef struct TaskThreadLocalStorage {
- /* Memory pool for faster task allocation.
- * The idea is to re-use memory of finished/discarded tasks by this thread.
- */
- TaskMemPool task_mempool;
-
- /* Local queue keeps thread alive by keeping small amount of tasks ready
- * to be picked up without causing global thread locks for synchronization.
- */
- int num_local_queue;
- Task *local_queue[LOCAL_QUEUE_SIZE];
-
- /* Thread can be marked for delayed tasks push. This is helpful when it's
-   * known that lots of subsequent task pushes will happen from the same thread
- * without "interrupting" for task execution.
- *
- * We try to accumulate as much tasks as possible in a local queue without
- * any locks first, and then we push all of them into a scheduler's queue
- * from within a single mutex lock.
- */
- bool do_delayed_push;
- int num_delayed_queue;
- Task *delayed_queue[DELAYED_QUEUE_SIZE];
-} TaskThreadLocalStorage;
-
-struct TaskPool {
- TaskScheduler *scheduler;
-
- volatile size_t num;
- ThreadMutex num_mutex;
- ThreadCondition num_cond;
-
- void *userdata;
- ThreadMutex user_mutex;
-
- volatile bool do_cancel;
- volatile bool do_work;
-
- volatile bool is_suspended;
- bool start_suspended;
- ListBase suspended_queue;
- size_t num_suspended;
-
- /* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
- * has to use its special background fallback thread in case we are in
- * single-threaded situation.
- */
- bool run_in_background;
-
- /* This is a task scheduler's ID of a thread at which pool was constructed.
- * It will be used to access task TLS.
- */
- int thread_id;
-
- /* For the pools which are created from non-main thread which is not a
- * scheduler worker thread we can't re-use any of scheduler's threads TLS
- * and have to use our own one.
- */
- bool use_local_tls;
- TaskThreadLocalStorage local_tls;
-#ifndef NDEBUG
- pthread_t creator_thread_id;
-#endif
-
-#ifdef DEBUG_STATS
- TaskMemPoolStats *mempool_stats;
-#endif
-};
-
-struct TaskScheduler {
- pthread_t *threads;
- struct TaskThread *task_threads;
- int num_threads;
- bool background_thread_only;
-
- ListBase queue;
- ThreadMutex queue_mutex;
- ThreadCondition queue_cond;
-
- ThreadMutex startup_mutex;
- ThreadCondition startup_cond;
- volatile int num_thread_started;
-
- volatile bool do_exit;
-
- /* NOTE: In pthread's TLS we store the whole TaskThread structure. */
- pthread_key_t tls_id_key;
-};
-
-typedef struct TaskThread {
- TaskScheduler *scheduler;
- int id;
- TaskThreadLocalStorage tls;
-} TaskThread;
-
-/* Helper */
-BLI_INLINE void task_data_free(Task *task, const int thread_id)
-{
- if (task->free_taskdata) {
- if (task->freedata) {
- task->freedata(task->pool, task->taskdata, thread_id);
- }
- else {
- MEM_freeN(task->taskdata);
- }
- }
-}
-
-BLI_INLINE void initialize_task_tls(TaskThreadLocalStorage *tls)
-{
- memset(tls, 0, sizeof(TaskThreadLocalStorage));
-}
-
-BLI_INLINE TaskThreadLocalStorage *get_task_tls(TaskPool *pool, const int thread_id)
-{
- TaskScheduler *scheduler = pool->scheduler;
- BLI_assert(thread_id >= 0);
- BLI_assert(thread_id <= scheduler->num_threads);
- if (pool->use_local_tls && thread_id == 0) {
- BLI_assert(pool->thread_id == 0);
- BLI_assert(!BLI_thread_is_main());
- BLI_assert(pthread_equal(pthread_self(), pool->creator_thread_id));
- return &pool->local_tls;
- }
- if (thread_id == 0) {
- BLI_assert(BLI_thread_is_main());
- return &scheduler->task_threads[pool->thread_id].tls;
- }
- return &scheduler->task_threads[thread_id].tls;
-}
-
-BLI_INLINE void free_task_tls(TaskThreadLocalStorage *tls)
-{
- TaskMemPool *task_mempool = &tls->task_mempool;
- for (int i = 0; i < task_mempool->num_tasks; i++) {
- MEM_freeN(task_mempool->tasks[i]);
- }
-}
-
-static Task *task_alloc(TaskPool *pool, const int thread_id)
-{
- BLI_assert(thread_id <= pool->scheduler->num_threads);
- if (thread_id != -1) {
- BLI_assert(thread_id >= 0);
- BLI_assert(thread_id <= pool->scheduler->num_threads);
- TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
- TaskMemPool *task_mempool = &tls->task_mempool;
- /* Try to re-use task memory from a thread local storage. */
- if (task_mempool->num_tasks > 0) {
- --task_mempool->num_tasks;
- /* Success! We've just avoided task allocation. */
-#ifdef DEBUG_STATS
- pool->mempool_stats[thread_id].num_reuse++;
-#endif
- return task_mempool->tasks[task_mempool->num_tasks];
- }
- /* We are doomed to allocate new task data. */
-#ifdef DEBUG_STATS
- pool->mempool_stats[thread_id].num_alloc++;
-#endif
- }
- return MEM_mallocN(sizeof(Task), "New task");
-}
-
-static void task_free(TaskPool *pool, Task *task, const int thread_id)
-{
- task_data_free(task, thread_id);
- BLI_assert(thread_id >= 0);
- BLI_assert(thread_id <= pool->scheduler->num_threads);
- if (thread_id == 0) {
- BLI_assert(pool->use_local_tls || BLI_thread_is_main());
- }
- TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
- TaskMemPool *task_mempool = &tls->task_mempool;
- if (task_mempool->num_tasks < MEMPOOL_SIZE - 1) {
- /* Successfully allowed the task to be re-used later. */
- task_mempool->tasks[task_mempool->num_tasks] = task;
- ++task_mempool->num_tasks;
- }
- else {
- /* Local storage saturated, no other way than just discard
- * the memory.
- *
- * TODO(sergey): We can perhaps store such pointer in a global
- * scheduler pool, maybe it'll be faster than discarding and
- * allocating again.
- */
- MEM_freeN(task);
-#ifdef DEBUG_STATS
- pool->mempool_stats[thread_id].num_discard++;
-#endif
- }
-}
-
-/* Task Scheduler */
-
-static void task_pool_num_decrease(TaskPool *pool, size_t done)
-{
- BLI_mutex_lock(&pool->num_mutex);
-
- BLI_assert(pool->num >= done);
-
- pool->num -= done;
-
- if (pool->num == 0) {
- BLI_condition_notify_all(&pool->num_cond);
- }
-
- BLI_mutex_unlock(&pool->num_mutex);
-}
-
-static void task_pool_num_increase(TaskPool *pool, size_t new)
-{
- BLI_mutex_lock(&pool->num_mutex);
-
- pool->num += new;
- BLI_condition_notify_all(&pool->num_cond);
-
- BLI_mutex_unlock(&pool->num_mutex);
-}
-
-static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
-{
- bool found_task = false;
- BLI_mutex_lock(&scheduler->queue_mutex);
-
- while (!scheduler->queue.first && !scheduler->do_exit) {
- BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
- }
-
- do {
- Task *current_task;
-
- /* Assuming we can only have a void queue in 'exit' case here seems logical
- * (we should only be here after our worker thread has been woken up from a
- * condition_wait(), which only happens after a new task was added to the queue),
- * but it is wrong.
- * Waiting on condition may wake up the thread even if condition is not signaled
- * (spurious wake-ups), and some race condition may also empty the queue **after**
- * condition has been signaled, but **before** awoken thread reaches this point...
- * See http://stackoverflow.com/questions/8594591
- *
- * So we only abort here if do_exit is set.
- */
- if (scheduler->do_exit) {
- BLI_mutex_unlock(&scheduler->queue_mutex);
- return false;
- }
-
- for (current_task = scheduler->queue.first; current_task != NULL;
- current_task = current_task->next) {
- TaskPool *pool = current_task->pool;
-
- if (scheduler->background_thread_only && !pool->run_in_background) {
- continue;
- }
-
- *task = current_task;
- found_task = true;
- BLI_remlink(&scheduler->queue, *task);
- break;
- }
- if (!found_task) {
- BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
- }
- } while (!found_task);
-
- BLI_mutex_unlock(&scheduler->queue_mutex);
-
- return true;
-}
-
-BLI_INLINE void handle_local_queue(TaskThreadLocalStorage *tls, const int thread_id)
-{
- BLI_assert(!tls->do_delayed_push);
- while (tls->num_local_queue > 0) {
- /* We pop task from queue before handling it so handler of the task can
- * push next job to the local queue.
- */
- tls->num_local_queue--;
- Task *local_task = tls->local_queue[tls->num_local_queue];
- /* TODO(sergey): Double-check work_and_wait() doesn't handle other's
- * pool tasks.
- */
- TaskPool *local_pool = local_task->pool;
- local_task->run(local_pool, local_task->taskdata, thread_id);
- task_free(local_pool, local_task, thread_id);
- }
- BLI_assert(!tls->do_delayed_push);
-}
-
-static void *task_scheduler_thread_run(void *thread_p)
-{
- TaskThread *thread = (TaskThread *)thread_p;
- TaskThreadLocalStorage *tls = &thread->tls;
- TaskScheduler *scheduler = thread->scheduler;
- int thread_id = thread->id;
- Task *task;
-
- pthread_setspecific(scheduler->tls_id_key, thread);
-
- /* signal the main thread when all threads have started */
- BLI_mutex_lock(&scheduler->startup_mutex);
- scheduler->num_thread_started++;
- if (scheduler->num_thread_started == scheduler->num_threads) {
- BLI_condition_notify_one(&scheduler->startup_cond);
- }
- BLI_mutex_unlock(&scheduler->startup_mutex);
-
- /* keep popping off tasks */
- while (task_scheduler_thread_wait_pop(scheduler, &task)) {
- TaskPool *pool = task->pool;
-
- /* run task */
- BLI_assert(!tls->do_delayed_push);
- task->run(pool, task->taskdata, thread_id);
- BLI_assert(!tls->do_delayed_push);
-
- /* delete task */
- task_free(pool, task, thread_id);
-
- /* Handle all tasks from local queue. */
- handle_local_queue(tls, thread_id);
-
- /* notify pool task was done */
- task_pool_num_decrease(pool, 1);
- }
-
- return NULL;
-}
-
-TaskScheduler *BLI_task_scheduler_create(int num_threads)
-{
- TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");
-
- /* multiple places can use this task scheduler, sharing the same
- * threads, so we keep track of the number of users. */
- scheduler->do_exit = false;
-
- BLI_listbase_clear(&scheduler->queue);
- BLI_mutex_init(&scheduler->queue_mutex);
- BLI_condition_init(&scheduler->queue_cond);
-
- BLI_mutex_init(&scheduler->startup_mutex);
- BLI_condition_init(&scheduler->startup_cond);
- scheduler->num_thread_started = 0;
-
- if (num_threads == 0) {
- /* automatic number of threads will be main thread + num cores */
- num_threads = BLI_system_thread_count();
- }
-
- /* main thread will also work, so we count it too */
- num_threads -= 1;
-
- /* Add background-only thread if needed. */
- if (num_threads == 0) {
- scheduler->background_thread_only = true;
- num_threads = 1;
- }
-
- scheduler->task_threads = MEM_mallocN(sizeof(TaskThread) * (num_threads + 1),
- "TaskScheduler task threads");
-
- /* Initialize TLS for main thread. */
- initialize_task_tls(&scheduler->task_threads[0].tls);
-
- pthread_key_create(&scheduler->tls_id_key, NULL);
-
- /* launch threads that will be waiting for work */
- if (num_threads > 0) {
- int i;
-
- scheduler->num_threads = num_threads;
- scheduler->threads = MEM_callocN(sizeof(pthread_t) * num_threads, "TaskScheduler threads");
-
- for (i = 0; i < num_threads; i++) {
- TaskThread *thread = &scheduler->task_threads[i + 1];
- thread->scheduler = scheduler;
- thread->id = i + 1;
- initialize_task_tls(&thread->tls);
-
- if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
- fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
- }
- }
- }
-
- /* Wait for all worker threads to start before returning to caller to prevent the case where
- * threads are still starting and pthread_join is called, which causes a deadlock on pthreads4w.
- */
- BLI_mutex_lock(&scheduler->startup_mutex);
- /* NOTE: Use loop here to avoid false-positive everything-is-ready caused by spontaneous thread
- * wake up. */
- while (scheduler->num_thread_started != num_threads) {
- BLI_condition_wait(&scheduler->startup_cond, &scheduler->startup_mutex);
- }
- BLI_mutex_unlock(&scheduler->startup_mutex);
-
- return scheduler;
-}
-
-void BLI_task_scheduler_free(TaskScheduler *scheduler)
-{
- Task *task;
-
- /* stop all waiting threads */
- BLI_mutex_lock(&scheduler->queue_mutex);
- scheduler->do_exit = true;
- BLI_condition_notify_all(&scheduler->queue_cond);
- BLI_mutex_unlock(&scheduler->queue_mutex);
-
- pthread_key_delete(scheduler->tls_id_key);
-
- /* delete threads */
- if (scheduler->threads) {
- int i;
-
- for (i = 0; i < scheduler->num_threads; i++) {
- if (pthread_join(scheduler->threads[i], NULL) != 0) {
- fprintf(stderr, "TaskScheduler failed to join thread %d/%d\n", i, scheduler->num_threads);
- }
- }
-
- MEM_freeN(scheduler->threads);
- }
-
- /* Delete task thread data */
- if (scheduler->task_threads) {
- for (int i = 0; i < scheduler->num_threads + 1; i++) {
- TaskThreadLocalStorage *tls = &scheduler->task_threads[i].tls;
- free_task_tls(tls);
- }
-
- MEM_freeN(scheduler->task_threads);
- }
-
- /* delete leftover tasks */
- for (task = scheduler->queue.first; task; task = task->next) {
- task_data_free(task, 0);
- }
- BLI_freelistN(&scheduler->queue);
-
- /* delete mutex/condition */
- BLI_mutex_end(&scheduler->queue_mutex);
- BLI_condition_end(&scheduler->queue_cond);
- BLI_mutex_end(&scheduler->startup_mutex);
- BLI_condition_end(&scheduler->startup_cond);
-
- MEM_freeN(scheduler);
-}
-
-int BLI_task_scheduler_num_threads(TaskScheduler *scheduler)
-{
- return scheduler->num_threads + 1;
-}
-
-static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
-{
- task_pool_num_increase(task->pool, 1);
-
- /* add task to queue */
- BLI_mutex_lock(&scheduler->queue_mutex);
-
- if (priority == TASK_PRIORITY_HIGH) {
- BLI_addhead(&scheduler->queue, task);
- }
- else {
- BLI_addtail(&scheduler->queue, task);
- }
-
- BLI_condition_notify_one(&scheduler->queue_cond);
- BLI_mutex_unlock(&scheduler->queue_mutex);
-}
-
-static void task_scheduler_push_all(TaskScheduler *scheduler,
- TaskPool *pool,
- Task **tasks,
- int num_tasks)
-{
- if (num_tasks == 0) {
- return;
- }
-
- task_pool_num_increase(pool, num_tasks);
-
- BLI_mutex_lock(&scheduler->queue_mutex);
-
- for (int i = 0; i < num_tasks; i++) {
- BLI_addhead(&scheduler->queue, tasks[i]);
- }
-
- BLI_condition_notify_all(&scheduler->queue_cond);
- BLI_mutex_unlock(&scheduler->queue_mutex);
-}
-
-static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
-{
- Task *task, *nexttask;
- size_t done = 0;
-
- BLI_mutex_lock(&scheduler->queue_mutex);
-
- /* free all tasks from this pool from the queue */
- for (task = scheduler->queue.first; task; task = nexttask) {
- nexttask = task->next;
-
- if (task->pool == pool) {
- task_data_free(task, pool->thread_id);
- BLI_freelinkN(&scheduler->queue, task);
-
- done++;
- }
- }
-
- BLI_mutex_unlock(&scheduler->queue_mutex);
-
- /* notify done */
- task_pool_num_decrease(pool, done);
-}
-
-/* Task Pool */
-
-static TaskPool *task_pool_create_ex(TaskScheduler *scheduler,
- void *userdata,
- const bool is_background,
- const bool is_suspended)
-{
- TaskPool *pool = MEM_mallocN(sizeof(TaskPool), "TaskPool");
-
-#ifndef NDEBUG
- /* Assert we do not try to create a background pool from some parent task -
- * those only work OK from main thread. */
- if (is_background) {
- const pthread_t thread_id = pthread_self();
- int i = scheduler->num_threads;
-
- while (i--) {
- BLI_assert(!pthread_equal(scheduler->threads[i], thread_id));
- }
- }
-#endif
-
- pool->scheduler = scheduler;
- pool->num = 0;
- pool->do_cancel = false;
- pool->do_work = false;
- pool->is_suspended = is_suspended;
- pool->start_suspended = is_suspended;
- pool->num_suspended = 0;
- pool->suspended_queue.first = pool->suspended_queue.last = NULL;
- pool->run_in_background = is_background;
- pool->use_local_tls = false;
-
- BLI_mutex_init(&pool->num_mutex);
- BLI_condition_init(&pool->num_cond);
-
- pool->userdata = userdata;
- BLI_mutex_init(&pool->user_mutex);
-
- if (BLI_thread_is_main()) {
- pool->thread_id = 0;
- }
- else {
- TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);
- if (thread == NULL) {
- /* NOTE: Task pool is created from non-main thread which is not
- * managed by the task scheduler. We identify ourselves as thread ID
- * 0 but we do not use scheduler's TLS storage and use our own
- * instead to avoid any possible threading conflicts.
- */
- pool->thread_id = 0;
- pool->use_local_tls = true;
-#ifndef NDEBUG
- pool->creator_thread_id = pthread_self();
-#endif
- initialize_task_tls(&pool->local_tls);
- }
- else {
- pool->thread_id = thread->id;
- }
- }
-
-#ifdef DEBUG_STATS
- pool->mempool_stats = MEM_callocN(sizeof(*pool->mempool_stats) * (scheduler->num_threads + 1),
- "per-taskpool mempool stats");
-#endif
-
- /* Ensure malloc will go fine from threads,
- *
- * This is needed because we could be in main thread here
- * and malloc could be non-thread safe at this point because
- * no other jobs are running.
- */
- BLI_threaded_malloc_begin();
-
- return pool;
-}
-
-/**
- * Create a normal task pool. Tasks will be executed as soon as they are added.
- */
-TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
-{
- return task_pool_create_ex(scheduler, userdata, false, false);
-}
-
-/**
- * Create a background task pool.
- * In multi-threaded context, there are no differences from #BLI_task_pool_create(),
- * but in the single-threaded case it is ensured to have at least one worker thread to run on
- * (i.e. you don't have to call #BLI_task_pool_work_and_wait
- * on it to be sure it will be processed).
- *
- * \note Background pools are non-recursive
- * (that is, you should not create other background pools in tasks assigned to a background pool,
- * they could end up never being executed, since the 'fallback' background thread is already
- * busy with parent task in single-threaded context).
- */
-TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler, void *userdata)
-{
- return task_pool_create_ex(scheduler, userdata, true, false);
-}
-
-/**
- * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
- * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
- * overhead when pushing a huge amount of small initial tasks from the main thread.
- */
-TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler, void *userdata)
-{
- return task_pool_create_ex(scheduler, userdata, false, true);
-}
-
-void BLI_task_pool_free(TaskPool *pool)
-{
- BLI_task_pool_cancel(pool);
-
- BLI_mutex_end(&pool->num_mutex);
- BLI_condition_end(&pool->num_cond);
-
- BLI_mutex_end(&pool->user_mutex);
-
-#ifdef DEBUG_STATS
- printf("Thread ID Allocated Reused Discarded\n");
- for (int i = 0; i < pool->scheduler->num_threads + 1; i++) {
- printf("%02d %05d %05d %05d\n",
- i,
- pool->mempool_stats[i].num_alloc,
- pool->mempool_stats[i].num_reuse,
- pool->mempool_stats[i].num_discard);
- }
- MEM_freeN(pool->mempool_stats);
-#endif
-
- if (pool->use_local_tls) {
- free_task_tls(&pool->local_tls);
- }
-
- MEM_freeN(pool);
-
- BLI_threaded_malloc_end();
-}
-
-BLI_INLINE bool task_can_use_local_queues(TaskPool *pool, int thread_id)
-{
- return (thread_id != -1 && (thread_id != pool->thread_id || pool->do_work));
-}
-
-static void task_pool_push(TaskPool *pool,
- TaskRunFunction run,
- void *taskdata,
- bool free_taskdata,
- TaskFreeFunction freedata,
- TaskPriority priority,
- int thread_id)
-{
-  /* Allocate task and fill its properties. */
- Task *task = task_alloc(pool, thread_id);
- task->run = run;
- task->taskdata = taskdata;
- task->free_taskdata = free_taskdata;
- task->freedata = freedata;
- task->pool = pool;
-  /* For suspended pools we put everything to a global queue first
- * and exit as soon as possible.
- *
-   * These tasks will be moved to actual execution when the pool is
- * activated by work_and_wait().
- */
- if (pool->is_suspended) {
- BLI_addhead(&pool->suspended_queue, task);
- atomic_fetch_and_add_z(&pool->num_suspended, 1);
- return;
- }
- /* Populate to any local queue first, this is cheapest push ever. */
- if (task_can_use_local_queues(pool, thread_id)) {
- ASSERT_THREAD_ID(pool->scheduler, thread_id);
- TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
- /* Try to push to a local execution queue.
- * These tasks will be picked up next.
- */
- if (tls->num_local_queue < LOCAL_QUEUE_SIZE) {
- tls->local_queue[tls->num_local_queue] = task;
- tls->num_local_queue++;
- return;
- }
- /* If we are in the delayed tasks push mode, we push tasks to a
- * temporary local queue first without any locks, and then move them
- * to global execution queue with a single lock.
- */
- if (tls->do_delayed_push && tls->num_delayed_queue < DELAYED_QUEUE_SIZE) {
- tls->delayed_queue[tls->num_delayed_queue] = task;
- tls->num_delayed_queue++;
- return;
- }
- }
- /* Do push to a global execution pool, slowest possible method,
- * causes quite reasonable amount of threading overhead.
- */
- task_scheduler_push(pool->scheduler, task, priority);
-}
-
-void BLI_task_pool_push_ex(TaskPool *pool,
- TaskRunFunction run,
- void *taskdata,
- bool free_taskdata,
- TaskFreeFunction freedata,
- TaskPriority priority)
-{
- task_pool_push(pool, run, taskdata, free_taskdata, freedata, priority, -1);
-}
-
-void BLI_task_pool_push(
- TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority)
-{
- BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority);
-}
-
-void BLI_task_pool_push_from_thread(TaskPool *pool,
- TaskRunFunction run,
- void *taskdata,
- bool free_taskdata,
- TaskPriority priority,
- int thread_id)
-{
- task_pool_push(pool, run, taskdata, free_taskdata, NULL, priority, thread_id);
-}
-
-void BLI_task_pool_work_and_wait(TaskPool *pool)
-{
- TaskThreadLocalStorage *tls = get_task_tls(pool, pool->thread_id);
- TaskScheduler *scheduler = pool->scheduler;
-
- if (atomic_fetch_and_and_uint8((uint8_t *)&pool->is_suspended, 0)) {
- if (pool->num_suspended) {
- task_pool_num_increase(pool, pool->num_suspended);
- BLI_mutex_lock(&scheduler->queue_mutex);
-
- BLI_movelisttolist(&scheduler->queue, &pool->suspended_queue);
-
- BLI_condition_notify_all(&scheduler->queue_cond);
- BLI_mutex_unlock(&scheduler->queue_mutex);
-
- pool->num_suspended = 0;
- }
- }
-
- pool->do_work = true;
-
- ASSERT_THREAD_ID(pool->scheduler, pool->thread_id);
-
- handle_local_queue(tls, pool->thread_id);
-
- BLI_mutex_lock(&pool->num_mutex);
-
- while (pool->num != 0) {
- Task *task, *work_task = NULL;
- bool found_task = false;
-
- BLI_mutex_unlock(&pool->num_mutex);
-
- BLI_mutex_lock(&scheduler->queue_mutex);
-
- /* find task from this pool. if we get a task from another pool,
- * we can get into deadlock */
-
- for (task = scheduler->queue.first; task; task = task->next) {
- if (task->pool == pool) {
- work_task = task;
- found_task = true;
- BLI_remlink(&scheduler->queue, task);
- break;
- }
- }
-
- BLI_mutex_unlock(&scheduler->queue_mutex);
-
- /* if found task, do it, otherwise wait until other tasks are done */
- if (found_task) {
- /* run task */
- BLI_assert(!tls->do_delayed_push);
- work_task->run(pool, work_task->taskdata, pool->thread_id);
- BLI_assert(!tls->do_delayed_push);
-
- /* delete task */
- task_free(pool, task, pool->thread_id);
-
- /* Handle all tasks from local queue. */
- handle_local_queue(tls, pool->thread_id);
-
- /* notify pool task was done */
- task_pool_num_decrease(pool, 1);
- }
-
- BLI_mutex_lock(&pool->num_mutex);
- if (pool->num == 0) {
- break;
- }
-
- if (!found_task) {
- BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
- }
- }
-
- BLI_mutex_unlock(&pool->num_mutex);
-
- BLI_assert(tls->num_local_queue == 0);
-}
-
-void BLI_task_pool_work_wait_and_reset(TaskPool *pool)
-{
- BLI_task_pool_work_and_wait(pool);
-
- pool->do_work = false;
- pool->is_suspended = pool->start_suspended;
-}
-
-void BLI_task_pool_cancel(TaskPool *pool)
-{
- pool->do_cancel = true;
-
- task_scheduler_clear(pool->scheduler, pool);
-
- /* wait until all entries are cleared */
- BLI_mutex_lock(&pool->num_mutex);
- while (pool->num) {
- BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
- }
- BLI_mutex_unlock(&pool->num_mutex);
-
- pool->do_cancel = false;
-}
-
-bool BLI_task_pool_canceled(TaskPool *pool)
-{
- return pool->do_cancel;
-}
-
-void *BLI_task_pool_userdata(TaskPool *pool)
-{
- return pool->userdata;
-}
-
-ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
-{
- return &pool->user_mutex;
-}
-
-void BLI_task_pool_delayed_push_begin(TaskPool *pool, int thread_id)
-{
- if (task_can_use_local_queues(pool, thread_id)) {
- ASSERT_THREAD_ID(pool->scheduler, thread_id);
- TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
- tls->do_delayed_push = true;
- }
-}
-
-void BLI_task_pool_delayed_push_end(TaskPool *pool, int thread_id)
-{
- if (task_can_use_local_queues(pool, thread_id)) {
- ASSERT_THREAD_ID(pool->scheduler, thread_id);
- TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
- BLI_assert(tls->do_delayed_push);
- task_scheduler_push_all(pool->scheduler, pool, tls->delayed_queue, tls->num_delayed_queue);
- tls->do_delayed_push = false;
- tls->num_delayed_queue = 0;
- }
-}
-
-/* Parallel range routines */
-
-/**
- *
- * Main functions:
- * - #BLI_task_parallel_range
- * - #BLI_task_parallel_listbase (#ListBase - double linked list)
- *
- * TODO:
- * - #BLI_task_parallel_foreach_link (#Link - single linked list)
- * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
- * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
- */
-
-/* Allows to avoid using malloc for userdata_chunk in tasks, when small enough. */
-#define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
-#define MALLOCA_FREE(_mem, _size) \
- if (((_mem) != NULL) && ((_size) > 8192)) \
- MEM_freeN((_mem))
-
-/* Stores all needed data to perform a parallelized iteration,
- * with the same operation (callback function).
- * It can be chained with other tasks as a singly-linked list. */
-typedef struct TaskParallelRangeState {
- struct TaskParallelRangeState *next;
-
- /* Start and end point of integer value iteration. */
- int start, stop;
-
- /* User-defined data, shared between all worker threads. */
- void *userdata_shared;
- /* User-defined callback function called for each value in [start, stop[ specified range. */
- TaskParallelRangeFunc func;
-
- /* Each instance of looping chunks will get a copy of this data
- * (similar to OpenMP's firstprivate).
- */
- void *initial_tls_memory; /* Pointer to actual user-defined 'tls' data. */
- size_t tls_data_size; /* Size of that data. */
-
- void *flatten_tls_storage; /* 'tls' copies of initial_tls_memory for each running task. */
- /* Number of 'tls' copies in the array, i.e. number of worker threads. */
- size_t num_elements_in_tls_storage;
-
-  /* Function called from the calling thread once the whole range has been processed. */
- TaskParallelFinalizeFunc func_finalize;
-
- /* Current value of the iterator, shared between all threads (atomically updated). */
- int iter_value;
- int iter_chunk_num; /* Amount of iterations to process in a single step. */
-} TaskParallelRangeState;
-
-/* Stores all the parallel tasks for a single pool. */
-typedef struct TaskParallelRangePool {
- /* The workers' task pool. */
- TaskPool *pool;
- /* The number of worker tasks we need to create. */
- int num_tasks;
- /* The total number of iterations in all the added ranges. */
- int num_total_iters;
- /* The size (number of items) processed at once by a worker task. */
- int chunk_size;
-
- /* Linked list of range tasks to process. */
- TaskParallelRangeState *parallel_range_states;
-  /* Current range task being processed, swapped atomically. */
- TaskParallelRangeState *current_state;
- /* Scheduling settings common to all tasks. */
- TaskParallelSettings *settings;
-} TaskParallelRangePool;
-
-BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
- const int tot_items,
- int num_tasks,
- int *r_chunk_size)
-{
- int chunk_size = 0;
-
- if (!settings->use_threading) {
- /* Some users of this helper will still need a valid chunk size in case processing is not
- * threaded. We can use a bigger one than in default threaded case then. */
- chunk_size = 1024;
- num_tasks = 1;
- }
- else if (settings->min_iter_per_thread > 0) {
- /* Already set by user, no need to do anything here. */
- chunk_size = settings->min_iter_per_thread;
- }
- else {
- /* Multiplier used in heuristics below to define "optimal" chunk size.
- * The idea here is to increase the chunk size to compensate for a rather measurable threading
- * overhead caused by fetching tasks. With too many CPU threads we are starting
- * to spend too much time in those overheads.
- * First values are: 1 if num_tasks < 16;
- * else 2 if num_tasks < 32;
- * else 3 if num_tasks < 48;
- * else 4 if num_tasks < 64;
- * etc.
- * Note: If we wanted to keep the 'power of two' multiplier, we'd need something like:
- * 1 << max_ii(0, (int)(sizeof(int) * 8) - 1 - bitscan_reverse_i(num_tasks) - 3)
- */
- const int num_tasks_factor = max_ii(1, num_tasks >> 3);
-
- /* We could make that 'base' 32 number configurable in TaskParallelSettings too, or maybe just
- * always use that heuristic using TaskParallelSettings.min_iter_per_thread as basis? */
- chunk_size = 32 * num_tasks_factor;
-
- /* Basic heuristic to avoid threading on low amount of items.
- * We could make that limit configurable in settings too. */
- if (tot_items > 0 && tot_items < max_ii(256, chunk_size * 2)) {
- chunk_size = tot_items;
- }
- }
-
- BLI_assert(chunk_size > 0);
-
- if (tot_items > 0) {
- switch (settings->scheduling_mode) {
- case TASK_SCHEDULING_STATIC:
- *r_chunk_size = max_ii(chunk_size, tot_items / num_tasks);
- break;
- case TASK_SCHEDULING_DYNAMIC:
- *r_chunk_size = chunk_size;
- break;
- }
- }
- else {
- /* If total amount of items is unknown, we can only use dynamic scheduling. */
- *r_chunk_size = chunk_size;
- }
-}
-
-BLI_INLINE void task_parallel_range_calc_chunk_size(TaskParallelRangePool *range_pool)
-{
- int num_iters = 0;
- int min_num_iters = INT_MAX;
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state->next) {
- const int ni = state->stop - state->start;
- num_iters += ni;
- if (min_num_iters > ni) {
- min_num_iters = ni;
- }
- }
- range_pool->num_total_iters = num_iters;
- /* Note: Passing min_num_iters here instead of num_iters kind of partially breaks the 'static'
- * scheduling, but pooled range iterator is inherently non-static anyway, so adding a small level
- * of dynamic scheduling here should be fine. */
- task_parallel_calc_chunk_size(
- range_pool->settings, min_num_iters, range_pool->num_tasks, &range_pool->chunk_size);
-}
-
-BLI_INLINE bool parallel_range_next_iter_get(TaskParallelRangePool *__restrict range_pool,
- int *__restrict r_iter,
- int *__restrict r_count,
- TaskParallelRangeState **__restrict r_state)
-{
- /* We need an atomic op here as well to fetch the initial state, since some other thread might
- * have already updated it. */
- TaskParallelRangeState *current_state = atomic_cas_ptr(
- (void **)&range_pool->current_state, NULL, NULL);
-
- int previter = INT32_MAX;
-
- while (current_state != NULL && previter >= current_state->stop) {
- previter = atomic_fetch_and_add_int32(&current_state->iter_value, range_pool->chunk_size);
- *r_iter = previter;
- *r_count = max_ii(0, min_ii(range_pool->chunk_size, current_state->stop - previter));
-
- if (previter >= current_state->stop) {
- /* At this point the state we got is done, we need to go to the next one. In case some other
- * thread already did it, then this does nothing, and we'll just get current valid state
- * at start of the next loop. */
- TaskParallelRangeState *current_state_from_atomic_cas = atomic_cas_ptr(
- (void **)&range_pool->current_state, current_state, current_state->next);
-
- if (current_state == current_state_from_atomic_cas) {
- /* The atomic CAS operation was successful, we did update range_pool->current_state, so we
- * can safely switch to next state. */
- current_state = current_state->next;
- }
- else {
- /* The atomic CAS operation failed, but we still got range_pool->current_state value out of
- * it, just use it as our new current state. */
- current_state = current_state_from_atomic_cas;
- }
- }
- }
-
- *r_state = current_state;
- return (current_state != NULL && previter < current_state->stop);
-}
-
-static void parallel_range_func(TaskPool *__restrict pool, void *tls_data_idx, int thread_id)
-{
- TaskParallelRangePool *__restrict range_pool = BLI_task_pool_userdata(pool);
- TaskParallelTLS tls = {
- .thread_id = thread_id,
- .userdata_chunk = NULL,
- };
- TaskParallelRangeState *state;
- int iter, count;
- while (parallel_range_next_iter_get(range_pool, &iter, &count, &state)) {
- tls.userdata_chunk = (char *)state->flatten_tls_storage +
- (((size_t)POINTER_AS_INT(tls_data_idx)) * state->tls_data_size);
- for (int i = 0; i < count; i++) {
- state->func(state->userdata_shared, iter + i, &tls);
- }
- }
-}
-
-static void parallel_range_single_thread(TaskParallelRangePool *range_pool)
-{
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state->next) {
- const int start = state->start;
- const int stop = state->stop;
- void *userdata = state->userdata_shared;
- TaskParallelRangeFunc func = state->func;
-
- void *initial_tls_memory = state->initial_tls_memory;
- const size_t tls_data_size = state->tls_data_size;
- void *flatten_tls_storage = NULL;
- const bool use_tls_data = (tls_data_size != 0) && (initial_tls_memory != NULL);
- if (use_tls_data) {
- flatten_tls_storage = MALLOCA(tls_data_size);
- memcpy(flatten_tls_storage, initial_tls_memory, tls_data_size);
- }
- TaskParallelTLS tls = {
- .thread_id = 0,
- .userdata_chunk = flatten_tls_storage,
- };
- for (int i = start; i < stop; i++) {
- func(userdata, i, &tls);
- }
- if (state->func_finalize != NULL) {
- state->func_finalize(userdata, flatten_tls_storage);
- }
- MALLOCA_FREE(flatten_tls_storage, tls_data_size);
- }
-}
-
-/**
- * This function allows parallelizing for loops in a similar way to OpenMP's
- * 'parallel for' statement.
- *
- * See public API doc of ParallelRangeSettings for description of all settings.
- */
-void BLI_task_parallel_range(const int start,
- const int stop,
- void *userdata,
- TaskParallelRangeFunc func,
- TaskParallelSettings *settings)
-{
- if (start == stop) {
- return;
- }
-
- BLI_assert(start < stop);
-
- TaskParallelRangeState state = {
- .next = NULL,
- .start = start,
- .stop = stop,
- .userdata_shared = userdata,
- .func = func,
- .iter_value = start,
- .initial_tls_memory = settings->userdata_chunk,
- .tls_data_size = settings->userdata_chunk_size,
- .func_finalize = settings->func_finalize,
- };
- TaskParallelRangePool range_pool = {
- .pool = NULL, .parallel_range_states = &state, .current_state = NULL, .settings = settings};
- int i, num_threads, num_tasks;
-
- void *tls_data = settings->userdata_chunk;
- const size_t tls_data_size = settings->userdata_chunk_size;
- if (tls_data_size != 0) {
- BLI_assert(tls_data != NULL);
- }
- const bool use_tls_data = (tls_data_size != 0) && (tls_data != NULL);
- void *flatten_tls_storage = NULL;
-
-  /* If there is not enough data to be crunched, don't bother with tasks at all,
- * do everything from the current thread.
- */
- if (!settings->use_threading) {
- parallel_range_single_thread(&range_pool);
- return;
- }
-
- TaskScheduler *task_scheduler = BLI_task_scheduler_get();
- num_threads = BLI_task_scheduler_num_threads(task_scheduler);
-
- /* The idea here is to prevent creating task for each of the loop iterations
- * and instead have tasks which are evenly distributed across CPU cores and
- * pull next iter to be crunched using the queue.
- */
- range_pool.num_tasks = num_tasks = num_threads + 2;
-
- task_parallel_range_calc_chunk_size(&range_pool);
- range_pool.num_tasks = num_tasks = min_ii(num_tasks,
- max_ii(1, (stop - start) / range_pool.chunk_size));
-
- if (num_tasks == 1) {
- parallel_range_single_thread(&range_pool);
- return;
- }
-
- TaskPool *task_pool = range_pool.pool = BLI_task_pool_create_suspended(task_scheduler,
- &range_pool);
-
- range_pool.current_state = &state;
-
- if (use_tls_data) {
- state.flatten_tls_storage = flatten_tls_storage = MALLOCA(tls_data_size * (size_t)num_tasks);
- state.tls_data_size = tls_data_size;
- }
-
- for (i = 0; i < num_tasks; i++) {
- if (use_tls_data) {
- void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
- memcpy(userdata_chunk_local, tls_data, tls_data_size);
- }
- /* Use this pool's pre-allocated tasks. */
- BLI_task_pool_push_from_thread(task_pool,
- parallel_range_func,
- POINTER_FROM_INT(i),
- false,
- TASK_PRIORITY_HIGH,
- task_pool->thread_id);
- }
-
- BLI_task_pool_work_and_wait(task_pool);
- BLI_task_pool_free(task_pool);
-
- if (use_tls_data) {
- if (settings->func_finalize != NULL) {
- for (i = 0; i < num_tasks; i++) {
- void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
- settings->func_finalize(userdata, userdata_chunk_local);
- }
- }
- MALLOCA_FREE(flatten_tls_storage, tls_data_size * (size_t)num_tasks);
- }
-}
-
-/**
- * Initialize a task pool to parallelize several for loops at the same time.
- *
- * See public API doc of ParallelRangeSettings for description of all settings.
- * Note that loop-specific settings (like 'tls' data or finalize function) must be left NULL here.
- * Only settings controlling how iteration is parallelized must be defined, as those will affect
- * all loops added to that pool.
- */
-TaskParallelRangePool *BLI_task_parallel_range_pool_init(const TaskParallelSettings *settings)
-{
- TaskParallelRangePool *range_pool = MEM_callocN(sizeof(*range_pool), __func__);
-
- BLI_assert(settings->userdata_chunk == NULL);
- BLI_assert(settings->func_finalize == NULL);
- range_pool->settings = MEM_mallocN(sizeof(*range_pool->settings), __func__);
- *range_pool->settings = *settings;
-
- return range_pool;
-}
-
-/**
- * Add a loop task to the pool. It does not execute it at all.
- *
- * See public API doc of ParallelRangeSettings for description of all settings.
- * Note that only 'tls'-related data are used here.
- */
-void BLI_task_parallel_range_pool_push(TaskParallelRangePool *range_pool,
- const int start,
- const int stop,
- void *userdata,
- TaskParallelRangeFunc func,
- const TaskParallelSettings *settings)
-{
- BLI_assert(range_pool->pool == NULL);
-
- if (start == stop) {
- return;
- }
-
- BLI_assert(start < stop);
- if (settings->userdata_chunk_size != 0) {
- BLI_assert(settings->userdata_chunk != NULL);
- }
-
- TaskParallelRangeState *state = MEM_callocN(sizeof(*state), __func__);
- state->start = start;
- state->stop = stop;
- state->userdata_shared = userdata;
- state->func = func;
- state->iter_value = start;
- state->initial_tls_memory = settings->userdata_chunk;
- state->tls_data_size = settings->userdata_chunk_size;
- state->func_finalize = settings->func_finalize;
-
- state->next = range_pool->parallel_range_states;
- range_pool->parallel_range_states = state;
-}
-
-static void parallel_range_func_finalize(TaskPool *__restrict pool,
- void *v_state,
- int UNUSED(thread_id))
-{
- TaskParallelRangePool *__restrict range_pool = BLI_task_pool_userdata(pool);
- TaskParallelRangeState *state = v_state;
-
- for (int i = 0; i < range_pool->num_tasks; i++) {
- void *tls_data = (char *)state->flatten_tls_storage + (state->tls_data_size * (size_t)i);
- state->func_finalize(state->userdata_shared, tls_data);
- }
-}
-
-/**
- * Run all tasks pushed to the range_pool.
- *
- * Note that the range pool is re-usable (you may push new tasks into it and call this function
- * again).
- */
-void BLI_task_parallel_range_pool_work_and_wait(TaskParallelRangePool *range_pool)
-{
- BLI_assert(range_pool->pool == NULL);
-
-  /* If there is not enough data to be crunched, don't bother with tasks at all,
- * do everything from the current thread.
- */
- if (!range_pool->settings->use_threading) {
- parallel_range_single_thread(range_pool);
- return;
- }
-
- TaskScheduler *task_scheduler = BLI_task_scheduler_get();
- const int num_threads = BLI_task_scheduler_num_threads(task_scheduler);
-
- /* The idea here is to prevent creating task for each of the loop iterations
- * and instead have tasks which are evenly distributed across CPU cores and
- * pull next iter to be crunched using the queue.
- */
- int num_tasks = num_threads + 2;
- range_pool->num_tasks = num_tasks;
-
- task_parallel_range_calc_chunk_size(range_pool);
- range_pool->num_tasks = num_tasks = min_ii(
- num_tasks, max_ii(1, range_pool->num_total_iters / range_pool->chunk_size));
-
- if (num_tasks == 1) {
- parallel_range_single_thread(range_pool);
- return;
- }
-
- /* We create all 'tls' data here in a single loop. */
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state->next) {
- void *userdata_chunk = state->initial_tls_memory;
- const size_t userdata_chunk_size = state->tls_data_size;
- if (userdata_chunk_size == 0) {
- BLI_assert(userdata_chunk == NULL);
- continue;
- }
-
- void *userdata_chunk_array = NULL;
- state->flatten_tls_storage = userdata_chunk_array = MALLOCA(userdata_chunk_size *
- (size_t)num_tasks);
- for (int i = 0; i < num_tasks; i++) {
- void *userdata_chunk_local = (char *)userdata_chunk_array +
- (userdata_chunk_size * (size_t)i);
- memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
- }
- }
-
- TaskPool *task_pool = range_pool->pool = BLI_task_pool_create_suspended(task_scheduler,
- range_pool);
-
- range_pool->current_state = range_pool->parallel_range_states;
-
- for (int i = 0; i < num_tasks; i++) {
- BLI_task_pool_push_from_thread(task_pool,
- parallel_range_func,
- POINTER_FROM_INT(i),
- false,
- TASK_PRIORITY_HIGH,
- task_pool->thread_id);
- }
-
- BLI_task_pool_work_and_wait(task_pool);
-
- BLI_assert(atomic_cas_ptr((void **)&range_pool->current_state, NULL, NULL) == NULL);
-
- /* Finalize all tasks. */
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state->next) {
- const size_t userdata_chunk_size = state->tls_data_size;
- void *userdata_chunk_array = state->flatten_tls_storage;
- UNUSED_VARS_NDEBUG(userdata_chunk_array);
- if (userdata_chunk_size == 0) {
- BLI_assert(userdata_chunk_array == NULL);
- continue;
- }
-
- if (state->func_finalize != NULL) {
- BLI_task_pool_push_from_thread(task_pool,
- parallel_range_func_finalize,
- state,
- false,
- TASK_PRIORITY_HIGH,
- task_pool->thread_id);
- }
- }
-
- BLI_task_pool_work_and_wait(task_pool);
- BLI_task_pool_free(task_pool);
- range_pool->pool = NULL;
-
- /* Cleanup all tasks. */
- TaskParallelRangeState *state_next;
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state_next) {
- state_next = state->next;
-
- const size_t userdata_chunk_size = state->tls_data_size;
- void *userdata_chunk_array = state->flatten_tls_storage;
- if (userdata_chunk_size != 0) {
- BLI_assert(userdata_chunk_array != NULL);
- MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * (size_t)num_tasks);
- }
-
- MEM_freeN(state);
- }
- range_pool->parallel_range_states = NULL;
-}
-
-/**
- * Clear/free given \a range_pool.
- */
-void BLI_task_parallel_range_pool_free(TaskParallelRangePool *range_pool)
-{
- TaskParallelRangeState *state_next = NULL;
- for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
- state = state_next) {
- state_next = state->next;
- MEM_freeN(state);
- }
- MEM_freeN(range_pool->settings);
- MEM_freeN(range_pool);
-}
-
-typedef struct TaskParallelIteratorState {
- void *userdata;
- TaskParallelIteratorIterFunc iter_func;
- TaskParallelIteratorFunc func;
-
- /* *** Data used to 'acquire' chunks of items from the iterator. *** */
- /* Common data also passed to the generator callback. */
- TaskParallelIteratorStateShared iter_shared;
- /* Total number of items. If unknown, set it to a negative number. */
- int tot_items;
-} TaskParallelIteratorState;
-
-BLI_INLINE void task_parallel_iterator_calc_chunk_size(const TaskParallelSettings *settings,
- const int num_tasks,
- TaskParallelIteratorState *state)
-{
- task_parallel_calc_chunk_size(
- settings, state->tot_items, num_tasks, &state->iter_shared.chunk_size);
-}
-
-static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
- void *userdata_chunk,
- int threadid)
-{
- TaskParallelTLS tls = {
- .thread_id = threadid,
- .userdata_chunk = userdata_chunk,
- };
-
- void **current_chunk_items;
- int *current_chunk_indices;
- int current_chunk_size;
-
- const size_t items_size = sizeof(*current_chunk_items) * (size_t)state->iter_shared.chunk_size;
- const size_t indices_size = sizeof(*current_chunk_indices) *
- (size_t)state->iter_shared.chunk_size;
-
- current_chunk_items = MALLOCA(items_size);
- current_chunk_indices = MALLOCA(indices_size);
- current_chunk_size = 0;
-
- for (bool do_abort = false; !do_abort;) {
- if (state->iter_shared.spin_lock != NULL) {
- BLI_spin_lock(state->iter_shared.spin_lock);
- }
-
- /* Get current status. */
- int index = state->iter_shared.next_index;
- void *item = state->iter_shared.next_item;
- int i;
-
- /* 'Acquire' a chunk of items from the iterator function. */
- for (i = 0; i < state->iter_shared.chunk_size && !state->iter_shared.is_finished; i++) {
- current_chunk_indices[i] = index;
- current_chunk_items[i] = item;
- state->iter_func(state->userdata, &tls, &item, &index, &state->iter_shared.is_finished);
- }
-
- /* Update current status. */
- state->iter_shared.next_index = index;
- state->iter_shared.next_item = item;
- current_chunk_size = i;
-
- do_abort = state->iter_shared.is_finished;
-
- if (state->iter_shared.spin_lock != NULL) {
- BLI_spin_unlock(state->iter_shared.spin_lock);
- }
-
- for (i = 0; i < current_chunk_size; ++i) {
- state->func(state->userdata, current_chunk_items[i], current_chunk_indices[i], &tls);
- }
- }
-
- MALLOCA_FREE(current_chunk_items, items_size);
- MALLOCA_FREE(current_chunk_indices, indices_size);
-}
-
-static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk, int threadid)
-{
- TaskParallelIteratorState *__restrict state = BLI_task_pool_userdata(pool);
-
- parallel_iterator_func_do(state, userdata_chunk, threadid);
-}
-
-static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
- TaskParallelIteratorState *state)
-{
- /* Prepare user's TLS data. */
- void *userdata_chunk = settings->userdata_chunk;
- const size_t userdata_chunk_size = settings->userdata_chunk_size;
- void *userdata_chunk_local = NULL;
- const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
- if (use_userdata_chunk) {
- userdata_chunk_local = MALLOCA(userdata_chunk_size);
- memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
- }
-
- /* Also marking it as non-threaded for the iterator callback. */
- state->iter_shared.spin_lock = NULL;
-
- parallel_iterator_func_do(state, userdata_chunk, 0);
-
- if (use_userdata_chunk) {
- if (settings->func_finalize != NULL) {
- settings->func_finalize(state->userdata, userdata_chunk_local);
- }
- MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
- }
-}
-
-static void task_parallel_iterator_do(const TaskParallelSettings *settings,
- TaskParallelIteratorState *state)
-{
- TaskScheduler *task_scheduler = BLI_task_scheduler_get();
- const int num_threads = BLI_task_scheduler_num_threads(task_scheduler);
-
- task_parallel_iterator_calc_chunk_size(settings, num_threads, state);
-
- if (!settings->use_threading) {
- task_parallel_iterator_no_threads(settings, state);
- return;
- }
-
- const int chunk_size = state->iter_shared.chunk_size;
- const int tot_items = state->tot_items;
- const size_t num_tasks = tot_items >= 0 ?
- (size_t)min_ii(num_threads, state->tot_items / chunk_size) :
- (size_t)num_threads;
-
- BLI_assert(num_tasks > 0);
- if (num_tasks == 1) {
- task_parallel_iterator_no_threads(settings, state);
- return;
- }
-
- SpinLock spin_lock;
- BLI_spin_init(&spin_lock);
- state->iter_shared.spin_lock = &spin_lock;
-
- void *userdata_chunk = settings->userdata_chunk;
- const size_t userdata_chunk_size = settings->userdata_chunk_size;
- void *userdata_chunk_local = NULL;
- void *userdata_chunk_array = NULL;
- const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
-
- TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, state);
-
- if (use_userdata_chunk) {
- userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
- }
-
- for (size_t i = 0; i < num_tasks; i++) {
- if (use_userdata_chunk) {
- userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
- memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
- }
- /* Use this pool's pre-allocated tasks. */
- BLI_task_pool_push_from_thread(task_pool,
- parallel_iterator_func,
- userdata_chunk_local,
- false,
- TASK_PRIORITY_HIGH,
- task_pool->thread_id);
- }
-
- BLI_task_pool_work_and_wait(task_pool);
- BLI_task_pool_free(task_pool);
-
- if (use_userdata_chunk) {
- if (settings->func_finalize != NULL) {
- for (size_t i = 0; i < num_tasks; i++) {
- userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
- settings->func_finalize(state->userdata, userdata_chunk_local);
- }
- }
- MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
- }
-
- BLI_spin_end(&spin_lock);
- state->iter_shared.spin_lock = NULL;
-}
-
-/**
- * This function allows to parallelize for loops using a generic iterator.
- *
- * \param userdata: Common userdata passed to all instances of \a func.
- * \param iter_func: Callback function used to generate chunks of items.
- * \param init_item: The initial item, if necessary (may be NULL if unused).
- * \param init_index: The initial index.
- * \param tot_items: The total amount of items to iterate over
- * (if unknown, set it to a negative number).
- * \param func: Callback function.
- * \param settings: See public API doc of TaskParallelSettings for description of all settings.
- *
- * \note Static scheduling is only available when \a tot_items is >= 0.
- */
-
-void BLI_task_parallel_iterator(void *userdata,
- TaskParallelIteratorIterFunc iter_func,
- void *init_item,
- const int init_index,
- const int tot_items,
- TaskParallelIteratorFunc func,
- const TaskParallelSettings *settings)
-{
- TaskParallelIteratorState state = {0};
-
- state.tot_items = tot_items;
- state.iter_shared.next_index = init_index;
- state.iter_shared.next_item = init_item;
- state.iter_shared.is_finished = false;
- state.userdata = userdata;
- state.iter_func = iter_func;
- state.func = func;
-
- task_parallel_iterator_do(settings, &state);
-}
-
-static void task_parallel_listbase_get(void *__restrict UNUSED(userdata),
- const TaskParallelTLS *__restrict UNUSED(tls),
- void **r_next_item,
- int *r_next_index,
- bool *r_do_abort)
-{
- /* Get current status. */
- Link *link = *r_next_item;
-
- if (link->next == NULL) {
- *r_do_abort = true;
- }
- *r_next_item = link->next;
- (*r_next_index)++;
-}
-
-/**
- * This function allows to parallelize for loops over ListBase items.
- *
- * \param listbase: The double linked list to loop over.
- * \param userdata: Common userdata passed to all instances of \a func.
- * \param func: Callback function.
- * \param settings: See public API doc of ParallelRangeSettings for description of all settings.
- *
- * \note There is no static scheduling here,
- * since it would need another full loop over items to count them.
- */
-void BLI_task_parallel_listbase(ListBase *listbase,
- void *userdata,
- TaskParallelIteratorFunc func,
- const TaskParallelSettings *settings)
-{
- if (BLI_listbase_is_empty(listbase)) {
- return;
- }
-
- TaskParallelIteratorState state = {0};
-
- state.tot_items = BLI_listbase_count(listbase);
- state.iter_shared.next_index = 0;
- state.iter_shared.next_item = listbase->first;
- state.iter_shared.is_finished = false;
- state.userdata = userdata;
- state.iter_func = task_parallel_listbase_get;
- state.func = func;
-
- task_parallel_iterator_do(settings, &state);
-}
-
-#undef MALLOCA
-#undef MALLOCA_FREE
-
-typedef struct ParallelMempoolState {
- void *userdata;
- TaskParallelMempoolFunc func;
-} ParallelMempoolState;
-
-static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata, int UNUSED(threadid))
-{
- ParallelMempoolState *__restrict state = BLI_task_pool_userdata(pool);
- BLI_mempool_iter *iter = taskdata;
- MempoolIterData *item;
-
- while ((item = BLI_mempool_iterstep(iter)) != NULL) {
- state->func(state->userdata, item);
- }
-}
-
-/**
- * This function allows to parallelize for loops over Mempool items.
- *
- * \param mempool: The iterable BLI_mempool to loop over.
- * \param userdata: Common userdata passed to all instances of \a func.
- * \param func: Callback function.
- * \param use_threading: If \a true, actually split-execute loop in threads,
- * else just do a sequential for loop
- * (allows caller to use any kind of test to switch on parallelization or not).
- *
- * \note There is no static scheduling here.
- */
-void BLI_task_parallel_mempool(BLI_mempool *mempool,
- void *userdata,
- TaskParallelMempoolFunc func,
- const bool use_threading)
-{
- TaskScheduler *task_scheduler;
- TaskPool *task_pool;
- ParallelMempoolState state;
- int i, num_threads, num_tasks;
-
- if (BLI_mempool_len(mempool) == 0) {
- return;
- }
-
- if (!use_threading) {
- BLI_mempool_iter iter;
- BLI_mempool_iternew(mempool, &iter);
-
- for (void *item = BLI_mempool_iterstep(&iter); item != NULL;
- item = BLI_mempool_iterstep(&iter)) {
- func(userdata, item);
- }
- return;
- }
-
- task_scheduler = BLI_task_scheduler_get();
- task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
- num_threads = BLI_task_scheduler_num_threads(task_scheduler);
-
- /* The idea here is to prevent creating task for each of the loop iterations
- * and instead have tasks which are evenly distributed across CPU cores and
- * pull next item to be crunched using the threaded-aware BLI_mempool_iter.
- */
- num_tasks = num_threads + 2;
-
- state.userdata = userdata;
- state.func = func;
-
- BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool,
- (size_t)num_tasks);
-
- for (i = 0; i < num_tasks; i++) {
- /* Use this pool's pre-allocated tasks. */
- BLI_task_pool_push_from_thread(task_pool,
- parallel_mempool_func,
- &mempool_iterators[i],
- false,
- TASK_PRIORITY_HIGH,
- task_pool->thread_id);
- }
-
- BLI_task_pool_work_and_wait(task_pool);
- BLI_task_pool_free(task_pool);
-
- BLI_mempool_iter_threadsafe_free(mempool_iterators);
-}
diff --git a/source/blender/blenlib/intern/task_graph.cc b/source/blender/blenlib/intern/task_graph.cc
new file mode 100644
index 00000000000..4f112c5b2c8
--- /dev/null
+++ b/source/blender/blenlib/intern/task_graph.cc
@@ -0,0 +1,166 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bli
+ *
+ * Task graph.
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_task.h"
+
+#include <memory>
+#include <vector>
+
+#ifdef WITH_TBB
+/* Quiet top level deprecation message, unrelated to API usage here. */
+# define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
+# include <tbb/flow_graph.h>
+# include <tbb/tbb.h>
+#endif
+
+/* Task Graph */
+struct TaskGraph {
+#ifdef WITH_TBB
+ tbb::flow::graph tbb_graph;
+#endif
+ std::vector<std::unique_ptr<TaskNode>> nodes;
+
+#ifdef WITH_CXX_GUARDEDALLOC
+ MEM_CXX_CLASS_ALLOC_FUNCS("task_graph:TaskGraph")
+#endif
+};
+
+/* TaskNode - a node in the task graph. */
+struct TaskNode {
+ /* TBB Node. */
+#ifdef WITH_TBB
+ tbb::flow::continue_node<tbb::flow::continue_msg> tbb_node;
+#endif
+ /* Successors to execute after this task, for serial execution fallback. */
+ std::vector<TaskNode *> successors;
+
+ /* User function to be executed with given task data. */
+ TaskGraphNodeRunFunction run_func;
+ void *task_data;
+ /* Optional callback to free task data along with the graph. If task data
+ * is shared between nodes, only a single task node should free the data. */
+ TaskGraphNodeFreeFunction free_func;
+
+ TaskNode(TaskGraph *task_graph,
+ TaskGraphNodeRunFunction run_func,
+ void *task_data,
+ TaskGraphNodeFreeFunction free_func)
+ :
+#ifdef WITH_TBB
+ tbb_node(task_graph->tbb_graph,
+ tbb::flow::unlimited,
+ std::bind(&TaskNode::run, this, std::placeholders::_1)),
+#endif
+ run_func(run_func),
+ task_data(task_data),
+ free_func(free_func)
+ {
+#ifndef WITH_TBB
+ UNUSED_VARS(task_graph);
+#endif
+ }
+
+ TaskNode(const TaskNode &other) = delete;
+ TaskNode &operator=(const TaskNode &other) = delete;
+
+ ~TaskNode()
+ {
+ if (task_data && free_func) {
+ free_func(task_data);
+ }
+ }
+
+#ifdef WITH_TBB
+ tbb::flow::continue_msg run(const tbb::flow::continue_msg UNUSED(input))
+ {
+ tbb::this_task_arena::isolate([this] { run_func(task_data); });
+ return tbb::flow::continue_msg();
+ }
+#endif
+
+ void run_serial()
+ {
+ run_func(task_data);
+ for (TaskNode *successor : successors) {
+ successor->run_serial();
+ }
+ }
+
+#ifdef WITH_CXX_GUARDEDALLOC
+ MEM_CXX_CLASS_ALLOC_FUNCS("task_graph:TaskNode")
+#endif
+};
+
+TaskGraph *BLI_task_graph_create(void)
+{
+ return new TaskGraph();
+}
+
+void BLI_task_graph_free(TaskGraph *task_graph)
+{
+ delete task_graph;
+}
+
+void BLI_task_graph_work_and_wait(TaskGraph *task_graph)
+{
+#ifdef WITH_TBB
+ task_graph->tbb_graph.wait_for_all();
+#else
+ UNUSED_VARS(task_graph);
+#endif
+}
+
+struct TaskNode *BLI_task_graph_node_create(struct TaskGraph *task_graph,
+ TaskGraphNodeRunFunction run,
+ void *user_data,
+ TaskGraphNodeFreeFunction free_func)
+{
+ TaskNode *task_node = new TaskNode(task_graph, run, user_data, free_func);
+ task_graph->nodes.push_back(std::unique_ptr<TaskNode>(task_node));
+ return task_node;
+}
+
+bool BLI_task_graph_node_push_work(struct TaskNode *task_node)
+{
+#ifdef WITH_TBB
+ if (BLI_task_scheduler_num_threads() > 1) {
+ return task_node->tbb_node.try_put(tbb::flow::continue_msg());
+ }
+#endif
+
+ task_node->run_serial();
+ return true;
+}
+
+void BLI_task_graph_edge_create(struct TaskNode *from_node, struct TaskNode *to_node)
+{
+#ifdef WITH_TBB
+ if (BLI_task_scheduler_num_threads() > 1) {
+ tbb::flow::make_edge(from_node->tbb_node, to_node->tbb_node);
+ return;
+ }
+#endif
+
+ from_node->successors.push_back(to_node);
+}
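Illustration (editor's sketch, not part of the patch): a minimal use of the graph API defined above. The ExampleData struct and the example_* callbacks are hypothetical; only the BLI_task_graph_* calls come from this file.

struct ExampleData {
  int value;
};

static void example_run_a(void *task_data)
{
  static_cast<ExampleData *>(task_data)->value = 1;
}

static void example_run_b(void *task_data)
{
  static_cast<ExampleData *>(task_data)->value += 1;
}

static void example_free(void *task_data)
{
  MEM_freeN(task_data);
}

static void example_graph_usage()
{
  ExampleData *data = static_cast<ExampleData *>(MEM_callocN(sizeof(ExampleData), __func__));

  TaskGraph *graph = BLI_task_graph_create();
  /* Only one node should own (free) shared task data. */
  TaskNode *node_a = BLI_task_graph_node_create(graph, example_run_a, data, example_free);
  TaskNode *node_b = BLI_task_graph_node_create(graph, example_run_b, data, nullptr);
  BLI_task_graph_edge_create(node_a, node_b);

  /* Pushing the root node schedules it; successors run after their
   * predecessors complete (or serially when built without TBB). */
  BLI_task_graph_node_push_work(node_a);
  BLI_task_graph_work_and_wait(graph);
  BLI_task_graph_free(graph); /* Destroys nodes and calls free_func on the task data. */
}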
diff --git a/source/blender/blenlib/intern/task_iterator.c b/source/blender/blenlib/intern/task_iterator.c
new file mode 100644
index 00000000000..ee459ac2548
--- /dev/null
+++ b/source/blender/blenlib/intern/task_iterator.c
@@ -0,0 +1,423 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bli
+ *
+ * Parallel tasks over all elements in a container.
+ */
+
+#include <stdlib.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "DNA_listBase.h"
+
+#include "BLI_listbase.h"
+#include "BLI_math.h"
+#include "BLI_mempool.h"
+#include "BLI_task.h"
+#include "BLI_threads.h"
+
+#include "atomic_ops.h"
+
+/* Allows avoiding the use of malloc for userdata_chunk in tasks, when it is small enough. */
+#define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
+#define MALLOCA_FREE(_mem, _size) \
+ if (((_mem) != NULL) && ((_size) > 8192)) \
+ MEM_freeN((_mem))
+
+BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
+ const int tot_items,
+ int num_tasks,
+ int *r_chunk_size)
+{
+ int chunk_size = 0;
+
+ if (!settings->use_threading) {
+ /* Some users of this helper will still need a valid chunk size in case processing is not
+     * threaded. We can use a bigger one than in the default threaded case then. */
+ chunk_size = 1024;
+ num_tasks = 1;
+ }
+ else if (settings->min_iter_per_thread > 0) {
+ /* Already set by user, no need to do anything here. */
+ chunk_size = settings->min_iter_per_thread;
+ }
+ else {
+ /* Multiplier used in heuristics below to define "optimal" chunk size.
+ * The idea here is to increase the chunk size to compensate for a rather measurable threading
+     * overhead caused by fetching tasks. With too many CPU threads we start
+     * spending too much time on that overhead.
+ * First values are: 1 if num_tasks < 16;
+ * else 2 if num_tasks < 32;
+ * else 3 if num_tasks < 48;
+ * else 4 if num_tasks < 64;
+ * etc.
+ * Note: If we wanted to keep the 'power of two' multiplier, we'd need something like:
+ * 1 << max_ii(0, (int)(sizeof(int) * 8) - 1 - bitscan_reverse_i(num_tasks) - 3)
+ */
+ const int num_tasks_factor = max_ii(1, num_tasks >> 3);
+
+ /* We could make that 'base' 32 number configurable in TaskParallelSettings too, or maybe just
+ * always use that heuristic using TaskParallelSettings.min_iter_per_thread as basis? */
+ chunk_size = 32 * num_tasks_factor;
+
+    /* Basic heuristic to avoid threading on a low number of items.
+ * We could make that limit configurable in settings too. */
+ if (tot_items > 0 && tot_items < max_ii(256, chunk_size * 2)) {
+ chunk_size = tot_items;
+ }
+ }
+
+ BLI_assert(chunk_size > 0);
+ *r_chunk_size = chunk_size;
+}
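Worked example of the heuristic above (illustrative numbers only): with 34 tasks the multiplier is max(1, 34 >> 3) = 4, so the default chunk size becomes 32 * 4 = 128; if only 100 items are to be processed, 100 < max(256, 128 * 2), so the chunk size collapses to 100 and the callers below end up running the whole range as a single task.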
+
+typedef struct TaskParallelIteratorState {
+ void *userdata;
+ TaskParallelIteratorIterFunc iter_func;
+ TaskParallelIteratorFunc func;
+
+ /* *** Data used to 'acquire' chunks of items from the iterator. *** */
+ /* Common data also passed to the generator callback. */
+ TaskParallelIteratorStateShared iter_shared;
+ /* Total number of items. If unknown, set it to a negative number. */
+ int tot_items;
+} TaskParallelIteratorState;
+
+static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
+ void *userdata_chunk)
+{
+ TaskParallelTLS tls = {
+ .userdata_chunk = userdata_chunk,
+ };
+
+ void **current_chunk_items;
+ int *current_chunk_indices;
+ int current_chunk_size;
+
+ const size_t items_size = sizeof(*current_chunk_items) * (size_t)state->iter_shared.chunk_size;
+ const size_t indices_size = sizeof(*current_chunk_indices) *
+ (size_t)state->iter_shared.chunk_size;
+
+ current_chunk_items = MALLOCA(items_size);
+ current_chunk_indices = MALLOCA(indices_size);
+ current_chunk_size = 0;
+
+ for (bool do_abort = false; !do_abort;) {
+ if (state->iter_shared.spin_lock != NULL) {
+ BLI_spin_lock(state->iter_shared.spin_lock);
+ }
+
+ /* Get current status. */
+ int index = state->iter_shared.next_index;
+ void *item = state->iter_shared.next_item;
+ int i;
+
+ /* 'Acquire' a chunk of items from the iterator function. */
+ for (i = 0; i < state->iter_shared.chunk_size && !state->iter_shared.is_finished; i++) {
+ current_chunk_indices[i] = index;
+ current_chunk_items[i] = item;
+ state->iter_func(state->userdata, &tls, &item, &index, &state->iter_shared.is_finished);
+ }
+
+ /* Update current status. */
+ state->iter_shared.next_index = index;
+ state->iter_shared.next_item = item;
+ current_chunk_size = i;
+
+ do_abort = state->iter_shared.is_finished;
+
+ if (state->iter_shared.spin_lock != NULL) {
+ BLI_spin_unlock(state->iter_shared.spin_lock);
+ }
+
+ for (i = 0; i < current_chunk_size; ++i) {
+ state->func(state->userdata, current_chunk_items[i], current_chunk_indices[i], &tls);
+ }
+ }
+
+ MALLOCA_FREE(current_chunk_items, items_size);
+ MALLOCA_FREE(current_chunk_indices, indices_size);
+}
+
+static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk)
+{
+ TaskParallelIteratorState *__restrict state = BLI_task_pool_user_data(pool);
+
+ parallel_iterator_func_do(state, userdata_chunk);
+}
+
+static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
+ TaskParallelIteratorState *state)
+{
+ /* Prepare user's TLS data. */
+ void *userdata_chunk = settings->userdata_chunk;
+ const size_t userdata_chunk_size = settings->userdata_chunk_size;
+ void *userdata_chunk_local = NULL;
+ const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
+ if (use_userdata_chunk) {
+ userdata_chunk_local = MALLOCA(userdata_chunk_size);
+ memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
+ }
+
+ /* Also marking it as non-threaded for the iterator callback. */
+ state->iter_shared.spin_lock = NULL;
+
+ parallel_iterator_func_do(state, userdata_chunk);
+
+ if (use_userdata_chunk && settings->func_free != NULL) {
+ /* `func_free` should only free data that was created during execution of `func`. */
+ settings->func_free(state->userdata, userdata_chunk_local);
+ }
+}
+
+static void task_parallel_iterator_do(const TaskParallelSettings *settings,
+ TaskParallelIteratorState *state)
+{
+ const int num_threads = BLI_task_scheduler_num_threads();
+
+ task_parallel_calc_chunk_size(
+ settings, state->tot_items, num_threads, &state->iter_shared.chunk_size);
+
+ if (!settings->use_threading) {
+ task_parallel_iterator_no_threads(settings, state);
+ return;
+ }
+
+ const int chunk_size = state->iter_shared.chunk_size;
+ const int tot_items = state->tot_items;
+ const size_t num_tasks = tot_items >= 0 ?
+ (size_t)min_ii(num_threads, state->tot_items / chunk_size) :
+ (size_t)num_threads;
+
+ BLI_assert(num_tasks > 0);
+ if (num_tasks == 1) {
+ task_parallel_iterator_no_threads(settings, state);
+ return;
+ }
+
+ SpinLock spin_lock;
+ BLI_spin_init(&spin_lock);
+ state->iter_shared.spin_lock = &spin_lock;
+
+ void *userdata_chunk = settings->userdata_chunk;
+ const size_t userdata_chunk_size = settings->userdata_chunk_size;
+ void *userdata_chunk_local = NULL;
+ void *userdata_chunk_array = NULL;
+ const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
+
+ TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
+
+ if (use_userdata_chunk) {
+ userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
+ }
+
+ for (size_t i = 0; i < num_tasks; i++) {
+ if (use_userdata_chunk) {
+ userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
+ memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
+ }
+    /* Push one task per worker; each pulls chunks from the shared iterator state. */
+ BLI_task_pool_push(task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL);
+ }
+
+ BLI_task_pool_work_and_wait(task_pool);
+ BLI_task_pool_free(task_pool);
+
+ if (use_userdata_chunk && (settings->func_reduce != NULL || settings->func_free != NULL)) {
+ for (size_t i = 0; i < num_tasks; i++) {
+ userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
+ if (settings->func_reduce != NULL) {
+ settings->func_reduce(state->userdata, userdata_chunk, userdata_chunk_local);
+ }
+ if (settings->func_free != NULL) {
+ settings->func_free(state->userdata, userdata_chunk_local);
+ }
+ }
+ MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
+ }
+
+ BLI_spin_end(&spin_lock);
+ state->iter_shared.spin_lock = NULL;
+}
+
+/**
+ * This function allows parallelizing for-loops using a generic iterator.
+ *
+ * \param userdata: Common userdata passed to all instances of \a func.
+ * \param iter_func: Callback function used to generate chunks of items.
+ * \param init_item: The initial item, if necessary (may be NULL if unused).
+ * \param init_index: The initial index.
+ * \param tot_items: The total number of items to iterate over
+ * (if unknown, set it to a negative number).
+ * \param func: Callback function.
+ * \param settings: See public API doc of TaskParallelSettings for description of all settings.
+ *
+ * \note Static scheduling is only available when \a tot_items is >= 0.
+ */
+
+void BLI_task_parallel_iterator(void *userdata,
+ TaskParallelIteratorIterFunc iter_func,
+ void *init_item,
+ const int init_index,
+ const int tot_items,
+ TaskParallelIteratorFunc func,
+ const TaskParallelSettings *settings)
+{
+ TaskParallelIteratorState state = {0};
+
+ state.tot_items = tot_items;
+ state.iter_shared.next_index = init_index;
+ state.iter_shared.next_item = init_item;
+ state.iter_shared.is_finished = false;
+ state.userdata = userdata;
+ state.iter_func = iter_func;
+ state.func = func;
+
+ task_parallel_iterator_do(settings, &state);
+}
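Illustration (editor's sketch, not part of the patch): driving BLI_task_parallel_iterator over a hypothetical singly linked list. ExampleNode and the example_* callbacks are invented, and the settings helper BLI_parallel_range_settings_defaults() is assumed to come from BLI_task.h.

struct ExampleNode {
  struct ExampleNode *next;
  float value;
};

static void example_iter_fn(void *__restrict UNUSED(userdata),
                            const TaskParallelTLS *__restrict UNUSED(tls),
                            void **r_next_item,
                            int *r_next_index,
                            bool *r_do_abort)
{
  ExampleNode *node = static_cast<ExampleNode *>(*r_next_item);
  if (node->next == nullptr) {
    *r_do_abort = true;
  }
  *r_next_item = node->next;
  (*r_next_index)++;
}

static void example_item_fn(void *__restrict UNUSED(userdata),
                            void *item,
                            int UNUSED(index),
                            const TaskParallelTLS *__restrict UNUSED(tls))
{
  static_cast<ExampleNode *>(item)->value *= 2.0f;
}

static void example_iterator_usage(ExampleNode *first, int count)
{
  if (first == nullptr) {
    return;
  }

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.use_threading = (count > 1024);

  /* init_item = first, init_index = 0, tot_items = count (-1 would also work if unknown). */
  BLI_task_parallel_iterator(nullptr, example_iter_fn, first, 0, count, example_item_fn, &settings);
}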
+
+static void task_parallel_listbase_get(void *__restrict UNUSED(userdata),
+ const TaskParallelTLS *__restrict UNUSED(tls),
+ void **r_next_item,
+ int *r_next_index,
+ bool *r_do_abort)
+{
+ /* Get current status. */
+ Link *link = *r_next_item;
+
+ if (link->next == NULL) {
+ *r_do_abort = true;
+ }
+ *r_next_item = link->next;
+ (*r_next_index)++;
+}
+
+/**
+ * This function allows parallelizing for-loops over ListBase items.
+ *
+ * \param listbase: The doubly-linked list to loop over.
+ * \param userdata: Common userdata passed to all instances of \a func.
+ * \param func: Callback function.
+ * \param settings: See public API doc of ParallelRangeSettings for description of all settings.
+ *
+ * \note There is no static scheduling here,
+ * since it would need another full loop over items to count them.
+ */
+void BLI_task_parallel_listbase(ListBase *listbase,
+ void *userdata,
+ TaskParallelIteratorFunc func,
+ const TaskParallelSettings *settings)
+{
+ if (BLI_listbase_is_empty(listbase)) {
+ return;
+ }
+
+ TaskParallelIteratorState state = {0};
+
+ state.tot_items = BLI_listbase_count(listbase);
+ state.iter_shared.next_index = 0;
+ state.iter_shared.next_item = listbase->first;
+ state.iter_shared.is_finished = false;
+ state.userdata = userdata;
+ state.iter_func = task_parallel_listbase_get;
+ state.func = func;
+
+ task_parallel_iterator_do(settings, &state);
+}
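Illustration (editor's sketch, not part of the patch): a typical caller of BLI_task_parallel_listbase. ExampleLink and the callback are hypothetical; any struct whose first two members form a Link fits the ListBase convention.

struct ExampleLink {
  struct ExampleLink *next, *prev;
  float value;
};

static void example_listbase_fn(void *__restrict UNUSED(userdata),
                                void *item,
                                int UNUSED(index),
                                const TaskParallelTLS *__restrict UNUSED(tls))
{
  static_cast<ExampleLink *>(item)->value *= 2.0f;
}

static void example_listbase_usage(ListBase *example_list)
{
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  /* Threading only pays off for reasonably large lists. */
  settings.use_threading = (BLI_listbase_count(example_list) > 1024);

  BLI_task_parallel_listbase(example_list, nullptr, example_listbase_fn, &settings);
}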
+
+#undef MALLOCA
+#undef MALLOCA_FREE
+
+typedef struct ParallelMempoolState {
+ void *userdata;
+ TaskParallelMempoolFunc func;
+} ParallelMempoolState;
+
+static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata)
+{
+ ParallelMempoolState *__restrict state = BLI_task_pool_user_data(pool);
+ BLI_mempool_iter *iter = taskdata;
+ MempoolIterData *item;
+
+ while ((item = BLI_mempool_iterstep(iter)) != NULL) {
+ state->func(state->userdata, item);
+ }
+}
+
+/**
+ * This function allows parallelizing for-loops over Mempool items.
+ *
+ * \param mempool: The iterable BLI_mempool to loop over.
+ * \param userdata: Common userdata passed to all instances of \a func.
+ * \param func: Callback function.
+ * \param use_threading: If \a true, actually split and execute the loop in threads,
+ * else just do a sequential for-loop
+ * (this allows the caller to use any kind of test to switch parallelization on or off).
+ *
+ * \note There is no static scheduling here.
+ */
+void BLI_task_parallel_mempool(BLI_mempool *mempool,
+ void *userdata,
+ TaskParallelMempoolFunc func,
+ const bool use_threading)
+{
+ TaskPool *task_pool;
+ ParallelMempoolState state;
+ int i, num_threads, num_tasks;
+
+ if (BLI_mempool_len(mempool) == 0) {
+ return;
+ }
+
+ if (!use_threading) {
+ BLI_mempool_iter iter;
+ BLI_mempool_iternew(mempool, &iter);
+
+ for (void *item = BLI_mempool_iterstep(&iter); item != NULL;
+ item = BLI_mempool_iterstep(&iter)) {
+ func(userdata, item);
+ }
+ return;
+ }
+
+ task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
+ num_threads = BLI_task_scheduler_num_threads();
+
+  /* The idea here is to prevent creating a task for each of the loop iterations,
+   * and instead have tasks which are evenly distributed across CPU cores and
+   * pull the next item to be crunched using the thread-safe BLI_mempool_iter.
+   */
+ num_tasks = num_threads + 2;
+
+ state.userdata = userdata;
+ state.func = func;
+
+ BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool,
+ (size_t)num_tasks);
+
+ for (i = 0; i < num_tasks; i++) {
+    /* Push one task per thread-safe mempool iterator; each consumes items until the pool is exhausted. */
+ BLI_task_pool_push(task_pool, parallel_mempool_func, &mempool_iterators[i], false, NULL);
+ }
+
+ BLI_task_pool_work_and_wait(task_pool);
+ BLI_task_pool_free(task_pool);
+
+ BLI_mempool_iter_threadsafe_free(mempool_iterators);
+}
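Illustration (editor's sketch, not part of the patch): a caller of BLI_task_parallel_mempool. ExampleElem and the callback are hypothetical; the mempool must have been created with BLI_MEMPOOL_ALLOW_ITER.

struct ExampleElem {
  float value;
};

static void example_mempool_fn(void *UNUSED(userdata), MempoolIterData *item)
{
  reinterpret_cast<ExampleElem *>(item)->value += 1.0f;
}

static void example_mempool_usage(BLI_mempool *pool)
{
  const bool use_threading = BLI_mempool_len(pool) > 1024;
  BLI_task_parallel_mempool(pool, nullptr, example_mempool_fn, use_threading);
}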
diff --git a/source/blender/blenlib/intern/task_pool.cc b/source/blender/blenlib/intern/task_pool.cc
new file mode 100644
index 00000000000..cf328ec407c
--- /dev/null
+++ b/source/blender/blenlib/intern/task_pool.cc
@@ -0,0 +1,546 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bli
+ *
+ * Task pool to run tasks in parallel.
+ */
+
+#include <memory>
+#include <stdlib.h>
+#include <utility>
+
+#include "MEM_guardedalloc.h"
+
+#include "DNA_listBase.h"
+
+#include "BLI_math.h"
+#include "BLI_mempool.h"
+#include "BLI_task.h"
+#include "BLI_threads.h"
+
+#ifdef WITH_TBB
+/* Quiet top level deprecation message, unrelated to API usage here. */
+# define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
+# include <tbb/tbb.h>
+#endif
+
+/* Task
+ *
+ * Unit of work to execute. This is a C++ class to work with TBB. */
+
+class Task {
+ public:
+ TaskPool *pool;
+ TaskRunFunction run;
+ void *taskdata;
+ bool free_taskdata;
+ TaskFreeFunction freedata;
+
+ Task(TaskPool *pool,
+ TaskRunFunction run,
+ void *taskdata,
+ bool free_taskdata,
+ TaskFreeFunction freedata)
+ : pool(pool), run(run), taskdata(taskdata), free_taskdata(free_taskdata), freedata(freedata)
+ {
+ }
+
+ ~Task()
+ {
+ if (free_taskdata) {
+ if (freedata) {
+ freedata(pool, taskdata);
+ }
+ else {
+ MEM_freeN(taskdata);
+ }
+ }
+ }
+
+ /* Move constructor.
+ * For performance, ensure we never copy the task and only move it.
+ * For TBB version 2017 and earlier we apply a workaround to make up for
+ * the lack of move constructor support. */
+ Task(Task &&other)
+ : pool(other.pool),
+ run(other.run),
+ taskdata(other.taskdata),
+ free_taskdata(other.free_taskdata),
+ freedata(other.freedata)
+ {
+ other.pool = NULL;
+ other.run = NULL;
+ other.taskdata = NULL;
+ other.free_taskdata = false;
+ other.freedata = NULL;
+ }
+
+#if defined(WITH_TBB) && TBB_INTERFACE_VERSION_MAJOR < 10
+ Task(const Task &other)
+ : pool(other.pool),
+ run(other.run),
+ taskdata(other.taskdata),
+ free_taskdata(other.free_taskdata),
+ freedata(other.freedata)
+ {
+ ((Task &)other).pool = NULL;
+ ((Task &)other).run = NULL;
+ ((Task &)other).taskdata = NULL;
+ ((Task &)other).free_taskdata = false;
+ ((Task &)other).freedata = NULL;
+ }
+#else
+ Task(const Task &other) = delete;
+#endif
+
+ Task &operator=(const Task &other) = delete;
+ Task &operator=(Task &&other) = delete;
+
+ /* Execute task. */
+ void operator()() const
+ {
+#ifdef WITH_TBB
+ tbb::this_task_arena::isolate([this] { run(pool, taskdata); });
+#else
+ run(pool, taskdata);
+#endif
+ }
+};
+
+/* TBB Task Group.
+ *
+ * Subclass since there seems to be no other way to set priority. */
+
+#ifdef WITH_TBB
+class TBBTaskGroup : public tbb::task_group {
+ public:
+ TBBTaskGroup(TaskPriority priority)
+ {
+ switch (priority) {
+ case TASK_PRIORITY_LOW:
+ my_context.set_priority(tbb::priority_low);
+ break;
+ case TASK_PRIORITY_HIGH:
+ my_context.set_priority(tbb::priority_normal);
+ break;
+ }
+ }
+
+ ~TBBTaskGroup()
+ {
+ }
+};
+#endif
+
+/* Task Pool */
+
+typedef enum TaskPoolType {
+ TASK_POOL_TBB,
+ TASK_POOL_TBB_SUSPENDED,
+ TASK_POOL_NO_THREADS,
+ TASK_POOL_BACKGROUND,
+ TASK_POOL_BACKGROUND_SERIAL,
+} TaskPoolType;
+
+struct TaskPool {
+ TaskPoolType type;
+ bool use_threads;
+
+ ThreadMutex user_mutex;
+ void *userdata;
+
+ /* TBB task pool. */
+#ifdef WITH_TBB
+ TBBTaskGroup tbb_group;
+#endif
+ volatile bool is_suspended;
+ BLI_mempool *suspended_mempool;
+
+ /* Background task pool. */
+ ListBase background_threads;
+ ThreadQueue *background_queue;
+ volatile bool background_is_canceling;
+};
+
+/* TBB Task Pool.
+ *
+ * Task pool using the TBB scheduler for tasks. When building without TBB
+ * support or running Blender with -t 1, this reverts to single threaded.
+ *
+ * Tasks may be suspended until all are created, to make it possible to
+ * initialize data structures and create tasks in a single pass. */
+
+static void tbb_task_pool_create(TaskPool *pool, TaskPriority priority)
+{
+ if (pool->type == TASK_POOL_TBB_SUSPENDED) {
+ pool->is_suspended = true;
+ pool->suspended_mempool = BLI_mempool_create(sizeof(Task), 512, 512, BLI_MEMPOOL_ALLOW_ITER);
+ }
+
+#ifdef WITH_TBB
+ if (pool->use_threads) {
+ new (&pool->tbb_group) TBBTaskGroup(priority);
+ }
+#else
+ UNUSED_VARS(priority);
+#endif
+}
+
+static void tbb_task_pool_run(TaskPool *pool, Task &&task)
+{
+ if (pool->is_suspended) {
+ /* Suspended task that will be executed in work_and_wait(). */
+ Task *task_mem = (Task *)BLI_mempool_alloc(pool->suspended_mempool);
+ new (task_mem) Task(std::move(task));
+#ifdef __GNUC__
+ /* Work around apparent compiler bug where task is not properly copied
+ * to task_mem. This appears unrelated to the use of placement new or
+ * move semantics, happens even writing to a plain C struct. Rather the
+ * call into TBB seems to have some indirect effect. */
+ std::atomic_thread_fence(std::memory_order_release);
+#endif
+ }
+#ifdef WITH_TBB
+ else if (pool->use_threads) {
+ /* Execute in TBB task group. */
+ pool->tbb_group.run(std::move(task));
+ }
+#endif
+ else {
+ /* Execute immediately. */
+ task();
+ }
+}
+
+static void tbb_task_pool_work_and_wait(TaskPool *pool)
+{
+ /* Start any suspended task now. */
+ if (pool->suspended_mempool) {
+ pool->is_suspended = false;
+
+ BLI_mempool_iter iter;
+ BLI_mempool_iternew(pool->suspended_mempool, &iter);
+ while (Task *task = (Task *)BLI_mempool_iterstep(&iter)) {
+ tbb_task_pool_run(pool, std::move(*task));
+ }
+
+ BLI_mempool_clear(pool->suspended_mempool);
+ }
+
+#ifdef WITH_TBB
+ if (pool->use_threads) {
+ /* This is called wait(), but internally it can actually do work. This
+ * matters because we don't want recursive usage of task pools to run
+ * out of threads and get stuck. */
+ pool->tbb_group.wait();
+ }
+#endif
+}
+
+static void tbb_task_pool_cancel(TaskPool *pool)
+{
+#ifdef WITH_TBB
+ if (pool->use_threads) {
+ pool->tbb_group.cancel();
+ pool->tbb_group.wait();
+ }
+#else
+ UNUSED_VARS(pool);
+#endif
+}
+
+static bool tbb_task_pool_canceled(TaskPool *pool)
+{
+#ifdef WITH_TBB
+ if (pool->use_threads) {
+ return pool->tbb_group.is_canceling();
+ }
+#else
+ UNUSED_VARS(pool);
+#endif
+
+ return false;
+}
+
+static void tbb_task_pool_free(TaskPool *pool)
+{
+#ifdef WITH_TBB
+ if (pool->use_threads) {
+ pool->tbb_group.~TBBTaskGroup();
+ }
+#endif
+
+ if (pool->suspended_mempool) {
+ BLI_mempool_destroy(pool->suspended_mempool);
+ }
+}
+
+/* Background Task Pool.
+ *
+ * Fallback for running background tasks when building without TBB. */
+
+static void *background_task_run(void *userdata)
+{
+ TaskPool *pool = (TaskPool *)userdata;
+ while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
+ (*task)();
+ task->~Task();
+ MEM_freeN(task);
+ }
+ return NULL;
+}
+
+static void background_task_pool_create(TaskPool *pool)
+{
+ pool->background_queue = BLI_thread_queue_init();
+ BLI_threadpool_init(&pool->background_threads, background_task_run, 1);
+}
+
+static void background_task_pool_run(TaskPool *pool, Task &&task)
+{
+ Task *task_mem = (Task *)MEM_mallocN(sizeof(Task), __func__);
+ new (task_mem) Task(std::move(task));
+ BLI_thread_queue_push(pool->background_queue, task_mem);
+
+ if (BLI_available_threads(&pool->background_threads)) {
+ BLI_threadpool_insert(&pool->background_threads, pool);
+ }
+}
+
+static void background_task_pool_work_and_wait(TaskPool *pool)
+{
+ /* Signal background thread to stop waiting for new tasks if none are
+ * left, and wait for tasks and thread to finish. */
+ BLI_thread_queue_nowait(pool->background_queue);
+ BLI_thread_queue_wait_finish(pool->background_queue);
+ BLI_threadpool_clear(&pool->background_threads);
+}
+
+static void background_task_pool_cancel(TaskPool *pool)
+{
+ pool->background_is_canceling = true;
+
+ /* Remove tasks not yet started by background thread. */
+ BLI_thread_queue_nowait(pool->background_queue);
+ while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
+ task->~Task();
+ MEM_freeN(task);
+ }
+
+ /* Let background thread finish or cancel task it is working on. */
+ BLI_threadpool_remove(&pool->background_threads, pool);
+ pool->background_is_canceling = false;
+}
+
+static bool background_task_pool_canceled(TaskPool *pool)
+{
+ return pool->background_is_canceling;
+}
+
+static void background_task_pool_free(TaskPool *pool)
+{
+ background_task_pool_work_and_wait(pool);
+
+ BLI_threadpool_end(&pool->background_threads);
+ BLI_thread_queue_free(pool->background_queue);
+}
+
+/* Task Pool */
+
+static TaskPool *task_pool_create_ex(void *userdata, TaskPoolType type, TaskPriority priority)
+{
+ const bool use_threads = BLI_task_scheduler_num_threads() > 1 && type != TASK_POOL_NO_THREADS;
+
+ /* Background task pool uses regular TBB scheduling if available. Only when
+ * building without TBB or running with -t 1 do we need to ensure these tasks
+ * do not block the main thread. */
+ if (type == TASK_POOL_BACKGROUND && use_threads) {
+ type = TASK_POOL_TBB;
+ }
+
+ /* Allocate task pool. */
+ TaskPool *pool = (TaskPool *)MEM_callocN(sizeof(TaskPool), "TaskPool");
+
+ pool->type = type;
+ pool->use_threads = use_threads;
+
+ pool->userdata = userdata;
+ BLI_mutex_init(&pool->user_mutex);
+
+ switch (type) {
+ case TASK_POOL_TBB:
+ case TASK_POOL_TBB_SUSPENDED:
+ case TASK_POOL_NO_THREADS:
+ tbb_task_pool_create(pool, priority);
+ break;
+ case TASK_POOL_BACKGROUND:
+ case TASK_POOL_BACKGROUND_SERIAL:
+ background_task_pool_create(pool);
+ break;
+ }
+
+ return pool;
+}
+
+/**
+ * Create a normal task pool. Tasks will be executed as soon as they are added.
+ */
+TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority)
+{
+ return task_pool_create_ex(userdata, TASK_POOL_TBB, priority);
+}
+
+/**
+ * Create a background task pool.
+ * In a multi-threaded context there is no difference from #BLI_task_pool_create(),
+ * but in the single-threaded case it is guaranteed to have at least one worker thread to run on
+ * (i.e. you don't have to call #BLI_task_pool_work_and_wait
+ * on it to be sure it will be processed).
+ *
+ * \note Background pools are non-recursive
+ * (that is, you should not create other background pools in tasks assigned to a background pool;
+ * they could end up never being executed, since the 'fallback' background thread is already
+ * busy with the parent task in the single-threaded context).
+ */
+TaskPool *BLI_task_pool_create_background(void *userdata, TaskPriority priority)
+{
+ return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND, priority);
+}
+
+/**
+ * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
+ * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
+ * overhead when pushing a huge number of small initial tasks from the main thread.
+ */
+TaskPool *BLI_task_pool_create_suspended(void *userdata, TaskPriority priority)
+{
+ return task_pool_create_ex(userdata, TASK_POOL_TBB_SUSPENDED, priority);
+}
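Illustration (editor's sketch, not part of the patch): the push / work_and_wait pattern on a suspended pool, using only functions declared in this file. The callback and data are hypothetical.

static void example_task_run(TaskPool *__restrict UNUSED(pool), void *taskdata)
{
  int *value = static_cast<int *>(taskdata);
  *value *= 2;
}

static void example_suspended_pool_usage(int *values, int values_num)
{
  TaskPool *pool = BLI_task_pool_create_suspended(nullptr, TASK_PRIORITY_HIGH);

  /* Nothing is scheduled while pushing; tasks only start in work_and_wait(). */
  for (int i = 0; i < values_num; i++) {
    BLI_task_pool_push(pool, example_task_run, &values[i], false, nullptr);
  }

  BLI_task_pool_work_and_wait(pool);
  BLI_task_pool_free(pool);
}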
+
+/**
+ * Single-threaded task pool that executes pushed tasks immediately, for
+ * debugging purposes.
+ */
+TaskPool *BLI_task_pool_create_no_threads(void *userdata)
+{
+ return task_pool_create_ex(userdata, TASK_POOL_NO_THREADS, TASK_PRIORITY_HIGH);
+}
+
+/**
+ * Task pool that executes one task after the other, possibly on different threads
+ * but never in parallel.
+ */
+TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority priority)
+{
+ return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND_SERIAL, priority);
+}
+
+void BLI_task_pool_free(TaskPool *pool)
+{
+ switch (pool->type) {
+ case TASK_POOL_TBB:
+ case TASK_POOL_TBB_SUSPENDED:
+ case TASK_POOL_NO_THREADS:
+ tbb_task_pool_free(pool);
+ break;
+ case TASK_POOL_BACKGROUND:
+ case TASK_POOL_BACKGROUND_SERIAL:
+ background_task_pool_free(pool);
+ break;
+ }
+
+ BLI_mutex_end(&pool->user_mutex);
+
+ MEM_freeN(pool);
+}
+
+void BLI_task_pool_push(TaskPool *pool,
+ TaskRunFunction run,
+ void *taskdata,
+ bool free_taskdata,
+ TaskFreeFunction freedata)
+{
+ Task task(pool, run, taskdata, free_taskdata, freedata);
+
+ switch (pool->type) {
+ case TASK_POOL_TBB:
+ case TASK_POOL_TBB_SUSPENDED:
+ case TASK_POOL_NO_THREADS:
+ tbb_task_pool_run(pool, std::move(task));
+ break;
+ case TASK_POOL_BACKGROUND:
+ case TASK_POOL_BACKGROUND_SERIAL:
+ background_task_pool_run(pool, std::move(task));
+ break;
+ }
+}
+
+void BLI_task_pool_work_and_wait(TaskPool *pool)
+{
+ switch (pool->type) {
+ case TASK_POOL_TBB:
+ case TASK_POOL_TBB_SUSPENDED:
+ case TASK_POOL_NO_THREADS:
+ tbb_task_pool_work_and_wait(pool);
+ break;
+ case TASK_POOL_BACKGROUND:
+ case TASK_POOL_BACKGROUND_SERIAL:
+ background_task_pool_work_and_wait(pool);
+ break;
+ }
+}
+
+void BLI_task_pool_cancel(TaskPool *pool)
+{
+ switch (pool->type) {
+ case TASK_POOL_TBB:
+ case TASK_POOL_TBB_SUSPENDED:
+ case TASK_POOL_NO_THREADS:
+ tbb_task_pool_cancel(pool);
+ break;
+ case TASK_POOL_BACKGROUND:
+ case TASK_POOL_BACKGROUND_SERIAL:
+ background_task_pool_cancel(pool);
+ break;
+ }
+}
+
+bool BLI_task_pool_canceled(TaskPool *pool)
+{
+ switch (pool->type) {
+ case TASK_POOL_TBB:
+ case TASK_POOL_TBB_SUSPENDED:
+ case TASK_POOL_NO_THREADS:
+ return tbb_task_pool_canceled(pool);
+ case TASK_POOL_BACKGROUND:
+ case TASK_POOL_BACKGROUND_SERIAL:
+ return background_task_pool_canceled(pool);
+ }
+ BLI_assert("BLI_task_pool_canceled: Control flow should not come here!");
+ return false;
+}
+
+void *BLI_task_pool_user_data(TaskPool *pool)
+{
+ return pool->userdata;
+}
+
+ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
+{
+ return &pool->user_mutex;
+}
diff --git a/source/blender/blenlib/intern/task_range.cc b/source/blender/blenlib/intern/task_range.cc
new file mode 100644
index 00000000000..67d8960434e
--- /dev/null
+++ b/source/blender/blenlib/intern/task_range.cc
@@ -0,0 +1,168 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bli
+ *
+ * Task parallel range functions.
+ */
+
+#include <stdlib.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "DNA_listBase.h"
+
+#include "BLI_task.h"
+#include "BLI_threads.h"
+
+#include "atomic_ops.h"
+
+#ifdef WITH_TBB
+/* Quiet top level deprecation message, unrelated to API usage here. */
+# define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
+# include <tbb/tbb.h>
+#endif
+
+#ifdef WITH_TBB
+
+/* Functor for running TBB parallel_for and parallel_reduce. */
+struct RangeTask {
+ TaskParallelRangeFunc func;
+ void *userdata;
+ const TaskParallelSettings *settings;
+
+ void *userdata_chunk;
+
+ /* Root constructor. */
+ RangeTask(TaskParallelRangeFunc func, void *userdata, const TaskParallelSettings *settings)
+ : func(func), userdata(userdata), settings(settings)
+ {
+ init_chunk(settings->userdata_chunk);
+ }
+
+ /* Copy constructor. */
+ RangeTask(const RangeTask &other)
+ : func(other.func), userdata(other.userdata), settings(other.settings)
+ {
+ init_chunk(settings->userdata_chunk);
+ }
+
+ /* Splitting constructor for parallel reduce. */
+ RangeTask(RangeTask &other, tbb::split)
+ : func(other.func), userdata(other.userdata), settings(other.settings)
+ {
+ init_chunk(settings->userdata_chunk);
+ }
+
+ ~RangeTask()
+ {
+ if (settings->func_free != NULL) {
+ settings->func_free(userdata, userdata_chunk);
+ }
+ MEM_SAFE_FREE(userdata_chunk);
+ }
+
+ void init_chunk(void *from_chunk)
+ {
+ if (from_chunk) {
+ userdata_chunk = MEM_mallocN(settings->userdata_chunk_size, "RangeTask");
+ memcpy(userdata_chunk, from_chunk, settings->userdata_chunk_size);
+ }
+ else {
+ userdata_chunk = NULL;
+ }
+ }
+
+ void operator()(const tbb::blocked_range<int> &r) const
+ {
+ tbb::this_task_arena::isolate([this, r] {
+ TaskParallelTLS tls;
+ tls.userdata_chunk = userdata_chunk;
+ for (int i = r.begin(); i != r.end(); ++i) {
+ func(userdata, i, &tls);
+ }
+ });
+ }
+
+ void join(const RangeTask &other)
+ {
+ settings->func_reduce(userdata, userdata_chunk, other.userdata_chunk);
+ }
+};
+
+#endif
+
+void BLI_task_parallel_range(const int start,
+ const int stop,
+ void *userdata,
+ TaskParallelRangeFunc func,
+ const TaskParallelSettings *settings)
+{
+#ifdef WITH_TBB
+ /* Multithreading. */
+ if (settings->use_threading && BLI_task_scheduler_num_threads() > 1) {
+ RangeTask task(func, userdata, settings);
+ const size_t grainsize = MAX2(settings->min_iter_per_thread, 1);
+ const tbb::blocked_range<int> range(start, stop, grainsize);
+
+ if (settings->func_reduce) {
+ parallel_reduce(range, task);
+ if (settings->userdata_chunk) {
+ memcpy(settings->userdata_chunk, task.userdata_chunk, settings->userdata_chunk_size);
+ }
+ }
+ else {
+ parallel_for(range, task);
+ }
+ return;
+ }
+#endif
+
+ /* Single threaded. Nothing to reduce as everything is accumulated into the
+ * main userdata chunk directly. */
+ TaskParallelTLS tls;
+ tls.userdata_chunk = settings->userdata_chunk;
+ for (int i = start; i < stop; i++) {
+ func(userdata, i, &tls);
+ }
+ if (settings->func_free != NULL) {
+ settings->func_free(userdata, settings->userdata_chunk);
+ }
+}
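Illustration (editor's sketch, not part of the patch): a sum reduction with BLI_task_parallel_range. The Example* names are invented; BLI_parallel_range_settings_defaults() and the reduce callback signature are assumed to match BLI_task.h.

struct ExampleSumData {
  const float *values;
};

struct ExampleSumTLS {
  float sum;
};

static void example_range_fn(void *__restrict userdata,
                             const int i,
                             const TaskParallelTLS *__restrict tls)
{
  const ExampleSumData *data = static_cast<const ExampleSumData *>(userdata);
  ExampleSumTLS *local = static_cast<ExampleSumTLS *>(tls->userdata_chunk);
  local->sum += data->values[i];
}

static void example_range_reduce(const void *__restrict UNUSED(userdata),
                                 void *__restrict chunk_join,
                                 void *__restrict chunk)
{
  static_cast<ExampleSumTLS *>(chunk_join)->sum += static_cast<ExampleSumTLS *>(chunk)->sum;
}

static float example_range_usage(const float *values, int values_num)
{
  ExampleSumData data = {values};
  ExampleSumTLS tls_init = {0.0f};

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.userdata_chunk = &tls_init;
  settings.userdata_chunk_size = sizeof(tls_init);
  settings.func_reduce = example_range_reduce;

  BLI_task_parallel_range(0, values_num, &data, example_range_fn, &settings);
  /* Both code paths above leave the accumulated result in the main chunk. */
  return tls_init.sum;
}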
+
+int BLI_task_parallel_thread_id(const TaskParallelTLS *UNUSED(tls))
+{
+#ifdef WITH_TBB
+ /* Get a unique thread ID for texture nodes. In the future we should get rid
+ * of the thread ID and change texture evaluation to not require per-thread
+ * storage that can't be efficiently allocated on the stack. */
+ static tbb::enumerable_thread_specific<int> tbb_thread_id(-1);
+ static int tbb_thread_id_counter = 0;
+
+ int &thread_id = tbb_thread_id.local();
+ if (thread_id == -1) {
+ thread_id = atomic_fetch_and_add_int32(&tbb_thread_id_counter, 1);
+ if (thread_id >= BLENDER_MAX_THREADS) {
+ BLI_assert(!"Maximum number of threads exceeded for sculpting");
+ thread_id = thread_id % BLENDER_MAX_THREADS;
+ }
+ }
+ return thread_id;
+#else
+ return 0;
+#endif
+}
diff --git a/source/blender/blenlib/intern/task_scheduler.cc b/source/blender/blenlib/intern/task_scheduler.cc
new file mode 100644
index 00000000000..b0245da0385
--- /dev/null
+++ b/source/blender/blenlib/intern/task_scheduler.cc
@@ -0,0 +1,78 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bli
+ *
+ * Task scheduler initialization.
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_task.h"
+#include "BLI_threads.h"
+
+#ifdef WITH_TBB
+/* Quiet top level deprecation message, unrelated to API usage here. */
+# define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
+# include <tbb/tbb.h>
+# if TBB_INTERFACE_VERSION_MAJOR >= 10
+# define WITH_TBB_GLOBAL_CONTROL
+# endif
+#endif
+
+/* Task Scheduler */
+
+static int task_scheduler_num_threads = 1;
+#ifdef WITH_TBB_GLOBAL_CONTROL
+static tbb::global_control *task_scheduler_global_control = nullptr;
+#endif
+
+void BLI_task_scheduler_init()
+{
+#ifdef WITH_TBB_GLOBAL_CONTROL
+ const int num_threads_override = BLI_system_num_threads_override_get();
+
+ if (num_threads_override > 0) {
+    /* Override number of threads. This setting is in effect for the lifetime
+     * of the tbb::global_control object, so we allocate it on the heap. */
+ task_scheduler_global_control = OBJECT_GUARDED_NEW(
+ tbb::global_control, tbb::global_control::max_allowed_parallelism, num_threads_override);
+ task_scheduler_num_threads = num_threads_override;
+ }
+ else {
+ /* Let TBB choose the number of threads. For (legacy) code that calls
+ * BLI_task_scheduler_num_threads() we provide the system thread count.
+ * Ideally such code should be rewritten not to use the number of threads
+ * at all. */
+ task_scheduler_num_threads = BLI_system_thread_count();
+ }
+#else
+ task_scheduler_num_threads = BLI_system_thread_count();
+#endif
+}
+
+void BLI_task_scheduler_exit()
+{
+#ifdef WITH_TBB_GLOBAL_CONTROL
+ OBJECT_GUARDED_DELETE(task_scheduler_global_control, tbb::global_control);
+#endif
+}
+
+int BLI_task_scheduler_num_threads()
+{
+ return task_scheduler_num_threads;
+}
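Illustration (editor's sketch, not part of the patch): the expected init/query/exit sequence. The exact placement in Blender's startup code is an assumption; only the three BLI_task_scheduler_* calls come from this file.

static void example_scheduler_lifecycle()
{
  /* Typically done once at startup, after command line parsing so that a
   * -t <n> thread override is already known. */
  BLI_task_scheduler_init();

  const int threads_num = BLI_task_scheduler_num_threads();
  (void)threads_num; /* E.g. used to size per-thread buffers in legacy code. */

  /* ... run tasks ... */

  BLI_task_scheduler_exit();
}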
diff --git a/source/blender/blenlib/intern/threads.c b/source/blender/blenlib/intern/threads.c
index 0a574ea34ca..be43c27e945 100644
--- a/source/blender/blenlib/intern/threads.c
+++ b/source/blender/blenlib/intern/threads.c
@@ -61,9 +61,6 @@ extern pthread_key_t gomp_tls_key;
static void *thread_tls_data;
#endif
-/* We're using one global task scheduler for all kind of tasks. */
-static TaskScheduler *task_scheduler = NULL;
-
/* ********** basic thread control API ************
*
* Many thread cases have an X amount of jobs, and only an Y amount of
@@ -93,11 +90,9 @@ static TaskScheduler *task_scheduler = NULL;
* for (go over all jobs)
* if (job is ready) {
* if (job was not removed) {
- * BLI_threadpool_remove(&lb, job);
- * }
+ * BLI_threadpool_remove(&lb, job); * }
* }
- * else cont = 1;
- * }
+ * else cont = 1; * }
* // conditions to exit loop
* if (if escape loop event) {
* if (BLI_available_threadslots(&lb) == maxthreads) {
@@ -109,7 +104,6 @@ static TaskScheduler *task_scheduler = NULL;
* BLI_threadpool_end(&lb);
*
************************************************ */
-static SpinLock _malloc_lock;
static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -137,21 +131,9 @@ typedef struct ThreadSlot {
int avail;
} ThreadSlot;
-static void BLI_lock_malloc_thread(void)
-{
- BLI_spin_lock(&_malloc_lock);
-}
-
-static void BLI_unlock_malloc_thread(void)
-{
- BLI_spin_unlock(&_malloc_lock);
-}
-
void BLI_threadapi_init(void)
{
mainid = pthread_self();
-
- BLI_spin_init(&_malloc_lock);
if (numaAPI_Initialize() == NUMAAPI_SUCCESS) {
is_numa_available = true;
}
@@ -159,25 +141,6 @@ void BLI_threadapi_init(void)
void BLI_threadapi_exit(void)
{
- if (task_scheduler) {
- BLI_task_scheduler_free(task_scheduler);
- task_scheduler = NULL;
- }
- BLI_spin_end(&_malloc_lock);
-}
-
-TaskScheduler *BLI_task_scheduler_get(void)
-{
- if (task_scheduler == NULL) {
- int tot_thread = BLI_system_thread_count();
-
- /* Do a lazy initialization, so it happens after
- * command line arguments parsing
- */
- task_scheduler = BLI_task_scheduler_create(tot_thread);
- }
-
- return task_scheduler;
}
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
@@ -208,8 +171,6 @@ void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int t
unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1);
if (level == 0) {
- MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
-
#ifdef USE_APPLE_OMP_FIX
/* workaround for Apple gcc 4.2.1 omp vs background thread bug,
* we copy gomp thread local storage pointer to setting it again
@@ -336,11 +297,6 @@ void BLI_threadpool_end(ListBase *threadbase)
}
BLI_freelistN(threadbase);
}
-
- unsigned int level = atomic_sub_and_fetch_u(&thread_levels, 1);
- if (level == 0) {
- MEM_set_lock_callback(NULL, NULL);
- }
}
/* System Information */
@@ -834,29 +790,6 @@ void BLI_thread_queue_wait_finish(ThreadQueue *queue)
pthread_mutex_unlock(&queue->mutex);
}
-/* ************************************************ */
-
-void BLI_threaded_malloc_begin(void)
-{
- unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1);
- if (level == 0) {
- MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
- /* There is a little chance that two threads will need to access to a
- * scheduler which was not yet created from main thread. which could
- * cause scheduler created multiple times.
- */
- BLI_task_scheduler_get();
- }
-}
-
-void BLI_threaded_malloc_end(void)
-{
- unsigned int level = atomic_sub_and_fetch_u(&thread_levels, 1);
- if (level == 0) {
- MEM_set_lock_callback(NULL, NULL);
- }
-}
-
/* **** Special functions to help performance on crazy NUMA setups. **** */
#if 0 /* UNUSED */
diff --git a/source/blender/blenlib/intern/timeit.cc b/source/blender/blenlib/intern/timeit.cc
new file mode 100644
index 00000000000..bab8fd81746
--- /dev/null
+++ b/source/blender/blenlib/intern/timeit.cc
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "BLI_timeit.hh"
+
+namespace BLI {
+namespace Timeit {
+
+void print_duration(Nanoseconds duration)
+{
+ if (duration < std::chrono::microseconds(100)) {
+ std::cout << duration.count() << " ns";
+ }
+ else if (duration < std::chrono::seconds(5)) {
+ std::cout << duration.count() / 1.0e6 << " ms";
+ }
+ else {
+ std::cout << duration.count() / 1.0e9 << " s";
+ }
+}
+
+} // namespace Timeit
+} // namespace BLI
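Illustration (editor's sketch, not part of the patch): calling print_duration from timing code. It assumes Nanoseconds in BLI_timeit.hh is a std::chrono nanosecond duration and that <iostream> is pulled in via that header.

#include <chrono>

static void example_print_elapsed()
{
  const auto start = std::chrono::steady_clock::now();

  /* ... work to measure ... */

  const auto elapsed = std::chrono::duration_cast<BLI::Timeit::Nanoseconds>(
      std::chrono::steady_clock::now() - start);
  std::cout << "example took ";
  BLI::Timeit::print_duration(elapsed);
  std::cout << '\n';
}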