Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCampbell Barton <campbell@blender.org>2022-03-30 09:26:42 +0300
committerCampbell Barton <campbell@blender.org>2022-03-30 10:01:22 +0300
commita8ec7845e0bdb9e63e9d3dbd7f4cd7caad36b5a2 (patch)
tree4531232281ddc4cda4df3fb1ccc0822018fe5682 /source/blender/blenlib/intern
parentaf3aaf80344e745e6c207102941513cb631194c3 (diff)
Cleanup: use "num" as a suffix in: source/blender/blenlib
Also replace "num" with "number" when it's not used to denote the number of items, and with "digits" when digits in a string are being manipulated.
Diffstat (limited to 'source/blender/blenlib/intern')
-rw-r--r--source/blender/blenlib/intern/BLI_heap.c14
-rw-r--r--source/blender/blenlib/intern/BLI_heap_simple.c4
-rw-r--r--source/blender/blenlib/intern/BLI_kdopbvh.c214
-rw-r--r--source/blender/blenlib/intern/BLI_mempool.c16
-rw-r--r--source/blender/blenlib/intern/BLI_mempool_private.h2
-rw-r--r--source/blender/blenlib/intern/bitmap.c12
-rw-r--r--source/blender/blenlib/intern/convexhull_2d.c22
-rw-r--r--source/blender/blenlib/intern/delaunay_2d.cc28
-rw-r--r--source/blender/blenlib/intern/filereader_zstd.c34
-rw-r--r--source/blender/blenlib/intern/gsqueue.c10
-rw-r--r--source/blender/blenlib/intern/jitter_2d.c14
-rw-r--r--source/blender/blenlib/intern/math_geom.c4
-rw-r--r--source/blender/blenlib/intern/mesh_boolean.cc22
-rw-r--r--source/blender/blenlib/intern/mesh_intersect.cc50
-rw-r--r--source/blender/blenlib/intern/path_util.c34
-rw-r--r--source/blender/blenlib/intern/polyfill_2d.c142
-rw-r--r--source/blender/blenlib/intern/polyfill_2d_beautify.c6
-rw-r--r--source/blender/blenlib/intern/rand.cc24
-rw-r--r--source/blender/blenlib/intern/scanfill.c92
-rw-r--r--source/blender/blenlib/intern/scanfill_utils.c6
-rw-r--r--source/blender/blenlib/intern/stack.c24
-rw-r--r--source/blender/blenlib/intern/string.c8
-rw-r--r--source/blender/blenlib/intern/task_iterator.c70
-rw-r--r--source/blender/blenlib/intern/task_scheduler.cc8
-rw-r--r--source/blender/blenlib/intern/threads.cc10
-rw-r--r--source/blender/blenlib/intern/uuid.cc4
26 files changed, 437 insertions, 437 deletions
diff --git a/source/blender/blenlib/intern/BLI_heap.c b/source/blender/blenlib/intern/BLI_heap.c
index cf8073d4ba4..0bc50f62232 100644
--- a/source/blender/blenlib/intern/BLI_heap.c
+++ b/source/blender/blenlib/intern/BLI_heap.c
@@ -35,7 +35,7 @@ struct HeapNode_Chunk {
* or we allocate past the reserved number.
*
* \note Optimize number for 64kb allocs.
- * \note keep type in sync with tot_nodes in heap_node_alloc_chunk.
+ * \note keep type in sync with nodes_num in heap_node_alloc_chunk.
*/
#define HEAP_CHUNK_DEFAULT_NUM \
((uint)((MEM_SIZE_OPTIMAL((1 << 16) - sizeof(struct HeapNode_Chunk))) / sizeof(HeapNode)))
@@ -137,13 +137,13 @@ static void heap_up(Heap *heap, uint i)
/** \name Internal Memory Management
* \{ */
-static struct HeapNode_Chunk *heap_node_alloc_chunk(uint tot_nodes,
+static struct HeapNode_Chunk *heap_node_alloc_chunk(uint nodes_num,
struct HeapNode_Chunk *chunk_prev)
{
struct HeapNode_Chunk *chunk = MEM_mallocN(
- sizeof(struct HeapNode_Chunk) + (sizeof(HeapNode) * tot_nodes), __func__);
+ sizeof(struct HeapNode_Chunk) + (sizeof(HeapNode) * nodes_num), __func__);
chunk->prev = chunk_prev;
- chunk->bufsize = tot_nodes;
+ chunk->bufsize = nodes_num;
chunk->size = 0;
return chunk;
}
@@ -179,16 +179,16 @@ static void heap_node_free(Heap *heap, HeapNode *node)
/** \name Public Heap API
* \{ */
-Heap *BLI_heap_new_ex(uint tot_reserve)
+Heap *BLI_heap_new_ex(uint reserve_num)
{
Heap *heap = MEM_mallocN(sizeof(Heap), __func__);
/* ensure we have at least one so we can keep doubling it */
heap->size = 0;
- heap->bufsize = MAX2(1u, tot_reserve);
+ heap->bufsize = MAX2(1u, reserve_num);
heap->tree = MEM_mallocN(heap->bufsize * sizeof(HeapNode *), "BLIHeapTree");
heap->nodes.chunk = heap_node_alloc_chunk(
- (tot_reserve > 1) ? tot_reserve : HEAP_CHUNK_DEFAULT_NUM, NULL);
+ (reserve_num > 1) ? reserve_num : HEAP_CHUNK_DEFAULT_NUM, NULL);
heap->nodes.free = NULL;
return heap;
diff --git a/source/blender/blenlib/intern/BLI_heap_simple.c b/source/blender/blenlib/intern/BLI_heap_simple.c
index 0876888bcc5..b6c045cbefa 100644
--- a/source/blender/blenlib/intern/BLI_heap_simple.c
+++ b/source/blender/blenlib/intern/BLI_heap_simple.c
@@ -133,12 +133,12 @@ static void heapsimple_up(HeapSimple *heap, uint i, float active_val, void *acti
/** \name Public HeapSimple API
* \{ */
-HeapSimple *BLI_heapsimple_new_ex(uint tot_reserve)
+HeapSimple *BLI_heapsimple_new_ex(uint reserve_num)
{
HeapSimple *heap = MEM_mallocN(sizeof(HeapSimple), __func__);
/* ensure we have at least one so we can keep doubling it */
heap->size = 0;
- heap->bufsize = MAX2(1u, tot_reserve);
+ heap->bufsize = MAX2(1u, reserve_num);
heap->tree = MEM_mallocN(heap->bufsize * sizeof(HeapSimpleNode), "BLIHeapSimpleTree");
return heap;
}
diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c
index 0c3497d3edf..0f52c84c45e 100644
--- a/source/blender/blenlib/intern/BLI_kdopbvh.c
+++ b/source/blender/blenlib/intern/BLI_kdopbvh.c
@@ -68,7 +68,7 @@ typedef struct BVHNode {
#endif
float *bv; /* Bounding volume of all nodes, max 13 axis */
int index; /* face, edge, vertex index */
- char totnode; /* how many nodes are used, used for speedup */
+ char node_num; /* how many nodes are used, used for speedup */
char main_axis; /* Axis used to split this node */
} BVHNode;
@@ -79,8 +79,8 @@ struct BVHTree {
BVHNode **nodechild; /* pre-alloc children for nodes */
float *nodebv; /* pre-alloc bounding-volumes for nodes */
float epsilon; /* Epsilon is used for inflation of the K-DOP. */
- int totleaf; /* leafs */
- int totbranch;
+ int leaf_num; /* leafs */
+ int branch_num;
axis_t start_axis, stop_axis; /* bvhtree_kdop_axes array indices according to axis */
axis_t axis; /* KDOP type (6 => OBB, 7 => AABB, ...) */
char tree_type; /* type of tree (4 => quad-tree). */
@@ -325,8 +325,8 @@ static void build_skip_links(BVHTree *tree, BVHNode *node, BVHNode *left, BVHNod
node->skip[0] = left;
node->skip[1] = right;
- for (i = 0; i < node->totnode; i++) {
- if (i + 1 < node->totnode) {
+ for (i = 0; i < node->node_num; i++) {
+ if (i + 1 < node->node_num) {
build_skip_links(tree, node->children[i], left, node->children[i + 1]);
}
else {
@@ -485,9 +485,9 @@ static void bvhtree_info(BVHTree *tree)
tree->axis,
tree->epsilon);
printf("nodes = %d, branches = %d, leafs = %d\n",
- tree->totbranch + tree->totleaf,
- tree->totbranch,
- tree->totleaf);
+ tree->branch_num + tree->leaf_num,
+ tree->branch_num,
+ tree->leaf_num);
printf(
"Memory per node = %ubytes\n",
(uint)(sizeof(BVHNode) + sizeof(BVHNode *) * tree->tree_type + sizeof(float) * tree->axis));
@@ -497,7 +497,7 @@ static void bvhtree_info(BVHTree *tree)
(uint)(sizeof(BVHTree) + MEM_allocN_len(tree->nodes) + MEM_allocN_len(tree->nodearray) +
MEM_allocN_len(tree->nodechild) + MEM_allocN_len(tree->nodebv)));
- bvhtree_print_tree(tree, tree->nodes[tree->totleaf], 0);
+ bvhtree_print_tree(tree, tree->nodes[tree->leaf_num], 0);
}
#endif /* USE_PRINT_TREE */
@@ -508,7 +508,7 @@ static void bvhtree_verify(BVHTree *tree)
int i, j, check = 0;
/* check the pointer list */
- for (i = 0; i < tree->totleaf; i++) {
+ for (i = 0; i < tree->leaf_num; i++) {
if (tree->nodes[i]->parent == NULL) {
printf("Leaf has no parent: %d\n", i);
}
@@ -526,7 +526,7 @@ static void bvhtree_verify(BVHTree *tree)
}
/* check the leaf list */
- for (i = 0; i < tree->totleaf; i++) {
+ for (i = 0; i < tree->leaf_num; i++) {
if (tree->nodearray[i].parent == NULL) {
printf("Leaf has no parent: %d\n", i);
}
@@ -544,9 +544,9 @@ static void bvhtree_verify(BVHTree *tree)
}
printf("branches: %d, leafs: %d, total: %d\n",
- tree->totbranch,
- tree->totleaf,
- tree->totbranch + tree->totleaf);
+ tree->branch_num,
+ tree->leaf_num,
+ tree->branch_num + tree->leaf_num);
}
#endif /* USE_VERIFY_TREE */
@@ -555,7 +555,7 @@ static void bvhtree_verify(BVHTree *tree)
* (basically this is only method to calculate pow(k, n) in O(1).. and stuff like that) */
typedef struct BVHBuildHelper {
int tree_type;
- int totleafs;
+ int leafs_num;
/** Min number of leafs that are achievable from a node at depth `N`. */
int leafs_per_child[32];
@@ -573,11 +573,11 @@ static void build_implicit_tree_helper(const BVHTree *tree, BVHBuildHelper *data
int remain;
int nnodes;
- data->totleafs = tree->totleaf;
+ data->leafs_num = tree->leaf_num;
data->tree_type = tree->tree_type;
- /* Calculate the smallest tree_type^n such that tree_type^n >= num_leafs */
- for (data->leafs_per_child[0] = 1; data->leafs_per_child[0] < data->totleafs;
+ /* Calculate the smallest tree_type^n such that tree_type^n >= leafs_num */
+ for (data->leafs_per_child[0] = 1; data->leafs_per_child[0] < data->leafs_num;
data->leafs_per_child[0] *= data->tree_type) {
/* pass */
}
@@ -589,7 +589,7 @@ static void build_implicit_tree_helper(const BVHTree *tree, BVHBuildHelper *data
data->leafs_per_child[depth] = data->leafs_per_child[depth - 1] / data->tree_type;
}
- remain = data->totleafs - data->leafs_per_child[1];
+ remain = data->leafs_num - data->leafs_per_child[1];
nnodes = (remain + data->tree_type - 2) / (data->tree_type - 1);
data->remain_leafs = remain + nnodes;
}
@@ -604,7 +604,7 @@ static int implicit_leafs_index(const BVHBuildHelper *data, const int depth, con
return min_leaf_index;
}
if (data->leafs_per_child[depth]) {
- return data->totleafs -
+ return data->leafs_num -
(data->branches_on_level[depth - 1] - child_index) * data->leafs_per_child[depth];
}
return data->remain_leafs;
@@ -725,7 +725,7 @@ static void non_recursive_bvh_div_nodes_task_cb(void *__restrict userdata,
split_leafs(data->leafs_array, nth_positions, data->tree_type, split_axis);
- /* Setup children and totnode counters
+ /* Setup `children` and `node_num` counters
* Not really needed but currently most of BVH code
* relies on having an explicit children structure */
for (k = 0; k < data->tree_type; k++) {
@@ -750,7 +750,7 @@ static void non_recursive_bvh_div_nodes_task_cb(void *__restrict userdata,
break;
}
}
- parent->totnode = (char)k;
+ parent->node_num = (char)k;
}
/**
@@ -774,7 +774,7 @@ static void non_recursive_bvh_div_nodes_task_cb(void *__restrict userdata,
static void non_recursive_bvh_div_nodes(const BVHTree *tree,
BVHNode *branches_array,
BVHNode **leafs_array,
- int num_leafs)
+ int leafs_num)
{
int i;
@@ -782,7 +782,7 @@ static void non_recursive_bvh_div_nodes(const BVHTree *tree,
/* this value is 0 (on binary trees) and negative on the others */
const int tree_offset = 2 - tree->tree_type;
- const int num_branches = implicit_needed_branches(tree_type, num_leafs);
+ const int branches_num = implicit_needed_branches(tree_type, leafs_num);
BVHBuildHelper data;
int depth;
@@ -794,10 +794,10 @@ static void non_recursive_bvh_div_nodes(const BVHTree *tree,
/* Most of bvhtree code relies on 1-leaf trees having at least one branch
* We handle that special case here */
- if (num_leafs == 1) {
- refit_kdop_hull(tree, root, 0, num_leafs);
+ if (leafs_num == 1) {
+ refit_kdop_hull(tree, root, 0, leafs_num);
root->main_axis = get_largest_axis(root->bv) / 2;
- root->totnode = 1;
+ root->node_num = 1;
root->children[0] = leafs_array[0];
root->children[0]->parent = root;
return;
@@ -819,10 +819,10 @@ static void non_recursive_bvh_div_nodes(const BVHTree *tree,
};
/* Loop tree levels (log N) loops */
- for (i = 1, depth = 1; i <= num_branches; i = i * tree_type + tree_offset, depth++) {
+ for (i = 1, depth = 1; i <= branches_num; i = i * tree_type + tree_offset, depth++) {
const int first_of_next_level = i * tree_type + tree_offset;
/* index of last branch on this level */
- const int i_stop = min_ii(first_of_next_level, num_branches + 1);
+ const int i_stop = min_ii(first_of_next_level, branches_num + 1);
/* Loop all branches on this level */
cb_data.first_of_next_level = first_of_next_level;
@@ -832,7 +832,7 @@ static void non_recursive_bvh_div_nodes(const BVHTree *tree,
if (true) {
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
- settings.use_threading = (num_leafs > KDOPBVH_THREAD_LEAF_THRESHOLD);
+ settings.use_threading = (leafs_num > KDOPBVH_THREAD_LEAF_THRESHOLD);
BLI_task_parallel_range(i, i_stop, &cb_data, non_recursive_bvh_div_nodes_task_cb, &settings);
}
else {
@@ -940,21 +940,21 @@ void BLI_bvhtree_balance(BVHTree *tree)
/* This function should only be called once
* (some big bug goes here if its being called more than once per tree) */
- BLI_assert(tree->totbranch == 0);
+ BLI_assert(tree->branch_num == 0);
/* Build the implicit tree */
non_recursive_bvh_div_nodes(
- tree, tree->nodearray + (tree->totleaf - 1), leafs_array, tree->totleaf);
+ tree, tree->nodearray + (tree->leaf_num - 1), leafs_array, tree->leaf_num);
/* current code expects the branches to be linked to the nodes array
* we perform that linkage here */
- tree->totbranch = implicit_needed_branches(tree->tree_type, tree->totleaf);
- for (int i = 0; i < tree->totbranch; i++) {
- tree->nodes[tree->totleaf + i] = &tree->nodearray[tree->totleaf + i];
+ tree->branch_num = implicit_needed_branches(tree->tree_type, tree->leaf_num);
+ for (int i = 0; i < tree->branch_num; i++) {
+ tree->nodes[tree->leaf_num + i] = &tree->nodearray[tree->leaf_num + i];
}
#ifdef USE_SKIP_LINKS
- build_skip_links(tree, tree->nodes[tree->totleaf], NULL, NULL);
+ build_skip_links(tree, tree->nodes[tree->leaf_num], NULL, NULL);
#endif
#ifdef USE_VERIFY_TREE
@@ -980,12 +980,12 @@ void BLI_bvhtree_insert(BVHTree *tree, int index, const float co[3], int numpoin
{
BVHNode *node = NULL;
- /* insert should only possible as long as tree->totbranch is 0 */
- BLI_assert(tree->totbranch <= 0);
- BLI_assert((size_t)tree->totleaf < MEM_allocN_len(tree->nodes) / sizeof(*(tree->nodes)));
+ /* insert should only possible as long as tree->branch_num is 0 */
+ BLI_assert(tree->branch_num <= 0);
+ BLI_assert((size_t)tree->leaf_num < MEM_allocN_len(tree->nodes) / sizeof(*(tree->nodes)));
- node = tree->nodes[tree->totleaf] = &(tree->nodearray[tree->totleaf]);
- tree->totleaf++;
+ node = tree->nodes[tree->leaf_num] = &(tree->nodearray[tree->leaf_num]);
+ tree->leaf_num++;
create_kdop_hull(tree, node, co, numpoints, 0);
node->index = index;
@@ -1000,7 +1000,7 @@ bool BLI_bvhtree_update_node(
BVHNode *node = NULL;
/* check if index exists */
- if (index > tree->totleaf) {
+ if (index > tree->leaf_num) {
return false;
}
@@ -1024,8 +1024,8 @@ void BLI_bvhtree_update_tree(BVHTree *tree)
* TRICKY: the way we build the tree all the children have an index greater than the parent
* This allows us todo a bottom up update by starting on the bigger numbered branch. */
- BVHNode **root = tree->nodes + tree->totleaf;
- BVHNode **index = tree->nodes + tree->totleaf + tree->totbranch - 1;
+ BVHNode **root = tree->nodes + tree->leaf_num;
+ BVHNode **index = tree->nodes + tree->leaf_num + tree->branch_num - 1;
for (; index >= root; index--) {
node_join(tree, *index);
@@ -1033,7 +1033,7 @@ void BLI_bvhtree_update_tree(BVHTree *tree)
}
int BLI_bvhtree_get_len(const BVHTree *tree)
{
- return tree->totleaf;
+ return tree->leaf_num;
}
int BLI_bvhtree_get_tree_type(const BVHTree *tree)
@@ -1048,7 +1048,7 @@ float BLI_bvhtree_get_epsilon(const BVHTree *tree)
void BLI_bvhtree_get_bounding_box(BVHTree *tree, float r_bb_min[3], float r_bb_max[3])
{
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
if (root != NULL) {
const float bb_min[3] = {root->bv[0], root->bv[2], root->bv[4]};
const float bb_max[3] = {root->bv[1], root->bv[3], root->bv[5]};
@@ -1099,9 +1099,9 @@ static void tree_overlap_traverse(BVHOverlapData_Thread *data_thread,
if (tree_overlap_test(node1, node2, data->start_axis, data->stop_axis)) {
/* check if node1 is a leaf */
- if (!node1->totnode) {
+ if (!node1->node_num) {
/* check if node2 is a leaf */
- if (!node2->totnode) {
+ if (!node2->node_num) {
BVHTreeOverlap *overlap;
if (UNLIKELY(node1 == node2)) {
@@ -1143,9 +1143,9 @@ static void tree_overlap_traverse_cb(BVHOverlapData_Thread *data_thread,
if (tree_overlap_test(node1, node2, data->start_axis, data->stop_axis)) {
/* check if node1 is a leaf */
- if (!node1->totnode) {
+ if (!node1->node_num) {
/* check if node2 is a leaf */
- if (!node2->totnode) {
+ if (!node2->node_num) {
BVHTreeOverlap *overlap;
if (UNLIKELY(node1 == node2)) {
@@ -1190,9 +1190,9 @@ static bool tree_overlap_traverse_num(BVHOverlapData_Thread *data_thread,
if (tree_overlap_test(node1, node2, data->start_axis, data->stop_axis)) {
/* check if node1 is a leaf */
- if (!node1->totnode) {
+ if (!node1->node_num) {
/* check if node2 is a leaf */
- if (!node2->totnode) {
+ if (!node2->node_num) {
BVHTreeOverlap *overlap;
if (UNLIKELY(node1 == node2)) {
@@ -1212,7 +1212,7 @@ static bool tree_overlap_traverse_num(BVHOverlapData_Thread *data_thread,
}
}
else {
- for (j = 0; j < node2->totnode; j++) {
+ for (j = 0; j < node2->node_num; j++) {
if (tree_overlap_traverse_num(data_thread, node1, node2->children[j])) {
return true;
}
@@ -1221,7 +1221,7 @@ static bool tree_overlap_traverse_num(BVHOverlapData_Thread *data_thread,
}
else {
const uint max_interactions = data_thread->max_interactions;
- for (j = 0; j < node1->totnode; j++) {
+ for (j = 0; j < node1->node_num; j++) {
if (tree_overlap_traverse_num(data_thread, node1->children[j], node2)) {
data_thread->max_interactions = max_interactions;
}
@@ -1233,7 +1233,7 @@ static bool tree_overlap_traverse_num(BVHOverlapData_Thread *data_thread,
int BLI_bvhtree_overlap_thread_num(const BVHTree *tree)
{
- return (int)MIN2(tree->tree_type, tree->nodes[tree->totleaf]->totnode);
+ return (int)MIN2(tree->tree_type, tree->nodes[tree->leaf_num]->node_num);
}
static void bvhtree_overlap_task_cb(void *__restrict userdata,
@@ -1245,25 +1245,25 @@ static void bvhtree_overlap_task_cb(void *__restrict userdata,
if (data->max_interactions) {
tree_overlap_traverse_num(data,
- data_shared->tree1->nodes[data_shared->tree1->totleaf]->children[j],
- data_shared->tree2->nodes[data_shared->tree2->totleaf]);
+ data_shared->tree1->nodes[data_shared->tree1->leaf_num]->children[j],
+ data_shared->tree2->nodes[data_shared->tree2->leaf_num]);
}
else if (data_shared->callback) {
tree_overlap_traverse_cb(data,
- data_shared->tree1->nodes[data_shared->tree1->totleaf]->children[j],
- data_shared->tree2->nodes[data_shared->tree2->totleaf]);
+ data_shared->tree1->nodes[data_shared->tree1->leaf_num]->children[j],
+ data_shared->tree2->nodes[data_shared->tree2->leaf_num]);
}
else {
tree_overlap_traverse(data,
- data_shared->tree1->nodes[data_shared->tree1->totleaf]->children[j],
- data_shared->tree2->nodes[data_shared->tree2->totleaf]);
+ data_shared->tree1->nodes[data_shared->tree1->leaf_num]->children[j],
+ data_shared->tree2->nodes[data_shared->tree2->leaf_num]);
}
}
BVHTreeOverlap *BLI_bvhtree_overlap_ex(
const BVHTree *tree1,
const BVHTree *tree2,
- uint *r_overlap_tot,
+ uint *r_overlap_num,
/* optional callback to test the overlap before adding (must be thread-safe!) */
BVHTree_OverlapCallback callback,
void *userdata,
@@ -1272,7 +1272,7 @@ BVHTreeOverlap *BLI_bvhtree_overlap_ex(
{
bool overlap_pairs = (flag & BVH_OVERLAP_RETURN_PAIRS) != 0;
bool use_threading = (flag & BVH_OVERLAP_USE_THREADING) != 0 &&
- (tree1->totleaf > KDOPBVH_THREAD_LEAF_THRESHOLD);
+ (tree1->leaf_num > KDOPBVH_THREAD_LEAF_THRESHOLD);
/* 'RETURN_PAIRS' was not implemented without 'max_interactions'. */
BLI_assert(overlap_pairs || max_interactions);
@@ -1293,8 +1293,8 @@ BVHTreeOverlap *BLI_bvhtree_overlap_ex(
return NULL;
}
- const BVHNode *root1 = tree1->nodes[tree1->totleaf];
- const BVHNode *root2 = tree2->nodes[tree2->totleaf];
+ const BVHNode *root1 = tree1->nodes[tree1->leaf_num];
+ const BVHNode *root2 = tree2->nodes[tree2->leaf_num];
start_axis = min_axis(tree1->start_axis, tree2->start_axis);
stop_axis = min_axis(tree1->stop_axis, tree2->stop_axis);
@@ -1354,7 +1354,7 @@ BVHTreeOverlap *BLI_bvhtree_overlap_ex(
BLI_stack_free(data[j].overlap);
to += count;
}
- *r_overlap_tot = (uint)total;
+ *r_overlap_num = (uint)total;
}
return overlap;
@@ -1363,14 +1363,14 @@ BVHTreeOverlap *BLI_bvhtree_overlap_ex(
BVHTreeOverlap *BLI_bvhtree_overlap(
const BVHTree *tree1,
const BVHTree *tree2,
- uint *r_overlap_tot,
+ uint *r_overlap_num,
/* optional callback to test the overlap before adding (must be thread-safe!) */
BVHTree_OverlapCallback callback,
void *userdata)
{
return BLI_bvhtree_overlap_ex(tree1,
tree2,
- r_overlap_tot,
+ r_overlap_num,
callback,
userdata,
0,
@@ -1403,7 +1403,7 @@ static void bvhtree_intersect_plane_dfs_recursive(BVHIntersectPlaneData *__restr
{
if (tree_intersect_plane_test(node->bv, data->plane)) {
/* check if node is a leaf */
- if (!node->totnode) {
+ if (!node->node_num) {
int *intersect = BLI_stack_push_r(data->intersect);
*intersect = node->index;
}
@@ -1417,18 +1417,18 @@ static void bvhtree_intersect_plane_dfs_recursive(BVHIntersectPlaneData *__restr
}
}
-int *BLI_bvhtree_intersect_plane(BVHTree *tree, float plane[4], uint *r_intersect_tot)
+int *BLI_bvhtree_intersect_plane(BVHTree *tree, float plane[4], uint *r_intersect_num)
{
int *intersect = NULL;
size_t total = 0;
- if (tree->totleaf) {
+ if (tree->leaf_num) {
BVHIntersectPlaneData data;
data.tree = tree;
copy_v4_v4(data.plane, plane);
data.intersect = BLI_stack_new(sizeof(int), __func__);
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
bvhtree_intersect_plane_dfs_recursive(&data, root);
total = BLI_stack_count(data.intersect);
@@ -1438,7 +1438,7 @@ int *BLI_bvhtree_intersect_plane(BVHTree *tree, float plane[4], uint *r_intersec
}
BLI_stack_free(data.intersect);
}
- *r_intersect_tot = (uint)total;
+ *r_intersect_num = (uint)total;
return intersect;
}
@@ -1473,7 +1473,7 @@ static float calc_nearest_point_squared(const float proj[3], BVHNode *node, floa
/* Depth first search method */
static void dfs_find_nearest_dfs(BVHNearestData *data, BVHNode *node)
{
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
if (data->callback) {
data->callback(data->userdata, node->index, data->co, &data->nearest);
}
@@ -1489,7 +1489,7 @@ static void dfs_find_nearest_dfs(BVHNearestData *data, BVHNode *node)
if (data->proj[node->main_axis] <= node->children[0]->bv[node->main_axis * 2 + 1]) {
- for (i = 0; i != node->totnode; i++) {
+ for (i = 0; i != node->node_num; i++) {
if (calc_nearest_point_squared(data->proj, node->children[i], nearest) >=
data->nearest.dist_sq) {
continue;
@@ -1498,7 +1498,7 @@ static void dfs_find_nearest_dfs(BVHNearestData *data, BVHNode *node)
}
}
else {
- for (i = node->totnode - 1; i >= 0; i--) {
+ for (i = node->node_num - 1; i >= 0; i--) {
if (calc_nearest_point_squared(data->proj, node->children[i], nearest) >=
data->nearest.dist_sq) {
continue;
@@ -1522,7 +1522,7 @@ static void dfs_find_nearest_begin(BVHNearestData *data, BVHNode *node)
/* Priority queue method */
static void heap_find_nearest_inner(BVHNearestData *data, HeapSimple *heap, BVHNode *node)
{
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
if (data->callback) {
data->callback(data->userdata, node->index, data->co, &data->nearest);
}
@@ -1534,7 +1534,7 @@ static void heap_find_nearest_inner(BVHNearestData *data, HeapSimple *heap, BVHN
else {
float nearest[3];
- for (int i = 0; i != node->totnode; i++) {
+ for (int i = 0; i != node->node_num; i++) {
float dist_sq = calc_nearest_point_squared(data->proj, node->children[i], nearest);
if (dist_sq < data->nearest.dist_sq) {
@@ -1574,7 +1574,7 @@ int BLI_bvhtree_find_nearest_ex(BVHTree *tree,
axis_t axis_iter;
BVHNearestData data;
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
/* init data to search */
data.tree = tree;
@@ -1642,7 +1642,7 @@ static bool isect_aabb_v3(BVHNode *node, const float co[3])
static bool dfs_find_duplicate_fast_dfs(BVHNearestData *data, BVHNode *node)
{
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
if (isect_aabb_v3(node, data->co)) {
if (data->callback) {
const float dist_sq = data->nearest.dist_sq;
@@ -1658,7 +1658,7 @@ static bool dfs_find_duplicate_fast_dfs(BVHNearestData *data, BVHNode *node)
int i;
if (data->proj[node->main_axis] <= node->children[0]->bv[node->main_axis * 2 + 1]) {
- for (i = 0; i != node->totnode; i++) {
+ for (i = 0; i != node->node_num; i++) {
if (isect_aabb_v3(node->children[i], data->co)) {
if (dfs_find_duplicate_fast_dfs(data, node->children[i])) {
return true;
@@ -1667,7 +1667,7 @@ static bool dfs_find_duplicate_fast_dfs(BVHNearestData *data, BVHNode *node)
}
}
else {
- for (i = node->totnode; i--;) {
+ for (i = node->node_num; i--;) {
if (isect_aabb_v3(node->children[i], data->co)) {
if (dfs_find_duplicate_fast_dfs(data, node->children[i])) {
return true;
@@ -1686,7 +1686,7 @@ int BLI_bvhtree_find_nearest_first(BVHTree *tree,
void *userdata)
{
BVHNearestData data;
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
/* init data to search */
data.tree = tree;
@@ -1796,7 +1796,7 @@ static void dfs_raycast(BVHRayCastData *data, BVHNode *node)
return;
}
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
if (data->callback) {
data->callback(data->userdata, node->index, &data->ray, &data->hit);
}
@@ -1809,12 +1809,12 @@ static void dfs_raycast(BVHRayCastData *data, BVHNode *node)
else {
/* pick loop direction to dive into the tree (based on ray direction and split axis) */
if (data->ray_dot_axis[node->main_axis] > 0.0f) {
- for (i = 0; i != node->totnode; i++) {
+ for (i = 0; i != node->node_num; i++) {
dfs_raycast(data, node->children[i]);
}
}
else {
- for (i = node->totnode - 1; i >= 0; i--) {
+ for (i = node->node_num - 1; i >= 0; i--) {
dfs_raycast(data, node->children[i]);
}
}
@@ -1837,7 +1837,7 @@ static void dfs_raycast_all(BVHRayCastData *data, BVHNode *node)
return;
}
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
/* no need to check for 'data->callback' (using 'all' only makes sense with a callback). */
dist = data->hit.dist;
data->callback(data->userdata, node->index, &data->ray, &data->hit);
@@ -1847,12 +1847,12 @@ static void dfs_raycast_all(BVHRayCastData *data, BVHNode *node)
else {
/* pick loop direction to dive into the tree (based on ray direction and split axis) */
if (data->ray_dot_axis[node->main_axis] > 0.0f) {
- for (i = 0; i != node->totnode; i++) {
+ for (i = 0; i != node->node_num; i++) {
dfs_raycast_all(data, node->children[i]);
}
}
else {
- for (i = node->totnode - 1; i >= 0; i--) {
+ for (i = node->node_num - 1; i >= 0; i--) {
dfs_raycast_all(data, node->children[i]);
}
}
@@ -1904,7 +1904,7 @@ int BLI_bvhtree_ray_cast_ex(BVHTree *tree,
int flag)
{
BVHRayCastData data;
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
BLI_ASSERT_UNIT_V3(dir);
@@ -1988,7 +1988,7 @@ void BLI_bvhtree_ray_cast_all_ex(BVHTree *tree,
int flag)
{
BVHRayCastData data;
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
BLI_ASSERT_UNIT_V3(dir);
BLI_assert(callback != NULL);
@@ -2048,7 +2048,7 @@ typedef struct RangeQueryData {
static void dfs_range_query(RangeQueryData *data, BVHNode *node)
{
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
#if 0 /*UNUSED*/
/* Calculate the node min-coords
* (if the node was a point then this is the point coordinates) */
@@ -2060,12 +2060,12 @@ static void dfs_range_query(RangeQueryData *data, BVHNode *node)
}
else {
int i;
- for (i = 0; i != node->totnode; i++) {
+ for (i = 0; i != node->node_num; i++) {
float nearest[3];
float dist_sq = calc_nearest_point_squared(data->center, node->children[i], nearest);
if (dist_sq < data->radius_sq) {
/* Its a leaf.. call the callback */
- if (node->children[i]->totnode == 0) {
+ if (node->children[i]->node_num == 0) {
data->hits++;
data->callback(data->userdata, node->children[i]->index, data->center, dist_sq);
}
@@ -2080,7 +2080,7 @@ static void dfs_range_query(RangeQueryData *data, BVHNode *node)
int BLI_bvhtree_range_query(
BVHTree *tree, const float co[3], float radius, BVHTree_RangeQuery callback, void *userdata)
{
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
RangeQueryData data;
data.tree = tree;
@@ -2096,7 +2096,7 @@ int BLI_bvhtree_range_query(
float dist_sq = calc_nearest_point_squared(data.center, root, nearest);
if (dist_sq < data.radius_sq) {
/* Its a leaf.. call the callback */
- if (root->totnode == 0) {
+ if (root->node_num == 0) {
data.hits++;
data.callback(data.userdata, root->index, co, dist_sq);
}
@@ -2118,7 +2118,7 @@ int BLI_bvhtree_range_query(
static void bvhtree_nearest_projected_dfs_recursive(BVHNearestProjectedData *__restrict data,
const BVHNode *node)
{
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
if (data->callback) {
data->callback(data->userdata, node->index, &data->precalc, NULL, 0, &data->nearest);
}
@@ -2134,7 +2134,7 @@ static void bvhtree_nearest_projected_dfs_recursive(BVHNearestProjectedData *__r
else {
/* First pick the closest node to recurse into */
if (data->closest_axis[node->main_axis]) {
- for (int i = 0; i != node->totnode; i++) {
+ for (int i = 0; i != node->node_num; i++) {
const float *bv = node->children[i]->bv;
if (dist_squared_to_projected_aabb(&data->precalc,
@@ -2146,7 +2146,7 @@ static void bvhtree_nearest_projected_dfs_recursive(BVHNearestProjectedData *__r
}
}
else {
- for (int i = node->totnode; i--;) {
+ for (int i = node->node_num; i--;) {
const float *bv = node->children[i]->bv;
if (dist_squared_to_projected_aabb(&data->precalc,
@@ -2163,7 +2163,7 @@ static void bvhtree_nearest_projected_dfs_recursive(BVHNearestProjectedData *__r
static void bvhtree_nearest_projected_with_clipplane_test_dfs_recursive(
BVHNearestProjectedData *__restrict data, const BVHNode *node)
{
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
if (data->callback) {
data->callback(data->userdata,
node->index,
@@ -2184,7 +2184,7 @@ static void bvhtree_nearest_projected_with_clipplane_test_dfs_recursive(
else {
/* First pick the closest node to recurse into */
if (data->closest_axis[node->main_axis]) {
- for (int i = 0; i != node->totnode; i++) {
+ for (int i = 0; i != node->node_num; i++) {
const float *bv = node->children[i]->bv;
const float bb_min[3] = {bv[0], bv[2], bv[4]};
const float bb_max[3] = {bv[1], bv[3], bv[5]};
@@ -2206,7 +2206,7 @@ static void bvhtree_nearest_projected_with_clipplane_test_dfs_recursive(
}
}
else {
- for (int i = node->totnode; i--;) {
+ for (int i = node->node_num; i--;) {
const float *bv = node->children[i]->bv;
const float bb_min[3] = {bv[0], bv[2], bv[4]};
const float bb_max[3] = {bv[1], bv[3], bv[5]};
@@ -2240,7 +2240,7 @@ int BLI_bvhtree_find_nearest_projected(BVHTree *tree,
BVHTree_NearestProjectedCallback callback,
void *userdata)
{
- BVHNode *root = tree->nodes[tree->totleaf];
+ BVHNode *root = tree->nodes[tree->leaf_num];
if (root != NULL) {
BVHNearestProjectedData data;
dist_squared_to_projected_aabb_precalc(&data.precalc, projmat, winsize, mval);
@@ -2314,7 +2314,7 @@ typedef struct BVHTree_WalkData {
*/
static bool bvhtree_walk_dfs_recursive(BVHTree_WalkData *walk_data, const BVHNode *node)
{
- if (node->totnode == 0) {
+ if (node->node_num == 0) {
return walk_data->walk_leaf_cb(
(const BVHTreeAxisRange *)node->bv, node->index, walk_data->userdata);
}
@@ -2322,7 +2322,7 @@ static bool bvhtree_walk_dfs_recursive(BVHTree_WalkData *walk_data, const BVHNod
/* First pick the closest node to recurse into */
if (walk_data->walk_order_cb(
(const BVHTreeAxisRange *)node->bv, node->main_axis, walk_data->userdata)) {
- for (int i = 0; i != node->totnode; i++) {
+ for (int i = 0; i != node->node_num; i++) {
if (walk_data->walk_parent_cb((const BVHTreeAxisRange *)node->children[i]->bv,
walk_data->userdata)) {
if (!bvhtree_walk_dfs_recursive(walk_data, node->children[i])) {
@@ -2332,7 +2332,7 @@ static bool bvhtree_walk_dfs_recursive(BVHTree_WalkData *walk_data, const BVHNod
}
}
else {
- for (int i = node->totnode - 1; i >= 0; i--) {
+ for (int i = node->node_num - 1; i >= 0; i--) {
if (walk_data->walk_parent_cb((const BVHTreeAxisRange *)node->children[i]->bv,
walk_data->userdata)) {
if (!bvhtree_walk_dfs_recursive(walk_data, node->children[i])) {
@@ -2350,7 +2350,7 @@ void BLI_bvhtree_walk_dfs(BVHTree *tree,
BVHTree_WalkOrderCallback walk_order_cb,
void *userdata)
{
- const BVHNode *root = tree->nodes[tree->totleaf];
+ const BVHNode *root = tree->nodes[tree->leaf_num];
if (root != NULL) {
BVHTree_WalkData walk_data = {walk_parent_cb, walk_leaf_cb, walk_order_cb, userdata};
/* first make sure the bv of root passes in the test too */
diff --git a/source/blender/blenlib/intern/BLI_mempool.c b/source/blender/blenlib/intern/BLI_mempool.c
index 76a82e505e3..f70b5ddd766 100644
--- a/source/blender/blenlib/intern/BLI_mempool.c
+++ b/source/blender/blenlib/intern/BLI_mempool.c
@@ -159,9 +159,9 @@ BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint i
* \note for small pools 1 is a good default, the elements need to be initialized,
* adding overhead on creation which is redundant if they aren't used.
*/
-BLI_INLINE uint mempool_maxchunks(const uint totelem, const uint pchunk)
+BLI_INLINE uint mempool_maxchunks(const uint elem_num, const uint pchunk)
{
- return (totelem <= pchunk) ? 1 : ((totelem / pchunk) + 1);
+ return (elem_num <= pchunk) ? 1 : ((elem_num / pchunk) + 1);
}
static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
@@ -250,7 +250,7 @@ static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
}
}
-BLI_mempool *BLI_mempool_create(uint esize, uint totelem, uint pchunk, uint flag)
+BLI_mempool *BLI_mempool_create(uint esize, uint elem_num, uint pchunk, uint flag)
{
BLI_mempool *pool;
BLI_freenode *last_tail = NULL;
@@ -268,7 +268,7 @@ BLI_mempool *BLI_mempool_create(uint esize, uint totelem, uint pchunk, uint flag
esize = MAX2(esize, (uint)sizeof(BLI_freenode));
}
- maxchunks = mempool_maxchunks(totelem, pchunk);
+ maxchunks = mempool_maxchunks(elem_num, pchunk);
pool->chunks = NULL;
pool->chunk_tail = NULL;
@@ -301,7 +301,7 @@ BLI_mempool *BLI_mempool_create(uint esize, uint totelem, uint pchunk, uint flag
#endif
pool->totused = 0;
- if (totelem) {
+ if (elem_num) {
/* Allocate the actual chunks. */
for (i = 0; i < maxchunks; i++) {
BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
@@ -510,18 +510,18 @@ static void mempool_threadsafe_iternew(BLI_mempool *pool, BLI_mempool_threadsafe
ts_iter->curchunk_threaded_shared = NULL;
}
-ParallelMempoolTaskData *mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter)
+ParallelMempoolTaskData *mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t iter_num)
{
BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
- ParallelMempoolTaskData *iter_arr = MEM_mallocN(sizeof(*iter_arr) * num_iter, __func__);
+ ParallelMempoolTaskData *iter_arr = MEM_mallocN(sizeof(*iter_arr) * iter_num, __func__);
BLI_mempool_chunk **curchunk_threaded_shared = MEM_mallocN(sizeof(void *), __func__);
mempool_threadsafe_iternew(pool, &iter_arr->ts_iter);
*curchunk_threaded_shared = iter_arr->ts_iter.iter.curchunk;
iter_arr->ts_iter.curchunk_threaded_shared = curchunk_threaded_shared;
- for (size_t i = 1; i < num_iter; i++) {
+ for (size_t i = 1; i < iter_num; i++) {
iter_arr[i].ts_iter = iter_arr[0].ts_iter;
*curchunk_threaded_shared = iter_arr[i].ts_iter.iter.curchunk =
((*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : NULL);
diff --git a/source/blender/blenlib/intern/BLI_mempool_private.h b/source/blender/blenlib/intern/BLI_mempool_private.h
index 5e17d4af05a..042b39c2e7f 100644
--- a/source/blender/blenlib/intern/BLI_mempool_private.h
+++ b/source/blender/blenlib/intern/BLI_mempool_private.h
@@ -39,7 +39,7 @@ typedef struct ParallelMempoolTaskData {
* See #BLI_task_parallel_mempool implementation for detailed usage example.
*/
ParallelMempoolTaskData *mempool_iter_threadsafe_create(BLI_mempool *pool,
- size_t num_iter) ATTR_WARN_UNUSED_RESULT
+ size_t iter_num) ATTR_WARN_UNUSED_RESULT
ATTR_NONNULL();
void mempool_iter_threadsafe_destroy(ParallelMempoolTaskData *iter_arr) ATTR_NONNULL();
diff --git a/source/blender/blenlib/intern/bitmap.c b/source/blender/blenlib/intern/bitmap.c
index dd022986e14..7fcbc31c066 100644
--- a/source/blender/blenlib/intern/bitmap.c
+++ b/source/blender/blenlib/intern/bitmap.c
@@ -20,8 +20,8 @@ void BLI_bitmap_set_all(BLI_bitmap *bitmap, bool set, size_t bits)
void BLI_bitmap_flip_all(BLI_bitmap *bitmap, size_t bits)
{
- size_t num_blocks = _BITMAP_NUM_BLOCKS(bits);
- for (size_t i = 0; i < num_blocks; i++) {
+ size_t blocks_num = _BITMAP_NUM_BLOCKS(bits);
+ for (size_t i = 0; i < blocks_num; i++) {
bitmap[i] ^= ~(BLI_bitmap)0;
}
}
@@ -33,16 +33,16 @@ void BLI_bitmap_copy_all(BLI_bitmap *dst, const BLI_bitmap *src, size_t bits)
void BLI_bitmap_and_all(BLI_bitmap *dst, const BLI_bitmap *src, size_t bits)
{
- size_t num_blocks = _BITMAP_NUM_BLOCKS(bits);
- for (size_t i = 0; i < num_blocks; i++) {
+ size_t blocks_num = _BITMAP_NUM_BLOCKS(bits);
+ for (size_t i = 0; i < blocks_num; i++) {
dst[i] &= src[i];
}
}
void BLI_bitmap_or_all(BLI_bitmap *dst, const BLI_bitmap *src, size_t bits)
{
- size_t num_blocks = _BITMAP_NUM_BLOCKS(bits);
- for (size_t i = 0; i < num_blocks; i++) {
+ size_t blocks_num = _BITMAP_NUM_BLOCKS(bits);
+ for (size_t i = 0; i < blocks_num; i++) {
dst[i] |= src[i];
}
}
diff --git a/source/blender/blenlib/intern/convexhull_2d.c b/source/blender/blenlib/intern/convexhull_2d.c
index d1daa511b1a..33d1a68a76e 100644
--- a/source/blender/blenlib/intern/convexhull_2d.c
+++ b/source/blender/blenlib/intern/convexhull_2d.c
@@ -165,7 +165,7 @@ int BLI_convexhull_2d(const float (*points)[2], const int n, int r_points[])
struct PointRef *points_ref = MEM_mallocN(sizeof(*points_ref) * (size_t)n, __func__);
float(*points_sort)[2] = MEM_mallocN(sizeof(*points_sort) * (size_t)n, __func__);
int *points_map;
- int tot, i;
+ int points_hull_num, i;
for (i = 0; i < n; i++) {
points_ref[i].pt = points[i];
@@ -178,20 +178,20 @@ int BLI_convexhull_2d(const float (*points)[2], const int n, int r_points[])
memcpy(points_sort[i], points_ref[i].pt, sizeof(float[2]));
}
- tot = BLI_convexhull_2d_sorted(points_sort, n, r_points);
+ points_hull_num = BLI_convexhull_2d_sorted(points_sort, n, r_points);
/* map back to the original index values */
points_map = (int *)points_sort; /* abuse float array for temp storage */
- for (i = 0; i < tot; i++) {
+ for (i = 0; i < points_hull_num; i++) {
points_map[i] = (int)((const float(*)[2])points_ref[r_points[i]].pt - points);
}
- memcpy(r_points, points_map, (size_t)tot * sizeof(*points_map));
+ memcpy(r_points, points_map, (size_t)points_hull_num * sizeof(*points_map));
MEM_freeN(points_ref);
MEM_freeN(points_sort);
- return tot;
+ return points_hull_num;
}
/** \} */
@@ -252,24 +252,24 @@ float BLI_convexhull_aabb_fit_hull_2d(const float (*points_hull)[2], unsigned in
float BLI_convexhull_aabb_fit_points_2d(const float (*points)[2], unsigned int n)
{
int *index_map;
- int tot;
+ int points_hull_num;
float angle;
index_map = MEM_mallocN(sizeof(*index_map) * n * 2, __func__);
- tot = BLI_convexhull_2d(points, (int)n, index_map);
+ points_hull_num = BLI_convexhull_2d(points, (int)n, index_map);
- if (tot) {
+ if (points_hull_num) {
float(*points_hull)[2];
int j;
- points_hull = MEM_mallocN(sizeof(*points_hull) * (size_t)tot, __func__);
- for (j = 0; j < tot; j++) {
+ points_hull = MEM_mallocN(sizeof(*points_hull) * (size_t)points_hull_num, __func__);
+ for (j = 0; j < points_hull_num; j++) {
copy_v2_v2(points_hull[j], points[index_map[j]]);
}
- angle = BLI_convexhull_aabb_fit_hull_2d(points_hull, (unsigned int)tot);
+ angle = BLI_convexhull_aabb_fit_hull_2d(points_hull, (unsigned int)points_hull_num);
MEM_freeN(points_hull);
}
else {
diff --git a/source/blender/blenlib/intern/delaunay_2d.cc b/source/blender/blenlib/intern/delaunay_2d.cc
index 3039b72128d..804ba5c3c80 100644
--- a/source/blender/blenlib/intern/delaunay_2d.cc
+++ b/source/blender/blenlib/intern/delaunay_2d.cc
@@ -245,7 +245,7 @@ template<typename Arith_t> struct CDTArrangement {
/** Hint to how much space to reserve in the Vectors of the arrangement,
* based on these counts of input elements. */
- void reserve(int num_verts, int num_edges, int num_faces);
+ void reserve(int verts_num, int edges_num, int faces_num);
/**
* Add a new vertex to the arrangement, with the given 2D coordinate.
@@ -318,7 +318,7 @@ template<typename T> class CDT_state {
public:
CDTArrangement<T> cdt;
/** How many verts were in input (will be first in vert_array). */
- int input_vert_tot;
+ int input_vert_num;
/** Used for visiting things without having to initialized their visit fields. */
int visit_count;
/**
@@ -332,7 +332,7 @@ template<typename T> class CDT_state {
bool need_ids;
explicit CDT_state(
- int num_input_verts, int num_input_edges, int num_input_faces, T epsilon, bool need_ids);
+ int input_verts_num, int input_edges_num, int input_faces_num, T epsilon, bool need_ids);
};
template<typename T> CDTArrangement<T>::~CDTArrangement()
@@ -859,20 +859,20 @@ template<typename T> CDTFace<T> *CDTArrangement<T>::add_face()
return f;
}
-template<typename T> void CDTArrangement<T>::reserve(int num_verts, int num_edges, int num_faces)
+template<typename T> void CDTArrangement<T>::reserve(int verts_num, int edges_num, int faces_num)
{
/* These reserves are just guesses; OK if they aren't exactly right since vectors will resize. */
- this->verts.reserve(2 * num_verts);
- this->edges.reserve(3 * num_verts + 2 * num_edges + 3 * 2 * num_faces);
- this->faces.reserve(2 * num_verts + 2 * num_edges + 2 * num_faces);
+ this->verts.reserve(2 * verts_num);
+ this->edges.reserve(3 * verts_num + 2 * edges_num + 3 * 2 * faces_num);
+ this->faces.reserve(2 * verts_num + 2 * edges_num + 2 * faces_num);
}
template<typename T>
CDT_state<T>::CDT_state(
- int num_input_verts, int num_input_edges, int num_input_faces, T epsilon, bool need_ids)
+ int input_verts_num, int input_edges_num, int input_faces_num, T epsilon, bool need_ids)
{
- this->input_vert_tot = num_input_verts;
- this->cdt.reserve(num_input_verts, num_input_edges, num_input_faces);
+ this->input_vert_num = input_verts_num;
+ this->cdt.reserve(input_verts_num, input_edges_num, input_faces_num);
this->cdt.outer_face = this->cdt.add_face();
this->epsilon = epsilon;
this->need_ids = need_ids;
@@ -919,7 +919,7 @@ template<typename T> inline bool is_deleted_edge(const CDTEdge<T> *e)
template<typename T> inline bool is_original_vert(const CDTVert<T> *v, CDT_state<T> *cdt)
{
- return (v->index < cdt->input_vert_tot);
+ return (v->index < cdt->input_vert_num);
}
/**
@@ -2678,7 +2678,7 @@ CDT_result<T> get_cdt_output(CDT_state<T> *cdt_state,
CDTVert<T> *v = cdt->verts[i];
if (v->merge_to_index != -1) {
if (cdt_state->need_ids) {
- if (i < cdt_state->input_vert_tot) {
+ if (i < cdt_state->input_vert_num) {
add_to_input_ids(cdt->verts[v->merge_to_index]->input_ids, i);
}
}
@@ -2696,7 +2696,7 @@ CDT_result<T> get_cdt_output(CDT_state<T> *cdt_state,
if (v->merge_to_index == -1) {
result.vert[i_out] = v->co.exact;
if (cdt_state->need_ids) {
- if (i < cdt_state->input_vert_tot) {
+ if (i < cdt_state->input_vert_num) {
result.vert_orig[i_out].append(i);
}
for (int vert : v->input_ids) {
@@ -2765,7 +2765,7 @@ CDT_result<T> get_cdt_output(CDT_state<T> *cdt_state,
*/
template<typename T> void add_input_verts(CDT_state<T> *cdt_state, const CDT_input<T> &input)
{
- for (int i = 0; i < cdt_state->input_vert_tot; ++i) {
+ for (int i = 0; i < cdt_state->input_vert_num; ++i) {
cdt_state->cdt.add_vert(input.vert[i]);
}
}
diff --git a/source/blender/blenlib/intern/filereader_zstd.c b/source/blender/blenlib/intern/filereader_zstd.c
index 0a04a443e76..5f114f24fb0 100644
--- a/source/blender/blenlib/intern/filereader_zstd.c
+++ b/source/blender/blenlib/intern/filereader_zstd.c
@@ -25,7 +25,7 @@ typedef struct {
size_t in_buf_max_size;
struct {
- int num_frames;
+ int frames_num;
size_t *compressed_ofs;
size_t *uncompressed_ofs;
@@ -69,21 +69,21 @@ static bool zstd_read_seek_table(ZstdReader *zstd)
return false;
}
- uint32_t num_frames;
- if (base->seek(base, -9, SEEK_END) < 0 || !zstd_read_u32(base, &num_frames)) {
+ uint32_t frames_num;
+ if (base->seek(base, -9, SEEK_END) < 0 || !zstd_read_u32(base, &frames_num)) {
return false;
}
/* Each frame has either 2 or 3 uint32_t, and after that we have
- * num_frames, flags and magic for another 9 bytes. */
- uint32_t expected_frame_length = num_frames * (has_checksums ? 12 : 8) + 9;
+ * frames_num, flags and magic for another 9 bytes. */
+ uint32_t expected_frame_length = frames_num * (has_checksums ? 12 : 8) + 9;
/* The frame starts with another magic number and its length, but these
* two fields are not included when counting length. */
off64_t frame_start_ofs = 8 + expected_frame_length;
/* Sanity check: Before the start of the seek table frame,
- * there must be num_frames frames, each of which at least 8 bytes long. */
+ * there must be frames_num frames, each of which at least 8 bytes long. */
off64_t seek_frame_start = base->seek(base, -frame_start_ofs, SEEK_END);
- if (seek_frame_start < num_frames * 8) {
+ if (seek_frame_start < frames_num * 8) {
return false;
}
@@ -96,13 +96,13 @@ static bool zstd_read_seek_table(ZstdReader *zstd)
return false;
}
- zstd->seek.num_frames = num_frames;
- zstd->seek.compressed_ofs = MEM_malloc_arrayN(num_frames + 1, sizeof(size_t), __func__);
- zstd->seek.uncompressed_ofs = MEM_malloc_arrayN(num_frames + 1, sizeof(size_t), __func__);
+ zstd->seek.frames_num = frames_num;
+ zstd->seek.compressed_ofs = MEM_malloc_arrayN(frames_num + 1, sizeof(size_t), __func__);
+ zstd->seek.uncompressed_ofs = MEM_malloc_arrayN(frames_num + 1, sizeof(size_t), __func__);
size_t compressed_ofs = 0;
size_t uncompressed_ofs = 0;
- for (int i = 0; i < num_frames; i++) {
+ for (int i = 0; i < frames_num; i++) {
uint32_t compressed_size, uncompressed_size;
if (!zstd_read_u32(base, &compressed_size) || !zstd_read_u32(base, &uncompressed_size)) {
break;
@@ -115,8 +115,8 @@ static bool zstd_read_seek_table(ZstdReader *zstd)
compressed_ofs += compressed_size;
uncompressed_ofs += uncompressed_size;
}
- zstd->seek.compressed_ofs[num_frames] = compressed_ofs;
- zstd->seek.uncompressed_ofs[num_frames] = uncompressed_ofs;
+ zstd->seek.compressed_ofs[frames_num] = compressed_ofs;
+ zstd->seek.uncompressed_ofs[frames_num] = uncompressed_ofs;
/* Seek to the end of the previous frame for the following #BHead frame detection. */
if (seek_frame_start != compressed_ofs || base->seek(base, seek_frame_start, SEEK_SET) < 0) {
@@ -135,9 +135,9 @@ static bool zstd_read_seek_table(ZstdReader *zstd)
* Basically just bisection. */
static int zstd_frame_from_pos(ZstdReader *zstd, size_t pos)
{
- int low = 0, high = zstd->seek.num_frames;
+ int low = 0, high = zstd->seek.frames_num;
- if (pos >= zstd->seek.uncompressed_ofs[zstd->seek.num_frames]) {
+ if (pos >= zstd->seek.uncompressed_ofs[zstd->seek.frames_num]) {
return -1;
}
@@ -229,13 +229,13 @@ static off64_t zstd_seek(FileReader *reader, off64_t offset, int whence)
new_pos = offset;
}
else if (whence == SEEK_END) {
- new_pos = zstd->seek.uncompressed_ofs[zstd->seek.num_frames] + offset;
+ new_pos = zstd->seek.uncompressed_ofs[zstd->seek.frames_num] + offset;
}
else {
new_pos = zstd->reader.offset + offset;
}
- if (new_pos < 0 || new_pos > zstd->seek.uncompressed_ofs[zstd->seek.num_frames]) {
+ if (new_pos < 0 || new_pos > zstd->seek.uncompressed_ofs[zstd->seek.frames_num]) {
return -1;
}
zstd->reader.offset = new_pos;
diff --git a/source/blender/blenlib/intern/gsqueue.c b/source/blender/blenlib/intern/gsqueue.c
index 4bf4e15d864..b09f5113be7 100644
--- a/source/blender/blenlib/intern/gsqueue.c
+++ b/source/blender/blenlib/intern/gsqueue.c
@@ -33,7 +33,7 @@ struct _GSQueue {
size_t chunk_last_index; /* index into 'chunk_last' */
size_t chunk_elem_max; /* number of elements per chunk */
size_t elem_size; /* memory size of elements */
- size_t totelem; /* total number of elements */
+ size_t elem_num; /* total number of elements */
};
static void *queue_get_first_elem(GSQueue *queue)
@@ -97,7 +97,7 @@ void BLI_gsqueue_free(GSQueue *queue)
void BLI_gsqueue_push(GSQueue *queue, const void *item)
{
queue->chunk_last_index++;
- queue->totelem++;
+ queue->elem_num++;
if (UNLIKELY(queue->chunk_last_index == queue->chunk_elem_max)) {
struct QueueChunk *chunk;
@@ -134,9 +134,9 @@ void BLI_gsqueue_pop(GSQueue *queue, void *r_item)
memcpy(r_item, queue_get_first_elem(queue), queue->elem_size);
queue->chunk_first_index++;
- queue->totelem--;
+ queue->elem_num--;
- if (UNLIKELY(queue->chunk_first_index == queue->chunk_elem_max || queue->totelem == 0)) {
+ if (UNLIKELY(queue->chunk_first_index == queue->chunk_elem_max || queue->elem_num == 0)) {
struct QueueChunk *chunk_free = queue->chunk_first;
queue->chunk_first = queue->chunk_first->next;
@@ -153,7 +153,7 @@ void BLI_gsqueue_pop(GSQueue *queue, void *r_item)
size_t BLI_gsqueue_len(const GSQueue *queue)
{
- return queue->totelem;
+ return queue->elem_num;
}
bool BLI_gsqueue_is_empty(const GSQueue *queue)
diff --git a/source/blender/blenlib/intern/jitter_2d.c b/source/blender/blenlib/intern/jitter_2d.c
index 5e840e8178e..8fa0f2c1e15 100644
--- a/source/blender/blenlib/intern/jitter_2d.c
+++ b/source/blender/blenlib/intern/jitter_2d.c
@@ -126,7 +126,7 @@ void BLI_jitterate2(float (*jit1)[2], float (*jit2)[2], int num, float radius2)
void BLI_jitter_init(float (*jitarr)[2], int num)
{
float(*jit2)[2];
- float num_fl, num_fl_sqrt;
+ float number_fl, number_fl_sqrt;
float x, rad1, rad2, rad3;
RNG *rng;
int i;
@@ -135,20 +135,20 @@ void BLI_jitter_init(float (*jitarr)[2], int num)
return;
}
- num_fl = (float)num;
- num_fl_sqrt = sqrtf(num_fl);
+ number_fl = (float)num;
+ number_fl_sqrt = sqrtf(number_fl);
jit2 = MEM_mallocN(12 + (unsigned int)num * sizeof(float[2]), "initjit");
- rad1 = 1.0f / num_fl_sqrt;
- rad2 = 1.0f / num_fl;
- rad3 = num_fl_sqrt / num_fl;
+ rad1 = 1.0f / number_fl_sqrt;
+ rad2 = 1.0f / number_fl;
+ rad3 = number_fl_sqrt / number_fl;
rng = BLI_rng_new(31415926 + (unsigned int)num);
x = 0;
for (i = 0; i < num; i++) {
jitarr[i][0] = x + rad1 * (float)(0.5 - BLI_rng_get_double(rng));
- jitarr[i][1] = (float)i / num_fl + rad1 * (float)(0.5 - BLI_rng_get_double(rng));
+ jitarr[i][1] = (float)i / number_fl + rad1 * (float)(0.5 - BLI_rng_get_double(rng));
x += rad3;
x -= floorf(x);
}
diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c
index e1ec22063e0..1b13493e00c 100644
--- a/source/blender/blenlib/intern/math_geom.c
+++ b/source/blender/blenlib/intern/math_geom.c
@@ -3404,7 +3404,7 @@ bool clip_segment_v3_plane(
bool clip_segment_v3_plane_n(const float p1[3],
const float p2[3],
const float plane_array[][4],
- const int plane_tot,
+ const int plane_num,
float r_p1[3],
float r_p2[3])
{
@@ -3414,7 +3414,7 @@ bool clip_segment_v3_plane_n(const float p1[3],
float dp[3];
sub_v3_v3v3(dp, p2, p1);
- for (int i = 0; i < plane_tot; i++) {
+ for (int i = 0; i < plane_num; i++) {
const float *plane = plane_array[i];
const float div = dot_v3v3(dp, plane);
diff --git a/source/blender/blenlib/intern/mesh_boolean.cc b/source/blender/blenlib/intern/mesh_boolean.cc
index 70030fc2bdf..700c126ca4c 100644
--- a/source/blender/blenlib/intern/mesh_boolean.cc
+++ b/source/blender/blenlib/intern/mesh_boolean.cc
@@ -171,9 +171,9 @@ TriMeshTopology::TriMeshTopology(const IMesh &tm)
/* If everything were manifold, `F+V-E=2` and `E=3F/2`.
* So an likely overestimate, allowing for non-manifoldness, is `E=2F` and `V=F`. */
const int estimate_num_edges = 2 * tm.face_size();
- const int estimate_num_verts = tm.face_size();
+ const int estimate_verts_num = tm.face_size();
edge_tri_.reserve(estimate_num_edges);
- vert_edges_.reserve(estimate_num_verts);
+ vert_edges_.reserve(estimate_verts_num);
for (int t : tm.face_index_range()) {
const Face &tri = *tm.face(t);
BLI_assert(tri.is_tri());
@@ -2607,18 +2607,18 @@ static void test_tri_inside_shapes(const IMesh &tm,
* Perturb their directions slightly to make it less likely to hit a seam.
* Ray-cast assumes they have unit length, so use r1 near 1 and
* ra near 0.5, and rb near .01, but normalized so `sqrt(r1^2 + ra^2 + rb^2) == 1`. */
- constexpr int num_rays = 6;
+ constexpr int rays_num = 6;
constexpr float r1 = 0.9987025295199663f;
constexpr float ra = 0.04993512647599832f;
constexpr float rb = 0.009987025295199663f;
- const float test_rays[num_rays][3] = {
+ const float test_rays[rays_num][3] = {
{r1, ra, rb}, {-r1, -ra, -rb}, {rb, r1, ra}, {-rb, -r1, -ra}, {ra, rb, r1}, {-ra, -rb, -r1}};
InsideShapeTestData data(tm, shape_fn, nshapes);
data.hit_parity = Array<int>(nshapes, 0);
Array<int> count_insides(nshapes, 0);
const float co[3] = {
float(offset_test_point[0]), float(offset_test_point[1]), float(offset_test_point[2])};
- for (int i = 0; i < num_rays; ++i) {
+ for (int i = 0; i < rays_num; ++i) {
if (dbg_level > 0) {
std::cout << "shoot ray " << i << "(" << test_rays[i][0] << "," << test_rays[i][1] << ","
<< test_rays[i][2] << ")\n";
@@ -2643,7 +2643,7 @@ static void test_tri_inside_shapes(const IMesh &tm,
in_shape[j] = 1.0f; /* Let's say a shape is always inside itself. */
}
else {
- in_shape[j] = float(count_insides[j]) / float(num_rays);
+ in_shape[j] = float(count_insides[j]) / float(rays_num);
}
if (dbg_level > 0) {
std::cout << "shape " << j << " inside = " << in_shape[j] << "\n";
@@ -3400,19 +3400,19 @@ static void dissolve_verts(IMesh *imesh, const Array<bool> dissolve, IMeshArena
for (int f : imesh->face_index_range()) {
const Face &face = *imesh->face(f);
face_pos_erase.clear();
- int num_erase = 0;
+ int erase_num = 0;
for (const Vert *v : face) {
int v_index = imesh->lookup_vert(v);
BLI_assert(v_index != NO_INDEX);
if (dissolve[v_index]) {
face_pos_erase.append(true);
- ++num_erase;
+ ++erase_num;
}
else {
face_pos_erase.append(false);
}
}
- if (num_erase > 0) {
+ if (erase_num > 0) {
any_faces_erased |= imesh->erase_face_positions(f, face_pos_erase, arena);
}
}
@@ -3475,8 +3475,8 @@ static IMesh polymesh_from_trimesh_with_dissolve(const IMesh &tm_out,
if (dbg_level > 1) {
std::cout << "merge tris for face " << in_f << "\n";
}
- int num_out_tris_for_face = face_output_tris.size();
- if (num_out_tris_for_face == 0) {
+ int out_tris_for_face_num = face_output_tris.size();
+ if (out_tris_for_face_num == 0) {
continue;
}
face_output_face[in_f] = merge_tris_for_face(face_output_tris[in_f], tm_out, imesh_in, arena);
diff --git a/source/blender/blenlib/intern/mesh_intersect.cc b/source/blender/blenlib/intern/mesh_intersect.cc
index 96ae0750899..d5585f953ec 100644
--- a/source/blender/blenlib/intern/mesh_intersect.cc
+++ b/source/blender/blenlib/intern/mesh_intersect.cc
@@ -635,8 +635,8 @@ void IMesh::populate_vert()
/* This is likely an overestimate, since verts are shared between
* faces. It is ok if estimate is over or even under. */
constexpr int ESTIMATE_VERTS_PER_FACE = 4;
- int estimate_num_verts = ESTIMATE_VERTS_PER_FACE * face_.size();
- populate_vert(estimate_num_verts);
+ int estimate_verts_num = ESTIMATE_VERTS_PER_FACE * face_.size();
+ populate_vert(estimate_verts_num);
}
void IMesh::populate_vert(int max_verts)
@@ -693,16 +693,16 @@ bool IMesh::erase_face_positions(int f_index, Span<bool> face_pos_erase, IMeshAr
{
const Face *cur_f = this->face(f_index);
int cur_len = cur_f->size();
- int num_to_erase = 0;
+ int to_erase_num = 0;
for (int i : cur_f->index_range()) {
if (face_pos_erase[i]) {
- ++num_to_erase;
+ ++to_erase_num;
}
}
- if (num_to_erase == 0) {
+ if (to_erase_num == 0) {
return false;
}
- int new_len = cur_len - num_to_erase;
+ int new_len = cur_len - to_erase_num;
if (new_len < 3) {
/* This erase causes removal of whole face.
* Because this may be called from a loop over the face array,
@@ -2324,7 +2324,7 @@ class TriOverlaps {
BVHTree *tree_b_{nullptr};
BVHTreeOverlap *overlap_{nullptr};
Array<int> first_overlap_;
- uint overlap_tot_{0};
+ uint overlap_num_{0};
struct CBData {
const IMesh &tm;
@@ -2386,16 +2386,16 @@ class TriOverlaps {
if (two_trees_no_self) {
BLI_bvhtree_balance(tree_b_);
/* Don't expect a lot of trivial intersects in this case. */
- overlap_ = BLI_bvhtree_overlap(tree_, tree_b_, &overlap_tot_, nullptr, nullptr);
+ overlap_ = BLI_bvhtree_overlap(tree_, tree_b_, &overlap_num_, nullptr, nullptr);
}
else {
CBData cbdata{tm, shape_fn, nshapes, use_self};
if (nshapes == 1) {
- overlap_ = BLI_bvhtree_overlap(tree_, tree_, &overlap_tot_, nullptr, nullptr);
+ overlap_ = BLI_bvhtree_overlap(tree_, tree_, &overlap_num_, nullptr, nullptr);
}
else {
overlap_ = BLI_bvhtree_overlap(
- tree_, tree_, &overlap_tot_, only_different_shapes, &cbdata);
+ tree_, tree_, &overlap_num_, only_different_shapes, &cbdata);
}
}
/* The rest of the code is simpler and easier to parallelize if, in the two-trees case,
@@ -2403,23 +2403,23 @@ class TriOverlaps {
* in the repeated part, sorting will then bring things with indexB together. */
if (two_trees_no_self) {
overlap_ = static_cast<BVHTreeOverlap *>(
- MEM_reallocN(overlap_, 2 * overlap_tot_ * sizeof(overlap_[0])));
- for (uint i = 0; i < overlap_tot_; ++i) {
- overlap_[overlap_tot_ + i].indexA = overlap_[i].indexB;
- overlap_[overlap_tot_ + i].indexB = overlap_[i].indexA;
+ MEM_reallocN(overlap_, 2 * overlap_num_ * sizeof(overlap_[0])));
+ for (uint i = 0; i < overlap_num_; ++i) {
+ overlap_[overlap_num_ + i].indexA = overlap_[i].indexB;
+ overlap_[overlap_num_ + i].indexB = overlap_[i].indexA;
}
- overlap_tot_ += overlap_tot_;
+ overlap_num_ += overlap_num_;
}
/* Sort the overlaps to bring all the intersects with a given indexA together. */
- std::sort(overlap_, overlap_ + overlap_tot_, bvhtreeverlap_cmp);
+ std::sort(overlap_, overlap_ + overlap_num_, bvhtreeverlap_cmp);
if (dbg_level > 0) {
- std::cout << overlap_tot_ << " overlaps found:\n";
+ std::cout << overlap_num_ << " overlaps found:\n";
for (BVHTreeOverlap ov : overlap()) {
std::cout << "A: " << ov.indexA << ", B: " << ov.indexB << "\n";
}
}
first_overlap_ = Array<int>(tm.face_size(), -1);
- for (int i = 0; i < static_cast<int>(overlap_tot_); ++i) {
+ for (int i = 0; i < static_cast<int>(overlap_num_); ++i) {
int t = overlap_[i].indexA;
if (first_overlap_[t] == -1) {
first_overlap_[t] = i;
@@ -2442,7 +2442,7 @@ class TriOverlaps {
Span<BVHTreeOverlap> overlap() const
{
- return Span<BVHTreeOverlap>(overlap_, overlap_tot_);
+ return Span<BVHTreeOverlap>(overlap_, overlap_num_);
}
int first_overlap_index(int t) const
@@ -2557,13 +2557,13 @@ static void calc_subdivided_non_cluster_tris(Array<IMesh> &r_tri_subdivided,
int len;
};
Vector<OverlapTriRange> overlap_tri_range;
- int overlap_tot = overlap.size();
- overlap_tri_range.reserve(overlap_tot);
+ int overlap_num = overlap.size();
+ overlap_tri_range.reserve(overlap_num);
int overlap_index = 0;
- while (overlap_index < overlap_tot) {
+ while (overlap_index < overlap_num) {
int t = overlap[overlap_index].indexA;
int i = overlap_index;
- while (i + 1 < overlap_tot && overlap[i + 1].indexA == t) {
+ while (i + 1 < overlap_num && overlap[i + 1].indexA == t) {
++i;
}
/* Now overlap[overlap_index] to overlap[i] have indexA == t.
@@ -2581,8 +2581,8 @@ static void calc_subdivided_non_cluster_tris(Array<IMesh> &r_tri_subdivided,
}
overlap_index = i + 1;
}
- int overlap_tri_range_tot = overlap_tri_range.size();
- Array<CDT_data> cd_data(overlap_tri_range_tot);
+ int overlap_tri_range_num = overlap_tri_range.size();
+ Array<CDT_data> cd_data(overlap_tri_range_num);
int grain_size = 64;
threading::parallel_for(overlap_tri_range.index_range(), grain_size, [&](IndexRange range) {
for (int otr_index : range) {
diff --git a/source/blender/blenlib/intern/path_util.c b/source/blender/blenlib/intern/path_util.c
index 6c576627fa0..5a96221c8d1 100644
--- a/source/blender/blenlib/intern/path_util.c
+++ b/source/blender/blenlib/intern/path_util.c
@@ -53,7 +53,7 @@ static bool BLI_path_is_abs(const char *name);
/* implementation */
-int BLI_path_sequence_decode(const char *string, char *head, char *tail, ushort *r_num_len)
+int BLI_path_sequence_decode(const char *string, char *head, char *tail, ushort *r_digits_len)
{
uint nums = 0, nume = 0;
int i;
@@ -98,8 +98,8 @@ int BLI_path_sequence_decode(const char *string, char *head, char *tail, ushort
strcpy(head, string);
head[nums] = 0;
}
- if (r_num_len) {
- *r_num_len = nume - nums + 1;
+ if (r_digits_len) {
+ *r_digits_len = nume - nums + 1;
}
return (int)ret;
}
@@ -114,8 +114,8 @@ int BLI_path_sequence_decode(const char *string, char *head, char *tail, ushort
*/
BLI_strncpy(head, string, name_end + 1);
}
- if (r_num_len) {
- *r_num_len = 0;
+ if (r_digits_len) {
+ *r_digits_len = 0;
}
return 0;
}
@@ -750,14 +750,14 @@ bool BLI_path_frame_range(char *path, int sta, int end, int digits)
return false;
}
-bool BLI_path_frame_get(char *path, int *r_frame, int *r_numdigits)
+bool BLI_path_frame_get(char *path, int *r_frame, int *r_digits_len)
{
if (*path) {
char *file = (char *)BLI_path_slash_rfind(path);
char *c;
- int len, numdigits;
+ int len, digits_len;
- numdigits = *r_numdigits = 0;
+ digits_len = *r_digits_len = 0;
if (file == NULL) {
file = path;
@@ -779,21 +779,21 @@ bool BLI_path_frame_get(char *path, int *r_frame, int *r_numdigits)
/* find start of number */
while (c != (file - 1) && isdigit(*c)) {
c--;
- numdigits++;
+ digits_len++;
}
- if (numdigits) {
+ if (digits_len) {
char prevchar;
c++;
- prevchar = c[numdigits];
- c[numdigits] = 0;
+ prevchar = c[digits_len];
+ c[digits_len] = 0;
/* was the number really an extension? */
*r_frame = atoi(c);
- c[numdigits] = prevchar;
+ c[digits_len] = prevchar;
- *r_numdigits = numdigits;
+ *r_digits_len = digits_len;
return true;
}
@@ -812,7 +812,7 @@ void BLI_path_frame_strip(char *path, char *r_ext)
char *file = (char *)BLI_path_slash_rfind(path);
char *c, *suffix;
int len;
- int numdigits = 0;
+ int digits_len = 0;
if (file == NULL) {
file = path;
@@ -836,7 +836,7 @@ void BLI_path_frame_strip(char *path, char *r_ext)
/* find start of number */
while (c != (file - 1) && isdigit(*c)) {
c--;
- numdigits++;
+ digits_len++;
}
c++;
@@ -845,7 +845,7 @@ void BLI_path_frame_strip(char *path, char *r_ext)
BLI_strncpy(r_ext, suffix, suffix_length + 1);
/* replace the number with the suffix and terminate the string */
- while (numdigits--) {
+ while (digits_len--) {
*c++ = '#';
}
*c = '\0';
diff --git a/source/blender/blenlib/intern/polyfill_2d.c b/source/blender/blenlib/intern/polyfill_2d.c
index 76cf7880c7a..eed87eda436 100644
--- a/source/blender/blenlib/intern/polyfill_2d.c
+++ b/source/blender/blenlib/intern/polyfill_2d.c
@@ -100,7 +100,7 @@ struct KDTree2D {
KDTreeNode2D *nodes;
const float (*coords)[2];
uint root;
- uint totnode;
+ uint node_num;
uint *nodes_map; /* index -> node lookup */
};
@@ -119,14 +119,14 @@ typedef struct PolyFill {
struct PolyIndex *indices; /* vertex aligned */
const float (*coords)[2];
- uint coords_tot;
+ uint coords_num;
#ifdef USE_CONVEX_SKIP
- uint coords_tot_concave;
+ uint coords_num_concave;
#endif
/* A polygon with n vertices has a triangulation of n-2 triangles. */
uint (*tris)[3];
- uint tris_tot;
+ uint tris_num;
#ifdef USE_KDTREE
struct KDTree2D kdtree;
@@ -202,18 +202,18 @@ static void kdtree2d_new(struct KDTree2D *tree, uint tot, const float (*coords)[
// tree->nodes = nodes;
tree->coords = coords;
tree->root = KDNODE_UNSET;
- tree->totnode = tot;
+ tree->node_num = tot;
}
/**
* no need for kdtree2d_insert, since we know the coords array.
*/
-static void kdtree2d_init(struct KDTree2D *tree, const uint coords_tot, const PolyIndex *indices)
+static void kdtree2d_init(struct KDTree2D *tree, const uint coords_num, const PolyIndex *indices)
{
KDTreeNode2D *node;
uint i;
- for (i = 0, node = tree->nodes; i < coords_tot; i++) {
+ for (i = 0, node = tree->nodes; i < coords_num; i++) {
if (indices[i].sign != CONVEX) {
node->neg = node->pos = KDNODE_UNSET;
node->index = indices[i].index;
@@ -223,26 +223,26 @@ static void kdtree2d_init(struct KDTree2D *tree, const uint coords_tot, const Po
}
}
- BLI_assert(tree->totnode == (uint)(node - tree->nodes));
+ BLI_assert(tree->node_num == (uint)(node - tree->nodes));
}
static uint kdtree2d_balance_recursive(
- KDTreeNode2D *nodes, uint totnode, axis_t axis, const float (*coords)[2], const uint ofs)
+ KDTreeNode2D *nodes, uint node_num, axis_t axis, const float (*coords)[2], const uint ofs)
{
KDTreeNode2D *node;
uint neg, pos, median, i, j;
- if (totnode <= 0) {
+ if (node_num <= 0) {
return KDNODE_UNSET;
}
- if (totnode == 1) {
+ if (node_num == 1) {
return 0 + ofs;
}
/* Quick-sort style sorting around median. */
neg = 0;
- pos = totnode - 1;
- median = totnode / 2;
+ pos = node_num - 1;
+ median = node_num / 2;
while (pos > neg) {
const float co = coords[nodes[pos].index][axis];
@@ -276,14 +276,14 @@ static uint kdtree2d_balance_recursive(
axis = !axis;
node->neg = kdtree2d_balance_recursive(nodes, median, axis, coords, ofs);
node->pos = kdtree2d_balance_recursive(
- &nodes[median + 1], (totnode - (median + 1)), axis, coords, (median + 1) + ofs);
+ &nodes[median + 1], (node_num - (median + 1)), axis, coords, (median + 1) + ofs);
return median + ofs;
}
static void kdtree2d_balance(struct KDTree2D *tree)
{
- tree->root = kdtree2d_balance_recursive(tree->nodes, tree->totnode, 0, tree->coords, 0);
+ tree->root = kdtree2d_balance_recursive(tree->nodes, tree->node_num, 0, tree->coords, 0);
}
static void kdtree2d_init_mapping(struct KDTree2D *tree)
@@ -291,7 +291,7 @@ static void kdtree2d_init_mapping(struct KDTree2D *tree)
uint i;
KDTreeNode2D *node;
- for (i = 0, node = tree->nodes; i < tree->totnode; i++, node++) {
+ for (i = 0, node = tree->nodes; i < tree->node_num; i++, node++) {
if (node->neg != KDNODE_UNSET) {
tree->nodes[node->neg].parent = i;
}
@@ -319,7 +319,7 @@ static void kdtree2d_node_remove(struct KDTree2D *tree, uint index)
tree->nodes_map[index] = KDNODE_UNSET;
node = &tree->nodes[node_index];
- tree->totnode -= 1;
+ tree->node_num -= 1;
BLI_assert((node->flag & KDNODE_FLAG_REMOVED) == 0);
node->flag |= KDNODE_FLAG_REMOVED;
@@ -435,14 +435,14 @@ static bool kdtree2d_isect_tri(struct KDTree2D *tree, const uint ind[3])
static uint *pf_tri_add(PolyFill *pf)
{
- return pf->tris[pf->tris_tot++];
+ return pf->tris[pf->tris_num++];
}
static void pf_coord_remove(PolyFill *pf, PolyIndex *pi)
{
#ifdef USE_KDTREE
/* avoid double lookups, since convex coords are ignored when testing intersections */
- if (pf->kdtree.totnode) {
+ if (pf->kdtree.node_num) {
kdtree2d_node_remove(&pf->kdtree, pi->index);
}
#endif
@@ -458,7 +458,7 @@ static void pf_coord_remove(PolyFill *pf, PolyIndex *pi)
pi->next = pi->prev = NULL;
#endif
- pf->coords_tot -= 1;
+ pf->coords_num -= 1;
}
static void pf_triangulate(PolyFill *pf)
@@ -473,7 +473,7 @@ static void pf_triangulate(PolyFill *pf)
bool reverse = false;
#endif
- while (pf->coords_tot > 3) {
+ while (pf->coords_num > 3) {
PolyIndex *pi_prev, *pi_next;
eSign sign_orig_prev, sign_orig_next;
@@ -490,7 +490,7 @@ static void pf_triangulate(PolyFill *pf)
#ifdef USE_CONVEX_SKIP
if (pi_ear->sign != CONVEX) {
- pf->coords_tot_concave -= 1;
+ pf->coords_num_concave -= 1;
}
#endif
@@ -509,7 +509,7 @@ static void pf_triangulate(PolyFill *pf)
pf_coord_sign_calc(pf, pi_prev);
#ifdef USE_CONVEX_SKIP
if (pi_prev->sign == CONVEX) {
- pf->coords_tot_concave -= 1;
+ pf->coords_num_concave -= 1;
# ifdef USE_KDTREE
kdtree2d_node_remove(&pf->kdtree, pi_prev->index);
# endif
@@ -520,7 +520,7 @@ static void pf_triangulate(PolyFill *pf)
pf_coord_sign_calc(pf, pi_next);
#ifdef USE_CONVEX_SKIP
if (pi_next->sign == CONVEX) {
- pf->coords_tot_concave -= 1;
+ pf->coords_num_concave -= 1;
# ifdef USE_KDTREE
kdtree2d_node_remove(&pf->kdtree, pi_next->index);
# endif
@@ -551,7 +551,7 @@ static void pf_triangulate(PolyFill *pf)
#endif
}
- if (pf->coords_tot == 3) {
+ if (pf->coords_num == 3) {
uint *tri = pf_tri_add(pf);
pi_ear = pf->indices;
tri[0] = pi_ear->index;
@@ -585,7 +585,7 @@ static PolyIndex *pf_ear_tip_find(PolyFill *pf
)
{
/* localize */
- const uint coords_tot = pf->coords_tot;
+ const uint coords_num = pf->coords_num;
PolyIndex *pi_ear;
uint i;
@@ -596,7 +596,7 @@ static PolyIndex *pf_ear_tip_find(PolyFill *pf
pi_ear = pf->indices;
#endif
- i = coords_tot;
+ i = coords_num;
while (i--) {
if (pf_ear_tip_check(pf, pi_ear)) {
return pi_ear;
@@ -626,7 +626,7 @@ static PolyIndex *pf_ear_tip_find(PolyFill *pf
pi_ear = pf->indices;
#endif
- i = coords_tot;
+ i = coords_num;
while (i--) {
if (pi_ear->sign != CONCAVE) {
return pi_ear;
@@ -649,7 +649,7 @@ static bool pf_ear_tip_check(PolyFill *pf, PolyIndex *pi_ear_tip)
#endif
#if defined(USE_CONVEX_SKIP) && !defined(USE_KDTREE)
- uint coords_tot_concave_checked = 0;
+ uint coords_num_concave_checked = 0;
#endif
#ifdef USE_CONVEX_SKIP
@@ -657,19 +657,19 @@ static bool pf_ear_tip_check(PolyFill *pf, PolyIndex *pi_ear_tip)
# ifdef USE_CONVEX_SKIP_TEST
/* check if counting is wrong */
{
- uint coords_tot_concave_test = 0;
+ uint coords_num_concave_test = 0;
PolyIndex *pi_iter = pi_ear_tip;
do {
if (pi_iter->sign != CONVEX) {
- coords_tot_concave_test += 1;
+ coords_num_concave_test += 1;
}
} while ((pi_iter = pi_iter->next) != pi_ear_tip);
- BLI_assert(coords_tot_concave_test == pf->coords_tot_concave);
+ BLI_assert(coords_num_concave_test == pf->coords_num_concave);
}
# endif
/* fast-path for circles */
- if (pf->coords_tot_concave == 0) {
+ if (pf->coords_num_concave == 0) {
return true;
}
#endif
@@ -715,8 +715,8 @@ static bool pf_ear_tip_check(PolyFill *pf, PolyIndex *pi_ear_tip)
}
# ifdef USE_CONVEX_SKIP
- coords_tot_concave_checked += 1;
- if (coords_tot_concave_checked == pf->coords_tot_concave) {
+ coords_num_concave_checked += 1;
+ if (coords_num_concave_checked == pf->coords_num_concave) {
break;
}
# endif
@@ -743,7 +743,7 @@ static void pf_ear_tip_cut(PolyFill *pf, PolyIndex *pi_ear_tip)
*/
static void polyfill_prepare(PolyFill *pf,
const float (*coords)[2],
- const uint coords_tot,
+ const uint coords_num,
int coords_sign,
uint (*r_tris)[3],
PolyIndex *r_indices)
@@ -756,32 +756,32 @@ static void polyfill_prepare(PolyFill *pf,
/* assign all polyfill members here */
pf->indices = r_indices;
pf->coords = coords;
- pf->coords_tot = coords_tot;
+ pf->coords_num = coords_num;
#ifdef USE_CONVEX_SKIP
- pf->coords_tot_concave = 0;
+ pf->coords_num_concave = 0;
#endif
pf->tris = r_tris;
- pf->tris_tot = 0;
+ pf->tris_num = 0;
if (coords_sign == 0) {
- coords_sign = (cross_poly_v2(coords, coords_tot) >= 0.0f) ? 1 : -1;
+ coords_sign = (cross_poly_v2(coords, coords_num) >= 0.0f) ? 1 : -1;
}
else {
/* check we're passing in correct args */
#ifdef USE_STRICT_ASSERT
# ifndef NDEBUG
if (coords_sign == 1) {
- BLI_assert(cross_poly_v2(coords, coords_tot) >= 0.0f);
+ BLI_assert(cross_poly_v2(coords, coords_num) >= 0.0f);
}
else {
- BLI_assert(cross_poly_v2(coords, coords_tot) <= 0.0f);
+ BLI_assert(cross_poly_v2(coords, coords_num) <= 0.0f);
}
# endif
#endif
}
if (coords_sign == 1) {
- for (i = 0; i < coords_tot; i++) {
+ for (i = 0; i < coords_num; i++) {
indices[i].next = &indices[i + 1];
indices[i].prev = &indices[i - 1];
indices[i].index = i;
@@ -789,22 +789,22 @@ static void polyfill_prepare(PolyFill *pf,
}
else {
/* reversed */
- uint n = coords_tot - 1;
- for (i = 0; i < coords_tot; i++) {
+ uint n = coords_num - 1;
+ for (i = 0; i < coords_num; i++) {
indices[i].next = &indices[i + 1];
indices[i].prev = &indices[i - 1];
indices[i].index = (n - i);
}
}
- indices[0].prev = &indices[coords_tot - 1];
- indices[coords_tot - 1].next = &indices[0];
+ indices[0].prev = &indices[coords_num - 1];
+ indices[coords_num - 1].next = &indices[0];
- for (i = 0; i < coords_tot; i++) {
+ for (i = 0; i < coords_num; i++) {
PolyIndex *pi = &indices[i];
pf_coord_sign_calc(pf, pi);
#ifdef USE_CONVEX_SKIP
if (pi->sign != CONVEX) {
- pf->coords_tot_concave += 1;
+ pf->coords_num_concave += 1;
}
#endif
}
@@ -814,11 +814,11 @@ static void polyfill_calc(PolyFill *pf)
{
#ifdef USE_KDTREE
# ifdef USE_CONVEX_SKIP
- if (pf->coords_tot_concave)
+ if (pf->coords_num_concave)
# endif
{
- kdtree2d_new(&pf->kdtree, pf->coords_tot_concave, pf->coords);
- kdtree2d_init(&pf->kdtree, pf->coords_tot, pf->indices);
+ kdtree2d_new(&pf->kdtree, pf->coords_num_concave, pf->coords);
+ kdtree2d_init(&pf->kdtree, pf->coords_num, pf->indices);
kdtree2d_balance(&pf->kdtree);
kdtree2d_init_mapping(&pf->kdtree);
}
@@ -828,14 +828,14 @@ static void polyfill_calc(PolyFill *pf)
}
void BLI_polyfill_calc_arena(const float (*coords)[2],
- const uint coords_tot,
+ const uint coords_num,
const int coords_sign,
uint (*r_tris)[3],
struct MemArena *arena)
{
PolyFill pf;
- PolyIndex *indices = BLI_memarena_alloc(arena, sizeof(*indices) * coords_tot);
+ PolyIndex *indices = BLI_memarena_alloc(arena, sizeof(*indices) * coords_num);
#ifdef DEBUG_TIME
TIMEIT_START(polyfill2d);
@@ -843,22 +843,22 @@ void BLI_polyfill_calc_arena(const float (*coords)[2],
polyfill_prepare(&pf,
coords,
- coords_tot,
+ coords_num,
coords_sign,
r_tris,
/* cache */
indices);
#ifdef USE_KDTREE
- if (pf.coords_tot_concave) {
- pf.kdtree.nodes = BLI_memarena_alloc(arena, sizeof(*pf.kdtree.nodes) * pf.coords_tot_concave);
+ if (pf.coords_num_concave) {
+ pf.kdtree.nodes = BLI_memarena_alloc(arena, sizeof(*pf.kdtree.nodes) * pf.coords_num_concave);
pf.kdtree.nodes_map = memset(
- BLI_memarena_alloc(arena, sizeof(*pf.kdtree.nodes_map) * coords_tot),
+ BLI_memarena_alloc(arena, sizeof(*pf.kdtree.nodes_map) * coords_num),
0xff,
- sizeof(*pf.kdtree.nodes_map) * coords_tot);
+ sizeof(*pf.kdtree.nodes_map) * coords_num);
}
else {
- pf.kdtree.totnode = 0;
+ pf.kdtree.node_num = 0;
}
#endif
@@ -873,25 +873,25 @@ void BLI_polyfill_calc_arena(const float (*coords)[2],
}
void BLI_polyfill_calc(const float (*coords)[2],
- const uint coords_tot,
+ const uint coords_num,
const int coords_sign,
uint (*r_tris)[3])
{
/* Fallback to heap memory for large allocations.
* Avoid running out of stack memory on systems with 512kb stack (macOS).
* This happens at around 13,000 points, use a much lower value to be safe. */
- if (UNLIKELY(coords_tot > 8192)) {
+ if (UNLIKELY(coords_num > 8192)) {
/* The buffer size only accounts for the index allocation,
* worst case we do two allocations when concave, while we should try to be efficient,
* any caller that relies on this frequently should use #BLI_polyfill_calc_arena directly. */
- MemArena *arena = BLI_memarena_new(sizeof(PolyIndex) * coords_tot, __func__);
- BLI_polyfill_calc_arena(coords, coords_tot, coords_sign, r_tris, arena);
+ MemArena *arena = BLI_memarena_new(sizeof(PolyIndex) * coords_num, __func__);
+ BLI_polyfill_calc_arena(coords, coords_num, coords_sign, r_tris, arena);
BLI_memarena_free(arena);
return;
}
PolyFill pf;
- PolyIndex *indices = BLI_array_alloca(indices, coords_tot);
+ PolyIndex *indices = BLI_array_alloca(indices, coords_num);
#ifdef DEBUG_TIME
TIMEIT_START(polyfill2d);
@@ -899,21 +899,21 @@ void BLI_polyfill_calc(const float (*coords)[2],
polyfill_prepare(&pf,
coords,
- coords_tot,
+ coords_num,
coords_sign,
r_tris,
/* cache */
indices);
#ifdef USE_KDTREE
- if (pf.coords_tot_concave) {
- pf.kdtree.nodes = BLI_array_alloca(pf.kdtree.nodes, pf.coords_tot_concave);
- pf.kdtree.nodes_map = memset(BLI_array_alloca(pf.kdtree.nodes_map, coords_tot),
+ if (pf.coords_num_concave) {
+ pf.kdtree.nodes = BLI_array_alloca(pf.kdtree.nodes, pf.coords_num_concave);
+ pf.kdtree.nodes_map = memset(BLI_array_alloca(pf.kdtree.nodes_map, coords_num),
0xff,
- sizeof(*pf.kdtree.nodes_map) * coords_tot);
+ sizeof(*pf.kdtree.nodes_map) * coords_num);
}
else {
- pf.kdtree.totnode = 0;
+ pf.kdtree.node_num = 0;
}
#endif
diff --git a/source/blender/blenlib/intern/polyfill_2d_beautify.c b/source/blender/blenlib/intern/polyfill_2d_beautify.c
index c527e88b440..38cf97d6a8f 100644
--- a/source/blender/blenlib/intern/polyfill_2d_beautify.c
+++ b/source/blender/blenlib/intern/polyfill_2d_beautify.c
@@ -288,15 +288,15 @@ static void polyedge_rotate(struct HalfEdge *edges, struct HalfEdge *e)
}
void BLI_polyfill_beautify(const float (*coords)[2],
- const uint coords_tot,
+ const uint coords_num,
uint (*tris)[3],
/* structs for reuse */
MemArena *arena,
Heap *eheap)
{
- const uint coord_last = coords_tot - 1;
- const uint tris_len = coords_tot - 2;
+ const uint coord_last = coords_num - 1;
+ const uint tris_len = coords_num - 2;
/* internal edges only (between 2 tris) */
const uint edges_len = tris_len - 1;
diff --git a/source/blender/blenlib/intern/rand.cc b/source/blender/blenlib/intern/rand.cc
index 17bf5585f3f..f6d91cdcc4f 100644
--- a/source/blender/blenlib/intern/rand.cc
+++ b/source/blender/blenlib/intern/rand.cc
@@ -117,18 +117,18 @@ void BLI_rng_get_tri_sample_float_v3(
copy_v3_v3(r_pt, rng->rng.get_triangle_sample_3d(v1, v2, v3));
}
-void BLI_rng_shuffle_array(RNG *rng, void *data, unsigned int elem_size_i, unsigned int elem_tot)
+void BLI_rng_shuffle_array(RNG *rng, void *data, unsigned int elem_size_i, unsigned int elem_num)
{
- if (elem_tot <= 1) {
+ if (elem_num <= 1) {
return;
}
const uint elem_size = elem_size_i;
- unsigned int i = elem_tot;
+ unsigned int i = elem_num;
void *temp = malloc(elem_size);
while (i--) {
- const unsigned int j = BLI_rng_get_uint(rng) % elem_tot;
+ const unsigned int j = BLI_rng_get_uint(rng) % elem_num;
if (i != j) {
void *iElem = (unsigned char *)data + i * elem_size_i;
void *jElem = (unsigned char *)data + j * elem_size_i;
@@ -141,15 +141,15 @@ void BLI_rng_shuffle_array(RNG *rng, void *data, unsigned int elem_size_i, unsig
free(temp);
}
-void BLI_rng_shuffle_bitmap(struct RNG *rng, BLI_bitmap *bitmap, unsigned int bits_tot)
+void BLI_rng_shuffle_bitmap(struct RNG *rng, BLI_bitmap *bitmap, unsigned int bits_num)
{
- if (bits_tot <= 1) {
+ if (bits_num <= 1) {
return;
}
- unsigned int i = bits_tot;
+ unsigned int i = bits_num;
while (i--) {
- const unsigned int j = BLI_rng_get_uint(rng) % bits_tot;
+ const unsigned int j = BLI_rng_get_uint(rng) % bits_num;
if (i != j) {
const bool i_bit = BLI_BITMAP_TEST(bitmap, i);
const bool j_bit = BLI_BITMAP_TEST(bitmap, j);
@@ -187,21 +187,21 @@ float BLI_hash_frand(unsigned int seed)
void BLI_array_randomize(void *data,
unsigned int elem_size,
- unsigned int elem_tot,
+ unsigned int elem_num,
unsigned int seed)
{
RNG rng;
BLI_rng_seed(&rng, seed);
- BLI_rng_shuffle_array(&rng, data, elem_size, elem_tot);
+ BLI_rng_shuffle_array(&rng, data, elem_size, elem_num);
}
-void BLI_bitmap_randomize(BLI_bitmap *bitmap, unsigned int bits_tot, unsigned int seed)
+void BLI_bitmap_randomize(BLI_bitmap *bitmap, unsigned int bits_num, unsigned int seed)
{
RNG rng;
BLI_rng_seed(&rng, seed);
- BLI_rng_shuffle_bitmap(&rng, bitmap, bits_tot);
+ BLI_rng_shuffle_bitmap(&rng, bitmap, bits_num);
}
/* ********* for threaded random ************** */
diff --git a/source/blender/blenlib/intern/scanfill.c b/source/blender/blenlib/intern/scanfill.c
index 7e9893925a4..32932c3dee1 100644
--- a/source/blender/blenlib/intern/scanfill.c
+++ b/source/blender/blenlib/intern/scanfill.c
@@ -124,7 +124,7 @@ ScanFillVert *BLI_scanfill_vert_add(ScanFillContext *sf_ctx, const float vec[3])
zero_v2(sf_v->xy);
sf_v->keyindex = 0;
sf_v->poly_nr = sf_ctx->poly_nr;
- sf_v->edge_tot = 0;
+ sf_v->edge_count = 0;
sf_v->f = SF_VERT_NEW;
sf_v->user_flag = 0;
@@ -373,14 +373,14 @@ static bool boundinsideEV(ScanFillEdge *eed, ScanFillVert *eve)
static void testvertexnearedge(ScanFillContext *sf_ctx)
{
- /* only vertices with (->edge_tot == 1) are being tested for
+ /* only vertices with (->edge_count == 1) are being tested for
* being close to an edge, if true insert */
ScanFillVert *eve;
ScanFillEdge *eed, *ed1;
for (eve = sf_ctx->fillvertbase.first; eve; eve = eve->next) {
- if (eve->edge_tot == 1) {
+ if (eve->edge_count == 1) {
/* find the edge which has vertex eve,
* NOTE: we _know_ this will crash if 'ed1' becomes NULL
* but this will never happen. */
@@ -398,14 +398,14 @@ static void testvertexnearedge(ScanFillContext *sf_ctx)
if (eve != eed->v1 && eve != eed->v2 && eve->poly_nr == eed->poly_nr) {
if (compare_v2v2(eve->xy, eed->v1->xy, SF_EPSILON)) {
ed1->v2 = eed->v1;
- eed->v1->edge_tot++;
- eve->edge_tot = 0;
+ eed->v1->edge_count++;
+ eve->edge_count = 0;
break;
}
if (compare_v2v2(eve->xy, eed->v2->xy, SF_EPSILON)) {
ed1->v2 = eed->v2;
- eed->v2->edge_tot++;
- eve->edge_tot = 0;
+ eed->v2->edge_count++;
+ eve->edge_count = 0;
break;
}
@@ -418,7 +418,7 @@ static void testvertexnearedge(ScanFillContext *sf_ctx)
// printf("fill: vertex near edge %x\n", eve);
ed1->poly_nr = eed->poly_nr;
eed->v1 = eve;
- eve->edge_tot = 3;
+ eve->edge_count = 3;
break;
}
}
@@ -597,14 +597,14 @@ static unsigned int scanfill(ScanFillContext *sf_ctx, PolyFill *pf, const int fl
/* Set connect-flags. */
for (ed1 = sc->edge_first; ed1; ed1 = eed_next) {
eed_next = ed1->next;
- if (ed1->v1->edge_tot == 1 || ed1->v2->edge_tot == 1) {
+ if (ed1->v1->edge_count == 1 || ed1->v2->edge_count == 1) {
BLI_remlink((ListBase *)&(sc->edge_first), ed1);
BLI_addtail(&sf_ctx->filledgebase, ed1);
- if (ed1->v1->edge_tot > 1) {
- ed1->v1->edge_tot--;
+ if (ed1->v1->edge_count > 1) {
+ ed1->v1->edge_count--;
}
- if (ed1->v2->edge_tot > 1) {
- ed1->v2->edge_tot--;
+ if (ed1->v2->edge_count > 1) {
+ ed1->v2->edge_count--;
}
}
else {
@@ -628,8 +628,8 @@ static unsigned int scanfill(ScanFillContext *sf_ctx, PolyFill *pf, const int fl
// printf("just 1 edge to vert\n");
BLI_addtail(&sf_ctx->filledgebase, ed1);
ed1->v2->f = SF_VERT_NEW;
- ed1->v1->edge_tot--;
- ed1->v2->edge_tot--;
+ ed1->v1->edge_count--;
+ ed1->v2->edge_count--;
}
else {
/* test rest of vertices */
@@ -697,8 +697,8 @@ static unsigned int scanfill(ScanFillContext *sf_ctx, PolyFill *pf, const int fl
BLI_insertlinkbefore((ListBase *)&(sc->edge_first), ed2, ed3);
ed3->v2->f = SF_VERT_AVAILABLE;
ed3->f = SF_EDGE_INTERNAL;
- ed3->v1->edge_tot++;
- ed3->v2->edge_tot++;
+ ed3->v1->edge_count++;
+ ed3->v2->edge_count++;
}
else {
/* new triangle */
@@ -708,39 +708,39 @@ static unsigned int scanfill(ScanFillContext *sf_ctx, PolyFill *pf, const int fl
BLI_remlink((ListBase *)&(sc->edge_first), ed1);
BLI_addtail(&sf_ctx->filledgebase, ed1);
ed1->v2->f = SF_VERT_NEW;
- ed1->v1->edge_tot--;
- ed1->v2->edge_tot--;
+ ed1->v1->edge_count--;
+ ed1->v2->edge_count--;
/* ed2 can be removed when it's a boundary edge */
if (((ed2->f == SF_EDGE_NEW) && twoconnected) /* || (ed2->f == SF_EDGE_BOUNDARY) */) {
BLI_remlink((ListBase *)&(sc->edge_first), ed2);
BLI_addtail(&sf_ctx->filledgebase, ed2);
ed2->v2->f = SF_VERT_NEW;
- ed2->v1->edge_tot--;
- ed2->v2->edge_tot--;
+ ed2->v1->edge_count--;
+ ed2->v2->edge_count--;
}
/* new edge */
ed3 = BLI_scanfill_edge_add(sf_ctx, v1, v3);
BLI_remlink(&sf_ctx->filledgebase, ed3);
ed3->f = SF_EDGE_INTERNAL;
- ed3->v1->edge_tot++;
- ed3->v2->edge_tot++;
+ ed3->v1->edge_count++;
+ ed3->v2->edge_count++;
// printf("add new edge %x %x\n", v1, v3);
sc1 = addedgetoscanlist(scdata, ed3, verts);
if (sc1) { /* ed3 already exists: remove if a boundary */
// printf("Edge exists\n");
- ed3->v1->edge_tot--;
- ed3->v2->edge_tot--;
+ ed3->v1->edge_count--;
+ ed3->v2->edge_count--;
for (ed3 = sc1->edge_first; ed3; ed3 = ed3->next) {
if ((ed3->v1 == v1 && ed3->v2 == v3) || (ed3->v1 == v3 && ed3->v2 == v1)) {
if (twoconnected /* || (ed3->f == SF_EDGE_BOUNDARY) */) {
BLI_remlink((ListBase *)&(sc1->edge_first), ed3);
BLI_addtail(&sf_ctx->filledgebase, ed3);
- ed3->v1->edge_tot--;
- ed3->v2->edge_tot--;
+ ed3->v1->edge_count--;
+ ed3->v2->edge_count--;
}
break;
}
@@ -752,14 +752,14 @@ static unsigned int scanfill(ScanFillContext *sf_ctx, PolyFill *pf, const int fl
/* test for loose edges */
for (ed1 = sc->edge_first; ed1; ed1 = eed_next) {
eed_next = ed1->next;
- if (ed1->v1->edge_tot < 2 || ed1->v2->edge_tot < 2) {
+ if (ed1->v1->edge_count < 2 || ed1->v2->edge_count < 2) {
BLI_remlink((ListBase *)&(sc->edge_first), ed1);
BLI_addtail(&sf_ctx->filledgebase, ed1);
- if (ed1->v1->edge_tot > 1) {
- ed1->v1->edge_tot--;
+ if (ed1->v1->edge_count > 1) {
+ ed1->v1->edge_count--;
}
- if (ed1->v2->edge_tot > 1) {
- ed1->v2->edge_tot--;
+ if (ed1->v2->edge_count > 1) {
+ ed1->v2->edge_count--;
}
}
}
@@ -838,7 +838,7 @@ unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const
* however they should always be zero'd so check instead */
BLI_assert(eve->f == 0);
BLI_assert(sf_ctx->poly_nr || eve->poly_nr == 0);
- BLI_assert(eve->edge_tot == 0);
+ BLI_assert(eve->edge_count == 0);
}
#endif
@@ -964,10 +964,10 @@ unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const
if (flag & BLI_SCANFILL_CALC_LOOSE) {
unsigned int toggle = 0;
for (eed = sf_ctx->filledgebase.first; eed; eed = eed->next) {
- if (eed->v1->edge_tot++ > 250) {
+ if (eed->v1->edge_count++ > 250) {
break;
}
- if (eed->v2->edge_tot++ > 250) {
+ if (eed->v2->edge_count++ > 250) {
break;
}
}
@@ -979,7 +979,7 @@ unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const
return 0;
}
- /* does it only for vertices with (->edge_tot == 1) */
+ /* does it only for vertices with (->edge_count == 1) */
testvertexnearedge(sf_ctx);
ok = true;
@@ -990,14 +990,14 @@ unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const
for (eed = (toggle & 1) ? sf_ctx->filledgebase.first : sf_ctx->filledgebase.last; eed;
eed = eed_next) {
eed_next = (toggle & 1) ? eed->next : eed->prev;
- if (eed->v1->edge_tot == 1) {
- eed->v2->edge_tot--;
+ if (eed->v1->edge_count == 1) {
+ eed->v2->edge_count--;
BLI_remlink(&sf_ctx->fillvertbase, eed->v1);
BLI_remlink(&sf_ctx->filledgebase, eed);
ok = true;
}
- else if (eed->v2->edge_tot == 1) {
- eed->v1->edge_tot--;
+ else if (eed->v2->edge_count == 1) {
+ eed->v1->edge_count--;
BLI_remlink(&sf_ctx->fillvertbase, eed->v2);
BLI_remlink(&sf_ctx->filledgebase, eed);
ok = true;
@@ -1012,14 +1012,14 @@ unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const
else {
/* skip checks for loose edges */
for (eed = sf_ctx->filledgebase.first; eed; eed = eed->next) {
- eed->v1->edge_tot++;
- eed->v2->edge_tot++;
+ eed->v1->edge_count++;
+ eed->v2->edge_count++;
}
#ifdef DEBUG
/* ensure we're right! */
for (eed = sf_ctx->filledgebase.first; eed; eed = eed->next) {
- BLI_assert(eed->v1->edge_tot != 1);
- BLI_assert(eed->v2->edge_tot != 1);
+ BLI_assert(eed->v1->edge_count != 1);
+ BLI_assert(eed->v2->edge_count != 1);
}
#endif
}
@@ -1027,7 +1027,7 @@ unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const
/* CURRENT STATUS:
* - `eve->f`: 1 = available in edges.
* - `eve->poly_nr`: poly-number.
- * - `eve->edge_tot`: amount of edges connected to vertex.
+ * - `eve->edge_count`: amount of edges connected to vertex.
* - `eve->tmp.v`: store! original vertex number.
*
* - `eed->f`: 1 = boundary edge (optionally set by caller).
@@ -1058,7 +1058,7 @@ unsigned int BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const
min_xy_p[1] = (min_xy_p[1]) < (eve->xy[1]) ? (min_xy_p[1]) : (eve->xy[1]);
max_xy_p[0] = (max_xy_p[0]) > (eve->xy[0]) ? (max_xy_p[0]) : (eve->xy[0]);
max_xy_p[1] = (max_xy_p[1]) > (eve->xy[1]) ? (max_xy_p[1]) : (eve->xy[1]);
- if (eve->edge_tot > 2) {
+ if (eve->edge_count > 2) {
pflist[eve->poly_nr].f = SF_POLY_VALID;
}
}
diff --git a/source/blender/blenlib/intern/scanfill_utils.c b/source/blender/blenlib/intern/scanfill_utils.c
index 149589fb933..1d2225a5b56 100644
--- a/source/blender/blenlib/intern/scanfill_utils.c
+++ b/source/blender/blenlib/intern/scanfill_utils.c
@@ -359,7 +359,7 @@ bool BLI_scanfill_calc_self_isect(ScanFillContext *sf_ctx,
ListBase *remvertbase,
ListBase *remedgebase)
{
- const unsigned int poly_tot = (unsigned int)sf_ctx->poly_nr + 1;
+ const unsigned int poly_num = (unsigned int)sf_ctx->poly_nr + 1;
unsigned int eed_index = 0;
int totvert_new = 0;
bool changed = false;
@@ -370,7 +370,7 @@ bool BLI_scanfill_calc_self_isect(ScanFillContext *sf_ctx,
return false;
}
- poly_info = MEM_callocN(sizeof(*poly_info) * poly_tot, __func__);
+ poly_info = MEM_callocN(sizeof(*poly_info) * poly_num, __func__);
/* get the polygon span */
if (sf_ctx->poly_nr == 0) {
@@ -408,7 +408,7 @@ bool BLI_scanfill_calc_self_isect(ScanFillContext *sf_ctx,
/* self-intersect each polygon */
{
unsigned short poly_nr;
- for (poly_nr = 0; poly_nr < poly_tot; poly_nr++) {
+ for (poly_nr = 0; poly_nr < poly_num; poly_nr++) {
changed |= scanfill_preprocess_self_isect(sf_ctx, poly_info, poly_nr, remedgebase);
}
}
diff --git a/source/blender/blenlib/intern/stack.c b/source/blender/blenlib/intern/stack.c
index 61319613859..ff34cfe41cb 100644
--- a/source/blender/blenlib/intern/stack.c
+++ b/source/blender/blenlib/intern/stack.c
@@ -34,7 +34,7 @@ struct BLI_Stack {
size_t chunk_elem_max; /* number of elements per chunk */
size_t elem_size;
#ifdef USE_TOTELEM
- size_t totelem;
+ size_t elem_num;
#endif
};
@@ -119,7 +119,7 @@ void *BLI_stack_push_r(BLI_Stack *stack)
BLI_assert(stack->chunk_index < stack->chunk_elem_max);
#ifdef USE_TOTELEM
- stack->totelem++;
+ stack->elem_num++;
#endif
/* Return end of stack */
@@ -175,7 +175,7 @@ void BLI_stack_discard(BLI_Stack *stack)
BLI_assert(BLI_stack_is_empty(stack) == false);
#ifdef USE_TOTELEM
- stack->totelem--;
+ stack->elem_num--;
#endif
if (UNLIKELY(--stack->chunk_index == CHUNK_EMPTY)) {
struct StackChunk *chunk_free;
@@ -193,10 +193,10 @@ void BLI_stack_discard(BLI_Stack *stack)
void BLI_stack_clear(BLI_Stack *stack)
{
#ifdef USE_TOTELEM
- if (UNLIKELY(stack->totelem == 0)) {
+ if (UNLIKELY(stack->elem_num == 0)) {
return;
}
- stack->totelem = 0;
+ stack->elem_num = 0;
#else
if (UNLIKELY(stack->chunk_curr == NULL)) {
return;
@@ -225,29 +225,29 @@ void BLI_stack_clear(BLI_Stack *stack)
size_t BLI_stack_count(const BLI_Stack *stack)
{
#ifdef USE_TOTELEM
- return stack->totelem;
+ return stack->elem_num;
#else
struct StackChunk *data = stack->chunk_curr;
- size_t totelem = stack->chunk_index + 1;
+ size_t elem_num = stack->chunk_index + 1;
size_t i;
- if (totelem != stack->chunk_elem_max) {
+ if (elem_num != stack->chunk_elem_max) {
data = data->next;
}
else {
- totelem = 0;
+ elem_num = 0;
}
for (i = 0; data; data = data->next) {
i++;
}
- totelem += stack->chunk_elem_max * i;
- return totelem;
+ elem_num += stack->chunk_elem_max * i;
+ return elem_num;
#endif
}
bool BLI_stack_is_empty(const BLI_Stack *stack)
{
#ifdef USE_TOTELEM
- BLI_assert((stack->chunk_curr == NULL) == (stack->totelem == 0));
+ BLI_assert((stack->chunk_curr == NULL) == (stack->elem_num == 0));
#endif
return (stack->chunk_curr == NULL);
}
diff --git a/source/blender/blenlib/intern/string.c b/source/blender/blenlib/intern/string.c
index 75fa628e701..74559751d91 100644
--- a/source/blender/blenlib/intern/string.c
+++ b/source/blender/blenlib/intern/string.c
@@ -1131,11 +1131,11 @@ void BLI_str_format_byte_unit(char dst[15], long long int bytes, const bool base
const int base = base_10 ? 1000 : 1024;
const char *units_base_10[] = {"B", "KB", "MB", "GB", "TB", "PB"};
const char *units_base_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB"};
- const int tot_units = ARRAY_SIZE(units_base_2);
+ const int units_num = ARRAY_SIZE(units_base_2);
BLI_STATIC_ASSERT(ARRAY_SIZE(units_base_2) == ARRAY_SIZE(units_base_10), "array size mismatch");
- while ((fabs(bytes_converted) >= base) && ((order + 1) < tot_units)) {
+ while ((fabs(bytes_converted) >= base) && ((order + 1) < units_num)) {
bytes_converted /= base;
order++;
}
@@ -1155,9 +1155,9 @@ void BLI_str_format_attribute_domain_size(char dst[7], int number_to_format)
int order = 0;
const float base = 1000;
const char *units[] = {"", "K", "M", "B"};
- const int tot_units = ARRAY_SIZE(units);
+ const int units_num = ARRAY_SIZE(units);
- while ((fabsf(number_to_format_converted) >= base) && ((order + 1) < tot_units)) {
+ while ((fabsf(number_to_format_converted) >= base) && ((order + 1) < units_num)) {
number_to_format_converted /= base;
order++;
}
diff --git a/source/blender/blenlib/intern/task_iterator.c b/source/blender/blenlib/intern/task_iterator.c
index 4ee4e6c6ff2..d5afbb2b117 100644
--- a/source/blender/blenlib/intern/task_iterator.c
+++ b/source/blender/blenlib/intern/task_iterator.c
@@ -40,8 +40,8 @@
* \{ */
BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
- const int tot_items,
- int num_tasks,
+ const int items_num,
+ int tasks_num,
int *r_chunk_size)
{
int chunk_size = 0;
@@ -50,7 +50,7 @@ BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settin
/* Some users of this helper will still need a valid chunk size in case processing is not
* threaded. We can use a bigger one than in default threaded case then. */
chunk_size = 1024;
- num_tasks = 1;
+ tasks_num = 1;
}
else if (settings->min_iter_per_thread > 0) {
/* Already set by user, no need to do anything here. */
@@ -61,24 +61,24 @@ BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settin
* The idea here is to increase the chunk size to compensate for a rather measurable threading
* overhead caused by fetching tasks. With too many CPU threads we are starting
* to spend too much time in those overheads.
- * First values are: 1 if num_tasks < 16;
- * else 2 if num_tasks < 32;
- * else 3 if num_tasks < 48;
- * else 4 if num_tasks < 64;
+ * First values are: 1 if tasks_num < 16;
+ * else 2 if tasks_num < 32;
+ * else 3 if tasks_num < 48;
+ * else 4 if tasks_num < 64;
* etc.
* NOTE: If we wanted to keep the 'power of two' multiplier, we'd need something like:
- * 1 << max_ii(0, (int)(sizeof(int) * 8) - 1 - bitscan_reverse_i(num_tasks) - 3)
+ * 1 << max_ii(0, (int)(sizeof(int) * 8) - 1 - bitscan_reverse_i(tasks_num) - 3)
*/
- const int num_tasks_factor = max_ii(1, num_tasks >> 3);
+ const int tasks_num_factor = max_ii(1, tasks_num >> 3);
/* We could make that 'base' 32 number configurable in TaskParallelSettings too, or maybe just
* always use that heuristic using TaskParallelSettings.min_iter_per_thread as basis? */
- chunk_size = 32 * num_tasks_factor;
+ chunk_size = 32 * tasks_num_factor;
/* Basic heuristic to avoid threading on low amount of items.
* We could make that limit configurable in settings too. */
- if (tot_items > 0 && tot_items < max_ii(256, chunk_size * 2)) {
- chunk_size = tot_items;
+ if (items_num > 0 && items_num < max_ii(256, chunk_size * 2)) {
+ chunk_size = items_num;
}
}
@@ -95,7 +95,7 @@ typedef struct TaskParallelIteratorState {
/* Common data also passed to the generator callback. */
TaskParallelIteratorStateShared iter_shared;
/* Total number of items. If unknown, set it to a negative number. */
- int tot_items;
+ int items_num;
} TaskParallelIteratorState;
static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
@@ -188,10 +188,10 @@ static void task_parallel_iterator_no_threads(const TaskParallelSettings *settin
static void task_parallel_iterator_do(const TaskParallelSettings *settings,
TaskParallelIteratorState *state)
{
- const int num_threads = BLI_task_scheduler_num_threads();
+ const int threads_num = BLI_task_scheduler_num_threads();
task_parallel_calc_chunk_size(
- settings, state->tot_items, num_threads, &state->iter_shared.chunk_size);
+ settings, state->items_num, threads_num, &state->iter_shared.chunk_size);
if (!settings->use_threading) {
task_parallel_iterator_no_threads(settings, state);
@@ -199,13 +199,13 @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
}
const int chunk_size = state->iter_shared.chunk_size;
- const int tot_items = state->tot_items;
- const size_t num_tasks = tot_items >= 0 ?
- (size_t)min_ii(num_threads, state->tot_items / chunk_size) :
- (size_t)num_threads;
+ const int items_num = state->items_num;
+ const size_t tasks_num = items_num >= 0 ?
+ (size_t)min_ii(threads_num, state->items_num / chunk_size) :
+ (size_t)threads_num;
- BLI_assert(num_tasks > 0);
- if (num_tasks == 1) {
+ BLI_assert(tasks_num > 0);
+ if (tasks_num == 1) {
task_parallel_iterator_no_threads(settings, state);
return;
}
@@ -223,10 +223,10 @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
if (use_userdata_chunk) {
- userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
+ userdata_chunk_array = MALLOCA(userdata_chunk_size * tasks_num);
}
- for (size_t i = 0; i < num_tasks; i++) {
+ for (size_t i = 0; i < tasks_num; i++) {
if (use_userdata_chunk) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
@@ -243,7 +243,7 @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
if (use_userdata_chunk) {
if (settings->func_reduce != NULL || settings->func_free != NULL) {
- for (size_t i = 0; i < num_tasks; i++) {
+ for (size_t i = 0; i < tasks_num; i++) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
if (settings->func_reduce != NULL) {
settings->func_reduce(state->userdata, userdata_chunk, userdata_chunk_local);
@@ -253,7 +253,7 @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
}
}
}
- MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
+ MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
}
BLI_spin_end(&spin_lock);
@@ -264,13 +264,13 @@ void BLI_task_parallel_iterator(void *userdata,
TaskParallelIteratorIterFunc iter_func,
void *init_item,
const int init_index,
- const int tot_items,
+ const int items_num,
TaskParallelIteratorFunc func,
const TaskParallelSettings *settings)
{
TaskParallelIteratorState state = {0};
- state.tot_items = tot_items;
+ state.items_num = items_num;
state.iter_shared.next_index = init_index;
state.iter_shared.next_item = init_item;
state.iter_shared.is_finished = false;
@@ -314,7 +314,7 @@ void BLI_task_parallel_listbase(ListBase *listbase,
TaskParallelIteratorState state = {0};
- state.tot_items = BLI_listbase_count(listbase);
+ state.items_num = BLI_listbase_count(listbase);
state.iter_shared.next_index = 0;
state.iter_shared.next_item = listbase->first;
state.iter_shared.is_finished = false;
@@ -391,25 +391,25 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
ParallelMempoolState state;
TaskPool *task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
- const int num_threads = BLI_task_scheduler_num_threads();
+ const int threads_num = BLI_task_scheduler_num_threads();
/* The idea here is to prevent creating task for each of the loop iterations
* and instead have tasks which are evenly distributed across CPU cores and
* pull next item to be crunched using the threaded-aware BLI_mempool_iter.
*/
- const int num_tasks = num_threads + 2;
+ const int tasks_num = threads_num + 2;
state.userdata = userdata;
state.func = func;
if (use_userdata_chunk) {
- userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
+ userdata_chunk_array = MALLOCA(userdata_chunk_size * tasks_num);
}
ParallelMempoolTaskData *mempool_iterator_data = mempool_iter_threadsafe_create(
- mempool, (size_t)num_tasks);
+ mempool, (size_t)tasks_num);
- for (int i = 0; i < num_tasks; i++) {
+ for (int i = 0; i < tasks_num; i++) {
void *userdata_chunk_local = NULL;
if (use_userdata_chunk) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
@@ -429,7 +429,7 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
if (use_userdata_chunk) {
if ((settings->func_free != NULL) || (settings->func_reduce != NULL)) {
- for (int i = 0; i < num_tasks; i++) {
+ for (int i = 0; i < tasks_num; i++) {
if (settings->func_reduce) {
settings->func_reduce(
userdata, userdata_chunk, mempool_iterator_data[i].tls.userdata_chunk);
@@ -439,7 +439,7 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
}
}
}
- MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
+ MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
}
mempool_iter_threadsafe_destroy(mempool_iterator_data);
diff --git a/source/blender/blenlib/intern/task_scheduler.cc b/source/blender/blenlib/intern/task_scheduler.cc
index 32c833fae38..1f7747453c1 100644
--- a/source/blender/blenlib/intern/task_scheduler.cc
+++ b/source/blender/blenlib/intern/task_scheduler.cc
@@ -31,14 +31,14 @@ static tbb::global_control *task_scheduler_global_control = nullptr;
void BLI_task_scheduler_init()
{
#ifdef WITH_TBB_GLOBAL_CONTROL
- const int num_threads_override = BLI_system_num_threads_override_get();
+ const int threads_override_num = BLI_system_num_threads_override_get();
- if (num_threads_override > 0) {
+ if (threads_override_num > 0) {
/* Override number of threads. This settings is used within the lifetime
* of tbb::global_control, so we allocate it on the heap. */
task_scheduler_global_control = MEM_new<tbb::global_control>(
- __func__, tbb::global_control::max_allowed_parallelism, num_threads_override);
- task_scheduler_num_threads = num_threads_override;
+ __func__, tbb::global_control::max_allowed_parallelism, threads_override_num);
+ task_scheduler_num_threads = threads_override_num;
}
else {
/* Let TBB choose the number of threads. For (legacy) code that calls
diff --git a/source/blender/blenlib/intern/threads.cc b/source/blender/blenlib/intern/threads.cc
index ff67f821d1e..70c1e701348 100644
--- a/source/blender/blenlib/intern/threads.cc
+++ b/source/blender/blenlib/intern/threads.cc
@@ -109,7 +109,7 @@ static pthread_mutex_t _fftw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _view3d_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t mainid;
static unsigned int thread_levels = 0; /* threads can be invoked inside threads */
-static int num_threads_override = 0;
+static int threads_override_num = 0;
/* just a max for security reasons */
#define RE_MAX_THREAD BLENDER_MAX_THREADS
@@ -282,8 +282,8 @@ int BLI_system_thread_count()
{
static int t = -1;
- if (num_threads_override != 0) {
- return num_threads_override;
+ if (threads_override_num != 0) {
+ return threads_override_num;
}
if (LIKELY(t != -1)) {
return t;
@@ -316,12 +316,12 @@ int BLI_system_thread_count()
void BLI_system_num_threads_override_set(int num)
{
- num_threads_override = num;
+ threads_override_num = num;
}
int BLI_system_num_threads_override_get()
{
- return num_threads_override;
+ return threads_override_num;
}
/* Global Mutex Locks */
diff --git a/source/blender/blenlib/intern/uuid.cc b/source/blender/blenlib/intern/uuid.cc
index b175ed4a770..890a721a9d1 100644
--- a/source/blender/blenlib/intern/uuid.cc
+++ b/source/blender/blenlib/intern/uuid.cc
@@ -102,7 +102,7 @@ void BLI_uuid_format(char *buffer, const bUUID uuid)
bool BLI_uuid_parse_string(bUUID *uuid, const char *buffer)
{
- const int num_fields_parsed = std::sscanf(
+ const int fields_parsed_num = std::sscanf(
buffer,
"%8x-%4hx-%4hx-%2hhx%2hhx-%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
&uuid->time_low,
@@ -116,7 +116,7 @@ bool BLI_uuid_parse_string(bUUID *uuid, const char *buffer)
&uuid->node[3],
&uuid->node[4],
&uuid->node[5]);
- return num_fields_parsed == 11;
+ return fields_parsed_num == 11;
}
std::ostream &operator<<(std::ostream &stream, bUUID uuid)