git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenlib/intern'):
 -rw-r--r--  source/blender/blenlib/intern/BLI_kdopbvh.c         |  14
 -rw-r--r--  source/blender/blenlib/intern/BLI_memblock.c        |   6
 -rw-r--r--  source/blender/blenlib/intern/BLI_mempool.c         |   2
 -rw-r--r--  source/blender/blenlib/intern/array_store.c         |   4
 -rw-r--r--  source/blender/blenlib/intern/boxpack_2d.c          |   2
 -rw-r--r--  source/blender/blenlib/intern/fileops.c             |   4
 -rw-r--r--  source/blender/blenlib/intern/math_geom.c           |   6
 -rw-r--r--  source/blender/blenlib/intern/math_statistics.c     |   4
 -rw-r--r--  source/blender/blenlib/intern/polyfill_2d.c         |   2
 -rw-r--r--  source/blender/blenlib/intern/rct.c                 |   8
 -rw-r--r--  source/blender/blenlib/intern/storage.c             | 123
 -rw-r--r--  source/blender/blenlib/intern/string_cursor_utf8.c  |   2
 -rw-r--r--  source/blender/blenlib/intern/task.c                |  61
 13 files changed, 127 insertions(+), 111 deletions(-)
diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c
index f43f55a352b..0e93fd8e13b 100644
--- a/source/blender/blenlib/intern/BLI_kdopbvh.c
+++ b/source/blender/blenlib/intern/BLI_kdopbvh.c
@@ -561,7 +561,7 @@ static void bvhtree_verify(BVHTree *tree)
/* Helper data and structures to build a min-leaf generalized implicit tree
* This code can be easily reduced
- * (basicly this is only method to calculate pow(k, n) in O(1).. and stuff like that) */
+ * (basically this is only method to calculate pow(k, n) in O(1).. and stuff like that) */
typedef struct BVHBuildHelper {
int tree_type;
int totleafs;
@@ -626,7 +626,7 @@ static int implicit_leafs_index(const BVHBuildHelper *data, const int depth, con
*
* An implicit tree is a tree where its structure is implied,
* thus there is no need to store child pointers or indexes.
- * Its possible to find the position of the child or the parent with simple maths
+ * It's possible to find the position of the child or the parent with simple maths
* (multiplication and addition).
* This type of tree is for example used on heaps..
* where node N has its child at indices N*2 and N*2+1.
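For reference, the index arithmetic behind such an implicit tree is only multiplication, addition and division; a minimal generic sketch (illustrative only, not the kdopbvh helpers themselves, which generalize this to a configurable tree_type):

  /* Implicit binary tree with 1-based indexing, as used in heaps:
   * children of node n live at 2*n and 2*n + 1, its parent at n / 2. */
  static int child_left(const int n)  { return 2 * n; }
  static int child_right(const int n) { return 2 * n + 1; }
  static int parent(const int n)      { return n / 2; }

  /* The k-ary generalization with 0-based indexing:
   * children of node n live at k*n + 1 ... k*n + k, its parent at (n - 1) / k. */
  static int child_nth(const int k, const int n, const int i) { return k * n + 1 + i; } /* i in [0, k) */
  static int parent_k(const int k, const int n)               { return (n - 1) / k; }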
@@ -700,7 +700,7 @@ typedef struct BVHDivNodesData {
static void non_recursive_bvh_div_nodes_task_cb(void *__restrict userdata,
const int j,
- const ParallelRangeTLS *__restrict UNUSED(tls))
+ const TaskParallelTLS *__restrict UNUSED(tls))
{
BVHDivNodesData *data = userdata;
@@ -841,14 +841,14 @@ static void non_recursive_bvh_div_nodes(const BVHTree *tree,
cb_data.depth = depth;
if (true) {
- ParallelRangeSettings settings;
+ TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = (num_leafs > KDOPBVH_THREAD_LEAF_THRESHOLD);
BLI_task_parallel_range(i, i_stop, &cb_data, non_recursive_bvh_div_nodes_task_cb, &settings);
}
else {
/* Less hassle for debugging. */
- ParallelRangeTLS tls = {0};
+ TaskParallelTLS tls = {0};
for (int i_task = i; i_task < i_stop; i_task++) {
non_recursive_bvh_div_nodes_task_cb(&cb_data, i_task, &tls);
}
@@ -1195,7 +1195,7 @@ int BLI_bvhtree_overlap_thread_num(const BVHTree *tree)
static void bvhtree_overlap_task_cb(void *__restrict userdata,
const int j,
- const ParallelRangeTLS *__restrict UNUSED(tls))
+ const TaskParallelTLS *__restrict UNUSED(tls))
{
BVHOverlapData_Thread *data = &((BVHOverlapData_Thread *)userdata)[j];
BVHOverlapData_Shared *data_shared = data->shared;
@@ -1262,7 +1262,7 @@ BVHTreeOverlap *BLI_bvhtree_overlap(
data[j].thread = j;
}
- ParallelRangeSettings settings;
+ TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = (tree1->totleaf > KDOPBVH_THREAD_LEAF_THRESHOLD);
BLI_task_parallel_range(0, thread_num, data, bvhtree_overlap_task_cb, &settings);
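The ParallelRangeSettings / ParallelRangeTLS to TaskParallelSettings / TaskParallelTLS renames above recur throughout this commit; the calling convention itself is unchanged. A minimal sketch of that convention as it appears in these hunks (illustrative only; the callback, userdata and threshold are hypothetical):

  static void my_range_cb(void *__restrict userdata,
                          const int i,
                          const TaskParallelTLS *__restrict UNUSED(tls))
  {
    int *values = userdata;
    values[i] *= 2; /* Per-index work. */
  }

  static void double_all(int *values, const int len)
  {
    TaskParallelSettings settings;
    BLI_parallel_range_settings_defaults(&settings);
    settings.use_threading = (len > 1024); /* Example threshold, not from the patch. */
    BLI_task_parallel_range(0, len, values, my_range_cb, &settings);
  }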
diff --git a/source/blender/blenlib/intern/BLI_memblock.c b/source/blender/blenlib/intern/BLI_memblock.c
index ec9b74f2b50..f26860afe77 100644
--- a/source/blender/blenlib/intern/BLI_memblock.c
+++ b/source/blender/blenlib/intern/BLI_memblock.c
@@ -55,9 +55,9 @@ struct BLI_memblock {
int chunk_max_ofs;
/** Id of the chunk used for the next allocation. */
int chunk_next;
- /** Chunck size in bytes. */
+ /** Chunk size in bytes. */
int chunk_size;
- /** Number of allocated chunck. */
+ /** Number of allocated chunk. */
int chunk_len;
};
@@ -123,7 +123,7 @@ void BLI_memblock_clear(BLI_memblock *mblk, MemblockValFreeFP free_callback)
void *BLI_memblock_alloc(BLI_memblock *mblk)
{
- /* Bookeeping. */
+ /* Bookkeeping. */
if (mblk->elem_last < mblk->elem_next) {
mblk->elem_last = mblk->elem_next;
}
diff --git a/source/blender/blenlib/intern/BLI_mempool.c b/source/blender/blenlib/intern/BLI_mempool.c
index 5765540a8f1..2b931507633 100644
--- a/source/blender/blenlib/intern/BLI_mempool.c
+++ b/source/blender/blenlib/intern/BLI_mempool.c
@@ -65,7 +65,7 @@
* Important that this value is an is _not_ aligned with ``sizeof(void *)``.
* So having a pointer to 2/4/8... aligned memory is enough to ensure
* the freeword will never be used.
- * To be safe, use a word thats the same in both directions.
+ * To be safe, use a word that's the same in both directions.
*/
#define FREEWORD \
((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
diff --git a/source/blender/blenlib/intern/array_store.c b/source/blender/blenlib/intern/array_store.c
index 825f2e36c6e..c87dbee0f0e 100644
--- a/source/blender/blenlib/intern/array_store.c
+++ b/source/blender/blenlib/intern/array_store.c
@@ -82,7 +82,7 @@
* matching chunks from this state are re-used in the new state.
*
* First matches at either end of the array are detected.
- * For identical arrays this is all thats needed.
+ * For identical arrays this is all that's needed.
*
* De-duplication is performed on any remaining chunks, by hashing the first few bytes of the chunk
* (see: BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
@@ -301,7 +301,7 @@ typedef struct BChunkRef {
* used for lookups.
*
* Point to the #BChunkRef, not the #BChunk,
- * to allow talking down the chunks in-order until a mis-match is found,
+ * to allow talking down the chunks in-order until a mismatch is found,
* this avoids having to do so many table lookups.
*/
typedef struct BTableRef {
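The end-matching described in the comment above can be sketched as a simple scan from both sides; only the middle section that survives this pass needs the hash-based de-duplication. Illustrative sketch only (hypothetical signature; the real code walks BChunkRef lists rather than plain arrays and compares chunk contents rather than pointers):

  /* Count chunks that already match at the head and tail of two chunk arrays. */
  static void match_array_ends(void **a, const int a_len,
                               void **b, const int b_len,
                               int *r_prefix, int *r_suffix)
  {
    const int len_min = (a_len < b_len) ? a_len : b_len;
    int prefix = 0, suffix = 0;
    while (prefix < len_min && a[prefix] == b[prefix]) {
      prefix++;
    }
    while (suffix < len_min - prefix && a[a_len - 1 - suffix] == b[b_len - 1 - suffix]) {
      suffix++;
    }
    *r_prefix = prefix;
    *r_suffix = suffix;
  }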
diff --git a/source/blender/blenlib/intern/boxpack_2d.c b/source/blender/blenlib/intern/boxpack_2d.c
index f05523b8897..196d45967be 100644
--- a/source/blender/blenlib/intern/boxpack_2d.c
+++ b/source/blender/blenlib/intern/boxpack_2d.c
@@ -651,7 +651,7 @@ void BLI_box_pack_2d(BoxPack *boxarray, const uint len, float *r_tot_x, float *r
}
}
/* The Box verts are only used internally
- * Update the box x and y since thats what external
+ * Update the box x and y since that's what external
* functions will see */
box->x = box_xmin_get(box);
box->y = box_ymin_get(box);
diff --git a/source/blender/blenlib/intern/fileops.c b/source/blender/blenlib/intern/fileops.c
index 8fff9603a88..0e3987eefc9 100644
--- a/source/blender/blenlib/intern/fileops.c
+++ b/source/blender/blenlib/intern/fileops.c
@@ -105,7 +105,7 @@ int BLI_file_gzip(const char *from, const char *to)
#endif
/* gzip the file in from_file and write it to memory to_mem, at most size bytes.
- * return the unziped size
+ * return the unzipped size
*/
char *BLI_file_ungzip_to_mem(const char *from_file, int *r_size)
{
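A minimal usage sketch for this function (illustrative only; freeing with MEM_freeN assumes the returned buffer comes from Blender's guarded allocator):

  void print_ungzipped(const char *path)
  {
    int size = 0;
    char *data = BLI_file_ungzip_to_mem(path, &size);
    if (data != NULL) {
      fwrite(data, 1, (size_t)size, stdout); /* 'size' is the unzipped length in bytes. */
      MEM_freeN(data);
    }
  }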
@@ -519,7 +519,7 @@ enum {
/* operation requested not to perform recursive digging for current path */
RecursiveOp_Callback_StopRecurs = 1,
- /* error occured in callback and recursive walking should stop immediately */
+ /* error occurred in callback and recursive walking should stop immediately */
RecursiveOp_Callback_Error = 2,
};
diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c
index 8b715ebf87b..7cdac6b1497 100644
--- a/source/blender/blenlib/intern/math_geom.c
+++ b/source/blender/blenlib/intern/math_geom.c
@@ -2826,12 +2826,12 @@ bool isect_ray_ray_v3(const float ray_origin_a[3],
sub_v3_v3v3(c, n, t);
if (r_lambda_a != NULL) {
- cross_v3_v3v3(cray, c, ray_direction_a);
+ cross_v3_v3v3(cray, c, ray_direction_b);
*r_lambda_a = dot_v3v3(cray, n) / nlen;
}
if (r_lambda_b != NULL) {
- cross_v3_v3v3(cray, c, ray_direction_b);
+ cross_v3_v3v3(cray, c, ray_direction_a);
*r_lambda_b = dot_v3v3(cray, n) / nlen;
}
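This hunk is a behavior fix rather than a rename: for two rays A + \lambda_a d_a and B + \lambda_b d_b, the textbook closest-approach parameters are

  \lambda_a = \frac{((B - A) \times d_b) \cdot n}{|n|^2}, \quad
  \lambda_b = \frac{((B - A) \times d_a) \cdot n}{|n|^2}, \quad n = d_a \times d_b

i.e. each parameter is computed with the cross product against the other ray's direction, which is what the swap restores (standard formula quoted for reference only; the local variables c, n and nlen in the function do not map onto it one-to-one).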
@@ -4914,7 +4914,7 @@ void vcloud_estimate_transform_v3(const int list_size,
unit_m3(lscale);
}
/* do com for both clouds */
- if (pos && rpos && (list_size > 0)) { /* paranoya check */
+ if (pos && rpos && (list_size > 0)) { /* paranoia check */
/* do com for both clouds */
for (a = 0; a < list_size; a++) {
if (weight) {
diff --git a/source/blender/blenlib/intern/math_statistics.c b/source/blender/blenlib/intern/math_statistics.c
index 7c461120520..d4fcb32ce37 100644
--- a/source/blender/blenlib/intern/math_statistics.c
+++ b/source/blender/blenlib/intern/math_statistics.c
@@ -42,7 +42,7 @@ typedef struct CovarianceData {
static void covariance_m_vn_ex_task_cb(void *__restrict userdata,
const int a,
- const ParallelRangeTLS *__restrict UNUSED(tls))
+ const TaskParallelTLS *__restrict UNUSED(tls))
{
CovarianceData *data = userdata;
const float *cos_vn = data->cos_vn;
@@ -122,7 +122,7 @@ void BLI_covariance_m_vn_ex(const int n,
.nbr_cos_vn = nbr_cos_vn,
};
- ParallelRangeSettings settings;
+ TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = ((nbr_cos_vn * n * n) >= 10000);
BLI_task_parallel_range(0, n * n, &data, covariance_m_vn_ex_task_cb, &settings);
diff --git a/source/blender/blenlib/intern/polyfill_2d.c b/source/blender/blenlib/intern/polyfill_2d.c
index 1538b5922b1..575a4a06d6a 100644
--- a/source/blender/blenlib/intern/polyfill_2d.c
+++ b/source/blender/blenlib/intern/polyfill_2d.c
@@ -780,7 +780,7 @@ static void polyfill_prepare(PolyFill *pf,
coords_sign = (cross_poly_v2(coords, coords_tot) >= 0.0f) ? 1 : -1;
}
else {
- /* check we're passing in correcty args */
+ /* check we're passing in correct args */
#ifdef USE_STRICT_ASSERT
# ifndef NDEBUG
if (coords_sign == 1) {
diff --git a/source/blender/blenlib/intern/rct.c b/source/blender/blenlib/intern/rct.c
index 99288abb38c..ecff2ebffef 100644
--- a/source/blender/blenlib/intern/rct.c
+++ b/source/blender/blenlib/intern/rct.c
@@ -285,12 +285,12 @@ bool BLI_rcti_isect_segment(const rcti *rect, const int s1[2], const int s2[2])
return false;
}
- /* if either points intersect then we definetly intersect */
+ /* if either points intersect then we definitely intersect */
if (BLI_rcti_isect_pt_v(rect, s1) || BLI_rcti_isect_pt_v(rect, s2)) {
return true;
}
else {
- /* both points are outside but may insersect the rect */
+ /* both points are outside but may intersect the rect */
int tvec1[2];
int tvec2[2];
/* diagonal: [/] */
@@ -332,12 +332,12 @@ bool BLI_rctf_isect_segment(const rctf *rect, const float s1[2], const float s2[
return false;
}
- /* if either points intersect then we definetly intersect */
+ /* if either points intersect then we definitely intersect */
if (BLI_rctf_isect_pt_v(rect, s1) || BLI_rctf_isect_pt_v(rect, s2)) {
return true;
}
else {
- /* both points are outside but may insersect the rect */
+ /* both points are outside but may intersect the rect */
float tvec1[2];
float tvec2[2];
/* diagonal: [/] */
diff --git a/source/blender/blenlib/intern/storage.c b/source/blender/blenlib/intern/storage.c
index 39af73ac175..8d921b062dc 100644
--- a/source/blender/blenlib/intern/storage.c
+++ b/source/blender/blenlib/intern/storage.c
@@ -297,56 +297,72 @@ bool BLI_is_file(const char *path)
return (mode && !S_ISDIR(mode));
}
-void *BLI_file_read_text_as_mem(const char *filepath, size_t pad_bytes, size_t *r_size)
+/**
+ * Use for both text and binary file reading.
+ */
+static void *file_read_data_as_mem_impl(FILE *fp,
+ bool read_size_exact,
+ size_t pad_bytes,
+ size_t *r_size)
{
- FILE *fp = BLI_fopen(filepath, "r");
- void *mem = NULL;
+ struct stat st;
+ if (fstat(fileno(fp), &st) == -1) {
+ return NULL;
+ }
+ if (S_ISDIR(st.st_mode)) {
+ return NULL;
+ }
+ if (fseek(fp, 0L, SEEK_END) == -1) {
+ return NULL;
+ }
+ /* Don't use the 'st_size' because it may be the symlink. */
+ const long int filelen = ftell(fp);
+ if (filelen == -1) {
+ return NULL;
+ }
+ if (fseek(fp, 0L, SEEK_SET) == -1) {
+ return NULL;
+ }
- if (fp) {
- struct stat st;
- if (fstat(fileno(fp), &st) == -1) {
- goto finally;
- }
- if (S_ISDIR(st.st_mode)) {
- goto finally;
- }
- if (fseek(fp, 0L, SEEK_END) == -1) {
- goto finally;
- }
- /* Don't use the 'st_size' because it may be the symlink. */
- const long int filelen = ftell(fp);
- if (filelen == -1) {
- goto finally;
- }
- if (fseek(fp, 0L, SEEK_SET) == -1) {
- goto finally;
- }
+ void *mem = MEM_mallocN(filelen + pad_bytes, __func__);
+ if (mem == NULL) {
+ return NULL;
+ }
- mem = MEM_mallocN(filelen + pad_bytes, __func__);
- if (mem == NULL) {
- goto finally;
- }
+ const long int filelen_read = fread(mem, 1, filelen, fp);
+ if ((filelen_read < 0) || ferror(fp)) {
+ MEM_freeN(mem);
+ return NULL;
+ }
- const long int filelen_read = fread(mem, 1, filelen, fp);
- if ((filelen_read < 0) || ferror(fp)) {
+ if (read_size_exact) {
+ if (filelen_read != filelen) {
MEM_freeN(mem);
- mem = NULL;
- goto finally;
+ return NULL;
}
-
+ }
+ else {
if (filelen_read < filelen) {
mem = MEM_reallocN(mem, filelen_read + pad_bytes);
if (mem == NULL) {
- goto finally;
+ return NULL;
}
}
+ }
- *r_size = filelen_read;
+ *r_size = filelen_read;
+
+ return mem;
+}
- finally:
+void *BLI_file_read_text_as_mem(const char *filepath, size_t pad_bytes, size_t *r_size)
+{
+ FILE *fp = BLI_fopen(filepath, "r");
+ void *mem = NULL;
+ if (fp) {
+ mem = file_read_data_as_mem_impl(fp, false, pad_bytes, r_size);
fclose(fp);
}
-
return mem;
}
@@ -354,45 +370,10 @@ void *BLI_file_read_binary_as_mem(const char *filepath, size_t pad_bytes, size_t
{
FILE *fp = BLI_fopen(filepath, "rb");
void *mem = NULL;
-
if (fp) {
- struct stat st;
- if (fstat(fileno(fp), &st) == -1) {
- goto finally;
- }
- if (S_ISDIR(st.st_mode)) {
- goto finally;
- }
- if (fseek(fp, 0L, SEEK_END) == -1) {
- goto finally;
- }
- /* Don't use the 'st_size' because it may be the symlink. */
- const long int filelen = ftell(fp);
- if (filelen == -1) {
- goto finally;
- }
- if (fseek(fp, 0L, SEEK_SET) == -1) {
- goto finally;
- }
-
- mem = MEM_mallocN(filelen + pad_bytes, __func__);
- if (mem == NULL) {
- goto finally;
- }
-
- const long int filelen_read = fread(mem, 1, filelen, fp);
- if ((filelen_read != filelen) || ferror(fp)) {
- MEM_freeN(mem);
- mem = NULL;
- goto finally;
- }
-
- *r_size = filelen_read;
-
- finally:
+ mem = file_read_data_as_mem_impl(fp, true, pad_bytes, r_size);
fclose(fp);
}
-
return mem;
}
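The net effect of the storage.c hunks is that both readers now share file_read_data_as_mem_impl(), with read_size_exact as the only behavioral difference: the binary reader fails when fewer bytes than expected are read, while the text reader shrinks the buffer instead (reading fewer bytes than ftell() reported is normal in text mode, e.g. when CRLF translation is in effect on Windows). A minimal usage sketch of the text variant (illustrative only; the wrapper and the use of the pad byte for NUL-termination are not part of this patch):

  static char *read_text_nul_terminated(const char *filepath, size_t *r_size)
  {
    /* Ask for one extra pad byte so the result can be NUL-terminated;
     * the pad byte is not counted in *r_size. */
    char *text = BLI_file_read_text_as_mem(filepath, 1, r_size);
    if (text != NULL) {
      text[*r_size] = '\0';
    }
    return text; /* Free with MEM_freeN() when done. */
  }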
diff --git a/source/blender/blenlib/intern/string_cursor_utf8.c b/source/blender/blenlib/intern/string_cursor_utf8.c
index 1526a052aad..f0113a7028a 100644
--- a/source/blender/blenlib/intern/string_cursor_utf8.c
+++ b/source/blender/blenlib/intern/string_cursor_utf8.c
@@ -104,7 +104,7 @@ static eStrCursorDelimType cursor_delim_type_unicode(const uint uch)
static eStrCursorDelimType cursor_delim_type_utf8(const char *ch_utf8)
{
/* for full unicode support we really need to have large lookup tables to figure
- * out whats what in every possible char set - and python, glib both have these. */
+ * out what's what in every possible char set - and python, glib both have these. */
uint uch = BLI_str_utf8_as_unicode(ch_utf8);
return cursor_delim_type_unicode(uch);
}
diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c
index 85d39f2f98e..bea38a232cc 100644
--- a/source/blender/blenlib/intern/task.c
+++ b/source/blender/blenlib/intern/task.c
@@ -1054,6 +1054,49 @@ typedef struct ParallelRangeState {
int chunk_size;
} ParallelRangeState;
+BLI_INLINE void task_parallel_range_calc_chunk_size(const TaskParallelSettings *settings,
+ const int num_tasks,
+ ParallelRangeState *state)
+{
+ const int tot_items = state->stop - state->start;
+ int chunk_size = 0;
+
+ if (settings->min_iter_per_thread > 0) {
+ /* Already set by user, no need to do anything here. */
+ chunk_size = settings->min_iter_per_thread;
+ }
+ else {
+ /* Basic heuristic to avoid threading on low amount of items. We could make that limit
+ * configurable in settings too... */
+ if (tot_items > 0 && tot_items < 256) {
+ chunk_size = tot_items;
+ }
+ /* NOTE: The idea here is to compensate for rather measurable threading
+ * overhead caused by fetching tasks. With too many CPU threads we are starting
+ * to spend too much time in those overheads. */
+ else if (num_tasks > 32) {
+ chunk_size = 128;
+ }
+ else if (num_tasks > 16) {
+ chunk_size = 64;
+ }
+ else {
+ chunk_size = 32;
+ }
+ }
+
+ BLI_assert(chunk_size > 0);
+
+ switch (settings->scheduling_mode) {
+ case TASK_SCHEDULING_STATIC:
+ state->chunk_size = max_ii(chunk_size, tot_items / (num_tasks));
+ break;
+ case TASK_SCHEDULING_DYNAMIC:
+ state->chunk_size = chunk_size;
+ break;
+ }
+}
+
BLI_INLINE bool parallel_range_next_iter_get(ParallelRangeState *__restrict state,
int *__restrict iter,
int *__restrict count)
@@ -1069,7 +1112,7 @@ BLI_INLINE bool parallel_range_next_iter_get(ParallelRangeState *__restrict stat
static void parallel_range_func(TaskPool *__restrict pool, void *userdata_chunk, int thread_id)
{
ParallelRangeState *__restrict state = BLI_task_pool_userdata(pool);
- ParallelRangeTLS tls = {
+ TaskParallelTLS tls = {
.thread_id = thread_id,
.userdata_chunk = userdata_chunk,
};
@@ -1085,7 +1128,7 @@ static void parallel_range_single_thread(const int start,
int const stop,
void *userdata,
TaskParallelRangeFunc func,
- const ParallelRangeSettings *settings)
+ const TaskParallelSettings *settings)
{
void *userdata_chunk = settings->userdata_chunk;
const size_t userdata_chunk_size = settings->userdata_chunk_size;
@@ -1095,7 +1138,7 @@ static void parallel_range_single_thread(const int start,
userdata_chunk_local = MALLOCA(userdata_chunk_size);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
}
- ParallelRangeTLS tls = {
+ TaskParallelTLS tls = {
.thread_id = 0,
.userdata_chunk = userdata_chunk_local,
};
@@ -1118,7 +1161,7 @@ void BLI_task_parallel_range(const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
- const ParallelRangeSettings *settings)
+ const TaskParallelSettings *settings)
{
TaskScheduler *task_scheduler;
TaskPool *task_pool;
@@ -1162,16 +1205,8 @@ void BLI_task_parallel_range(const int start,
state.userdata = userdata;
state.func = func;
state.iter = start;
- switch (settings->scheduling_mode) {
- case TASK_SCHEDULING_STATIC:
- state.chunk_size = max_ii(settings->min_iter_per_thread, (stop - start) / (num_tasks));
- break;
- case TASK_SCHEDULING_DYNAMIC:
- /* TODO(sergey): Make it configurable from min_iter_per_thread. */
- state.chunk_size = 32;
- break;
- }
+ task_parallel_range_calc_chunk_size(settings, num_tasks, &state);
num_tasks = min_ii(num_tasks, max_ii(1, (stop - start) / state.chunk_size));
if (num_tasks == 1) {
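To make the new chunk-size heuristic concrete, a worked example (numbers chosen purely for illustration): with min_iter_per_thread unset, 100000 items and 48 tasks give chunk_size = 128 (num_tasks > 32); TASK_SCHEDULING_DYNAMIC uses that 128 directly, while TASK_SCHEDULING_STATIC takes max(128, 100000 / 48) = 2083. For a small range of 200 items the chunk becomes the whole range (tot_items < 256), so num_tasks = min(num_tasks, max(1, 200 / 200)) = 1 and the call drops to the single-threaded path.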