git.blender.org/blender.git

Diffstat (limited to 'source/blender/blenkernel')
-rw-r--r--  source/blender/blenkernel/intern/colortools.c       | 69
-rw-r--r--  source/blender/blenkernel/intern/dynamicpaint.c     | 33
-rw-r--r--  source/blender/blenkernel/intern/particle_system.c  | 15
-rw-r--r--  source/blender/blenkernel/intern/subdiv_ccg.c       | 18
-rw-r--r--  source/blender/blenkernel/intern/subdiv_foreach.c   |  6
5 files changed, 69 insertions, 72 deletions
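Note: this changeset replaces the task system's single func_finalize callback with two narrower hooks, as visible in the hunks below: func_reduce, which merges one per-thread TLS chunk into another (ultimately into the chunk supplied by the caller), and func_free, which only releases per-thread data. The following is a minimal sketch of the reduce path, using only the BLI_task API that appears in this diff; the SumChunk type and the sum_* names are hypothetical.

#include "BLI_task.h"
#include "BLI_utildefines.h"

/* Hypothetical per-thread accumulator. */
typedef struct SumChunk {
  int total;
} SumChunk;

static void sum_range_cb(void *__restrict UNUSED(userdata),
                         const int i,
                         const TaskParallelTLS *__restrict tls)
{
  /* Each worker accumulates into its own copy of the caller's chunk. */
  SumChunk *chunk = tls->userdata_chunk;
  chunk->total += i;
}

static void sum_range_reduce(const void *__restrict UNUSED(userdata),
                             void *__restrict chunk_join,
                             void *__restrict chunk)
{
  /* Fold one worker's chunk into the chunk it is being joined with. */
  SumChunk *join = chunk_join;
  const SumChunk *other = chunk;
  join->total += other->total;
}

static int sum_range(const int n)
{
  SumChunk chunk = {0};
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.userdata_chunk = &chunk;
  settings.userdata_chunk_size = sizeof(chunk);
  settings.func_reduce = sum_range_reduce;
  BLI_task_parallel_range(0, n, NULL, sum_range_cb, &settings);
  /* After all joins, the caller's chunk holds the combined result,
   * which is the pattern colortools.c now relies on below. */
  return chunk.total;
}
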
diff --git a/source/blender/blenkernel/intern/colortools.c b/source/blender/blenkernel/intern/colortools.c
index f82b8b6675c..3da384a2745 100644
--- a/source/blender/blenkernel/intern/colortools.c
+++ b/source/blender/blenkernel/intern/colortools.c
@@ -1383,8 +1383,6 @@ typedef struct ScopesUpdateData {
struct ColormanageProcessor *cm_processor;
const unsigned char *display_buffer;
const int ycc_mode;
-
- unsigned int *bin_lum, *bin_r, *bin_g, *bin_b, *bin_a;
} ScopesUpdateData;
typedef struct ScopesUpdateDataChunk {
@@ -1495,23 +1493,24 @@ static void scopes_update_cb(void *__restrict userdata,
}
}
-static void scopes_update_finalize(void *__restrict userdata, void *__restrict userdata_chunk)
+static void scopes_update_reduce(const void *__restrict UNUSED(userdata),
+ void *__restrict chunk_join,
+ void *__restrict chunk)
{
- const ScopesUpdateData *data = userdata;
- const ScopesUpdateDataChunk *data_chunk = userdata_chunk;
-
- unsigned int *bin_lum = data->bin_lum;
- unsigned int *bin_r = data->bin_r;
- unsigned int *bin_g = data->bin_g;
- unsigned int *bin_b = data->bin_b;
- unsigned int *bin_a = data->bin_a;
+ ScopesUpdateDataChunk *join_chunk = chunk_join;
+ const ScopesUpdateDataChunk *data_chunk = chunk;
+
+ unsigned int *bin_lum = join_chunk->bin_lum;
+ unsigned int *bin_r = join_chunk->bin_r;
+ unsigned int *bin_g = join_chunk->bin_g;
+ unsigned int *bin_b = join_chunk->bin_b;
+ unsigned int *bin_a = join_chunk->bin_a;
const unsigned int *bin_lum_c = data_chunk->bin_lum;
const unsigned int *bin_r_c = data_chunk->bin_r;
const unsigned int *bin_g_c = data_chunk->bin_g;
const unsigned int *bin_b_c = data_chunk->bin_b;
const unsigned int *bin_a_c = data_chunk->bin_a;
- float(*minmax)[2] = data->scopes->minmax;
const float *min = data_chunk->min;
const float *max = data_chunk->max;
@@ -1524,11 +1523,11 @@ static void scopes_update_finalize(void *__restrict userdata, void *__restrict u
}
for (int c = 3; c--;) {
- if (min[c] < minmax[c][0]) {
- minmax[c][0] = min[c];
+ if (min[c] < join_chunk->min[c]) {
+ join_chunk->min[c] = min[c];
}
- if (max[c] > minmax[c][1]) {
- minmax[c][1] = max[c];
+ if (max[c] > join_chunk->max[c]) {
+ join_chunk->max[c] = max[c];
}
}
}
@@ -1542,7 +1541,6 @@ void BKE_scopes_update(Scopes *scopes,
unsigned int nl, na, nr, ng, nb;
double divl, diva, divr, divg, divb;
const unsigned char *display_buffer = NULL;
- uint bin_lum[256] = {0}, bin_r[256] = {0}, bin_g[256] = {0}, bin_b[256] = {0}, bin_a[256] = {0};
int ycc_mode = -1;
void *cache_handle = NULL;
struct ColormanageProcessor *cm_processor = NULL;
@@ -1638,11 +1636,6 @@ void BKE_scopes_update(Scopes *scopes,
.cm_processor = cm_processor,
.display_buffer = display_buffer,
.ycc_mode = ycc_mode,
- .bin_lum = bin_lum,
- .bin_r = bin_r,
- .bin_g = bin_g,
- .bin_b = bin_b,
- .bin_a = bin_a,
};
ScopesUpdateDataChunk data_chunk = {{0}};
INIT_MINMAX(data_chunk.min, data_chunk.max);
@@ -1652,26 +1645,26 @@ void BKE_scopes_update(Scopes *scopes,
settings.use_threading = (ibuf->y > 256);
settings.userdata_chunk = &data_chunk;
settings.userdata_chunk_size = sizeof(data_chunk);
- settings.func_finalize = scopes_update_finalize;
+ settings.func_reduce = scopes_update_reduce;
BLI_task_parallel_range(0, ibuf->y, &data, scopes_update_cb, &settings);
/* convert hist data to float (proportional to max count) */
nl = na = nr = nb = ng = 0;
for (a = 0; a < 256; a++) {
- if (bin_lum[a] > nl) {
- nl = bin_lum[a];
+ if (data_chunk.bin_lum[a] > nl) {
+ nl = data_chunk.bin_lum[a];
}
- if (bin_r[a] > nr) {
- nr = bin_r[a];
+ if (data_chunk.bin_r[a] > nr) {
+ nr = data_chunk.bin_r[a];
}
- if (bin_g[a] > ng) {
- ng = bin_g[a];
+ if (data_chunk.bin_g[a] > ng) {
+ ng = data_chunk.bin_g[a];
}
- if (bin_b[a] > nb) {
- nb = bin_b[a];
+ if (data_chunk.bin_b[a] > nb) {
+ nb = data_chunk.bin_b[a];
}
- if (bin_a[a] > na) {
- na = bin_a[a];
+ if (data_chunk.bin_a[a] > na) {
+ na = data_chunk.bin_a[a];
}
}
divl = nl ? 1.0 / (double)nl : 1.0;
@@ -1681,11 +1674,11 @@ void BKE_scopes_update(Scopes *scopes,
divb = nb ? 1.0 / (double)nb : 1.0;
for (a = 0; a < 256; a++) {
- scopes->hist.data_luma[a] = bin_lum[a] * divl;
- scopes->hist.data_r[a] = bin_r[a] * divr;
- scopes->hist.data_g[a] = bin_g[a] * divg;
- scopes->hist.data_b[a] = bin_b[a] * divb;
- scopes->hist.data_a[a] = bin_a[a] * diva;
+ scopes->hist.data_luma[a] = data_chunk.bin_lum[a] * divl;
+ scopes->hist.data_r[a] = data_chunk.bin_r[a] * divr;
+ scopes->hist.data_g[a] = data_chunk.bin_g[a] * divg;
+ scopes->hist.data_b[a] = data_chunk.bin_b[a] * divb;
+ scopes->hist.data_a[a] = data_chunk.bin_a[a] * diva;
}
if (cm_processor) {
diff --git a/source/blender/blenkernel/intern/dynamicpaint.c b/source/blender/blenkernel/intern/dynamicpaint.c
index 4c78c88d168..3a8b846a41d 100644
--- a/source/blender/blenkernel/intern/dynamicpaint.c
+++ b/source/blender/blenkernel/intern/dynamicpaint.c
@@ -653,15 +653,15 @@ static void grid_bound_insert_cb_ex(void *__restrict userdata,
boundInsert(grid_bound, bData->realCoord[bData->s_pos[i]].v);
}
-static void grid_bound_insert_finalize(void *__restrict userdata, void *__restrict userdata_chunk)
+static void grid_bound_insert_reduce(const void *__restrict UNUSED(userdata),
+ void *__restrict chunk_join,
+ void *__restrict chunk)
{
- PaintBakeData *bData = userdata;
- VolumeGrid *grid = bData->grid;
-
- Bounds3D *grid_bound = userdata_chunk;
+ Bounds3D *join = chunk_join;
+ Bounds3D *grid_bound = chunk;
- boundInsert(&grid->grid_bounds, grid_bound->min);
- boundInsert(&grid->grid_bounds, grid_bound->max);
+ boundInsert(join, grid_bound->min);
+ boundInsert(join, grid_bound->max);
}
static void grid_cell_points_cb_ex(void *__restrict userdata,
@@ -685,17 +685,20 @@ static void grid_cell_points_cb_ex(void *__restrict userdata,
s_num[temp_t_index[i]]++;
}
-static void grid_cell_points_finalize(void *__restrict userdata, void *__restrict userdata_chunk)
+static void grid_cell_points_reduce(const void *__restrict userdata,
+ void *__restrict chunk_join,
+ void *__restrict chunk)
{
- PaintBakeData *bData = userdata;
- VolumeGrid *grid = bData->grid;
+ const PaintBakeData *bData = userdata;
+ const VolumeGrid *grid = bData->grid;
const int grid_cells = grid->dim[0] * grid->dim[1] * grid->dim[2];
- int *s_num = userdata_chunk;
+ int *join_s_num = chunk_join;
+ int *s_num = chunk;
/* calculate grid indexes */
for (int i = 0; i < grid_cells; i++) {
- grid->s_num[i] += s_num[i];
+ join_s_num[i] += s_num[i];
}
}
@@ -753,7 +756,7 @@ static void surfaceGenerateGrid(struct DynamicPaintSurface *surface)
settings.use_threading = (sData->total_points > 1000);
settings.userdata_chunk = &grid->grid_bounds;
settings.userdata_chunk_size = sizeof(grid->grid_bounds);
- settings.func_finalize = grid_bound_insert_finalize;
+ settings.func_reduce = grid_bound_insert_reduce;
BLI_task_parallel_range(0, sData->total_points, bData, grid_bound_insert_cb_ex, &settings);
}
/* get dimensions */
@@ -814,7 +817,7 @@ static void surfaceGenerateGrid(struct DynamicPaintSurface *surface)
settings.use_threading = (sData->total_points > 1000);
settings.userdata_chunk = grid->s_num;
settings.userdata_chunk_size = sizeof(*grid->s_num) * grid_cells;
- settings.func_finalize = grid_cell_points_finalize;
+ settings.func_reduce = grid_cell_points_reduce;
BLI_task_parallel_range(0, sData->total_points, bData, grid_cell_points_cb_ex, &settings);
}
@@ -4880,7 +4883,7 @@ static void dynamicPaint_prepareAdjacencyData(DynamicPaintSurface *surface, cons
0, sData->total_points, sData, dynamic_paint_prepare_adjacency_cb, &settings);
/* calculate average values (single thread).
- * Note: tried to put this in threaded callback (using _finalize feature),
+ * Note: tried to put this in threaded callback (using _reduce feature),
* but gave ~30% slower result! */
bData->average_dist = 0.0;
for (index = 0; index < sData->total_points; index++) {
diff --git a/source/blender/blenkernel/intern/particle_system.c b/source/blender/blenkernel/intern/particle_system.c
index 5ef2f7aeeff..14b1ef7b87f 100644
--- a/source/blender/blenkernel/intern/particle_system.c
+++ b/source/blender/blenkernel/intern/particle_system.c
@@ -3692,10 +3692,11 @@ typedef struct DynamicStepSolverTaskData {
SpinLock spin;
} DynamicStepSolverTaskData;
-static void dynamics_step_finalize_sphdata(void *__restrict UNUSED(userdata),
- void *__restrict tls_userdata_chunk)
+static void dynamics_step_sphdata_reduce(const void *__restrict UNUSED(userdata),
+ void *__restrict UNUSED(join_v),
+ void *__restrict chunk_v)
{
- SPHData *sphdata = tls_userdata_chunk;
+ SPHData *sphdata = chunk_v;
psys_sph_flush_springs(sphdata);
}
@@ -3986,7 +3987,7 @@ static void dynamics_step(ParticleSimulationData *sim, float cfra)
settings.use_threading = (psys->totpart > 100);
settings.userdata_chunk = &sphdata;
settings.userdata_chunk_size = sizeof(sphdata);
- settings.func_finalize = dynamics_step_finalize_sphdata;
+ settings.func_reduce = dynamics_step_sphdata_reduce;
BLI_task_parallel_range(
0, psys->totpart, &task_data, dynamics_step_sph_ddr_task_cb_ex, &settings);
@@ -4018,7 +4019,7 @@ static void dynamics_step(ParticleSimulationData *sim, float cfra)
settings.use_threading = (psys->totpart > 100);
settings.userdata_chunk = &sphdata;
settings.userdata_chunk_size = sizeof(sphdata);
- settings.func_finalize = dynamics_step_finalize_sphdata;
+ settings.func_reduce = dynamics_step_sphdata_reduce;
BLI_task_parallel_range(0,
psys->totpart,
&task_data,
@@ -4033,7 +4034,7 @@ static void dynamics_step(ParticleSimulationData *sim, float cfra)
settings.use_threading = (psys->totpart > 100);
settings.userdata_chunk = &sphdata;
settings.userdata_chunk_size = sizeof(sphdata);
- settings.func_finalize = dynamics_step_finalize_sphdata;
+ settings.func_reduce = dynamics_step_sphdata_reduce;
BLI_task_parallel_range(0,
psys->totpart,
&task_data,
@@ -4189,7 +4190,7 @@ static void particles_fluid_step(ParticleSimulationData *sim,
ParticleSettings *part = psys->part;
ParticleData *pa = NULL;
- int p, totpart, tottypepart = 0;
+ int p, totpart = 0, tottypepart = 0;
int flagActivePart, activeParts = 0;
float posX, posY, posZ, velX, velY, velZ;
float resX, resY, resZ;
diff --git a/source/blender/blenkernel/intern/subdiv_ccg.c b/source/blender/blenkernel/intern/subdiv_ccg.c
index 521aeb60e66..d99c41eaa3e 100644
--- a/source/blender/blenkernel/intern/subdiv_ccg.c
+++ b/source/blender/blenkernel/intern/subdiv_ccg.c
@@ -770,8 +770,8 @@ static void subdiv_ccg_recalc_inner_normal_task(void *__restrict userdata_v,
subdiv_ccg_average_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
}
-static void subdiv_ccg_recalc_inner_normal_finalize(void *__restrict UNUSED(userdata),
- void *__restrict tls_v)
+static void subdiv_ccg_recalc_inner_normal_free(const void *__restrict UNUSED(userdata),
+ void *__restrict tls_v)
{
RecalcInnerNormalsTLSData *tls = tls_v;
MEM_SAFE_FREE(tls->face_normals);
@@ -791,7 +791,7 @@ static void subdiv_ccg_recalc_inner_grid_normals(SubdivCCG *subdiv_ccg)
BLI_parallel_range_settings_defaults(&parallel_range_settings);
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
- parallel_range_settings.func_finalize = subdiv_ccg_recalc_inner_normal_finalize;
+ parallel_range_settings.func_free = subdiv_ccg_recalc_inner_normal_free;
BLI_task_parallel_range(0,
subdiv_ccg->num_grids,
&data,
@@ -834,8 +834,8 @@ static void subdiv_ccg_recalc_modified_inner_normal_task(void *__restrict userda
subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
}
-static void subdiv_ccg_recalc_modified_inner_normal_finalize(void *__restrict UNUSED(userdata),
- void *__restrict tls_v)
+static void subdiv_ccg_recalc_modified_inner_normal_free(const void *__restrict UNUSED(userdata),
+ void *__restrict tls_v)
{
RecalcInnerNormalsTLSData *tls = tls_v;
MEM_SAFE_FREE(tls->face_normals);
@@ -857,7 +857,7 @@ static void subdiv_ccg_recalc_modified_inner_grid_normals(SubdivCCG *subdiv_ccg,
BLI_parallel_range_settings_defaults(&parallel_range_settings);
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
- parallel_range_settings.func_finalize = subdiv_ccg_recalc_modified_inner_normal_finalize;
+ parallel_range_settings.func_free = subdiv_ccg_recalc_modified_inner_normal_free;
BLI_task_parallel_range(0,
num_effected_faces,
&data,
@@ -1077,8 +1077,8 @@ static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v
subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge, tls);
}
-static void subdiv_ccg_average_grids_boundaries_finalize(void *__restrict UNUSED(userdata),
- void *__restrict tls_v)
+static void subdiv_ccg_average_grids_boundaries_free(const void *__restrict UNUSED(userdata),
+ void *__restrict tls_v)
{
AverageGridsBoundariesTLSData *tls = tls_v;
MEM_SAFE_FREE(tls->accumulators);
@@ -1136,7 +1136,7 @@ static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key
AverageGridsBoundariesTLSData tls_data = {NULL};
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
- parallel_range_settings.func_finalize = subdiv_ccg_average_grids_boundaries_finalize;
+ parallel_range_settings.func_free = subdiv_ccg_average_grids_boundaries_free;
BLI_task_parallel_range(0,
subdiv_ccg->num_adjacent_edges,
&boundaries_data,
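Note: the subdiv_ccg.c callbacks above (and the subdiv_foreach.c one below) never merged results; they only freed per-thread scratch buffers, so they move to the new func_free hook instead of func_reduce. A minimal sketch of that pattern follows, again using only the API shown in this diff; the NormalsTLS struct and the normals_* / recalc_normals names are hypothetical.

#include "BLI_task.h"
#include "BLI_utildefines.h"
#include "MEM_guardedalloc.h"

/* Hypothetical per-thread scratch buffer, allocated lazily by workers. */
typedef struct NormalsTLS {
  float (*scratch)[3];
} NormalsTLS;

static void normals_task_cb(void *__restrict UNUSED(userdata),
                            const int UNUSED(i),
                            const TaskParallelTLS *__restrict tls)
{
  NormalsTLS *data = tls->userdata_chunk;
  if (data->scratch == NULL) {
    data->scratch = MEM_malloc_arrayN(4, sizeof(*data->scratch), __func__);
  }
  /* ... fill and use data->scratch for this item ... */
}

static void normals_tls_free(const void *__restrict UNUSED(userdata),
                             void *__restrict tls_v)
{
  /* Called once per worker copy; nothing is merged, only released. */
  NormalsTLS *data = tls_v;
  MEM_SAFE_FREE(data->scratch);
}

static void recalc_normals(const int num_items)
{
  NormalsTLS tls_data = {NULL};
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.userdata_chunk = &tls_data;
  settings.userdata_chunk_size = sizeof(tls_data);
  settings.func_free = normals_tls_free;
  BLI_task_parallel_range(0, num_items, NULL, normals_task_cb, &settings);
}
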
diff --git a/source/blender/blenkernel/intern/subdiv_foreach.c b/source/blender/blenkernel/intern/subdiv_foreach.c
index b31fb2c9312..0884f40952f 100644
--- a/source/blender/blenkernel/intern/subdiv_foreach.c
+++ b/source/blender/blenkernel/intern/subdiv_foreach.c
@@ -1838,9 +1838,9 @@ static void subdiv_foreach_boundary_edges_task(void *__restrict userdata,
subdiv_foreach_boundary_edges(ctx, tls->userdata_chunk, edge_index);
}
-static void subdiv_foreach_finalize(void *__restrict userdata, void *__restrict userdata_chunk)
+static void subdiv_foreach_free(const void *__restrict userdata, void *__restrict userdata_chunk)
{
- SubdivForeachTaskContext *ctx = userdata;
+ const SubdivForeachTaskContext *ctx = userdata;
ctx->foreach_context->user_data_tls_free(userdata_chunk);
}
@@ -1873,7 +1873,7 @@ bool BKE_subdiv_foreach_subdiv_geometry(Subdiv *subdiv,
parallel_range_settings.userdata_chunk_size = context->user_data_tls_size;
parallel_range_settings.min_iter_per_thread = 1;
if (context->user_data_tls_free != NULL) {
- parallel_range_settings.func_finalize = subdiv_foreach_finalize;
+ parallel_range_settings.func_free = subdiv_foreach_free;
}
/* TODO(sergey): Possible optimization is to have a single pool and push all