git.blender.org/blender.git
author    Brecht Van Lommel <brecht@blender.org>  2022-04-18 19:32:56 +0300
committer Brecht Van Lommel <brecht@blender.org>  2022-04-18 20:14:36 +0300
commit    2cb76a6c8d69cf76bff7691ee703ef53375b4cac (patch)
tree      260a6a0744519f40342bff93a551fe88dc839b2a /intern
parent    029b0df81aa116a3e29f405dc8902834242d5338 (diff)
Cleanup: consistently use parallel_for without tbb namespace in Cycles
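This change is mechanical: every call site keeps the same arguments and lambda, and only the tbb:: qualifier is dropped. The unqualified name resolves because Cycles re-exports the TBB primitives it uses into its own namespace. A minimal sketch of that alias header, modeled on intern/cycles/util/tbb.h (the exact contents below are an assumption for illustration, not a quote of the real header):

    #include <tbb/parallel_for.h>
    #include <tbb/parallel_for_each.h>

    namespace ccl {  /* what CCL_NAMESPACE_BEGIN expands to */

    /* Re-export the TBB entry points so call sites inside the Cycles
     * namespace can write parallel_for(...) without the tbb:: prefix. */
    using tbb::parallel_for;
    using tbb::parallel_for_each;

    }  /* namespace ccl */

With such using-declarations in place, qualified and unqualified spellings name the same function, which is why the substitutions below change no behavior.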
Diffstat (limited to 'intern')
 intern/cycles/device/metal/kernel.mm             |  2 +-
 intern/cycles/integrator/pass_accessor_cpu.cpp   |  4 ++--
 intern/cycles/integrator/path_trace.cpp          | 36 ++++++++++----------
 intern/cycles/integrator/path_trace_work_cpu.cpp |  8 ++++----
 intern/cycles/integrator/shader_eval.cpp         |  2 +-
 5 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/intern/cycles/device/metal/kernel.mm b/intern/cycles/device/metal/kernel.mm
index 1434b297ddd..9555ca03c8e 100644
--- a/intern/cycles/device/metal/kernel.mm
+++ b/intern/cycles/device/metal/kernel.mm
@@ -459,7 +459,7 @@ bool MetalDeviceKernels::load(MetalDevice *device, int kernel_type)
tbb::task_arena local_arena(max_mtlcompiler_threads);
local_arena.execute([&]() {
- tbb::parallel_for(int(0), int(DEVICE_KERNEL_NUM), [&](int i) {
+ parallel_for(int(0), int(DEVICE_KERNEL_NUM), [&](int i) {
/* skip megakernel */
if (i == DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL) {
return;
diff --git a/intern/cycles/integrator/pass_accessor_cpu.cpp b/intern/cycles/integrator/pass_accessor_cpu.cpp
index 509190c8a7e..02260a54bf4 100644
--- a/intern/cycles/integrator/pass_accessor_cpu.cpp
+++ b/intern/cycles/integrator/pass_accessor_cpu.cpp
@@ -44,7 +44,7 @@ inline void PassAccessorCPU::run_get_pass_kernel_processor_float(
const int pixel_stride = destination.pixel_stride ? destination.pixel_stride :
destination.num_components;
- tbb::parallel_for(0, buffer_params.window_height, [&](int64_t y) {
+ parallel_for(0, buffer_params.window_height, [&](int64_t y) {
const float *buffer = window_data + y * buffer_row_stride;
float *pixel = destination.pixels +
(y * buffer_params.width + destination.offset) * pixel_stride;
@@ -69,7 +69,7 @@ inline void PassAccessorCPU::run_get_pass_kernel_processor_half_rgba(
const int destination_stride = destination.stride != 0 ? destination.stride :
buffer_params.width;
- tbb::parallel_for(0, buffer_params.window_height, [&](int64_t y) {
+ parallel_for(0, buffer_params.window_height, [&](int64_t y) {
const float *buffer = window_data + y * buffer_row_stride;
half4 *pixel = dst_start + y * destination_stride;
func(kfilm_convert, buffer, pixel, buffer_params.window_width, pass_stride);
diff --git a/intern/cycles/integrator/path_trace.cpp b/intern/cycles/integrator/path_trace.cpp
index 6fddbf56edc..4ecd3b829e8 100644
--- a/intern/cycles/integrator/path_trace.cpp
+++ b/intern/cycles/integrator/path_trace.cpp
@@ -334,7 +334,7 @@ void PathTrace::init_render_buffers(const RenderWork &render_work)
/* Handle initialization scheduled by the render scheduler. */
if (render_work.init_render_buffers) {
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->zero_render_buffers();
});
@@ -357,7 +357,7 @@ void PathTrace::path_trace(RenderWork &render_work)
thread_capture_fp_settings();
- tbb::parallel_for(0, num_works, [&](int i) {
+ parallel_for(0, num_works, [&](int i) {
const double work_start_time = time_dt();
const int num_samples = render_work.path_trace.num_samples;
@@ -407,7 +407,7 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
const double start_time = time_dt();
uint num_active_pixels = 0;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
const uint num_active_pixels_in_work =
path_trace_work->adaptive_sampling_converge_filter_count_active(
render_work.adaptive_sampling.threshold, render_work.adaptive_sampling.reset);
@@ -485,7 +485,7 @@ void PathTrace::cryptomatte_postprocess(const RenderWork &render_work)
}
VLOG(3) << "Perform cryptomatte work.";
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->cryptomatte_postproces();
});
}
@@ -538,7 +538,7 @@ void PathTrace::denoise(const RenderWork &render_work)
if (multi_device_buffers) {
multi_device_buffers->copy_from_device();
- tbb::parallel_for_each(
+ parallel_for_each(
path_trace_works_, [&multi_device_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->copy_from_denoised_render_buffers(multi_device_buffers.get());
});
@@ -808,7 +808,7 @@ void PathTrace::tile_buffer_read()
}
/* Read buffers back from device. */
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->copy_render_buffers_from_device();
});
@@ -816,7 +816,7 @@ void PathTrace::tile_buffer_read()
PathTraceTile tile(*this);
if (output_driver_->read_render_tile(tile)) {
/* Copy buffers to device again. */
- tbb::parallel_for_each(path_trace_works_, [](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->copy_render_buffers_to_device();
});
}
@@ -880,20 +880,20 @@ void PathTrace::progress_set_status(const string &status, const string &substatu
void PathTrace::copy_to_render_buffers(RenderBuffers *render_buffers)
{
- tbb::parallel_for_each(path_trace_works_,
- [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
- path_trace_work->copy_to_render_buffers(render_buffers);
- });
+ parallel_for_each(path_trace_works_,
+ [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
+ path_trace_work->copy_to_render_buffers(render_buffers);
+ });
render_buffers->copy_to_device();
}
void PathTrace::copy_from_render_buffers(RenderBuffers *render_buffers)
{
render_buffers->copy_from_device();
- tbb::parallel_for_each(path_trace_works_,
- [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
- path_trace_work->copy_from_render_buffers(render_buffers);
- });
+ parallel_for_each(path_trace_works_,
+ [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
+ path_trace_work->copy_from_render_buffers(render_buffers);
+ });
}
bool PathTrace::copy_render_tile_from_device()
@@ -905,7 +905,7 @@ bool PathTrace::copy_render_tile_from_device()
bool success = true;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
if (!success) {
return;
}
@@ -1006,7 +1006,7 @@ bool PathTrace::get_render_tile_pixels(const PassAccessor &pass_accessor,
bool success = true;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
if (!success) {
return;
}
@@ -1023,7 +1023,7 @@ bool PathTrace::set_render_tile_pixels(PassAccessor &pass_accessor,
{
bool success = true;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
if (!success) {
return;
}
diff --git a/intern/cycles/integrator/path_trace_work_cpu.cpp b/intern/cycles/integrator/path_trace_work_cpu.cpp
index 147e273284b..518ef3185f9 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_cpu.cpp
@@ -73,7 +73,7 @@ void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
tbb::task_arena local_arena = local_tbb_arena_create(device_);
local_arena.execute([&]() {
- tbb::parallel_for(int64_t(0), total_pixels_num, [&](int64_t work_index) {
+ parallel_for(int64_t(0), total_pixels_num, [&](int64_t work_index) {
if (is_cancel_requested()) {
return;
}
@@ -219,7 +219,7 @@ int PathTraceWorkCPU::adaptive_sampling_converge_filter_count_active(float thres
/* Check convergency and do x-filter in a single `parallel_for`, to reduce threading overhead. */
local_arena.execute([&]() {
- tbb::parallel_for(full_y, full_y + height, [&](int y) {
+ parallel_for(full_y, full_y + height, [&](int y) {
CPUKernelThreadGlobals *kernel_globals = &kernel_thread_globals_[0];
bool row_converged = true;
@@ -243,7 +243,7 @@ int PathTraceWorkCPU::adaptive_sampling_converge_filter_count_active(float thres
if (num_active_pixels) {
local_arena.execute([&]() {
- tbb::parallel_for(full_x, full_x + width, [&](int x) {
+ parallel_for(full_x, full_x + width, [&](int x) {
CPUKernelThreadGlobals *kernel_globals = &kernel_thread_globals_[0];
kernels_.adaptive_sampling_filter_y(
kernel_globals, render_buffer, x, full_y, height, offset, stride);
@@ -265,7 +265,7 @@ void PathTraceWorkCPU::cryptomatte_postproces()
/* Check convergency and do x-filter in a single `parallel_for`, to reduce threading overhead. */
local_arena.execute([&]() {
- tbb::parallel_for(0, height, [&](int y) {
+ parallel_for(0, height, [&](int y) {
CPUKernelThreadGlobals *kernel_globals = &kernel_thread_globals_[0];
int pixel_index = y * width;
diff --git a/intern/cycles/integrator/shader_eval.cpp b/intern/cycles/integrator/shader_eval.cpp
index f5036b4020d..92b9d1c662d 100644
--- a/intern/cycles/integrator/shader_eval.cpp
+++ b/intern/cycles/integrator/shader_eval.cpp
@@ -92,7 +92,7 @@ bool ShaderEval::eval_cpu(Device *device,
tbb::task_arena local_arena(device->info.cpu_threads);
local_arena.execute([&]() {
- tbb::parallel_for(int64_t(0), work_size, [&](int64_t work_index) {
+ parallel_for(int64_t(0), work_size, [&](int64_t work_index) {
/* TODO: is this fast enough? */
if (progress_.get_cancel()) {
success = false;
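For reference, the recurring pattern in these files pins a loop to a tbb::task_arena and runs it inside that arena. A standalone sketch of the pattern (the arena size and loop bounds here are made up for illustration, and the using-declaration stands in for the Cycles alias sketched above):

    #include <tbb/task_arena.h>
    #include <tbb/parallel_for.h>

    using tbb::parallel_for;  /* stand-in for the Cycles namespace alias */

    int main()
    {
      /* Cap this loop at 4 worker threads, the way the Metal kernel
       * compilation and CPU render paths size their own arenas. */
      tbb::task_arena local_arena(4);
      local_arena.execute([&]() {
        parallel_for(0, 1024, [&](int i) {
          /* per-index work executes on the arena's threads */
          (void)i;
        });
      });
      return 0;
    }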