git.blender.org/blender.git
author    Julian Eisel <julian@blender.org>  2020-06-05 14:09:31 +0300
committer Julian Eisel <julian@blender.org>  2020-06-05 14:09:31 +0300
commit    920a58d9b6d667894cf166cbbd25e4c2fbd238ea (patch)
tree      7ca5a9da640753b5e070c439ac3bdd14dfad92cf /intern/cycles/device/device_cpu.cpp
parent    c94b6209861ca7cc3985b53474feed7d94c0221a (diff)
parent    a1d55bdd530390e58c51abe9707b8d3b0ae3e861 (diff)

Merge branch 'master' into wm-drag-drop-rewrite
Diffstat (limited to 'intern/cycles/device/device_cpu.cpp')
-rw-r--r--  intern/cycles/device/device_cpu.cpp  50
1 file changed, 39 insertions(+), 11 deletions(-)
diff --git a/intern/cycles/device/device_cpu.cpp b/intern/cycles/device/device_cpu.cpp
index 57e8523e02a..fc6febd8cee 100644
--- a/intern/cycles/device/device_cpu.cpp
+++ b/intern/cycles/device/device_cpu.cpp
@@ -188,6 +188,7 @@ class CPUDevice : public Device {
convert_to_byte_kernel;
KernelFunctions<void (*)(KernelGlobals *, uint4 *, float4 *, int, int, int, int, int)>
shader_kernel;
+ KernelFunctions<void (*)(KernelGlobals *, float *, int, int, int, int, int)> bake_kernel;
KernelFunctions<void (*)(
int, TileInfo *, int, int, float *, float *, float *, float *, float *, int *, int, int)>
@@ -270,6 +271,7 @@ class CPUDevice : public Device {
REGISTER_KERNEL(convert_to_half_float),
REGISTER_KERNEL(convert_to_byte),
REGISTER_KERNEL(shader),
+ REGISTER_KERNEL(bake),
REGISTER_KERNEL(filter_divide_shadow),
REGISTER_KERNEL(filter_get_feature),
REGISTER_KERNEL(filter_write_feature),
@@ -839,7 +841,7 @@ class CPUDevice : public Device {
return true;
}
- bool adaptive_sampling_filter(KernelGlobals *kg, RenderTile &tile)
+ bool adaptive_sampling_filter(KernelGlobals *kg, RenderTile &tile, int sample)
{
WorkTile wtile;
wtile.x = tile.x;
@@ -850,11 +852,24 @@ class CPUDevice : public Device {
wtile.stride = tile.stride;
wtile.buffer = (float *)tile.buffer;
+ /* For CPU we do adaptive stopping per sample so we can stop earlier, but
+ * for combined CPU + GPU rendering we match the GPU and do it per tile
+ * after a given number of sample steps. */
+ if (!kernel_data.integrator.adaptive_stop_per_sample) {
+ for (int y = wtile.y; y < wtile.y + wtile.h; ++y) {
+ for (int x = wtile.x; x < wtile.x + wtile.w; ++x) {
+ const int index = wtile.offset + x + y * wtile.stride;
+ float *buffer = wtile.buffer + index * kernel_data.film.pass_stride;
+ kernel_do_adaptive_stopping(kg, buffer, sample);
+ }
+ }
+ }
+
bool any = false;
- for (int y = tile.y; y < tile.y + tile.h; ++y) {
+ for (int y = wtile.y; y < wtile.y + wtile.h; ++y) {
any |= kernel_do_adaptive_filter_x(kg, y, &wtile);
}
- for (int x = tile.x; x < tile.x + tile.w; ++x) {
+ for (int x = wtile.x; x < wtile.x + wtile.w; ++x) {
any |= kernel_do_adaptive_filter_y(kg, x, &wtile);
}
return (!any);
@@ -882,7 +897,7 @@ class CPUDevice : public Device {
}
}
- void path_trace(DeviceTask &task, RenderTile &tile, KernelGlobals *kg)
+ void render(DeviceTask &task, RenderTile &tile, KernelGlobals *kg)
{
const bool use_coverage = kernel_data.film.cryptomatte_passes & CRYPT_ACCURATE;
@@ -906,18 +921,27 @@ class CPUDevice : public Device {
break;
}
- for (int y = tile.y; y < tile.y + tile.h; y++) {
- for (int x = tile.x; x < tile.x + tile.w; x++) {
- if (use_coverage) {
- coverage.init_pixel(x, y);
+ if (tile.task == RenderTile::PATH_TRACE) {
+ for (int y = tile.y; y < tile.y + tile.h; y++) {
+ for (int x = tile.x; x < tile.x + tile.w; x++) {
+ if (use_coverage) {
+ coverage.init_pixel(x, y);
+ }
+ path_trace_kernel()(kg, render_buffer, sample, x, y, tile.offset, tile.stride);
+ }
+ }
+ }
+ else {
+ for (int y = tile.y; y < tile.y + tile.h; y++) {
+ for (int x = tile.x; x < tile.x + tile.w; x++) {
+ bake_kernel()(kg, render_buffer, sample, x, y, tile.offset, tile.stride);
}
- path_trace_kernel()(kg, render_buffer, sample, x, y, tile.offset, tile.stride);
}
}
tile.sample = sample + 1;
if (task.adaptive_sampling.use && task.adaptive_sampling.need_filter(sample)) {
- const bool stop = adaptive_sampling_filter(kg, tile);
+ const bool stop = adaptive_sampling_filter(kg, tile, sample);
if (stop) {
const int num_progress_samples = end_sample - sample;
tile.sample = end_sample;
@@ -1006,9 +1030,12 @@ class CPUDevice : public Device {
split_kernel->path_trace(&task, tile, kgbuffer, void_buffer);
}
else {
- path_trace(task, tile, kg);
+ render(task, tile, kg);
}
}
+ else if (tile.task == RenderTile::BAKE) {
+ render(task, tile, kg);
+ }
else if (tile.task == RenderTile::DENOISE) {
denoise(denoising, tile);
task.update_progress(&tile, tile.w * tile.h);
@@ -1327,6 +1354,7 @@ void device_cpu_info(vector<DeviceInfo> &devices)
info.id = "CPU";
info.num = 0;
info.has_volume_decoupled = true;
+ info.has_adaptive_stop_per_sample = true;
info.has_osl = true;
info.has_half_images = true;
info.has_profiling = true;
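
For readers skimming the adaptive_sampling_filter() hunk above, the following is a minimal, self-contained sketch of its new control flow. Only the loop structure and the per-sample versus per-tile decision mirror the diff; WorkTile, PASS_STRIDE, mark_converged(), filter_row_x() and filter_col_y() are simplified stand-ins invented for illustration, not the real Cycles WorkTile, kernel_data.film.pass_stride, kernel_do_adaptive_stopping() or kernel_do_adaptive_filter_x/y() kernels. When adaptive_stop_per_sample is enabled, the actual per-pixel stopping test runs inside the path tracing kernel each sample, which is why this function then skips straight to the row/column filters.

#include <cstdio>
#include <vector>

// Simplified stand-in for Cycles' WorkTile; only the fields used by the
// adaptive sampling loops above are modelled here.
struct WorkTile {
  int x, y, w, h;      // tile rectangle in pixels
  int offset, stride;  // indexing into the full render buffer
  float *buffer;       // interleaved per-pixel passes
};

static const int PASS_STRIDE = 4;  // assumed floats per pixel for this sketch

// Stand-in for kernel_do_adaptive_stopping(): decide whether a pixel has
// converged after `sample` samples and flag it in its pass data.
static void mark_converged(float *pixel, int sample)
{
  if (sample >= 16) {
    pixel[3] = -1.0f;  // a negative value serves as the "converged" flag here
  }
}

// Stand-ins for kernel_do_adaptive_filter_x/y(): report whether any pixel in
// a row/column still wants more samples (the real kernels also dilate the
// convergence mask so neighbouring pixels keep sampling).
static bool filter_row_x(const WorkTile &wt, int y)
{
  bool any_active = false;
  for (int x = wt.x; x < wt.x + wt.w; ++x) {
    const int index = wt.offset + x + y * wt.stride;
    any_active |= (wt.buffer[index * PASS_STRIDE + 3] >= 0.0f);
  }
  return any_active;
}

static bool filter_col_y(const WorkTile &wt, int x)
{
  bool any_active = false;
  for (int y = wt.y; y < wt.y + wt.h; ++y) {
    const int index = wt.offset + x + y * wt.stride;
    any_active |= (wt.buffer[index * PASS_STRIDE + 3] >= 0.0f);
  }
  return any_active;
}

// Mirrors the control flow of the new adaptive_sampling_filter(kg, tile, sample):
// returns true once the whole tile has converged and sampling can stop early.
static bool adaptive_sampling_filter_sketch(const WorkTile &wtile, int sample,
                                            bool stop_per_sample)
{
  if (!stop_per_sample) {
    // Combined CPU + GPU rendering: match the GPU and evaluate stopping for
    // every pixel of the tile only after a block of samples, as in the new
    // loop guarded by !kernel_data.integrator.adaptive_stop_per_sample.
    for (int y = wtile.y; y < wtile.y + wtile.h; ++y) {
      for (int x = wtile.x; x < wtile.x + wtile.w; ++x) {
        const int index = wtile.offset + x + y * wtile.stride;
        mark_converged(wtile.buffer + index * PASS_STRIDE, sample);
      }
    }
  }

  bool any = false;
  for (int y = wtile.y; y < wtile.y + wtile.h; ++y) {
    any |= filter_row_x(wtile, y);
  }
  for (int x = wtile.x; x < wtile.x + wtile.w; ++x) {
    any |= filter_col_y(wtile, x);
  }
  return !any;
}

int main()
{
  std::vector<float> passes(16 * 16 * PASS_STRIDE, 0.0f);
  WorkTile wtile = {0, 0, 16, 16, 0, 16, passes.data()};
  const bool stop = adaptive_sampling_filter_sketch(wtile, /*sample=*/16,
                                                    /*stop_per_sample=*/false);
  std::printf("tile converged: %s\n", stop ? "yes" : "no");
  return 0;
}

Compiled standalone, this prints "tile converged: yes" once every pixel of the 16x16 tile has been flagged, which corresponds to the case where render() above sets tile.sample to end_sample and stops the tile early.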