git.blender.org/blender.git
author    Stefan Werner <stefan.werner@tangent-animation.com>  2020-03-05 14:05:42 +0300
committer Stefan Werner <stefan.werner@tangent-animation.com>  2020-03-05 14:21:38 +0300
commit    51e898324de30c0985a80e5bc067358b5ccedbfc (patch)
tree      5efddead1b7ca5655f1d6d2422b59e7da51fe271 /intern/cycles/device
parent    4ccbbd308060f0330472828b317c59e054c9ee7b (diff)
Adaptive Sampling for Cycles.
This feature takes some inspiration from
"RenderMan: An Advanced Path Tracing Architecture for Movie Rendering" and
"A Hierarchical Automatic Stopping Condition for Monte Carlo Global Illumination"

The basic principle is as follows:
While samples are being added to a pixel, the adaptive sampler writes half
of the samples to a separate buffer. This gives it two separate estimates
of the same pixel, and by comparing their difference it estimates convergence.
Once convergence drops below a given threshold, the pixel is considered done.

When a pixel has not converged yet and needs more samples than the minimum,
its immediate neighbors are also set to take more samples. This is done in
order to more reliably detect sharp features such as caustics. A 3x3 box
filter that is run periodically over the tile buffer is used for that purpose.

After a tile has finished rendering, the values of all passes are scaled
as if they were rendered with the full number of samples. This way, any
code operating on these buffers, for example the denoiser, does not need
to be changed for per-pixel sample counts.

Reviewed By: brecht, #cycles

Differential Revision: https://developer.blender.org/D4686
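
Editor's note: as a rough, self-contained sketch of the stopping test described above (all names and the exact arithmetic are placeholders chosen for clarity; the real per-pixel test lives in the kernel code, e.g. kernel_adaptive_sampling.h, and differs in detail), the "two estimates of the same pixel" idea can be illustrated like this:

    #include <cmath>

    /* Editor's illustration, not part of the patch: one buffer accumulates
     * all samples, a second buffer accumulates every other sample. The
     * difference between the two estimates serves as a convergence measure. */
    struct PixelAccum {
      float full_sum[3]; /* RGB sum over all samples taken so far */
      float half_sum[3]; /* RGB sum over every second sample */
      int num_samples;   /* total samples taken so far (assumed even here) */
    };

    static bool pixel_has_converged(const PixelAccum &p, float threshold)
    {
      float error = 0.0f, brightness = 0.0f;
      for (int c = 0; c < 3; c++) {
        const float mean_full = p.full_sum[c] / p.num_samples;
        const float mean_half = p.half_sum[c] / (0.5f * p.num_samples);
        error += std::fabs(mean_full - mean_half);
        brightness += mean_full;
      }
      /* Normalize by brightness so bright and dark pixels are judged on a
       * comparable relative scale; guard against division by zero. */
      return error / std::fmax(std::sqrt(brightness), 1e-4f) < threshold;
    }

Once a pixel passes this kind of test it stops taking samples, which is why the pass values must later be rescaled to look as if the full sample count had been used.
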
Diffstat (limited to 'intern/cycles/device')
-rw-r--r--  intern/cycles/device/cuda/device_cuda.h             22
-rw-r--r--  intern/cycles/device/cuda/device_cuda_impl.cpp     126
-rw-r--r--  intern/cycles/device/device_cpu.cpp                 62
-rw-r--r--  intern/cycles/device/device_optix.cpp               27
-rw-r--r--  intern/cycles/device/device_split_kernel.cpp        67
-rw-r--r--  intern/cycles/device/device_split_kernel.h           4
-rw-r--r--  intern/cycles/device/device_task.cpp                55
-rw-r--r--  intern/cycles/device/device_task.h                  14
-rw-r--r--  intern/cycles/device/opencl/device_opencl.h          1
-rw-r--r--  intern/cycles/device/opencl/device_opencl_impl.cpp  10
10 files changed, 377 insertions, 11 deletions
diff --git a/intern/cycles/device/cuda/device_cuda.h b/intern/cycles/device/cuda/device_cuda.h
index a825bd6b128..6a0b39434aa 100644
--- a/intern/cycles/device/cuda/device_cuda.h
+++ b/intern/cycles/device/cuda/device_cuda.h
@@ -82,6 +82,17 @@ class CUDADevice : public Device {
device_vector<TextureInfo> texture_info;
bool need_texture_info;
+ /* Kernels */
+ struct {
+ bool loaded;
+
+ CUfunction adaptive_stopping;
+ CUfunction adaptive_filter_x;
+ CUfunction adaptive_filter_y;
+ CUfunction adaptive_scale_samples;
+ int adaptive_num_threads_per_block;
+ } functions;
+
static bool have_precompiled_kernels();
virtual bool show_samples() const;
@@ -114,6 +125,8 @@ class CUDADevice : public Device {
virtual bool load_kernels(const DeviceRequestedFeatures &requested_features);
+ void load_functions();
+
void reserve_local_memory(const DeviceRequestedFeatures &requested_features);
void init_host_memory();
@@ -197,6 +210,15 @@ class CUDADevice : public Device {
void denoise(RenderTile &rtile, DenoisingTask &denoising);
+ void adaptive_sampling_filter(uint filter_sample,
+ WorkTile *wtile,
+ CUdeviceptr d_wtile,
+ CUstream stream = 0);
+ void adaptive_sampling_post(RenderTile &rtile,
+ WorkTile *wtile,
+ CUdeviceptr d_wtile,
+ CUstream stream = 0);
+
void path_trace(DeviceTask &task, RenderTile &rtile, device_vector<WorkTile> &work_tiles);
void film_convert(DeviceTask &task,
diff --git a/intern/cycles/device/cuda/device_cuda_impl.cpp b/intern/cycles/device/cuda/device_cuda_impl.cpp
index 4a7c45d8b93..11dd9b69f10 100644
--- a/intern/cycles/device/cuda/device_cuda_impl.cpp
+++ b/intern/cycles/device/cuda/device_cuda_impl.cpp
@@ -208,6 +208,8 @@ CUDADevice::CUDADevice(DeviceInfo &info, Stats &stats, Profiler &profiler, bool
map_host_used = 0;
can_map_host = 0;
+ functions.loaded = false;
+
/* Initialize CUDA. */
if (cuda_error(cuInit(0)))
return;
@@ -531,9 +533,42 @@ bool CUDADevice::load_kernels(const DeviceRequestedFeatures &requested_features)
reserve_local_memory(requested_features);
}
+ load_functions();
+
return (result == CUDA_SUCCESS);
}
+void CUDADevice::load_functions()
+{
+ /* TODO: load all functions here. */
+ if (functions.loaded) {
+ return;
+ }
+ functions.loaded = true;
+
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_stopping, cuModule, "kernel_cuda_adaptive_stopping"));
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_filter_x, cuModule, "kernel_cuda_adaptive_filter_x"));
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_filter_y, cuModule, "kernel_cuda_adaptive_filter_y"));
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_scale_samples, cuModule, "kernel_cuda_adaptive_scale_samples"));
+
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_stopping, CU_FUNC_CACHE_PREFER_L1));
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_filter_x, CU_FUNC_CACHE_PREFER_L1));
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_filter_y, CU_FUNC_CACHE_PREFER_L1));
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_scale_samples, CU_FUNC_CACHE_PREFER_L1));
+
+ int unused_min_blocks;
+ cuda_assert(cuOccupancyMaxPotentialBlockSize(&unused_min_blocks,
+ &functions.adaptive_num_threads_per_block,
+ functions.adaptive_scale_samples,
+ NULL,
+ 0,
+ 0));
+}
+
void CUDADevice::reserve_local_memory(const DeviceRequestedFeatures &requested_features)
{
if (use_split_kernel()) {
@@ -1666,6 +1701,80 @@ void CUDADevice::denoise(RenderTile &rtile, DenoisingTask &denoising)
denoising.run_denoising(&rtile);
}
+void CUDADevice::adaptive_sampling_filter(uint filter_sample,
+ WorkTile *wtile,
+ CUdeviceptr d_wtile,
+ CUstream stream)
+{
+ const int num_threads_per_block = functions.adaptive_num_threads_per_block;
+
+ /* These are a series of tiny kernels because there is no grid synchronisation
+ * from within a kernel, so multiple kernel launches it is.*/
+ uint total_work_size = wtile->h * wtile->w;
+ void *args2[] = {&d_wtile, &filter_sample, &total_work_size};
+ uint num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_stopping,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args2,
+ 0));
+ total_work_size = wtile->h;
+ num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_filter_x,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args2,
+ 0));
+ total_work_size = wtile->w;
+ num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_filter_y,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args2,
+ 0));
+}
+
+void CUDADevice::adaptive_sampling_post(RenderTile &rtile,
+ WorkTile *wtile,
+ CUdeviceptr d_wtile,
+ CUstream stream)
+{
+ const int num_threads_per_block = functions.adaptive_num_threads_per_block;
+ uint total_work_size = wtile->h * wtile->w;
+
+ void *args[] = {&d_wtile, &rtile.start_sample, &rtile.sample, &total_work_size};
+ uint num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_scale_samples,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args,
+ 0));
+}
+
void CUDADevice::path_trace(DeviceTask &task,
RenderTile &rtile,
device_vector<WorkTile> &work_tiles)
@@ -1715,6 +1824,9 @@ void CUDADevice::path_trace(DeviceTask &task,
}
uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
+ if (task.adaptive_sampling.use) {
+ step_samples = task.adaptive_sampling.align_static_samples(step_samples);
+ }
/* Render all samples. */
int start_sample = rtile.start_sample;
@@ -1736,6 +1848,12 @@ void CUDADevice::path_trace(DeviceTask &task,
cuda_assert(
cuLaunchKernel(cuPathTrace, num_blocks, 1, 1, num_threads_per_block, 1, 1, 0, 0, args, 0));
+ /* Run the adaptive sampling kernels at selected samples aligned to step samples. */
+ uint filter_sample = sample + wtile->num_samples - 1;
+ if (task.adaptive_sampling.use && task.adaptive_sampling.need_filter(filter_sample)) {
+ adaptive_sampling_filter(filter_sample, wtile, d_work_tiles);
+ }
+
cuda_assert(cuCtxSynchronize());
/* Update progress. */
@@ -1747,6 +1865,14 @@ void CUDADevice::path_trace(DeviceTask &task,
break;
}
}
+
+ /* Finalize adaptive sampling. */
+ if (task.adaptive_sampling.use) {
+ CUdeviceptr d_work_tiles = (CUdeviceptr)work_tiles.device_pointer;
+ adaptive_sampling_post(rtile, wtile, d_work_tiles);
+ cuda_assert(cuCtxSynchronize());
+ task.update_progress(&rtile, rtile.w * rtile.h * wtile->num_samples);
+ }
}
void CUDADevice::film_convert(DeviceTask &task,
diff --git a/intern/cycles/device/device_cpu.cpp b/intern/cycles/device/device_cpu.cpp
index 1c9d2227ac3..cf239ec39e2 100644
--- a/intern/cycles/device/device_cpu.cpp
+++ b/intern/cycles/device/device_cpu.cpp
@@ -34,6 +34,7 @@
#include "kernel/kernel_types.h"
#include "kernel/split/kernel_split_data.h"
#include "kernel/kernel_globals.h"
+#include "kernel/kernel_adaptive_sampling.h"
#include "kernel/filter/filter.h"
@@ -317,6 +318,10 @@ class CPUDevice : public Device {
REGISTER_SPLIT_KERNEL(next_iteration_setup);
REGISTER_SPLIT_KERNEL(indirect_subsurface);
REGISTER_SPLIT_KERNEL(buffer_update);
+ REGISTER_SPLIT_KERNEL(adaptive_stopping);
+ REGISTER_SPLIT_KERNEL(adaptive_filter_x);
+ REGISTER_SPLIT_KERNEL(adaptive_filter_y);
+ REGISTER_SPLIT_KERNEL(adaptive_adjust_samples);
#undef REGISTER_SPLIT_KERNEL
#undef KERNEL_FUNCTIONS
}
@@ -823,6 +828,50 @@ class CPUDevice : public Device {
return true;
}
+ bool adaptive_sampling_filter(KernelGlobals *kg, RenderTile &tile, int sample)
+ {
+ WorkTile wtile;
+ wtile.x = tile.x;
+ wtile.y = tile.y;
+ wtile.w = tile.w;
+ wtile.h = tile.h;
+ wtile.offset = tile.offset;
+ wtile.stride = tile.stride;
+ wtile.buffer = (float *)tile.buffer;
+
+ bool any = false;
+ for (int y = tile.y; y < tile.y + tile.h; ++y) {
+ any |= kernel_do_adaptive_filter_x(kg, y, &wtile);
+ }
+ for (int x = tile.x; x < tile.x + tile.w; ++x) {
+ any |= kernel_do_adaptive_filter_y(kg, x, &wtile);
+ }
+
+ return (!any);
+ }
+
+ void adaptive_sampling_post(const DeviceTask &task, const RenderTile &tile, KernelGlobals *kg)
+ {
+ float *render_buffer = (float *)tile.buffer;
+ for (int y = tile.y; y < tile.y + tile.h; y++) {
+ for (int x = tile.x; x < tile.x + tile.w; x++) {
+ int index = tile.offset + x + y * tile.stride;
+ ccl_global float *buffer = render_buffer + index * kernel_data.film.pass_stride;
+ if (buffer[kernel_data.film.pass_sample_count] < 0.0f) {
+ buffer[kernel_data.film.pass_sample_count] = -buffer[kernel_data.film.pass_sample_count];
+ float sample_multiplier = tile.sample / max((float)tile.start_sample + 1.0f,
+ buffer[kernel_data.film.pass_sample_count]);
+ if (sample_multiplier != 1.0f) {
+ kernel_adaptive_post_adjust(kg, buffer, sample_multiplier);
+ }
+ }
+ else {
+ kernel_adaptive_post_adjust(kg, buffer, tile.sample / (tile.sample - 1.0f));
+ }
+ }
+ }
+ }
+
void path_trace(DeviceTask &task, RenderTile &tile, KernelGlobals *kg)
{
const bool use_coverage = kernel_data.film.cryptomatte_passes & CRYPT_ACCURATE;
@@ -855,14 +904,25 @@ class CPUDevice : public Device {
path_trace_kernel()(kg, render_buffer, sample, x, y, tile.offset, tile.stride);
}
}
-
tile.sample = sample + 1;
task.update_progress(&tile, tile.w * tile.h);
+
+ if (task.adaptive_sampling.use && task.adaptive_sampling.need_filter(sample)) {
+ const bool stop = adaptive_sampling_filter(kg, tile, sample);
+ if (stop) {
+ tile.sample = end_sample;
+ break;
+ }
+ }
}
if (use_coverage) {
coverage.finalize();
}
+
+ if (task.adaptive_sampling.use) {
+ adaptive_sampling_post(task, tile, kg);
+ }
}
void denoise(DenoisingTask &denoising, RenderTile &tile)
diff --git a/intern/cycles/device/device_optix.cpp b/intern/cycles/device/device_optix.cpp
index 61a5c74f69e..30d624fdf7c 100644
--- a/intern/cycles/device/device_optix.cpp
+++ b/intern/cycles/device/device_optix.cpp
@@ -627,7 +627,11 @@ class OptiXDevice : public CUDADevice {
const int end_sample = rtile.start_sample + rtile.num_samples;
// Keep this number reasonable to avoid running into TDRs
- const int step_samples = (info.display_device ? 8 : 32);
+ int step_samples = (info.display_device ? 8 : 32);
+ if (task.adaptive_sampling.use) {
+ step_samples = task.adaptive_sampling.align_static_samples(step_samples);
+ }
+
// Offset into launch params buffer so that streams use separate data
device_ptr launch_params_ptr = launch_params.device_pointer +
thread_index * launch_params.data_elements;
@@ -638,10 +642,9 @@ class OptiXDevice : public CUDADevice {
// Copy work tile information to device
wtile.num_samples = min(step_samples, end_sample - sample);
wtile.start_sample = sample;
- check_result_cuda(cuMemcpyHtoDAsync(launch_params_ptr + offsetof(KernelParams, tile),
- &wtile,
- sizeof(wtile),
- cuda_stream[thread_index]));
+ device_ptr d_wtile_ptr = launch_params_ptr + offsetof(KernelParams, tile);
+ check_result_cuda(
+ cuMemcpyHtoDAsync(d_wtile_ptr, &wtile, sizeof(wtile), cuda_stream[thread_index]));
OptixShaderBindingTable sbt_params = {};
sbt_params.raygenRecord = sbt_data.device_pointer + PG_RGEN * sizeof(SbtRecord);
@@ -666,6 +669,12 @@ class OptiXDevice : public CUDADevice {
wtile.h,
1));
+ // Run the adaptive sampling kernels at selected samples aligned to step samples.
+ uint filter_sample = wtile.start_sample + wtile.num_samples - 1;
+ if (task.adaptive_sampling.use && task.adaptive_sampling.need_filter(filter_sample)) {
+ adaptive_sampling_filter(filter_sample, &wtile, d_wtile_ptr, cuda_stream[thread_index]);
+ }
+
// Wait for launch to finish
check_result_cuda(cuStreamSynchronize(cuda_stream[thread_index]));
@@ -677,6 +686,14 @@ class OptiXDevice : public CUDADevice {
if (task.get_cancel() && !task.need_finish_queue)
return; // Cancel rendering
}
+
+ // Finalize adaptive sampling
+ if (task.adaptive_sampling.use) {
+ device_ptr d_wtile_ptr = launch_params_ptr + offsetof(KernelParams, tile);
+ adaptive_sampling_post(rtile, &wtile, d_wtile_ptr, cuda_stream[thread_index]);
+ check_result_cuda(cuStreamSynchronize(cuda_stream[thread_index]));
+ task.update_progress(&rtile, rtile.w * rtile.h * wtile.num_samples);
+ }
}
bool launch_denoise(DeviceTask &task, RenderTile &rtile)
diff --git a/intern/cycles/device/device_split_kernel.cpp b/intern/cycles/device/device_split_kernel.cpp
index 42e597a34d7..f22d8761058 100644
--- a/intern/cycles/device/device_split_kernel.cpp
+++ b/intern/cycles/device/device_split_kernel.cpp
@@ -55,6 +55,10 @@ DeviceSplitKernel::DeviceSplitKernel(Device *device)
kernel_next_iteration_setup = NULL;
kernel_indirect_subsurface = NULL;
kernel_buffer_update = NULL;
+ kernel_adaptive_stopping = NULL;
+ kernel_adaptive_filter_x = NULL;
+ kernel_adaptive_filter_y = NULL;
+ kernel_adaptive_adjust_samples = NULL;
}
DeviceSplitKernel::~DeviceSplitKernel()
@@ -83,6 +87,10 @@ DeviceSplitKernel::~DeviceSplitKernel()
delete kernel_next_iteration_setup;
delete kernel_indirect_subsurface;
delete kernel_buffer_update;
+ delete kernel_adaptive_stopping;
+ delete kernel_adaptive_filter_x;
+ delete kernel_adaptive_filter_y;
+ delete kernel_adaptive_adjust_samples;
}
bool DeviceSplitKernel::load_kernels(const DeviceRequestedFeatures &requested_features)
@@ -114,6 +122,10 @@ bool DeviceSplitKernel::load_kernels(const DeviceRequestedFeatures &requested_fe
LOAD_KERNEL(next_iteration_setup);
LOAD_KERNEL(indirect_subsurface);
LOAD_KERNEL(buffer_update);
+ LOAD_KERNEL(adaptive_stopping);
+ LOAD_KERNEL(adaptive_filter_x);
+ LOAD_KERNEL(adaptive_filter_y);
+ LOAD_KERNEL(adaptive_adjust_samples);
#undef LOAD_KERNEL
@@ -202,13 +214,21 @@ bool DeviceSplitKernel::path_trace(DeviceTask *task,
/* initial guess to start rolling average */
const int initial_num_samples = 1;
/* approx number of samples per second */
- int samples_per_second = (avg_time_per_sample > 0.0) ?
- int(double(time_multiplier) / avg_time_per_sample) + 1 :
- initial_num_samples;
+ const int samples_per_second = (avg_time_per_sample > 0.0) ?
+ int(double(time_multiplier) / avg_time_per_sample) + 1 :
+ initial_num_samples;
RenderTile subtile = tile;
subtile.start_sample = tile.sample;
- subtile.num_samples = min(samples_per_second,
+ subtile.num_samples = samples_per_second;
+
+ if (task->adaptive_sampling.use) {
+ subtile.num_samples = task->adaptive_sampling.align_dynamic_samples(subtile.start_sample,
+ subtile.num_samples);
+ }
+
+ /* Don't go beyond requested number of samples. */
+ subtile.num_samples = min(subtile.num_samples,
tile.start_sample + tile.num_samples - tile.sample);
if (device->have_error()) {
@@ -302,6 +322,23 @@ bool DeviceSplitKernel::path_trace(DeviceTask *task,
}
}
+ int filter_sample = tile.sample + subtile.num_samples - 1;
+ if (task->adaptive_sampling.use && task->adaptive_sampling.need_filter(filter_sample)) {
+ size_t buffer_size[2];
+ buffer_size[0] = round_up(tile.w, local_size[0]);
+ buffer_size[1] = round_up(tile.h, local_size[1]);
+ kernel_adaptive_stopping->enqueue(
+ KernelDimensions(buffer_size, local_size), kgbuffer, kernel_data);
+ buffer_size[0] = round_up(tile.h, local_size[0]);
+ buffer_size[1] = round_up(1, local_size[1]);
+ kernel_adaptive_filter_x->enqueue(
+ KernelDimensions(buffer_size, local_size), kgbuffer, kernel_data);
+ buffer_size[0] = round_up(tile.w, local_size[0]);
+ buffer_size[1] = round_up(1, local_size[1]);
+ kernel_adaptive_filter_y->enqueue(
+ KernelDimensions(buffer_size, local_size), kgbuffer, kernel_data);
+ }
+
double time_per_sample = ((time_dt() - start_time) / subtile.num_samples);
if (avg_time_per_sample == 0.0) {
@@ -324,6 +361,28 @@ bool DeviceSplitKernel::path_trace(DeviceTask *task,
}
}
+ if (task->adaptive_sampling.use) {
+ /* Reset the start samples. */
+ RenderTile subtile = tile;
+ subtile.start_sample = tile.start_sample;
+ subtile.num_samples = tile.sample - tile.start_sample;
+ enqueue_split_kernel_data_init(KernelDimensions(global_size, local_size),
+ subtile,
+ num_global_elements,
+ kgbuffer,
+ kernel_data,
+ split_data,
+ ray_state,
+ queue_index,
+ use_queues_flag,
+ work_pool_wgs);
+ size_t buffer_size[2];
+ buffer_size[0] = round_up(tile.w, local_size[0]);
+ buffer_size[1] = round_up(tile.h, local_size[1]);
+ kernel_adaptive_adjust_samples->enqueue(
+ KernelDimensions(buffer_size, local_size), kgbuffer, kernel_data);
+ }
+
return true;
}
diff --git a/intern/cycles/device/device_split_kernel.h b/intern/cycles/device/device_split_kernel.h
index 6ff326bf214..9d6b9efdd62 100644
--- a/intern/cycles/device/device_split_kernel.h
+++ b/intern/cycles/device/device_split_kernel.h
@@ -75,6 +75,10 @@ class DeviceSplitKernel {
SplitKernelFunction *kernel_next_iteration_setup;
SplitKernelFunction *kernel_indirect_subsurface;
SplitKernelFunction *kernel_buffer_update;
+ SplitKernelFunction *kernel_adaptive_stopping;
+ SplitKernelFunction *kernel_adaptive_filter_x;
+ SplitKernelFunction *kernel_adaptive_filter_y;
+ SplitKernelFunction *kernel_adaptive_adjust_samples;
/* Global memory variables [porting]; These memory is used for
* co-operation between different kernels; Data written by one
diff --git a/intern/cycles/device/device_task.cpp b/intern/cycles/device/device_task.cpp
index 36522b874ab..c36b1344c3b 100644
--- a/intern/cycles/device/device_task.cpp
+++ b/intern/cycles/device/device_task.cpp
@@ -136,4 +136,59 @@ void DeviceTask::update_progress(RenderTile *rtile, int pixel_samples)
}
}
+/* Adaptive Sampling */
+
+AdaptiveSampling::AdaptiveSampling()
+ : use(true), adaptive_step(ADAPTIVE_SAMPLE_STEP), min_samples(0)
+{
+}
+
+/* Render samples in steps that align with the adaptive filtering. */
+int AdaptiveSampling::align_static_samples(int samples) const
+{
+ if (samples > adaptive_step) {
+ /* Make multiple of adaptive_step. */
+ while (samples % adaptive_step != 0) {
+ samples--;
+ }
+ }
+ else if (samples < adaptive_step) {
+ /* Make divisor of adaptive_step. */
+ while (adaptive_step % samples != 0) {
+ samples--;
+ }
+ }
+
+ return max(samples, 1);
+}
+
+/* Render samples in steps that align with the adaptive filtering, with the
+ * suggested number of samples dynamically changing. */
+int AdaptiveSampling::align_dynamic_samples(int offset, int samples) const
+{
+ /* Round so that we end up on multiples of adaptive_samples. */
+ samples += offset;
+
+ if (samples > adaptive_step) {
+ /* Make multiple of adaptive_step. */
+ while (samples % adaptive_step != 0) {
+ samples--;
+ }
+ }
+
+ samples -= offset;
+
+ return max(samples, 1);
+}
+
+bool AdaptiveSampling::need_filter(int sample) const
+{
+ if (sample > min_samples) {
+ return (sample & (adaptive_step - 1)) == (adaptive_step - 1);
+ }
+ else {
+ return false;
+ }
+}
+
CCL_NAMESPACE_END
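
Editor's note on the AdaptiveSampling helpers added above: need_filter() masks the sample index with (adaptive_step - 1), which only behaves as intended when adaptive_step is a power of two. A standalone sketch of how the alignment then plays out (hypothetical values; the step is assumed to be 4 here, and align_static_samples() is reimplemented inline so the example compiles on its own):

    #include <cassert>

    /* Editor's sketch, not part of the patch: mirrors the rounding logic of
     * align_static_samples() for a step that is a small power of two. */
    static int align_static(int samples, int step)
    {
      if (samples > step) {
        while (samples % step != 0)
          samples--; /* round down to a multiple of the step */
      }
      else if (samples < step) {
        while (step % samples != 0)
          samples--; /* round down to a divisor of the step */
      }
      return samples < 1 ? 1 : samples;
    }

    int main()
    {
      const int step = 4;
      assert(align_static(30, step) == 28);
      assert(align_static(3, step) == 2);
      /* With min_samples == 0, need_filter() fires on samples 3, 7, 11, ...
       * because (sample & (step - 1)) == (step - 1) for those indices. */
      assert((7 & (step - 1)) == step - 1);
      assert((8 & (step - 1)) != step - 1);
      return 0;
    }

This is why the CUDA, OptiX, and split-kernel paths all call align_static_samples() or align_dynamic_samples() before choosing how many samples to render per launch: it keeps the filter kernels landing on the sample indices that need_filter() expects.
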
diff --git a/intern/cycles/device/device_task.h b/intern/cycles/device/device_task.h
index 972f6131092..8c4e682adb1 100644
--- a/intern/cycles/device/device_task.h
+++ b/intern/cycles/device/device_task.h
@@ -62,6 +62,19 @@ class DenoiseParams {
}
};
+class AdaptiveSampling {
+ public:
+ AdaptiveSampling();
+
+ int align_static_samples(int samples) const;
+ int align_dynamic_samples(int offset, int samples) const;
+ bool need_filter(int sample) const;
+
+ bool use;
+ int adaptive_step;
+ int min_samples;
+};
+
class DeviceTask : public Task {
public:
typedef enum { RENDER, FILM_CONVERT, SHADER, DENOISE_BUFFER } Type;
@@ -115,6 +128,7 @@ class DeviceTask : public Task {
bool need_finish_queue;
bool integrator_branched;
+ AdaptiveSampling adaptive_sampling;
protected:
double last_update_time;
diff --git a/intern/cycles/device/opencl/device_opencl.h b/intern/cycles/device/opencl/device_opencl.h
index 61b1e3e3b6b..b761726b1ad 100644
--- a/intern/cycles/device/opencl/device_opencl.h
+++ b/intern/cycles/device/opencl/device_opencl.h
@@ -445,6 +445,7 @@ class OpenCLDevice : public Device {
device_ptr rgba_byte,
device_ptr rgba_half);
void shader(DeviceTask &task);
+ void update_adaptive(DeviceTask &task, RenderTile &tile, int sample);
void denoise(RenderTile &tile, DenoisingTask &denoising);
diff --git a/intern/cycles/device/opencl/device_opencl_impl.cpp b/intern/cycles/device/opencl/device_opencl_impl.cpp
index 68cdfd5238c..3dbe54b38aa 100644
--- a/intern/cycles/device/opencl/device_opencl_impl.cpp
+++ b/intern/cycles/device/opencl/device_opencl_impl.cpp
@@ -56,7 +56,11 @@ static const string SPLIT_BUNDLE_KERNELS =
"enqueue_inactive "
"next_iteration_setup "
"indirect_subsurface "
- "buffer_update";
+ "buffer_update "
+ "adaptive_stopping "
+ "adaptive_filter_x "
+ "adaptive_filter_y "
+ "adaptive_adjust_samples";
const string OpenCLDevice::get_opencl_program_name(const string &kernel_name)
{
@@ -283,6 +287,10 @@ void OpenCLDevice::OpenCLSplitPrograms::load_kernels(
ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(next_iteration_setup);
ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(indirect_subsurface);
ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(buffer_update);
+ ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(adaptive_stopping);
+ ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(adaptive_filter_x);
+ ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(adaptive_filter_y);
+ ADD_SPLIT_KERNEL_BUNDLE_PROGRAM(adaptive_adjust_samples);
programs.push_back(&program_split);
# undef ADD_SPLIT_KERNEL_PROGRAM