author     Stefan Werner <stefan.werner@tangent-animation.com>  2020-03-05 14:05:42 +0300
committer  Stefan Werner <stefan.werner@tangent-animation.com>  2020-03-05 14:21:38 +0300
commit     51e898324de30c0985a80e5bc067358b5ccedbfc (patch)
tree       5efddead1b7ca5655f1d6d2422b59e7da51fe271 /intern/cycles/kernel/kernels/cuda
parent     4ccbbd308060f0330472828b317c59e054c9ee7b (diff)
Adaptive Sampling for Cycles.
This feature takes some inspiration from the papers
"RenderMan: An Advanced Path Tracing Architecture for Movie Rendering" and
"A Hierarchical Automatic Stopping Condition for Monte Carlo Global Illumination".
The basic principle is as follows:
While samples are being added to a pixel, the adaptive sampler writes half
of the samples to a separate buffer. This gives it two separate estimates
of the same pixel, and comparing their difference yields an estimate of the
remaining error. Once that error estimate drops below a given threshold, the
pixel is considered done.
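As an illustrative aside (a minimal sketch under assumed names, not the code in
this patch, and Cycles' actual error metric may differ): with I the mean of all
samples in the combined buffer and A the mean of the half written to the
separate buffer, the second estimate and the error can be recovered like so.

    #include <cmath>

    /* Sketch: per-pixel convergence test from two half estimates.
     * Since I = (A + B) / 2, the other half estimate B falls out of
     * the combined and half buffers directly. */
    static bool pixel_converged(float I, float A, float threshold)
    {
      float B = 2.0f * I - A;
      /* Normalize by brightness so one threshold behaves consistently
       * across dark and bright pixels. */
      float error = std::fabs(A - B) / std::fmax(I, 1e-4f);
      return error < threshold;
    }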
When a pixel has not converged yet and needs more samples than the minimum,
its immediate neighbors are also set to take more samples. This helps to detect
sharp features such as caustics more reliably. A 3x3 box filter, run
periodically over the tile buffer, is used for that purpose.
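The separable form of that filter (two 1D passes, one over rows and one over
columns, as in the kernel_cuda_adaptive_filter_x/_y kernels in the diff below)
can be sketched as follows. This is illustrative host-side code, not the actual
kernels, and the 'done' mask is a stand-in for the real per-pixel state:

    #include <vector>

    /* Sketch: a pixel stays 'done' only if its whole 3x3 neighborhood
     * is done; eroding with a 3x1 element and then a 1x3 element is
     * equivalent to eroding with the full 3x3 box. */
    static void spread_unconverged(std::vector<char> &done, int w, int h)
    {
      std::vector<char> tmp(done);
      /* Horizontal pass: keep the flag only if both row neighbors agree. */
      for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++) {
          char l = (x > 0) ? tmp[y * w + x - 1] : 1;
          char r = (x + 1 < w) ? tmp[y * w + x + 1] : 1;
          done[y * w + x] = tmp[y * w + x] & l & r;
        }
      tmp = done;
      /* Vertical pass: same along columns; together they cover 3x3. */
      for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++) {
          char u = (y > 0) ? tmp[(y - 1) * w + x] : 1;
          char d = (y + 1 < h) ? tmp[(y + 1) * w + x] : 1;
          done[y * w + x] = tmp[y * w + x] & u & d;
        }
    }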
After a tile has finished rendering, the values of all passes are scaled as if
they were rendered with the full number of samples. This way, any code operating
on these buffers, for example the denoiser, does not need to be changed for
per-pixel sample counts.
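Numerically that scaling is one multiply per pass value: a pixel that stopped
early after n samples, in a render of N samples total, gets its passes
multiplied by N / n; for instance, a pixel finished at 64 samples in a
256-sample render is scaled by 256 / 64 = 4. A minimal sketch of that step
(illustrative names, not the patch code):

    /* Sketch: make an early-stopped pixel's accumulated pass values
     * comparable with pixels that received all N samples. */
    static void scale_passes(float *passes, int pass_count, float n, float N)
    {
      float multiplier = N / n; /* e.g. 256 / 64 = 4 */
      for (int i = 0; i < pass_count; i++)
        passes[i] *= multiplier;
    }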
Reviewed By: brecht, #cycles
Differential Revision: https://developer.blender.org/D4686
Diffstat (limited to 'intern/cycles/kernel/kernels/cuda')
-rw-r--r--  intern/cycles/kernel/kernels/cuda/kernel.cu        70
-rw-r--r--  intern/cycles/kernel/kernels/cuda/kernel_split.cu   8
2 files changed, 78 insertions, 0 deletions
diff --git a/intern/cycles/kernel/kernels/cuda/kernel.cu b/intern/cycles/kernel/kernels/cuda/kernel.cu
index af311027f78..c4c810c6a82 100644
--- a/intern/cycles/kernel/kernels/cuda/kernel.cu
+++ b/intern/cycles/kernel/kernels/cuda/kernel.cu
@@ -33,6 +33,7 @@
 #include "kernel/kernel_path_branched.h"
 #include "kernel/kernel_bake.h"
 #include "kernel/kernel_work_stealing.h"
+#include "kernel/kernel_adaptive_sampling.h"
 
 /* kernels */
 extern "C" __global__ void
@@ -83,6 +84,75 @@ kernel_cuda_branched_path_trace(WorkTile *tile, uint total_work_size)
 
 extern "C" __global__ void
 CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
+kernel_cuda_adaptive_stopping(WorkTile *tile, int sample, uint total_work_size)
+{
+  int work_index = ccl_global_id(0);
+  bool thread_is_active = work_index < total_work_size;
+  KernelGlobals kg;
+  if(thread_is_active && kernel_data.film.pass_adaptive_aux_buffer) {
+    uint x = tile->x + work_index % tile->w;
+    uint y = tile->y + work_index / tile->w;
+    int index = tile->offset + x + y * tile->stride;
+    ccl_global float *buffer = tile->buffer + index * kernel_data.film.pass_stride;
+    kernel_do_adaptive_stopping(&kg, buffer, sample);
+  }
+}
+
+extern "C" __global__ void
+CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
+kernel_cuda_adaptive_filter_x(WorkTile *tile, int sample, uint)
+{
+  KernelGlobals kg;
+  if(kernel_data.film.pass_adaptive_aux_buffer && sample > kernel_data.integrator.adaptive_min_samples) {
+    if(ccl_global_id(0) < tile->h) {
+      int y = tile->y + ccl_global_id(0);
+      kernel_do_adaptive_filter_x(&kg, y, tile);
+    }
+  }
+}
+
+extern "C" __global__ void
+CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
+kernel_cuda_adaptive_filter_y(WorkTile *tile, int sample, uint)
+{
+  KernelGlobals kg;
+  if(kernel_data.film.pass_adaptive_aux_buffer && sample > kernel_data.integrator.adaptive_min_samples) {
+    if(ccl_global_id(0) < tile->w) {
+      int x = tile->x + ccl_global_id(0);
+      kernel_do_adaptive_filter_y(&kg, x, tile);
+    }
+  }
+}
+
+extern "C" __global__ void
+CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
+kernel_cuda_adaptive_scale_samples(WorkTile *tile, int start_sample, int sample, uint total_work_size)
+{
+  if(kernel_data.film.pass_adaptive_aux_buffer) {
+    int work_index = ccl_global_id(0);
+    bool thread_is_active = work_index < total_work_size;
+    KernelGlobals kg;
+    if(thread_is_active) {
+      uint x = tile->x + work_index % tile->w;
+      uint y = tile->y + work_index / tile->w;
+      int index = tile->offset + x + y * tile->stride;
+      ccl_global float *buffer = tile->buffer + index * kernel_data.film.pass_stride;
+      if(buffer[kernel_data.film.pass_sample_count] < 0.0f) {
+        buffer[kernel_data.film.pass_sample_count] = -buffer[kernel_data.film.pass_sample_count];
+        float sample_multiplier = sample / max((float)start_sample + 1.0f, buffer[kernel_data.film.pass_sample_count]);
+        if(sample_multiplier != 1.0f) {
+          kernel_adaptive_post_adjust(&kg, buffer, sample_multiplier);
+        }
+      }
+      else {
+        kernel_adaptive_post_adjust(&kg, buffer, sample / (sample - 1.0f));
+      }
+    }
+  }
+}
+
+extern "C" __global__ void
+CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
 kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
 {
   int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
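(A note on kernel_cuda_adaptive_scale_samples above, with illustrative numbers:
the sign of the value in pass_sample_count doubles as the converged flag, so a
negative entry marks a pixel that stopped early. Negating it restores the true
count n, and the passes are scaled by sample / max(start_sample + 1, n), e.g.
256 / 64 = 4 for a pixel that stopped at 64 samples in a 256-sample tile.
Pixels still marked active take the sample / (sample - 1.0f) branch instead.)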
diff --git a/intern/cycles/kernel/kernels/cuda/kernel_split.cu b/intern/cycles/kernel/kernels/cuda/kernel_split.cu
index 43b3d0aa0e6..95ad7599cf1 100644
--- a/intern/cycles/kernel/kernels/cuda/kernel_split.cu
+++ b/intern/cycles/kernel/kernels/cuda/kernel_split.cu
@@ -43,6 +43,10 @@
 #include "kernel/split/kernel_next_iteration_setup.h"
 #include "kernel/split/kernel_indirect_subsurface.h"
 #include "kernel/split/kernel_buffer_update.h"
+#include "kernel/split/kernel_adaptive_stopping.h"
+#include "kernel/split/kernel_adaptive_filter_x.h"
+#include "kernel/split/kernel_adaptive_filter_y.h"
+#include "kernel/split/kernel_adaptive_adjust_samples.h"
 
 #include "kernel/kernel_film.h"
 
@@ -121,6 +125,10 @@ DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(enqueue_inactive, uint)
 DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(next_iteration_setup, uint)
 DEFINE_SPLIT_KERNEL_FUNCTION(indirect_subsurface)
 DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(buffer_update, uint)
+DEFINE_SPLIT_KERNEL_FUNCTION(adaptive_stopping)
+DEFINE_SPLIT_KERNEL_FUNCTION(adaptive_filter_x)
+DEFINE_SPLIT_KERNEL_FUNCTION(adaptive_filter_y)
+DEFINE_SPLIT_KERNEL_FUNCTION(adaptive_adjust_samples)
 
 extern "C" __global__ void
 CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)