git.blender.org/blender.git
author     Stefan Werner <stefan.werner@tangent-animation.com>  2020-03-05 14:05:42 +0300
committer  Stefan Werner <stefan.werner@tangent-animation.com>  2020-03-05 14:21:38 +0300
commit     51e898324de30c0985a80e5bc067358b5ccedbfc (patch)
tree       5efddead1b7ca5655f1d6d2422b59e7da51fe271 /intern/cycles/device/cuda/device_cuda_impl.cpp
parent     4ccbbd308060f0330472828b317c59e054c9ee7b (diff)
Adaptive Sampling for Cycles.
This feature takes some inspiration from
"RenderMan: An Advanced Path Tracing Architecture for Movie Rendering" and
"A Hierarchical Automatic Stopping Condition for Monte Carlo Global Illumination".

The basic principle is as follows: While samples are being added to a pixel, the adaptive sampler writes half of the samples to a separate buffer. This gives it two separate estimates of the same pixel, and by comparing their difference it estimates convergence. Once convergence drops below a given threshold, the pixel is considered done.

When a pixel has not converged yet and needs more samples than the minimum, its immediate neighbors are also set to take more samples. This is done in order to more reliably detect sharp features such as caustics. A 3x3 box filter that is run periodically over the tile buffer is used for that purpose.

After a tile has finished rendering, the values of all passes are scaled as if they were rendered with the full number of samples. This way, any code operating on these buffers, for example the denoiser, does not need to be changed for per-pixel sample counts.

Reviewed By: brecht, #cycles

Differential Revision: https://developer.blender.org/D4686
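Note: the per-pixel convergence test itself lives in the Cycles kernels (for example the kernel_cuda_adaptive_stopping entry point loaded by the host code below) and is not part of the file shown in this diff. As a rough illustration of the half-buffer comparison described above, here is a minimal standalone C++ sketch. The function name, the normalization and the fixed threshold are illustrative assumptions, not the formula used by the commit; the real kernels also scale the tolerance with the sample count.

/* Minimal sketch of the half-buffer convergence idea (assumptions noted above).
 * "full" is the value accumulated from all samples of a pixel, "half" the value
 * accumulated from the half of the samples written to the auxiliary buffer.
 * The two halves form independent estimates of the same pixel. */
#include <cmath>
#include <cstdio>

static bool pixel_has_converged(float full, float half, float threshold)
{
  /* The other half of the samples is the difference of the two buffers. */
  const float other_half = full - half;
  /* Disagreement between the two independent estimates, normalized so that
   * bright and dark pixels are judged on a comparable scale. */
  const float error = std::fabs(half - other_half) /
                      std::fmax(std::sqrt(std::fabs(full)), 1e-4f);
  return error < threshold;
}

int main()
{
  /* Example: the full buffer holds 32.1, the half buffer 16.2. */
  std::printf("converged: %s\n",
              pixel_has_converged(32.1f, 16.2f, 0.05f) ? "yes" : "no");
  return 0;
}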
Diffstat (limited to 'intern/cycles/device/cuda/device_cuda_impl.cpp')
-rw-r--r--  intern/cycles/device/cuda/device_cuda_impl.cpp  126
1 file changed, 126 insertions(+), 0 deletions(-)
diff --git a/intern/cycles/device/cuda/device_cuda_impl.cpp b/intern/cycles/device/cuda/device_cuda_impl.cpp
index 4a7c45d8b93..11dd9b69f10 100644
--- a/intern/cycles/device/cuda/device_cuda_impl.cpp
+++ b/intern/cycles/device/cuda/device_cuda_impl.cpp
@@ -208,6 +208,8 @@ CUDADevice::CUDADevice(DeviceInfo &info, Stats &stats, Profiler &profiler, bool
map_host_used = 0;
can_map_host = 0;
+ functions.loaded = false;
+
/* Initialize CUDA. */
if (cuda_error(cuInit(0)))
return;
@@ -531,9 +533,42 @@ bool CUDADevice::load_kernels(const DeviceRequestedFeatures &requested_features)
reserve_local_memory(requested_features);
}
+ load_functions();
+
return (result == CUDA_SUCCESS);
}
+void CUDADevice::load_functions()
+{
+ /* TODO: load all functions here. */
+ if (functions.loaded) {
+ return;
+ }
+ functions.loaded = true;
+
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_stopping, cuModule, "kernel_cuda_adaptive_stopping"));
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_filter_x, cuModule, "kernel_cuda_adaptive_filter_x"));
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_filter_y, cuModule, "kernel_cuda_adaptive_filter_y"));
+ cuda_assert(cuModuleGetFunction(
+ &functions.adaptive_scale_samples, cuModule, "kernel_cuda_adaptive_scale_samples"));
+
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_stopping, CU_FUNC_CACHE_PREFER_L1));
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_filter_x, CU_FUNC_CACHE_PREFER_L1));
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_filter_y, CU_FUNC_CACHE_PREFER_L1));
+ cuda_assert(cuFuncSetCacheConfig(functions.adaptive_scale_samples, CU_FUNC_CACHE_PREFER_L1));
+
+ int unused_min_blocks;
+ cuda_assert(cuOccupancyMaxPotentialBlockSize(&unused_min_blocks,
+ &functions.adaptive_num_threads_per_block,
+ functions.adaptive_scale_samples,
+ NULL,
+ 0,
+ 0));
+}
+
void CUDADevice::reserve_local_memory(const DeviceRequestedFeatures &requested_features)
{
if (use_split_kernel()) {
@@ -1666,6 +1701,80 @@ void CUDADevice::denoise(RenderTile &rtile, DenoisingTask &denoising)
denoising.run_denoising(&rtile);
}
+void CUDADevice::adaptive_sampling_filter(uint filter_sample,
+ WorkTile *wtile,
+ CUdeviceptr d_wtile,
+ CUstream stream)
+{
+ const int num_threads_per_block = functions.adaptive_num_threads_per_block;
+
+  /* These are a series of tiny kernels because there is no grid synchronisation
+   * from within a kernel, so multiple kernel launches it is. */
+ uint total_work_size = wtile->h * wtile->w;
+ void *args2[] = {&d_wtile, &filter_sample, &total_work_size};
+ uint num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_stopping,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args2,
+ 0));
+ total_work_size = wtile->h;
+ num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_filter_x,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args2,
+ 0));
+ total_work_size = wtile->w;
+ num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_filter_y,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args2,
+ 0));
+}
+
+void CUDADevice::adaptive_sampling_post(RenderTile &rtile,
+ WorkTile *wtile,
+ CUdeviceptr d_wtile,
+ CUstream stream)
+{
+ const int num_threads_per_block = functions.adaptive_num_threads_per_block;
+ uint total_work_size = wtile->h * wtile->w;
+
+ void *args[] = {&d_wtile, &rtile.start_sample, &rtile.sample, &total_work_size};
+ uint num_blocks = divide_up(total_work_size, num_threads_per_block);
+ cuda_assert(cuLaunchKernel(functions.adaptive_scale_samples,
+ num_blocks,
+ 1,
+ 1,
+ num_threads_per_block,
+ 1,
+ 1,
+ 0,
+ stream,
+ args,
+ 0));
+}
+
void CUDADevice::path_trace(DeviceTask &task,
RenderTile &rtile,
device_vector<WorkTile> &work_tiles)
@@ -1715,6 +1824,9 @@ void CUDADevice::path_trace(DeviceTask &task,
}
uint step_samples = divide_up(min_blocks * num_threads_per_block, wtile->w * wtile->h);
+ if (task.adaptive_sampling.use) {
+ step_samples = task.adaptive_sampling.align_static_samples(step_samples);
+ }
/* Render all samples. */
int start_sample = rtile.start_sample;
@@ -1736,6 +1848,12 @@ void CUDADevice::path_trace(DeviceTask &task,
cuda_assert(
cuLaunchKernel(cuPathTrace, num_blocks, 1, 1, num_threads_per_block, 1, 1, 0, 0, args, 0));
+ /* Run the adaptive sampling kernels at selected samples aligned to step samples. */
+ uint filter_sample = sample + wtile->num_samples - 1;
+ if (task.adaptive_sampling.use && task.adaptive_sampling.need_filter(filter_sample)) {
+ adaptive_sampling_filter(filter_sample, wtile, d_work_tiles);
+ }
+
cuda_assert(cuCtxSynchronize());
/* Update progress. */
@@ -1747,6 +1865,14 @@ void CUDADevice::path_trace(DeviceTask &task,
break;
}
}
+
+ /* Finalize adaptive sampling. */
+ if (task.adaptive_sampling.use) {
+ CUdeviceptr d_work_tiles = (CUdeviceptr)work_tiles.device_pointer;
+ adaptive_sampling_post(rtile, wtile, d_work_tiles);
+ cuda_assert(cuCtxSynchronize());
+ task.update_progress(&rtile, rtile.w * rtile.h * wtile->num_samples);
+ }
}
void CUDADevice::film_convert(DeviceTask &task,