git.blender.org/blender.git
Diffstat (limited to 'intern/cycles/integrator')
-rw-r--r--  intern/cycles/integrator/CMakeLists.txt          |   6
-rw-r--r--  intern/cycles/integrator/guiding.h               |  32
-rw-r--r--  intern/cycles/integrator/path_trace.cpp          | 140
-rw-r--r--  intern/cycles/integrator/path_trace.h            |  33
-rw-r--r--  intern/cycles/integrator/path_trace_work.cpp     |   4
-rw-r--r--  intern/cycles/integrator/path_trace_work.h       |   7
-rw-r--r--  intern/cycles/integrator/path_trace_work_cpu.cpp | 110
-rw-r--r--  intern/cycles/integrator/path_trace_work_cpu.h   |  17
-rw-r--r--  intern/cycles/integrator/path_trace_work_gpu.cpp |  31
-rw-r--r--  intern/cycles/integrator/render_scheduler.cpp    |  15
-rw-r--r--  intern/cycles/integrator/render_scheduler.h      |   6
-rw-r--r--  intern/cycles/integrator/work_balancer.cpp       |   3
12 files changed, 390 insertions(+), 14 deletions(-)
diff --git a/intern/cycles/integrator/CMakeLists.txt b/intern/cycles/integrator/CMakeLists.txt
index 9722003083e..ef2a07854ec 100644
--- a/intern/cycles/integrator/CMakeLists.txt
+++ b/intern/cycles/integrator/CMakeLists.txt
@@ -65,6 +65,12 @@ if(WITH_OPENIMAGEDENOISE)
)
endif()
+if(WITH_CYCLES_PATH_GUIDING)
+ list(APPEND LIB
+ ${OPENPGL_LIBRARIES}
+ )
+endif()
+
include_directories(${INC})
include_directories(SYSTEM ${INC_SYS})
diff --git a/intern/cycles/integrator/guiding.h b/intern/cycles/integrator/guiding.h
new file mode 100644
index 00000000000..b7d7e2fe51e
--- /dev/null
+++ b/intern/cycles/integrator/guiding.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright 2011-2022 Blender Foundation */
+
+#pragma once
+
+#include "kernel/types.h"
+
+CCL_NAMESPACE_BEGIN
+
+struct GuidingParams {
+ /* The subset of path guiding parameters that can trigger a creation/rebuild
+ * of the guiding field. */
+ bool use = false;
+ bool use_surface_guiding = false;
+ bool use_volume_guiding = false;
+
+ GuidingDistributionType type = GUIDING_TYPE_PARALLAX_AWARE_VMM;
+ int training_samples = 128;
+ bool deterministic = false;
+
+ GuidingParams() = default;
+
+ bool modified(const GuidingParams &other) const
+ {
+ return !((use == other.use) && (use_surface_guiding == other.use_surface_guiding) &&
+ (use_volume_guiding == other.use_volume_guiding) && (type == other.type) &&
+ (training_samples == other.training_samples) &&
+ (deterministic == other.deterministic));
+ }
+};
+
+CCL_NAMESPACE_END
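
A minimal caller-side sketch (not part of the patch; the helper name is illustrative) of how modified() is meant to gate rebuilds of the guiding field:

/* Hedged sketch: only reconfigure when a rebuild-relevant parameter changed,
 * mirroring the structure of PathTrace::set_guiding_params() further below. */
static void apply_guiding_params(GuidingParams &current, const GuidingParams &requested)
{
  if (!current.modified(requested)) {
    return; /* Nothing changed that would require recreating the field. */
  }
  current = requested;
  /* ... recreate the guiding field and clear collected samples here ... */
}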
diff --git a/intern/cycles/integrator/path_trace.cpp b/intern/cycles/integrator/path_trace.cpp
index 3ec7b601d9f..8e8fbd86be0 100644
--- a/intern/cycles/integrator/path_trace.cpp
+++ b/intern/cycles/integrator/path_trace.cpp
@@ -43,8 +43,11 @@ PathTrace::PathTrace(Device *device,
/* Create path tracing work in advance, so that it can be reused by incremental sampling as much
* as possible. */
device_->foreach_device([&](Device *path_trace_device) {
- path_trace_works_.emplace_back(PathTraceWork::create(
- path_trace_device, film, device_scene, &render_cancel_.is_requested));
+ unique_ptr<PathTraceWork> work = PathTraceWork::create(
+ path_trace_device, film, device_scene, &render_cancel_.is_requested);
+ if (work) {
+ path_trace_works_.emplace_back(std::move(work));
+ }
});
work_balance_infos_.resize(path_trace_works_.size());
@@ -185,11 +188,25 @@ void PathTrace::render_pipeline(RenderWork render_work)
rebalance(render_work);
+ /* Prepare all per-thread guiding structures before we start with the next rendering
+ * iteration/progression. */
+ const bool use_guiding = device_scene_->data.integrator.use_guiding;
+ if (use_guiding) {
+ guiding_prepare_structures();
+ }
+
path_trace(render_work);
if (render_cancel_.is_requested) {
return;
}
+ /* Update the guiding field using the training data/samples collected during the rendering
+ * iteration/progression. */
+ const bool train_guiding = device_scene_->data.integrator.train_guiding;
+ if (use_guiding && train_guiding) {
+ guiding_update_structures();
+ }
+
adaptive_sample(render_work);
if (render_cancel_.is_requested) {
return;
@@ -1241,4 +1258,123 @@ string PathTrace::full_report() const
return result;
}
+void PathTrace::set_guiding_params(const GuidingParams &guiding_params, const bool reset)
+{
+#ifdef WITH_PATH_GUIDING
+ if (guiding_params_.modified(guiding_params)) {
+ guiding_params_ = guiding_params;
+
+ if (guiding_params_.use) {
+ PGLFieldArguments field_args;
+ switch (guiding_params_.type) {
+ default:
+ /* Parallax-aware von Mises-Fisher mixture models. */
+ case GUIDING_TYPE_PARALLAX_AWARE_VMM: {
+ pglFieldArgumentsSetDefaults(
+ field_args,
+ PGL_SPATIAL_STRUCTURE_TYPE::PGL_SPATIAL_STRUCTURE_KDTREE,
+ PGL_DIRECTIONAL_DISTRIBUTION_TYPE::PGL_DIRECTIONAL_DISTRIBUTION_PARALLAX_AWARE_VMM);
+ break;
+ }
+ /* Directional quad-trees. */
+ case GUIDING_TYPE_DIRECTIONAL_QUAD_TREE: {
+ pglFieldArgumentsSetDefaults(
+ field_args,
+ PGL_SPATIAL_STRUCTURE_TYPE::PGL_SPATIAL_STRUCTURE_KDTREE,
+ PGL_DIRECTIONAL_DISTRIBUTION_TYPE::PGL_DIRECTIONAL_DISTRIBUTION_QUADTREE);
+ break;
+ }
+ /* von Mises-Fisher mixture models. */
+ case GUIDING_TYPE_VMM: {
+ pglFieldArgumentsSetDefaults(
+ field_args,
+ PGL_SPATIAL_STRUCTURE_TYPE::PGL_SPATIAL_STRUCTURE_KDTREE,
+ PGL_DIRECTIONAL_DISTRIBUTION_TYPE::PGL_DIRECTIONAL_DISTRIBUTION_VMM);
+ break;
+ }
+ }
+# if OPENPGL_VERSION_MINOR >= 4
+ field_args.deterministic = guiding_params.deterministic;
+# endif
+ reinterpret_cast<PGLKDTreeArguments *>(field_args.spatialSturctureArguments)->maxDepth = 16;
+ openpgl::cpp::Device *guiding_device = static_cast<openpgl::cpp::Device *>(
+ device_->get_guiding_device());
+ if (guiding_device) {
+ guiding_sample_data_storage_ = make_unique<openpgl::cpp::SampleStorage>();
+ guiding_field_ = make_unique<openpgl::cpp::Field>(guiding_device, field_args);
+ }
+ else {
+ guiding_sample_data_storage_ = nullptr;
+ guiding_field_ = nullptr;
+ }
+ }
+ else {
+ guiding_sample_data_storage_ = nullptr;
+ guiding_field_ = nullptr;
+ }
+ }
+ else if (reset) {
+ if (guiding_field_) {
+ guiding_field_->Reset();
+ }
+ }
+#else
+ (void)guiding_params;
+ (void)reset;
+#endif
+}
+
+void PathTrace::guiding_prepare_structures()
+{
+#ifdef WITH_PATH_GUIDING
+ const bool train = (guiding_params_.training_samples == 0) ||
+ (guiding_field_->GetIteration() < guiding_params_.training_samples);
+
+ for (auto &&path_trace_work : path_trace_works_) {
+ path_trace_work->guiding_init_kernel_globals(
+ guiding_field_.get(), guiding_sample_data_storage_.get(), train);
+ }
+
+ if (train) {
+ /* For training the guiding distribution we need to force the number of samples
+ * per update to be limited, for reproducible results and reasonable training size.
+ *
+ * Idea: we could stochastically discard samples with a probability of 1/num_samples_per_update,
+ * and then update only after num_samples_per_update iterations have been rendered. */
+ render_scheduler_.set_limit_samples_per_update(4);
+ }
+ else {
+ render_scheduler_.set_limit_samples_per_update(0);
+ }
+#endif
+}
+
+void PathTrace::guiding_update_structures()
+{
+#ifdef WITH_PATH_GUIDING
+ VLOG_WORK << "Update path guiding structures";
+
+ VLOG_DEBUG << "Number of surface samples: " << guiding_sample_data_storage_->GetSizeSurface();
+ VLOG_DEBUG << "Number of volume samples: " << guiding_sample_data_storage_->GetSizeVolume();
+
+ const size_t num_valid_samples = guiding_sample_data_storage_->GetSizeSurface() +
+ guiding_sample_data_storage_->GetSizeVolume();
+
+ /* Wait until at least 1024 samples have been collected. */
+ if (num_valid_samples >= 1024) {
+# if OPENPGL_VERSION_MINOR < 4
+ const size_t num_samples = 1;
+ guiding_field_->Update(*guiding_sample_data_storage_, num_samples);
+# else
+ guiding_field_->Update(*guiding_sample_data_storage_);
+# endif
+ guiding_update_count++;
+
+ VLOG_DEBUG << "Path guiding field valid: " << guiding_field_->Validate();
+
+ guiding_sample_data_storage_->Clear();
+ }
+#endif
+}
+
CCL_NAMESPACE_END
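
For orientation, a hedged sketch of how a caller would hand these settings to PathTrace (the function name and parameter values are illustrative; the real call site in the session/scene update code is outside this diff):

/* Illustrative only: enable guiding before rendering starts. */
static void enable_guiding(PathTrace &path_trace, const bool scene_was_modified)
{
  GuidingParams guiding_params;
  guiding_params.use = true;
  guiding_params.use_surface_guiding = true;
  guiding_params.use_volume_guiding = true;
  guiding_params.type = GUIDING_TYPE_PARALLAX_AWARE_VMM;
  guiding_params.training_samples = 128;

  /* 'reset' clears an already built field even when no parameter changed,
   * e.g. after a scene edit that invalidates the collected training data. */
  path_trace.set_guiding_params(guiding_params, scene_was_modified);
}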
diff --git a/intern/cycles/integrator/path_trace.h b/intern/cycles/integrator/path_trace.h
index 59382b51d23..d3a238696fd 100644
--- a/intern/cycles/integrator/path_trace.h
+++ b/intern/cycles/integrator/path_trace.h
@@ -4,11 +4,15 @@
#pragma once
#include "integrator/denoiser.h"
+#include "integrator/guiding.h"
#include "integrator/pass_accessor.h"
#include "integrator/path_trace_work.h"
#include "integrator/work_balancer.h"
+
#include "session/buffers.h"
+
#include "util/function.h"
+#include "util/guiding.h"
#include "util/thread.h"
#include "util/unique_ptr.h"
#include "util/vector.h"
@@ -89,6 +93,10 @@ class PathTrace {
* Use this to configure the adaptive sampler before rendering any samples. */
void set_adaptive_sampling(const AdaptiveSampling &adaptive_sampling);
+ /* Set the parameters for guiding.
+ * Use this to set up the guiding structures before each rendering iteration. */
+ void set_guiding_params(const GuidingParams &params, const bool reset);
+
/* Sets output driver for render buffer output. */
void set_output_driver(unique_ptr<OutputDriver> driver);
@@ -205,6 +213,15 @@ class PathTrace {
void write_tile_buffer(const RenderWork &render_work);
void finalize_full_buffer_on_disk(const RenderWork &render_work);
+ /* Updates/initializes the guiding structures after a rendering iteration.
+ * The structures are updated using the training data/samples generated during the previous
+ * rendering iteration. */
+ void guiding_update_structures();
+
+ /* Prepares the per-thread guiding structures of the kernel globals (e.g., the
+ * PathSegmentStorage, and pointers to the global Field and SampleStorage). */
+ void guiding_prepare_structures();
+
/* Get number of samples in the current state of the render buffers. */
int get_num_samples_in_buffer();
@@ -265,6 +282,22 @@ class PathTrace {
/* Denoiser device descriptor which holds the denoised big tile for multi-device workloads. */
unique_ptr<PathTraceWork> big_tile_denoise_work_;
+#ifdef WITH_PATH_GUIDING
+ /* Guiding-related attributes. */
+ GuidingParams guiding_params_;
+
+ /* The guiding field which holds the representation of the incident radiance field for the
+ * complete scene. */
+ unique_ptr<openpgl::cpp::Field> guiding_field_;
+
+ /* The storage container which holds the training data/samples generated during the last
+ * rendering iteration. */
+ unique_ptr<openpgl::cpp::SampleStorage> guiding_sample_data_storage_;
+
+ /* The number of training iterations already performed for the guiding field. */
+ int guiding_update_count = 0;
+#endif
+
/* State which is common for all the steps of the render work.
* Is brought up to date in the `render()` call and is accessed from all the steps involved into
* rendering the work. */
diff --git a/intern/cycles/integrator/path_trace_work.cpp b/intern/cycles/integrator/path_trace_work.cpp
index bb5c6e1a61a..a5f98b5475a 100644
--- a/intern/cycles/integrator/path_trace_work.cpp
+++ b/intern/cycles/integrator/path_trace_work.cpp
@@ -23,6 +23,10 @@ unique_ptr<PathTraceWork> PathTraceWork::create(Device *device,
if (device->info.type == DEVICE_CPU) {
return make_unique<PathTraceWorkCPU>(device, film, device_scene, cancel_requested_flag);
}
+ if (device->info.type == DEVICE_DUMMY) {
+ /* Dummy devices can't perform any work. */
+ return nullptr;
+ }
return make_unique<PathTraceWorkGPU>(device, film, device_scene, cancel_requested_flag);
}
diff --git a/intern/cycles/integrator/path_trace_work.h b/intern/cycles/integrator/path_trace_work.h
index 737d6babc08..e31a6ef8819 100644
--- a/intern/cycles/integrator/path_trace_work.h
+++ b/intern/cycles/integrator/path_trace_work.h
@@ -140,6 +140,13 @@ class PathTraceWork {
return device_;
}
+#ifdef WITH_PATH_GUIDING
+ /* Initializes the per-thread guiding kernel data. */
+ virtual void guiding_init_kernel_globals(void *, void *, const bool)
+ {
+ }
+#endif
+
protected:
PathTraceWork(Device *device,
Film *film,
diff --git a/intern/cycles/integrator/path_trace_work_cpu.cpp b/intern/cycles/integrator/path_trace_work_cpu.cpp
index 518ef3185f9..188ec28cf65 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_cpu.cpp
@@ -6,6 +6,7 @@
#include "device/cpu/kernel.h"
#include "device/device.h"
+#include "kernel/film/write.h"
#include "kernel/integrator/path_state.h"
#include "integrator/pass_accessor_cpu.h"
@@ -145,6 +146,13 @@ void PathTraceWorkCPU::render_samples_full_pipeline(KernelGlobalsCPU *kernel_glo
kernels_.integrator_megakernel(kernel_globals, state, render_buffer);
+#ifdef WITH_PATH_GUIDING
+ if (kernel_globals->data.integrator.train_guiding) {
+ /* Push the generated sample data to the global sample data storage. */
+ guiding_push_sample_data_to_global_storage(kernel_globals, state, render_buffer);
+ }
+#endif
+
if (shadow_catcher_state) {
kernels_.integrator_megakernel(kernel_globals, shadow_catcher_state, render_buffer);
}
@@ -276,4 +284,106 @@ void PathTraceWorkCPU::cryptomatte_postproces()
});
}
+#ifdef WITH_PATH_GUIDING
+/* NOTE: It seems that this is called before every rendering iteration/progression and not once per
+ * rendering. Maybe we can find a way to call it only once per rendering. */
+void PathTraceWorkCPU::guiding_init_kernel_globals(void *guiding_field,
+ void *sample_data_storage,
+ const bool train)
+{
+ /* Linking the global guiding structures (e.g., Field and SampleStorage) to the per-thread
+ * kernel globals. */
+ for (int thread_index = 0; thread_index < kernel_thread_globals_.size(); thread_index++) {
+ CPUKernelThreadGlobals &kg = kernel_thread_globals_[thread_index];
+ openpgl::cpp::Field *field = (openpgl::cpp::Field *)guiding_field;
+
+ /* Allocate sampling distributions. */
+ kg.opgl_guiding_field = field;
+
+# if PATH_GUIDING_LEVEL >= 4
+ if (kg.opgl_surface_sampling_distribution) {
+ delete kg.opgl_surface_sampling_distribution;
+ kg.opgl_surface_sampling_distribution = nullptr;
+ }
+ if (kg.opgl_volume_sampling_distribution) {
+ delete kg.opgl_volume_sampling_distribution;
+ kg.opgl_volume_sampling_distribution = nullptr;
+ }
+
+ if (field) {
+ kg.opgl_surface_sampling_distribution = new openpgl::cpp::SurfaceSamplingDistribution(field);
+ kg.opgl_volume_sampling_distribution = new openpgl::cpp::VolumeSamplingDistribution(field);
+ }
+# endif
+
+ /* Reserve storage for training. */
+ kg.data.integrator.train_guiding = train;
+ kg.opgl_sample_data_storage = (openpgl::cpp::SampleStorage *)sample_data_storage;
+
+ if (train) {
+ kg.opgl_path_segment_storage->Reserve(kg.data.integrator.transparent_max_bounce +
+ kg.data.integrator.max_bounce + 3);
+ kg.opgl_path_segment_storage->Clear();
+ }
+ }
+}
+
+void PathTraceWorkCPU::guiding_push_sample_data_to_global_storage(
+ KernelGlobalsCPU *kg, IntegratorStateCPU *state, ccl_global float *ccl_restrict render_buffer)
+{
+# ifdef WITH_CYCLES_DEBUG
+ if (VLOG_WORK_IS_ON) {
+ /* Check if the generated path segments contain valid values. */
+ const bool validSegments = kg->opgl_path_segment_storage->ValidateSegments();
+ if (!validSegments) {
+ VLOG_WORK << "Guiding: invalid path segments!";
+ }
+ }
+
+ /* Write debug render pass to validate it matches combined pass. */
+ pgl_vec3f pgl_final_color = kg->opgl_path_segment_storage->CalculatePixelEstimate(false);
+ const uint32_t render_pixel_index = INTEGRATOR_STATE(state, path, render_pixel_index);
+ const uint64_t render_buffer_offset = (uint64_t)render_pixel_index *
+ kernel_data.film.pass_stride;
+ ccl_global float *buffer = render_buffer + render_buffer_offset;
+ float3 final_color = make_float3(pgl_final_color.x, pgl_final_color.y, pgl_final_color.z);
+ if (kernel_data.film.pass_guiding_color != PASS_UNUSED) {
+ film_write_pass_float3(buffer + kernel_data.film.pass_guiding_color, final_color);
+ }
+# else
+ (void)state;
+ (void)render_buffer;
+# endif
+
+ /* Convert the path segment representation of the random walk into radiance samples. */
+# if PATH_GUIDING_LEVEL >= 2
+ const bool use_direct_light = kernel_data.integrator.use_guiding_direct_light;
+ const bool use_mis_weights = kernel_data.integrator.use_guiding_mis_weights;
+ kg->opgl_path_segment_storage->PrepareSamples(
+ false, nullptr, use_mis_weights, use_direct_light, false);
+# endif
+
+# ifdef WITH_CYCLES_DEBUG
+ /* Check if the training/radiance samples generated by the path segment storage are valid. */
+ if (VLOG_WORK_IS_ON) {
+ const bool validSamples = kg->opgl_path_segment_storage->ValidateSamples();
+ if (!validSamples) {
+ VLOG_WORK
+ << "Guiding: path segment storage generated/contains invalid radiance/training samples!";
+ }
+ }
+# endif
+
+# if PATH_GUIDING_LEVEL >= 3
+ /* Push radiance samples from current random walk/path to the global sample storage. */
+ size_t num_samples = 0;
+ const openpgl::cpp::SampleData *samples = kg->opgl_path_segment_storage->GetSamples(num_samples);
+ kg->opgl_sample_data_storage->AddSamples(samples, num_samples);
+# endif
+
+ /* Clear storage for the current path, to be ready for the next path. */
+ kg->opgl_path_segment_storage->Clear();
+}
+#endif
+
CCL_NAMESPACE_END
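
The per-path training flow above boils down to four OpenPGL calls; a stripped-down sketch using only the calls that appear in this patch (debug passes and validation omitted; assumes the openpgl::cpp headers are available, e.g. via Blender's util/guiding.h):

/* Hedged sketch of the per-path data flow in
 * guiding_push_sample_data_to_global_storage(). */
static void push_path_samples(openpgl::cpp::PathSegmentStorage *segments,
                              openpgl::cpp::SampleStorage *global_storage,
                              const bool use_mis_weights,
                              const bool use_direct_light)
{
  /* Convert the recorded path segments into radiance/training samples. */
  segments->PrepareSamples(false, nullptr, use_mis_weights, use_direct_light, false);

  /* Hand the samples of this path over to the global sample storage. */
  size_t num_samples = 0;
  const openpgl::cpp::SampleData *samples = segments->GetSamples(num_samples);
  global_storage->AddSamples(samples, num_samples);

  /* Make the per-thread storage ready for the next path. */
  segments->Clear();
}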
diff --git a/intern/cycles/integrator/path_trace_work_cpu.h b/intern/cycles/integrator/path_trace_work_cpu.h
index 5a0918aecec..e50ba8721d9 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.h
+++ b/intern/cycles/integrator/path_trace_work_cpu.h
@@ -16,6 +16,7 @@ CCL_NAMESPACE_BEGIN
struct KernelWorkTile;
struct KernelGlobalsCPU;
+struct IntegratorStateCPU;
class CPUKernels;
@@ -50,6 +51,22 @@ class PathTraceWorkCPU : public PathTraceWork {
virtual int adaptive_sampling_converge_filter_count_active(float threshold, bool reset) override;
virtual void cryptomatte_postproces() override;
+#ifdef WITH_PATH_GUIDING
+ /* Initializes the per-thread guiding kernel data. The function sets the pointers to the
+ * global guiding field and the sample data storage, as well as initializes the per-thread
+ * guided sampling distributions (e.g., SurfaceSamplingDistribution and
+ * VolumeSamplingDistribution). */
+ void guiding_init_kernel_globals(void *guiding_field,
+ void *sample_data_storage,
+ const bool train) override;
+
+ /* Pushes the collected training data/samples of a path to the global sample storage.
+ * This function is called at the end of a random walk/path generation. */
+ void guiding_push_sample_data_to_global_storage(KernelGlobalsCPU *kernel_globals,
+ IntegratorStateCPU *state,
+ ccl_global float *ccl_restrict render_buffer);
+#endif
+
protected:
/* Core path tracing routine. Renders given work time on the given queue. */
void render_samples_full_pipeline(KernelGlobalsCPU *kernel_globals,
diff --git a/intern/cycles/integrator/path_trace_work_gpu.cpp b/intern/cycles/integrator/path_trace_work_gpu.cpp
index ee250a6916b..547e8d50a22 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_gpu.cpp
@@ -18,13 +18,15 @@
CCL_NAMESPACE_BEGIN
-static size_t estimate_single_state_size()
+static size_t estimate_single_state_size(const uint kernel_features)
{
size_t state_size = 0;
#define KERNEL_STRUCT_BEGIN(name) for (int array_index = 0;; array_index++) {
-#define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) state_size += sizeof(type);
-#define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) state_size += sizeof(type);
+#define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
+ state_size += (kernel_features & (feature)) ? sizeof(type) : 0;
+#define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
+ state_size += (kernel_features & (feature)) ? sizeof(type) : 0;
#define KERNEL_STRUCT_END(name) \
break; \
}
@@ -76,16 +78,11 @@ PathTraceWorkGPU::PathTraceWorkGPU(Device *device,
num_queued_paths_(device, "num_queued_paths", MEM_READ_WRITE),
work_tiles_(device, "work_tiles", MEM_READ_WRITE),
display_rgba_half_(device, "display buffer half", MEM_READ_WRITE),
- max_num_paths_(queue_->num_concurrent_states(estimate_single_state_size())),
- min_num_active_main_paths_(queue_->num_concurrent_busy_states()),
+ max_num_paths_(0),
+ min_num_active_main_paths_(0),
max_active_main_path_index_(0)
{
memset(&integrator_state_gpu_, 0, sizeof(integrator_state_gpu_));
-
- /* Limit number of active paths to the half of the overall state. This is due to the logic in the
- * path compaction which relies on the fact that regeneration does not happen sooner than half of
- * the states are available again. */
- min_num_active_main_paths_ = min(min_num_active_main_paths_, max_num_paths_ / 2);
}
void PathTraceWorkGPU::alloc_integrator_soa()
@@ -103,6 +100,20 @@ void PathTraceWorkGPU::alloc_integrator_soa()
integrator_state_soa_volume_stack_size_ = max(integrator_state_soa_volume_stack_size_,
requested_volume_stack_size);
+ /* Determine the number of path states. Deferring this for as long as possible allows the
+ * back-end to make better decisions about memory availability. */
+ if (max_num_paths_ == 0) {
+ size_t single_state_size = estimate_single_state_size(kernel_features);
+
+ max_num_paths_ = queue_->num_concurrent_states(single_state_size);
+ min_num_active_main_paths_ = queue_->num_concurrent_busy_states(single_state_size);
+
+ /* Limit number of active paths to the half of the overall state. This is due to the logic in
+ * the path compaction which relies on the fact that regeneration does not happen sooner than
+ * half of the states are available again. */
+ min_num_active_main_paths_ = min(min_num_active_main_paths_, max_num_paths_ / 2);
+ }
+
/* Allocate a device only memory buffer before for each struct member, and then
* write the pointers into a struct that resides in constant memory.
*
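
Conceptually, the reworked estimate only counts state members whose kernel feature is enabled; a self-contained reduction of what the KERNEL_STRUCT_MEMBER macro expansion does (member list and feature bits are made up for illustration):

/* Hedged sketch: per-state size as the sum of feature-gated member sizes. */
static size_t estimate_state_size_example(const unsigned int kernel_features)
{
  struct Member {
    unsigned int feature; /* Feature bit required for this member. */
    size_t size;          /* sizeof(type) of the member. */
  };
  const Member members[] = {
      {1u << 0, sizeof(float) * 3}, /* e.g. an always-enabled throughput member */
      {1u << 5, sizeof(float) * 4}, /* e.g. a member only used with some feature */
  };

  size_t state_size = 0;
  for (const Member &member : members) {
    /* Members of disabled features contribute nothing, matching the macro above. */
    state_size += (kernel_features & member.feature) ? member.size : 0;
  }
  return state_size;
}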
diff --git a/intern/cycles/integrator/render_scheduler.cpp b/intern/cycles/integrator/render_scheduler.cpp
index e4676bd059c..2e05dbbaf6e 100644
--- a/intern/cycles/integrator/render_scheduler.cpp
+++ b/intern/cycles/integrator/render_scheduler.cpp
@@ -45,6 +45,11 @@ void RenderScheduler::set_denoiser_params(const DenoiseParams &params)
denoiser_params_ = params;
}
+void RenderScheduler::set_limit_samples_per_update(const int limit_samples)
+{
+ limit_samples_per_update_ = limit_samples;
+}
+
void RenderScheduler::set_adaptive_sampling(const AdaptiveSampling &adaptive_sampling)
{
adaptive_sampling_ = adaptive_sampling;
@@ -760,7 +765,13 @@ int RenderScheduler::calculate_num_samples_per_update() const
const double update_interval_in_seconds = guess_display_update_interval_in_seconds();
- return max(int(num_samples_in_second * update_interval_in_seconds), 1);
+ int num_samples_per_update = max(int(num_samples_in_second * update_interval_in_seconds), 1);
+
+ if (limit_samples_per_update_) {
+ num_samples_per_update = min(limit_samples_per_update_, num_samples_per_update);
+ }
+
+ return num_samples_per_update;
}
int RenderScheduler::get_start_sample_to_path_trace() const
@@ -808,7 +819,7 @@ int RenderScheduler::get_num_samples_to_path_trace() const
return 1;
}
- const int num_samples_per_update = calculate_num_samples_per_update();
+ int num_samples_per_update = calculate_num_samples_per_update();
const int path_trace_start_sample = get_start_sample_to_path_trace();
/* Round number of samples to a power of two, so that division of path states into tiles goes in
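
The clamp added to calculate_num_samples_per_update() is small enough to restate in isolation; a hedged standalone sketch (a limit of 0 means "no limit", matching set_limit_samples_per_update()):

#include <algorithm>

/* Mirror of the clamping behavior: the scheduler's estimate is kept at >= 1
 * and, when a limit is set, capped at that limit. */
static int apply_samples_per_update_limit(int estimated_samples, const int limit)
{
  estimated_samples = std::max(estimated_samples, 1);
  return (limit > 0) ? std::min(limit, estimated_samples) : estimated_samples;
}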
diff --git a/intern/cycles/integrator/render_scheduler.h b/intern/cycles/integrator/render_scheduler.h
index dce876d44bd..a0ab17b3794 100644
--- a/intern/cycles/integrator/render_scheduler.h
+++ b/intern/cycles/integrator/render_scheduler.h
@@ -187,6 +187,8 @@ class RenderScheduler {
* times, and so on. */
string full_report() const;
+ void set_limit_samples_per_update(const int limit_samples);
+
protected:
/* Check whether all work has been scheduled and time limit was not exceeded.
*
@@ -450,6 +452,10 @@ class RenderScheduler {
* (quadratic dependency from the resolution divider): resolution divider of 2 brings render time
* down by a factor of 4. */
int calculate_resolution_divider_for_time(double desired_time, double actual_time);
+
+ /* Limit on the number of samples per rendering progression, applied while path guiding
+ * is active and still inside its training phase. A value of 0 means no limit. */
+ int limit_samples_per_update_ = 0;
};
int calculate_resolution_divider_for_resolution(int width, int height, int resolution);
diff --git a/intern/cycles/integrator/work_balancer.cpp b/intern/cycles/integrator/work_balancer.cpp
index 5f1c6c92b9d..0fe170b2791 100644
--- a/intern/cycles/integrator/work_balancer.cpp
+++ b/intern/cycles/integrator/work_balancer.cpp
@@ -17,6 +17,9 @@ void work_balance_do_initial(vector<WorkBalanceInfo> &work_balance_infos)
work_balance_infos[0].weight = 1.0;
return;
}
+ else if (num_infos == 0) {
+ return;
+ }
/* There is no statistics available, so start with an equal distribution. */
const double weight = 1.0 / num_infos;