
git.blender.org/blender.git
author     Andrii <pembem22>                       2021-11-11 10:57:35 +0300
committer  William Leeson <william@blender.org>    2021-11-11 11:39:25 +0300
commit     c63e735f6b601e662fbb1ed5d7a71293552d6720 (patch)
tree       83e7eb2e002067fc7072a63fd3badca9a471f5d8 /intern/cycles/integrator
parent     b8d53b703a4e20b4301e794bf58c9d1fdb65d61b (diff)
Cycles: Add sample offset option
This patch exposes the sample offset option to Blender. It is located in the "Sampling > Advanced" panel. This can be useful, for example, to parallelize rendering: each computer renders a different chunk of samples and the results are combined afterwards.

The option also had to be added to the `RenderWork` and `RenderScheduler` classes so that the sample count shown in the status string is calculated correctly.

Reviewed By: leesonw
Differential Revision: https://developer.blender.org/D13086
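As a rough illustration of the workflow described above (a minimal sketch, not Blender API code; all numbers and names are made up for the example), a fixed total sample budget can be split into per-machine chunks, where every machine renders the same number of samples but starts at a different sample offset:

/* Sketch only: prints the offset/count pair each render node would use.
 * The offset is what a user would enter in "Sampling > Advanced". */
#include <cstdio>

int main()
{
  const int total_samples = 512; /* full sample budget for the final image */
  const int num_machines = 4;    /* render nodes working in parallel */
  const int chunk = total_samples / num_machines;

  for (int machine = 0; machine < num_machines; machine++) {
    const int sample_offset = machine * chunk; /* per-machine sample offset */
    const int num_samples = chunk;             /* samples this node renders */
    printf("machine %d: sample_offset=%d, samples=%d\n", machine, sample_offset, num_samples);
  }
  return 0;
}

Combining the partial renders into the final image would happen outside of this patch; the patch itself only lets Cycles start its sample sequence at the given offset.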
Diffstat (limited to 'intern/cycles/integrator')
-rw-r--r--  intern/cycles/integrator/path_trace.cpp            8
-rw-r--r--  intern/cycles/integrator/path_trace_work.h         5
-rw-r--r--  intern/cycles/integrator/path_trace_work_cpu.cpp   4
-rw-r--r--  intern/cycles/integrator/path_trace_work_cpu.h     3
-rw-r--r--  intern/cycles/integrator/path_trace_work_gpu.cpp   4
-rw-r--r--  intern/cycles/integrator/path_trace_work_gpu.h     3
-rw-r--r--  intern/cycles/integrator/render_scheduler.cpp      17
-rw-r--r--  intern/cycles/integrator/render_scheduler.h        8
-rw-r--r--  intern/cycles/integrator/work_tile_scheduler.cpp   3
-rw-r--r--  intern/cycles/integrator/work_tile_scheduler.h     2
10 files changed, 47 insertions, 10 deletions
diff --git a/intern/cycles/integrator/path_trace.cpp b/intern/cycles/integrator/path_trace.cpp
index daf270d6686..dcbdf07ee67 100644
--- a/intern/cycles/integrator/path_trace.cpp
+++ b/intern/cycles/integrator/path_trace.cpp
@@ -380,7 +380,10 @@ void PathTrace::path_trace(RenderWork &render_work)
PathTraceWork *path_trace_work = path_trace_works_[i].get();
PathTraceWork::RenderStatistics statistics;
- path_trace_work->render_samples(statistics, render_work.path_trace.start_sample, num_samples);
+ path_trace_work->render_samples(statistics,
+ render_work.path_trace.start_sample,
+ num_samples,
+ render_work.path_trace.sample_offset);
const double work_time = time_dt() - work_start_time;
work_balance_infos_[i].time_spent += work_time;
@@ -849,7 +852,8 @@ void PathTrace::progress_update_if_needed(const RenderWork &render_work)
const int2 tile_size = get_render_tile_size();
const int num_samples_added = tile_size.x * tile_size.y * render_work.path_trace.num_samples;
const int current_sample = render_work.path_trace.start_sample +
- render_work.path_trace.num_samples;
+ render_work.path_trace.num_samples -
+ render_work.path_trace.sample_offset;
progress_->add_samples(num_samples_added, current_sample);
}
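A worked example of the progress arithmetic in the hunk above (assumed numbers, not taken from the patch): because RenderScheduler::reset() also sets the start sample to the offset (see render_scheduler.cpp further down), subtracting sample_offset makes the reported current sample count only what this machine has rendered, not the absolute sample index:

/* Sketch of the expression used in PathTrace::progress_update_if_needed(). */
#include <cassert>

int main()
{
  const int sample_offset = 128; /* offset assigned to this machine */
  const int start_sample = 128;  /* scheduler starts at the offset */
  const int num_samples = 32;    /* samples finished in this render work */

  const int current_sample = start_sample + num_samples - sample_offset;
  assert(current_sample == 32);  /* status reports 32 samples, not 160 */
  return 0;
}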
diff --git a/intern/cycles/integrator/path_trace_work.h b/intern/cycles/integrator/path_trace_work.h
index 0dc7cd2f896..2ebfc913580 100644
--- a/intern/cycles/integrator/path_trace_work.h
+++ b/intern/cycles/integrator/path_trace_work.h
@@ -75,7 +75,10 @@ class PathTraceWork {
/* Render given number of samples as a synchronous blocking call.
* The samples are added to the render buffer associated with this work. */
- virtual void render_samples(RenderStatistics &statistics, int start_sample, int samples_num) = 0;
+ virtual void render_samples(RenderStatistics &statistics,
+ int start_sample,
+ int samples_num,
+ int sample_offset) = 0;
/* Copy render result from this work to the corresponding place of the GPU display.
*
diff --git a/intern/cycles/integrator/path_trace_work_cpu.cpp b/intern/cycles/integrator/path_trace_work_cpu.cpp
index 36ce2be9f6d..530e60d6750 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_cpu.cpp
@@ -71,7 +71,8 @@ void PathTraceWorkCPU::init_execution()
void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
int start_sample,
- int samples_num)
+ int samples_num,
+ int sample_offset)
{
const int64_t image_width = effective_buffer_params_.width;
const int64_t image_height = effective_buffer_params_.height;
@@ -97,6 +98,7 @@ void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
work_tile.w = 1;
work_tile.h = 1;
work_tile.start_sample = start_sample;
+ work_tile.sample_offset = sample_offset;
work_tile.num_samples = 1;
work_tile.offset = effective_buffer_params_.offset;
work_tile.stride = effective_buffer_params_.stride;
diff --git a/intern/cycles/integrator/path_trace_work_cpu.h b/intern/cycles/integrator/path_trace_work_cpu.h
index 6e734690811..63ab686588c 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.h
+++ b/intern/cycles/integrator/path_trace_work_cpu.h
@@ -48,7 +48,8 @@ class PathTraceWorkCPU : public PathTraceWork {
virtual void render_samples(RenderStatistics &statistics,
int start_sample,
- int samples_num) override;
+ int samples_num,
+ int sample_offset) override;
virtual void copy_to_display(PathTraceDisplay *display,
PassMode pass_mode,
diff --git a/intern/cycles/integrator/path_trace_work_gpu.cpp b/intern/cycles/integrator/path_trace_work_gpu.cpp
index dfc1362ab09..2263c9892f4 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_gpu.cpp
@@ -250,7 +250,8 @@ void PathTraceWorkGPU::init_execution()
void PathTraceWorkGPU::render_samples(RenderStatistics &statistics,
int start_sample,
- int samples_num)
+ int samples_num,
+ int sample_offset)
{
/* Limit number of states for the tile and rely on a greedy scheduling of tiles. This allows to
* add more work (because tiles are smaller, so there is higher chance that more paths will
@@ -261,6 +262,7 @@ void PathTraceWorkGPU::render_samples(RenderStatistics &statistics,
work_tile_scheduler_.reset(effective_buffer_params_,
start_sample,
samples_num,
+ sample_offset,
device_scene_->data.integrator.scrambling_distance);
enqueue_reset();
diff --git a/intern/cycles/integrator/path_trace_work_gpu.h b/intern/cycles/integrator/path_trace_work_gpu.h
index c5e291e72db..5aa497c26e7 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.h
+++ b/intern/cycles/integrator/path_trace_work_gpu.h
@@ -46,7 +46,8 @@ class PathTraceWorkGPU : public PathTraceWork {
virtual void render_samples(RenderStatistics &statistics,
int start_sample,
- int samples_num) override;
+ int samples_num,
+ int sample_offset) override;
virtual void copy_to_display(PathTraceDisplay *display,
PassMode pass_mode,
diff --git a/intern/cycles/integrator/render_scheduler.cpp b/intern/cycles/integrator/render_scheduler.cpp
index f776d01ef67..538d751e8b1 100644
--- a/intern/cycles/integrator/render_scheduler.cpp
+++ b/intern/cycles/integrator/render_scheduler.cpp
@@ -88,6 +88,16 @@ int RenderScheduler::get_num_samples() const
return num_samples_;
}
+void RenderScheduler::set_sample_offset(int sample_offset)
+{
+ sample_offset_ = sample_offset;
+}
+
+int RenderScheduler::get_sample_offset() const
+{
+ return sample_offset_;
+}
+
void RenderScheduler::set_time_limit(double time_limit)
{
time_limit_ = time_limit;
@@ -110,13 +120,15 @@ int RenderScheduler::get_num_rendered_samples() const
return state_.num_rendered_samples;
}
-void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
+void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples, int sample_offset)
{
buffer_params_ = buffer_params;
update_start_resolution_divider();
set_num_samples(num_samples);
+ set_start_sample(sample_offset);
+ set_sample_offset(sample_offset);
/* In background mode never do lower resolution render preview, as it is not really supported
* by the software. */
@@ -171,7 +183,7 @@ void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
void RenderScheduler::reset_for_next_tile()
{
- reset(buffer_params_, num_samples_);
+ reset(buffer_params_, num_samples_, sample_offset_);
}
bool RenderScheduler::render_work_reschedule_on_converge(RenderWork &render_work)
@@ -317,6 +329,7 @@ RenderWork RenderScheduler::get_render_work()
render_work.path_trace.start_sample = get_start_sample_to_path_trace();
render_work.path_trace.num_samples = get_num_samples_to_path_trace();
+ render_work.path_trace.sample_offset = get_sample_offset();
render_work.init_render_buffers = (render_work.path_trace.start_sample == get_start_sample());
diff --git a/intern/cycles/integrator/render_scheduler.h b/intern/cycles/integrator/render_scheduler.h
index d7b7413ae31..28f563c46e3 100644
--- a/intern/cycles/integrator/render_scheduler.h
+++ b/intern/cycles/integrator/render_scheduler.h
@@ -39,6 +39,7 @@ class RenderWork {
struct {
int start_sample = 0;
int num_samples = 0;
+ int sample_offset = 0;
} path_trace;
struct {
@@ -125,6 +126,9 @@ class RenderScheduler {
void set_num_samples(int num_samples);
int get_num_samples() const;
+ void set_sample_offset(int sample_offset);
+ int get_sample_offset() const;
+
/* Time limit for the path tracing tasks, in minutes.
* Zero disables the limit. */
void set_time_limit(double time_limit);
@@ -150,7 +154,7 @@ class RenderScheduler {
/* Reset scheduler, indicating that rendering will happen from scratch.
* Resets current rendered state, as well as scheduling information. */
- void reset(const BufferParams &buffer_params, int num_samples);
+ void reset(const BufferParams &buffer_params, int num_samples, int sample_offset);
/* Reset scheduler upon switching to a next tile.
* Will keep the same number of samples and full-frame render parameters, but will reset progress
@@ -419,6 +423,8 @@ class RenderScheduler {
int start_sample_ = 0;
int num_samples_ = 0;
+ int sample_offset_ = 0;
+
/* Limit in seconds for how long path tracing is allowed to happen.
* Zero means no limit is applied. */
double time_limit_ = 0.0;
diff --git a/intern/cycles/integrator/work_tile_scheduler.cpp b/intern/cycles/integrator/work_tile_scheduler.cpp
index 2d1ac07db7f..d60f7149bf4 100644
--- a/intern/cycles/integrator/work_tile_scheduler.cpp
+++ b/intern/cycles/integrator/work_tile_scheduler.cpp
@@ -36,6 +36,7 @@ void WorkTileScheduler::set_max_num_path_states(int max_num_path_states)
void WorkTileScheduler::reset(const BufferParams &buffer_params,
int sample_start,
int samples_num,
+ int sample_offset,
float scrambling_distance)
{
/* Image buffer parameters. */
@@ -51,6 +52,7 @@ void WorkTileScheduler::reset(const BufferParams &buffer_params,
/* Samples parameters. */
sample_start_ = sample_start;
samples_num_ = samples_num;
+ sample_offset_ = sample_offset;
/* Initialize new scheduling. */
reset_scheduler_state();
@@ -111,6 +113,7 @@ bool WorkTileScheduler::get_work(KernelWorkTile *work_tile_, const int max_work_
work_tile.h = tile_size_.height;
work_tile.start_sample = sample_start_ + start_sample;
work_tile.num_samples = min(tile_size_.num_samples, samples_num_ - start_sample);
+ work_tile.sample_offset = sample_offset_;
work_tile.offset = offset_;
work_tile.stride = stride_;
diff --git a/intern/cycles/integrator/work_tile_scheduler.h b/intern/cycles/integrator/work_tile_scheduler.h
index d9fa7e84431..2d6395799f7 100644
--- a/intern/cycles/integrator/work_tile_scheduler.h
+++ b/intern/cycles/integrator/work_tile_scheduler.h
@@ -41,6 +41,7 @@ class WorkTileScheduler {
void reset(const BufferParams &buffer_params,
int sample_start,
int samples_num,
+ int sample_offset,
float scrambling_distance);
/* Get work for a device.
@@ -79,6 +80,7 @@ class WorkTileScheduler {
* (splitting into a smaller work tiles). */
int sample_start_ = 0;
int samples_num_ = 0;
+ int sample_offset_ = 0;
/* Tile size which be scheduled for rendering. */
TileSize tile_size_;