git.blender.org/blender.git
Diffstat (limited to 'intern/cycles/integrator')
-rw-r--r--  intern/cycles/integrator/denoiser.cpp             |  4
-rw-r--r--  intern/cycles/integrator/denoiser_device.cpp      |  2
-rw-r--r--  intern/cycles/integrator/denoiser_oidn.cpp        |  4
-rw-r--r--  intern/cycles/integrator/path_trace.cpp           | 66
-rw-r--r--  intern/cycles/integrator/path_trace_work_gpu.cpp  |  8
-rw-r--r--  intern/cycles/integrator/render_scheduler.cpp     | 21
-rw-r--r--  intern/cycles/integrator/shader_eval.cpp          |  4
-rw-r--r--  intern/cycles/integrator/work_tile_scheduler.cpp  |  6
8 files changed, 58 insertions(+), 57 deletions(-)
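
For reference, the named verbosity macros that this change switches to live in intern/cycles/util/log.h. Their mapping to the numeric glog levels is not part of this diff; the sketch below is an assumption, inferred from the replacements that follow (VLOG(2) becomes VLOG_INFO, VLOG(3) and VLOG(4) become VLOG_WORK, and the GPU state-size message moves to VLOG_DEVICE_STATS):

/* Assumed mapping of named logging macros to glog verbosity levels;
 * the real definitions are in intern/cycles/util/log.h, not shown here. */
#define VLOG_INFO VLOG(2)         /* High-level device and display information. */
#define VLOG_WORK VLOG(3)         /* Scheduled work, timing and progress details. */
#define VLOG_DEVICE_STATS VLOG(4) /* Detailed device state and memory statistics. */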
diff --git a/intern/cycles/integrator/denoiser.cpp b/intern/cycles/integrator/denoiser.cpp
index 23ab825a4d2..94991d63e4c 100644
--- a/intern/cycles/integrator/denoiser.cpp
+++ b/intern/cycles/integrator/denoiser.cpp
@@ -58,8 +58,8 @@ bool Denoiser::load_kernels(Progress *progress)
return false;
}
- VLOG(3) << "Will denoise on " << denoiser_device->info.description << " ("
- << denoiser_device->info.id << ")";
+ VLOG_WORK << "Will denoise on " << denoiser_device->info.description << " ("
+ << denoiser_device->info.id << ")";
return true;
}
diff --git a/intern/cycles/integrator/denoiser_device.cpp b/intern/cycles/integrator/denoiser_device.cpp
index 595397312b3..5414f9dfb1a 100644
--- a/intern/cycles/integrator/denoiser_device.cpp
+++ b/intern/cycles/integrator/denoiser_device.cpp
@@ -48,7 +48,7 @@ bool DeviceDenoiser::denoise_buffer(const BufferParams &buffer_params,
task.render_buffers = render_buffers;
}
else {
- VLOG(3) << "Creating temporary buffer on denoiser device.";
+ VLOG_WORK << "Creating temporary buffer on denoiser device.";
DeviceQueue *queue = denoiser_device->get_denoise_queue();
diff --git a/intern/cycles/integrator/denoiser_oidn.cpp b/intern/cycles/integrator/denoiser_oidn.cpp
index b074408e229..04e659a15e2 100644
--- a/intern/cycles/integrator/denoiser_oidn.cpp
+++ b/intern/cycles/integrator/denoiser_oidn.cpp
@@ -284,8 +284,8 @@ class OIDNDenoiseContext {
/* Read pass pixels using PassAccessor into a temporary buffer which is owned by the pass. */
void read_pass_pixels_into_buffer(OIDNPass &oidn_pass)
{
- VLOG(3) << "Allocating temporary buffer for pass " << oidn_pass.name << " ("
- << pass_type_as_string(oidn_pass.type) << ")";
+ VLOG_WORK << "Allocating temporary buffer for pass " << oidn_pass.name << " ("
+ << pass_type_as_string(oidn_pass.type) << ")";
const int64_t width = buffer_params_.width;
const int64_t height = buffer_params_.height;
diff --git a/intern/cycles/integrator/path_trace.cpp b/intern/cycles/integrator/path_trace.cpp
index 36a0326e405..9ad1c465725 100644
--- a/intern/cycles/integrator/path_trace.cpp
+++ b/intern/cycles/integrator/path_trace.cpp
@@ -348,8 +348,8 @@ void PathTrace::path_trace(RenderWork &render_work)
return;
}
- VLOG(3) << "Will path trace " << render_work.path_trace.num_samples
- << " samples at the resolution divider " << render_work.resolution_divider;
+ VLOG_WORK << "Will path trace " << render_work.path_trace.num_samples
+ << " samples at the resolution divider " << render_work.resolution_divider;
const double start_time = time_dt();
@@ -373,9 +373,9 @@ void PathTrace::path_trace(RenderWork &render_work)
work_balance_infos_[i].time_spent += work_time;
work_balance_infos_[i].occupancy = statistics.occupancy;
- VLOG(3) << "Rendered " << num_samples << " samples in " << work_time << " seconds ("
- << work_time / num_samples
- << " seconds per sample), occupancy: " << statistics.occupancy;
+ VLOG_WORK << "Rendered " << num_samples << " samples in " << work_time << " seconds ("
+ << work_time / num_samples
+ << " seconds per sample), occupancy: " << statistics.occupancy;
});
float occupancy_accum = 0.0f;
@@ -398,10 +398,10 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
bool did_reschedule_on_idle = false;
while (true) {
- VLOG(3) << "Will filter adaptive stopping buffer, threshold "
- << render_work.adaptive_sampling.threshold;
+ VLOG_WORK << "Will filter adaptive stopping buffer, threshold "
+ << render_work.adaptive_sampling.threshold;
if (render_work.adaptive_sampling.reset) {
- VLOG(3) << "Will re-calculate convergency flag for currently converged pixels.";
+ VLOG_WORK << "Will re-calculate convergency flag for currently converged pixels.";
}
const double start_time = time_dt();
@@ -420,11 +420,11 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
render_work, time_dt() - start_time, is_cancel_requested());
if (num_active_pixels == 0) {
- VLOG(3) << "All pixels converged.";
+ VLOG_WORK << "All pixels converged.";
if (!render_scheduler_.render_work_reschedule_on_converge(render_work)) {
break;
}
- VLOG(3) << "Continuing with lower threshold.";
+ VLOG_WORK << "Continuing with lower threshold.";
}
else if (did_reschedule_on_idle) {
break;
@@ -436,10 +436,10 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
* A better heuristic is possible here: for example, use maximum of 128^2 and percentage of
* the final resolution. */
if (!render_scheduler_.render_work_reschedule_on_idle(render_work)) {
- VLOG(3) << "Rescheduling is not possible: final threshold is reached.";
+ VLOG_WORK << "Rescheduling is not possible: final threshold is reached.";
break;
}
- VLOG(3) << "Rescheduling lower threshold.";
+ VLOG_WORK << "Rescheduling lower threshold.";
did_reschedule_on_idle = true;
}
else {
@@ -483,7 +483,7 @@ void PathTrace::cryptomatte_postprocess(const RenderWork &render_work)
if (!render_work.cryptomatte.postprocess) {
return;
}
- VLOG(3) << "Perform cryptomatte work.";
+ VLOG_WORK << "Perform cryptomatte work.";
parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->cryptomatte_postproces();
@@ -501,7 +501,7 @@ void PathTrace::denoise(const RenderWork &render_work)
return;
}
- VLOG(3) << "Perform denoising work.";
+ VLOG_WORK << "Perform denoising work.";
const double start_time = time_dt();
@@ -599,26 +599,26 @@ void PathTrace::update_display(const RenderWork &render_work)
}
if (!display_ && !output_driver_) {
- VLOG(3) << "Ignore display update.";
+ VLOG_WORK << "Ignore display update.";
return;
}
if (full_params_.width == 0 || full_params_.height == 0) {
- VLOG(3) << "Skipping PathTraceDisplay update due to 0 size of the render buffer.";
+ VLOG_WORK << "Skipping PathTraceDisplay update due to 0 size of the render buffer.";
return;
}
const double start_time = time_dt();
if (output_driver_) {
- VLOG(3) << "Invoke buffer update callback.";
+ VLOG_WORK << "Invoke buffer update callback.";
PathTraceTile tile(*this);
output_driver_->update_render_tile(tile);
}
if (display_) {
- VLOG(3) << "Perform copy to GPUDisplay work.";
+ VLOG_WORK << "Perform copy to GPUDisplay work.";
const int texture_width = render_state_.effective_big_tile_params.window_width;
const int texture_height = render_state_.effective_big_tile_params.window_height;
@@ -654,33 +654,33 @@ void PathTrace::rebalance(const RenderWork &render_work)
const int num_works = path_trace_works_.size();
if (num_works == 1) {
- VLOG(3) << "Ignoring rebalance work due to single device render.";
+ VLOG_WORK << "Ignoring rebalance work due to single device render.";
return;
}
const double start_time = time_dt();
if (VLOG_IS_ON(3)) {
- VLOG(3) << "Perform rebalance work.";
- VLOG(3) << "Per-device path tracing time (seconds):";
+ VLOG_WORK << "Perform rebalance work.";
+ VLOG_WORK << "Per-device path tracing time (seconds):";
for (int i = 0; i < num_works; ++i) {
- VLOG(3) << path_trace_works_[i]->get_device()->info.description << ": "
- << work_balance_infos_[i].time_spent;
+ VLOG_WORK << path_trace_works_[i]->get_device()->info.description << ": "
+ << work_balance_infos_[i].time_spent;
}
}
const bool did_rebalance = work_balance_do_rebalance(work_balance_infos_);
if (VLOG_IS_ON(3)) {
- VLOG(3) << "Calculated per-device weights for works:";
+ VLOG_WORK << "Calculated per-device weights for works:";
for (int i = 0; i < num_works; ++i) {
- VLOG(3) << path_trace_works_[i]->get_device()->info.description << ": "
- << work_balance_infos_[i].weight;
+ VLOG_WORK << path_trace_works_[i]->get_device()->info.description << ": "
+ << work_balance_infos_[i].weight;
}
}
if (!did_rebalance) {
- VLOG(3) << "Balance in path trace works did not change.";
+ VLOG_WORK << "Balance in path trace works did not change.";
render_scheduler_.report_rebalance_time(render_work, time_dt() - start_time, false);
return;
}
@@ -704,7 +704,7 @@ void PathTrace::write_tile_buffer(const RenderWork &render_work)
return;
}
- VLOG(3) << "Write tile result.";
+ VLOG_WORK << "Write tile result.";
render_state_.tile_written = true;
@@ -718,14 +718,14 @@ void PathTrace::write_tile_buffer(const RenderWork &render_work)
*
* Important thing is: tile should be written to the software via callback only once. */
if (!has_multiple_tiles) {
- VLOG(3) << "Write tile result via buffer write callback.";
+ VLOG_WORK << "Write tile result via buffer write callback.";
tile_buffer_write();
}
/* Write tile to disk, so that the render work's render buffer can be re-used for the next tile.
*/
if (has_multiple_tiles) {
- VLOG(3) << "Write tile result into .";
+ VLOG_WORK << "Write tile result into .";
tile_buffer_write_to_disk();
}
}
@@ -736,10 +736,10 @@ void PathTrace::finalize_full_buffer_on_disk(const RenderWork &render_work)
return;
}
- VLOG(3) << "Handle full-frame render buffer work.";
+ VLOG_WORK << "Handle full-frame render buffer work.";
if (!tile_manager_.has_written_tiles()) {
- VLOG(3) << "No tiles on disk.";
+ VLOG_WORK << "No tiles on disk.";
return;
}
@@ -935,7 +935,7 @@ static string get_layer_view_name(const RenderBuffers &buffers)
void PathTrace::process_full_buffer_from_disk(string_view filename)
{
- VLOG(3) << "Processing full frame buffer file " << filename;
+ VLOG_WORK << "Processing full frame buffer file " << filename;
progress_set_status("Reading full buffer from disk");
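
One detail worth noting in the rebalance hunks above: the VLOG_IS_ON(3) guards keep a bare numeric level even though the statements inside them now use VLOG_WORK. If the mapping assumed earlier holds, a companion guard macro would keep the level defined in a single place. A hypothetical sketch (this macro is not introduced by the diff):

/* Assumed companion to VLOG_WORK, mirroring the mapping sketched above. */
#define VLOG_WORK_IS_ON VLOG_IS_ON(3)

if (VLOG_WORK_IS_ON) {
  VLOG_WORK << "Perform rebalance work.";
  /* ... per-device timing details ... */
}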
diff --git a/intern/cycles/integrator/path_trace_work_gpu.cpp b/intern/cycles/integrator/path_trace_work_gpu.cpp
index ede81705ae8..e262c252ce3 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_gpu.cpp
@@ -152,7 +152,7 @@ void PathTraceWorkGPU::alloc_integrator_soa()
total_soa_size += soa_memory->memory_size();
}
- VLOG(3) << "GPU SoA state size: " << string_human_readable_size(total_soa_size);
+ VLOG_DEVICE_STATS << "GPU SoA state size: " << string_human_readable_size(total_soa_size);
}
}
@@ -239,7 +239,7 @@ void PathTraceWorkGPU::init_execution()
/* Copy to device side struct in constant memory. */
device_->const_copy_to(
- "__integrator_state", &integrator_state_gpu_, sizeof(integrator_state_gpu_));
+ "integrator_state", &integrator_state_gpu_, sizeof(integrator_state_gpu_));
}
void PathTraceWorkGPU::render_samples(RenderStatistics &statistics,
@@ -820,10 +820,10 @@ bool PathTraceWorkGPU::should_use_graphics_interop()
interop_use_ = device->should_use_graphics_interop();
if (interop_use_) {
- VLOG(2) << "Using graphics interop GPU display update.";
+ VLOG_INFO << "Using graphics interop GPU display update.";
}
else {
- VLOG(2) << "Using naive GPU display update.";
+ VLOG_INFO << "Using naive GPU display update.";
}
interop_use_checked_ = true;
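
Besides the logging changes, this file also renames the constant-memory lookup key from "__integrator_state" to "integrator_state". The key is matched as a string against a device-side name, so the host-side string and the kernel-side member must agree exactly. A minimal sketch of the failure mode follows; the dispatch, the helper copy_to_constant_member() and the KernelParams layout shown are illustrative assumptions, not code from this diff:

#include <cstddef> /* offsetof */
#include <cstring> /* strcmp */

/* Illustrative types; real backends keep their own equivalents. */
struct KernelData { /* ... */ };
struct IntegratorStateGPU { /* ... */ };
struct KernelParams {
  KernelData data;
  IntegratorStateGPU integrator_state;
};

void copy_to_constant_member(size_t offset, void *host, size_t size); /* hypothetical */

void const_copy_to(const char *name, void *host, size_t size)
{
  if (strcmp(name, "integrator_state") == 0) {
    copy_to_constant_member(offsetof(KernelParams, integrator_state), host, size);
  }
  else if (strcmp(name, "data") == 0) {
    copy_to_constant_member(offsetof(KernelParams, data), host, size);
  }
  /* A stale key such as "__integrator_state" matches no branch: the copy is
   * silently skipped, which is why the host-side string must track the
   * device-side name exactly. */
}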
diff --git a/intern/cycles/integrator/render_scheduler.cpp b/intern/cycles/integrator/render_scheduler.cpp
index ebc3170393f..e4676bd059c 100644
--- a/intern/cycles/integrator/render_scheduler.cpp
+++ b/intern/cycles/integrator/render_scheduler.cpp
@@ -225,7 +225,7 @@ bool RenderScheduler::render_work_reschedule_on_idle(RenderWork &render_work)
void RenderScheduler::render_work_reschedule_on_cancel(RenderWork &render_work)
{
- VLOG(3) << "Schedule work for cancel.";
+ VLOG_WORK << "Schedule work for cancel.";
/* Un-schedule samples: they will not be rendered and should not be counted. */
state_.num_rendered_samples -= render_work.path_trace.num_samples;
@@ -475,14 +475,14 @@ void RenderScheduler::report_path_trace_time(const RenderWork &render_work,
path_trace_time_.add_average(final_time_approx, render_work.path_trace.num_samples);
- VLOG(4) << "Average path tracing time: " << path_trace_time_.get_average() << " seconds.";
+ VLOG_WORK << "Average path tracing time: " << path_trace_time_.get_average() << " seconds.";
}
void RenderScheduler::report_path_trace_occupancy(const RenderWork &render_work, float occupancy)
{
state_.occupancy_num_samples = render_work.path_trace.num_samples;
state_.occupancy = occupancy;
- VLOG(4) << "Measured path tracing occupancy: " << occupancy;
+ VLOG_WORK << "Measured path tracing occupancy: " << occupancy;
}
void RenderScheduler::report_adaptive_filter_time(const RenderWork &render_work,
@@ -503,8 +503,8 @@ void RenderScheduler::report_adaptive_filter_time(const RenderWork &render_work,
adaptive_filter_time_.add_average(final_time_approx, render_work.path_trace.num_samples);
- VLOG(4) << "Average adaptive sampling filter time: " << adaptive_filter_time_.get_average()
- << " seconds.";
+ VLOG_WORK << "Average adaptive sampling filter time: " << adaptive_filter_time_.get_average()
+ << " seconds.";
}
void RenderScheduler::report_denoise_time(const RenderWork &render_work, double time)
@@ -523,7 +523,7 @@ void RenderScheduler::report_denoise_time(const RenderWork &render_work, double
denoise_time_.add_average(final_time_approx);
- VLOG(4) << "Average denoising time: " << denoise_time_.get_average() << " seconds.";
+ VLOG_WORK << "Average denoising time: " << denoise_time_.get_average() << " seconds.";
}
void RenderScheduler::report_display_update_time(const RenderWork &render_work, double time)
@@ -542,7 +542,8 @@ void RenderScheduler::report_display_update_time(const RenderWork &render_work,
display_update_time_.add_average(final_time_approx);
- VLOG(4) << "Average display update time: " << display_update_time_.get_average() << " seconds.";
+ VLOG_WORK << "Average display update time: " << display_update_time_.get_average()
+ << " seconds.";
/* Move the display update moment further in time, so that logic which checks when last update
* did happen have more reliable point in time (without path tracing and denoising parts of the
@@ -568,7 +569,7 @@ void RenderScheduler::report_rebalance_time(const RenderWork &render_work,
state_.last_rebalance_changed = balance_changed;
- VLOG(4) << "Average rebalance time: " << rebalance_time_.get_average() << " seconds.";
+ VLOG_WORK << "Average rebalance time: " << rebalance_time_.get_average() << " seconds.";
}
string RenderScheduler::full_report() const
@@ -1063,7 +1064,7 @@ void RenderScheduler::update_start_resolution_divider()
/* Resolution divider has never been calculated before: use default resolution, so that we have
* somewhat good initial behavior, giving a chance to collect real numbers. */
start_resolution_divider_ = default_start_resolution_divider_;
- VLOG(3) << "Initial resolution divider is " << start_resolution_divider_;
+ VLOG_WORK << "Initial resolution divider is " << start_resolution_divider_;
return;
}
@@ -1092,7 +1093,7 @@ void RenderScheduler::update_start_resolution_divider()
* simple and compute device is fast). */
start_resolution_divider_ = max(resolution_divider_for_update, pixel_size_);
- VLOG(3) << "Calculated resolution divider is " << start_resolution_divider_;
+ VLOG_WORK << "Calculated resolution divider is " << start_resolution_divider_;
}
double RenderScheduler::guess_viewport_navigation_update_interval_in_seconds() const
diff --git a/intern/cycles/integrator/shader_eval.cpp b/intern/cycles/integrator/shader_eval.cpp
index 92b9d1c662d..b1450732f5c 100644
--- a/intern/cycles/integrator/shader_eval.cpp
+++ b/intern/cycles/integrator/shader_eval.cpp
@@ -31,8 +31,8 @@ bool ShaderEval::eval(const ShaderEvalType type,
device_->foreach_device([&](Device *device) {
if (!first_device) {
- LOG(ERROR) << "Multi-devices are not yet fully implemented, will evaluate shader on a "
- "single device.";
+ VLOG_WORK << "Multi-devices are not yet fully implemented, will evaluate shader on a "
+ "single device.";
return;
}
first_device = false;
diff --git a/intern/cycles/integrator/work_tile_scheduler.cpp b/intern/cycles/integrator/work_tile_scheduler.cpp
index 6dc511064c9..4bc8c0c4396 100644
--- a/intern/cycles/integrator/work_tile_scheduler.cpp
+++ b/intern/cycles/integrator/work_tile_scheduler.cpp
@@ -55,7 +55,7 @@ void WorkTileScheduler::reset_scheduler_state()
tile_size_ = tile_calculate_best_size(
accelerated_rt_, image_size_px_, samples_num_, max_num_path_states_, scrambling_distance_);
- VLOG(3) << "Will schedule tiles of size " << tile_size_;
+ VLOG_WORK << "Will schedule tiles of size " << tile_size_;
if (VLOG_IS_ON(3)) {
/* The logging is based on multiple tiles scheduled, ignoring overhead of multi-tile scheduling
@@ -63,8 +63,8 @@ void WorkTileScheduler::reset_scheduler_state()
const int num_path_states_in_tile = tile_size_.width * tile_size_.height *
tile_size_.num_samples;
const int num_tiles = max_num_path_states_ / num_path_states_in_tile;
- VLOG(3) << "Number of unused path states: "
- << max_num_path_states_ - num_tiles * num_path_states_in_tile;
+ VLOG_WORK << "Number of unused path states: "
+ << max_num_path_states_ - num_tiles * num_path_states_in_tile;
}
num_tiles_x_ = divide_up(image_size_px_.x, tile_size_.width);
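
All of the messages touched by this diff are verbose logs, so they only appear when Blender is built with logging support and launched with a sufficient verbosity level, for example blender --verbose 3 --debug-cycles. Under the mapping assumed above, level 3 would enable the VLOG_WORK messages and level 4 the VLOG_DEVICE_STATS ones; treat the exact levels as an assumption rather than something this diff establishes.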