git.blender.org/blender.git

author     Campbell Barton <ideasman42@gmail.com>  2021-09-22 07:48:01 +0300
committer  Campbell Barton <ideasman42@gmail.com>  2021-09-22 07:54:01 +0300
commit     4d66cbd140b1648b79df0df695046cb718797b70 (patch)
tree       945b1093ba250ad57611f44bda7ca0b8a31a0211 /intern/cycles/integrator
parent     77061a5621015dfd0c9f89fd21cb23d706d0cec8 (diff)

Cleanup: spelling in comments
Diffstat (limited to 'intern/cycles/integrator')
-rw-r--r--  intern/cycles/integrator/denoiser.h               10
-rw-r--r--  intern/cycles/integrator/denoiser_device.cpp       2
-rw-r--r--  intern/cycles/integrator/denoiser_oidn.cpp         4
-rw-r--r--  intern/cycles/integrator/path_trace.cpp            6
-rw-r--r--  intern/cycles/integrator/path_trace.h             16
-rw-r--r--  intern/cycles/integrator/path_trace_work.h        10
-rw-r--r--  intern/cycles/integrator/path_trace_work_gpu.cpp   6
-rw-r--r--  intern/cycles/integrator/render_scheduler.cpp      8
-rw-r--r--  intern/cycles/integrator/render_scheduler.h       10
-rw-r--r--  intern/cycles/integrator/shader_eval.cpp           2
-rw-r--r--  intern/cycles/integrator/work_balancer.h           2
-rw-r--r--  intern/cycles/integrator/work_tile_scheduler.cpp   2
-rw-r--r--  intern/cycles/integrator/work_tile_scheduler.h     4
13 files changed, 41 insertions(+), 41 deletions(-)
diff --git a/intern/cycles/integrator/denoiser.h b/intern/cycles/integrator/denoiser.h
index 3101b45e31b..b02bcbeb046 100644
--- a/intern/cycles/integrator/denoiser.h
+++ b/intern/cycles/integrator/denoiser.h
@@ -33,7 +33,7 @@ class Progress;
/* Implementation of a specific denoising algorithm.
*
- * This class takes care of breaking down denosiing algorithm into a series of device calls or to
+ * This class takes care of breaking down denoising algorithm into a series of device calls or to
* calls of an external API to denoise given input.
*
* TODO(sergey): Are we better with device or a queue here? */
@@ -53,7 +53,7 @@ class Denoiser {
const DenoiseParams &get_params() const;
/* Create devices and load kernels needed for denoising.
- * The progress is used to communicate state when kenrels actually needs to be loaded.
+ * The progress is used to communicate state when kernels actually needs to be loaded.
*
* NOTE: The `progress` is an optional argument, can be nullptr. */
virtual bool load_kernels(Progress *progress);
@@ -64,7 +64,7 @@ class Denoiser {
* a lower resolution render into a bigger allocated buffer, which is used in viewport during
* navigation and non-unit pixel size. Use that instead of render_buffers->params.
*
- * The buffer might be copming from a "foreign" device from what this denoise is created for.
+ * The buffer might be coming from a "foreign" device from what this denoise is created for.
* This means that in general case the denoiser will make sure the input data is available on
* the denoiser device, perform denoising, and put data back to the device where the buffer
* came from.
@@ -95,8 +95,8 @@ class Denoiser {
* using OptiX denoiser and rendering on CPU.
*
* - No threading safety is ensured in this call. This means, that it is up to caller to ensure
- * that there is no threadingconflict between denoising task lazily initializing the device and
- * access to this device happen. */
+ * that there is no threading-conflict between denoising task lazily initializing the device
+ * and access to this device happen. */
Device *get_denoiser_device() const;
function<bool(void)> is_cancelled_cb;
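
Taken together, the comments in this header describe the denoiser call sequence: load kernels (progress may be nullptr), then denoise a buffer that may live on a foreign device. A minimal sketch of driving that interface; it assumes the Cycles source tree, the call-site wiring is hypothetical, and the trailing bool mirrors the four-argument call visible in path_trace.cpp further down:

    /* A usage sketch against the Denoiser interface documented above.
     * Compiles only inside the Cycles source tree; where the buffers and
     * sample count come from is hypothetical here. */
    #include "integrator/denoiser.h"

    CCL_NAMESPACE_BEGIN

    void denoise_step_sketch(Denoiser &denoiser,
                             const BufferParams &buffer_params,
                             RenderBuffers *render_buffers,
                             const int num_samples)
    {
      /* The progress argument is optional and may be nullptr. */
      if (!denoiser.load_kernels(nullptr)) {
        return;
      }
      /* The render buffers may live on a "foreign" device; per the comment
       * above, the denoiser moves data to its own device and back. */
      denoiser.denoise_buffer(buffer_params, render_buffers, num_samples, false);
    }

    CCL_NAMESPACE_END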
diff --git a/intern/cycles/integrator/denoiser_device.cpp b/intern/cycles/integrator/denoiser_device.cpp
index 8088cfd7800..e8361c50f2f 100644
--- a/intern/cycles/integrator/denoiser_device.cpp
+++ b/intern/cycles/integrator/denoiser_device.cpp
@@ -77,7 +77,7 @@ bool DeviceDenoiser::denoise_buffer(const BufferParams &buffer_params,
local_render_buffers.reset(buffer_params);
/* NOTE: The local buffer is allocated for an exact size of the effective render size, while
- * the input render buffer is allcoated for the lowest resolution divider possible. So it is
+ * the input render buffer is allocated for the lowest resolution divider possible. So it is
* important to only copy actually needed part of the input buffer. */
memcpy(local_render_buffers.buffer.data(),
render_buffers->buffer.data(),
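
The note above is load-bearing: the input buffer is allocated larger than the data that is valid for the current resolution divider, so a full-size copy would read stale contents. A standalone illustration of copying only the effective region, with hypothetical sizes standing in for the real buffer parameters:

    /* Illustration only: copy just the effective region of a larger input
     * buffer, as the comment above prescribes. Sizes are hypothetical. */
    #include <cstddef>
    #include <cstring>
    #include <vector>

    int main()
    {
      const std::size_t effective_pixels = 640 * 360;   /* current divider */
      const std::size_t allocated_pixels = 1280 * 720;  /* lowest divider */
      const std::size_t pass_stride = 4;                /* floats per pixel */

      std::vector<float> input(allocated_pixels * pass_stride, 0.5f);
      std::vector<float> local(effective_pixels * pass_stride);

      /* Only the leading effective region of the input holds valid data. */
      std::memcpy(local.data(), input.data(), local.size() * sizeof(float));
      return 0;
    }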
diff --git a/intern/cycles/integrator/denoiser_oidn.cpp b/intern/cycles/integrator/denoiser_oidn.cpp
index 1b5a012ec87..7fc2b2b1892 100644
--- a/intern/cycles/integrator/denoiser_oidn.cpp
+++ b/intern/cycles/integrator/denoiser_oidn.cpp
@@ -93,7 +93,7 @@ class OIDNPass {
* Is required for albedo and normal passes. The color pass OIDN will perform auto-exposure, so
* scaling is not needed for the color pass unless adaptive sampling is used.
*
- * NOTE: Do not scale the outout pass, as that requires to be a pointer in the original buffer.
+ * NOTE: Do not scale the output pass, as that requires to be a pointer in the original buffer.
* All the scaling on the output needed for integration with adaptive sampling will happen
* outside of generic pass handling. */
bool need_scale = false;
@@ -479,7 +479,7 @@ class OIDNDenoiseContext {
}
if (num_samples_ == 1) {
- /* If the avoid scaling if there is only one sample, to save up time (so we dont divide
+ /* If the avoid scaling if there is only one sample, to save up time (so we don't divide
* buffer by 1). */
return false;
}
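
The two comments in this hunk encode a small decision rule: auxiliary passes may need dividing by the sample count, the output pass must never be scaled in place, and a single sample makes scaling pointless. A hedged restatement of that rule as a standalone predicate (the function and its parameters are illustrative, not the file's actual API):

    /* Sketch of the scaling decision described in the comments above.
     * The function itself is hypothetical. */
    bool pass_needs_scale(const bool is_output_pass,
                          const bool need_scale,
                          const int num_samples)
    {
      if (is_output_pass) {
        return false; /* Output must stay a pointer into the original buffer. */
      }
      if (num_samples == 1) {
        return false; /* Dividing the buffer by 1 only wastes time. */
      }
      return need_scale;
    }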
diff --git a/intern/cycles/integrator/path_trace.cpp b/intern/cycles/integrator/path_trace.cpp
index 6c02316ac2b..bc43747718d 100644
--- a/intern/cycles/integrator/path_trace.cpp
+++ b/intern/cycles/integrator/path_trace.cpp
@@ -177,7 +177,7 @@ void PathTrace::render(const RenderWork &render_work)
void PathTrace::render_pipeline(RenderWork render_work)
{
- /* NOTE: Only check for "instant" cancel here. Ther user-requested cancel via progress is
+ /* NOTE: Only check for "instant" cancel here. The user-requested cancel via progress is
* checked in Session and the work in the event of cancel is to be finished here. */
render_scheduler_.set_need_schedule_cryptomatte(device_scene_->data.film.cryptomatte_passes !=
@@ -680,7 +680,7 @@ void PathTrace::write_tile_buffer(const RenderWork &render_work)
*
* Tiles are written to a file during rendering, and written to the software at the end
* of rendering (wither when all tiles are finished, or when rendering was requested to be
- * cancelled).
+ * canceled).
*
* Important thing is: tile should be written to the software via callback only once. */
if (!has_multiple_tiles) {
@@ -913,7 +913,7 @@ void PathTrace::process_full_buffer_from_disk(string_view filename)
* ensure proper denoiser is used. */
set_denoiser_params(denoise_params);
- /* Number of samples doesn't matter too much, since the sampels count pass will be used. */
+ /* Number of samples doesn't matter too much, since the samples count pass will be used. */
denoiser_->denoise_buffer(full_frame_buffers.params, &full_frame_buffers, 0, false);
render_state_.has_denoised_result = true;
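
The tile-writing comment above implies a write-once guard: the callback must fire exactly once per tile, whether rendering completes or is canceled. A minimal sketch of that invariant, borrowing the `tile_result_was_written` naming that appears in render_scheduler.cpp below; the struct and function are illustrative:

    /* Illustrative write-once guard for the tile callback described above;
     * the flag and callback stand in for PathTrace's real members. */
    #include <functional>

    struct TileWriteState {
      bool tile_result_was_written = false;
    };

    void write_tile_once(TileWriteState &state,
                         const std::function<void(void)> &tile_buffer_write_cb)
    {
      if (state.tile_result_was_written) {
        return; /* The software must see each tile via the callback only once. */
      }
      if (tile_buffer_write_cb) {
        tile_buffer_write_cb();
      }
      state.tile_result_was_written = true;
    }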
diff --git a/intern/cycles/integrator/path_trace.h b/intern/cycles/integrator/path_trace.h
index 78ca68c1198..fc7713e6df9 100644
--- a/intern/cycles/integrator/path_trace.h
+++ b/intern/cycles/integrator/path_trace.h
@@ -83,7 +83,7 @@ class PathTrace {
void set_progress(Progress *progress);
/* NOTE: This is a blocking call. Meaning, it will not return until given number of samples are
- * rendered (or until rendering is requested to be cancelled). */
+ * rendered (or until rendering is requested to be canceled). */
void render(const RenderWork &render_work);
/* TODO(sergey): Decide whether denoiser is really a part of path tracer. Currently it is
@@ -110,7 +110,7 @@ class PathTrace {
/* Cancel rendering process as soon as possible, without waiting for full tile to be sampled.
* Used in cases like reset of render session.
*
- * This is a blockign call, which returns as soon as there is no running `render_samples()` call.
+ * This is a blocking call, which returns as soon as there is no running `render_samples()` call.
*/
void cancel();
@@ -120,11 +120,11 @@ class PathTrace {
* the data will be copied to the device of the given render buffers. */
void copy_to_render_buffers(RenderBuffers *render_buffers);
- /* Copy happens via CPU side buffer: data will be copied from the device of the given rendetr
+ /* Copy happens via CPU side buffer: data will be copied from the device of the given render
* buffers and will be copied to all devices of the path trace. */
void copy_from_render_buffers(RenderBuffers *render_buffers);
- /* Copy render buffers of the big tile from the device to hsot.
+ /* Copy render buffers of the big tile from the device to host.
* Return true if all copies are successful. */
bool copy_render_tile_from_device();
@@ -172,10 +172,10 @@ class PathTrace {
* Is called during path tracing to communicate work-in-progress state of the final buffer. */
function<void(void)> tile_buffer_update_cb;
- /* Callback which communicates final rendered buffer. Is called after pathtracing is done. */
+ /* Callback which communicates final rendered buffer. Is called after path-tracing is done. */
function<void(void)> tile_buffer_write_cb;
- /* Callback which initializes rendered buffer. Is called before pathtracing starts.
+ /* Callback which initializes rendered buffer. Is called before path-tracing starts.
*
* This is used for baking. */
function<bool(void)> tile_buffer_read_cb;
@@ -189,7 +189,7 @@ class PathTrace {
protected:
/* Actual implementation of the rendering pipeline.
- * Calls steps in order, checking for the cancel to be requested inbetween.
+ * Calls steps in order, checking for the cancel to be requested in between.
*
* Is separate from `render()` to simplify dealing with the early outputs and keeping
* `render_cancel_` in the consistent state. */
@@ -283,7 +283,7 @@ class PathTrace {
* affects both resolution and stride as visible by the integrator kernels. */
int resolution_divider = 0;
- /* Paramaters of the big tile with the current resolution divider applied. */
+ /* Parameters of the big tile with the current resolution divider applied. */
BufferParams effective_big_tile_params;
/* Denosier was run and there are denoised versions of the passes in the render buffers. */
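
These hunks document PathTrace's callback contract: a read callback before path tracing starts (used for baking), update callbacks while it runs, and a write callback once it is done. A self-contained sketch of that wiring, with a stand-in struct for the callback members (the real class carries far more state):

    /* Minimal stand-in mirroring the callback members documented above. */
    #include <functional>

    struct PathTraceCallbacks {
      std::function<bool(void)> tile_buffer_read_cb;
      std::function<void(void)> tile_buffer_update_cb;
      std::function<void(void)> tile_buffer_write_cb;
    };

    int main()
    {
      PathTraceCallbacks pt;
      pt.tile_buffer_read_cb = []() { return true; }; /* before path tracing */
      pt.tile_buffer_update_cb = []() { /* work-in-progress updates */ };
      pt.tile_buffer_write_cb = []() { /* final buffer, exactly once */ };

      if (pt.tile_buffer_read_cb && pt.tile_buffer_read_cb()) {
        /* render(render_work) would run here; it blocks until the requested
         * samples are rendered or rendering is canceled. */
        if (pt.tile_buffer_write_cb) {
          pt.tile_buffer_write_cb();
        }
      }
      return 0;
    }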
diff --git a/intern/cycles/integrator/path_trace_work.h b/intern/cycles/integrator/path_trace_work.h
index 97b97f3d888..8c9c8811199 100644
--- a/intern/cycles/integrator/path_trace_work.h
+++ b/intern/cycles/integrator/path_trace_work.h
@@ -39,8 +39,8 @@ class PathTraceWork {
/* Create path trace work which fits best the device.
*
- * The cancel request flag is used for a cheap check whether cancel is to berformed as soon as
- * possible. This could be, for rexample, request to cancel rendering on camera navigation in
+ * The cancel request flag is used for a cheap check whether cancel is to be performed as soon as
+ * possible. This could be, for example, request to cancel rendering on camera navigation in
* viewport. */
static unique_ptr<PathTraceWork> create(Device *device,
Film *film,
@@ -107,7 +107,7 @@ class PathTraceWork {
/* Special version of the `copy_from_render_buffers()` which only copies denosied passes from the
* given render buffers, leaving rest of the passes.
*
- * Same notes about device copying aplies to this call as well. */
+ * Same notes about device copying applies to this call as well. */
void copy_from_denoised_render_buffers(const RenderBuffers *render_buffers);
/* Copy render buffers to/from device using an appropriate device queue when needed so that
@@ -119,7 +119,7 @@ class PathTraceWork {
* things are executed in order with the `render_samples()`. */
virtual bool zero_render_buffers() = 0;
- /* Access pixels rendered by this work and copy them to the coresponding location in the
+ /* Access pixels rendered by this work and copy them to the corresponding location in the
* destination.
*
* NOTE: Does not perform copy of buffers from the device. Use `copy_render_tile_from_device()`
@@ -182,7 +182,7 @@ class PathTraceWork {
unique_ptr<RenderBuffers> buffers_;
/* Effective parameters of the full, big tile, and current work render buffer.
- * The latter might be different from buffers_->params when there is a resolution divider
+ * The latter might be different from `buffers_->params` when there is a resolution divider
* involved. */
BufferParams effective_full_params_;
BufferParams effective_big_tile_params_;
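
The factory comment above treats the cancel-request flag as a cheap poll rather than a synchronized query. A minimal sketch of that pattern using std::atomic; the diff does not show the flag's real type, so the atomic is an assumption:

    /* Cheap cancel polling as described in the create() comment above.
     * The atomic flag is an assumption, not the file's actual type. */
    #include <atomic>

    static std::atomic<bool> cancel_requested{false};

    bool is_cancel_requested_sketch()
    {
      /* Cheap check, suitable for frequent polling from a render loop. */
      return cancel_requested.load(std::memory_order_relaxed);
    }

    void request_cancel_on_navigation()
    {
      /* E.g. camera navigation in the viewport. */
      cancel_requested.store(true, std::memory_order_relaxed);
    }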
diff --git a/intern/cycles/integrator/path_trace_work_gpu.cpp b/intern/cycles/integrator/path_trace_work_gpu.cpp
index 10baf869aa6..135466becc6 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_gpu.cpp
@@ -498,7 +498,7 @@ void PathTraceWorkGPU::compact_states(const int num_active_paths)
bool PathTraceWorkGPU::enqueue_work_tiles(bool &finished)
{
/* If there are existing paths wait them to go to intersect closest kernel, which will align the
- * wavefront of the existing and newely added paths. */
+ * wavefront of the existing and newly added paths. */
/* TODO: Check whether counting new intersection kernels here will have positive affect on the
* performance. */
const DeviceKernel kernel = get_most_queued_kernel();
@@ -508,7 +508,7 @@ bool PathTraceWorkGPU::enqueue_work_tiles(bool &finished)
int num_active_paths = get_num_active_paths();
- /* Don't schedule more work if cancelling. */
+ /* Don't schedule more work if canceling. */
if (is_cancel_requested()) {
if (num_active_paths == 0) {
finished = true;
@@ -729,7 +729,7 @@ void PathTraceWorkGPU::copy_to_gpu_display_naive(GPUDisplay *gpu_display,
gpu_display_rgba_half_.data_height != final_height) {
gpu_display_rgba_half_.alloc(final_width, final_height);
/* TODO(sergey): There should be a way to make sure device-side memory is allocated without
- * transfering zeroes to the device. */
+ * transferring zeroes to the device. */
queue_->zero_to_device(gpu_display_rgba_half_);
}
diff --git a/intern/cycles/integrator/render_scheduler.cpp b/intern/cycles/integrator/render_scheduler.cpp
index 4eb1dd941f9..3e5b3417a6a 100644
--- a/intern/cycles/integrator/render_scheduler.cpp
+++ b/intern/cycles/integrator/render_scheduler.cpp
@@ -233,7 +233,7 @@ void RenderScheduler::render_work_reschedule_on_cancel(RenderWork &render_work)
const bool has_rendered_samples = get_num_rendered_samples() != 0;
- /* Reset all fields of the previous work, canelling things like adaptive sampling filtering and
+ /* Reset all fields of the previous work, canceling things like adaptive sampling filtering and
* denoising.
* However, need to preserve write requests, since those will not be possible to recover and
* writes are only to happen once. */
@@ -246,7 +246,7 @@ void RenderScheduler::render_work_reschedule_on_cancel(RenderWork &render_work)
render_work.full.write = full_write;
/* Do not write tile if it has zero samples it it, treat it similarly to all other tiles which
- * got cancelled. */
+ * got canceled. */
if (!state_.tile_result_was_written && has_rendered_samples) {
render_work.tile.write = true;
}
@@ -817,7 +817,7 @@ int RenderScheduler::get_num_samples_to_path_trace() const
int num_samples_to_render = min(num_samples_pot, max_num_samples_to_render);
- /* When enough statistics is available and doing an offlien rendering prefer to keep device
+ /* When enough statistics is available and doing an offline rendering prefer to keep device
* occupied. */
if (state_.occupancy_num_samples && (background_ || headless_)) {
/* Keep occupancy at about 0.5 (this is more of an empirical figure which seems to match scenes
@@ -874,7 +874,7 @@ int RenderScheduler::get_num_samples_during_navigation(int resolution_divider) c
/* Always render 4 samples, even if scene is configured for less.
* The idea here is to have enough information on the screen. Resolution divider of 2 allows us
- * to have 4 time extra samples, so verall worst case timing is the same as the final resolution
+ * to have 4 time extra samples, so overall worst case timing is the same as the final resolution
* at one sample. */
return 4;
}
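
The navigation comment above is a cost identity: a resolution divider of d cuts the pixel count by d squared, so at divider 2, rendering 4 samples touches the same number of pixel-samples as 1 sample at full resolution. A tiny check of that arithmetic:

    /* Worked check of the "4 samples at divider 2 costs the same as
     * 1 sample at full resolution" claim above. */
    #include <cstdio>

    int main()
    {
      const long width = 1920, height = 1080;
      const long divider = 2, nav_samples = 4;

      const long full_cost = width * height * 1;
      const long nav_cost = (width / divider) * (height / divider) * nav_samples;

      std::printf("full=%ld nav=%ld equal=%d\n", full_cost, nav_cost,
                  (int)(full_cost == nav_cost));
      return 0;
    }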
diff --git a/intern/cycles/integrator/render_scheduler.h b/intern/cycles/integrator/render_scheduler.h
index 9c2d107e46d..b7b598fb10c 100644
--- a/intern/cycles/integrator/render_scheduler.h
+++ b/intern/cycles/integrator/render_scheduler.h
@@ -83,7 +83,7 @@ class RenderWork {
} display;
/* Re-balance multi-device scheduling after rendering this work.
- * Note that the scheduler does not know anything abouce devices, so if there is only a single
+ * Note that the scheduler does not know anything about devices, so if there is only a single
* device used, then it is up for the PathTracer to ignore the balancing. */
bool rebalance = false;
@@ -203,7 +203,7 @@ class RenderScheduler {
* extra work needs to be scheduled to denoise and write final result. */
bool done() const;
- /* Update scheduling state for a newely scheduled work.
+ /* Update scheduling state for a newly scheduled work.
* Takes care of things like checking whether work was ever denoised, tile was written and states
* like that. */
void update_state_for_render_work(const RenderWork &render_work);
@@ -235,7 +235,7 @@ class RenderScheduler {
double guess_display_update_interval_in_seconds_for_num_samples_no_limit(
int num_rendered_samples) const;
- /* Calculate number of samples which can be rendered within current desred update interval which
+ /* Calculate number of samples which can be rendered within current desired update interval which
* is calculated by `guess_update_interval_in_seconds()`. */
int calculate_num_samples_per_update() const;
@@ -250,11 +250,11 @@ class RenderScheduler {
/* Whether adaptive sampling convergence check and filter is to happen. */
bool work_need_adaptive_filter() const;
- /* Calculate thretshold for adaptive sampling. */
+ /* Calculate threshold for adaptive sampling. */
float work_adaptive_threshold() const;
/* Check whether current work needs denoising.
- * Denoising is not needed if the denoiser is not configured, or when denosiing is happening too
+ * Denoising is not needed if the denoiser is not configured, or when denoising is happening too
* often.
*
* The delayed will be true when the denoiser is configured for use, but it was delayed for a
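
The comment above `calculate_num_samples_per_update()` suggests a simple rate computation: divide the desired update interval by the measured per-sample render time. A hedged sketch of that calculation; the parameter names are illustrative, and the real method takes no arguments, reading its state from the scheduler:

    /* Illustrative version of the rate computation described above:
     * samples per update = desired interval / time per sample. */
    int calculate_num_samples_per_update_sketch(
        const double update_interval_in_seconds,
        const double path_trace_time_per_sample)
    {
      if (path_trace_time_per_sample <= 0.0) {
        return 1; /* No statistics yet; schedule conservatively. */
      }
      const int num_samples = static_cast<int>(update_interval_in_seconds /
                                               path_trace_time_per_sample);
      return num_samples > 0 ? num_samples : 1;
    }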
diff --git a/intern/cycles/integrator/shader_eval.cpp b/intern/cycles/integrator/shader_eval.cpp
index 465b4a8d4da..d35ff4cd03f 100644
--- a/intern/cycles/integrator/shader_eval.cpp
+++ b/intern/cycles/integrator/shader_eval.cpp
@@ -71,7 +71,7 @@ bool ShaderEval::eval(const ShaderEvalType type,
success = (device->info.type == DEVICE_CPU) ? eval_cpu(device, type, input, output) :
eval_gpu(device, type, input, output);
- /* Copy data back from device if not cancelled. */
+ /* Copy data back from device if not canceled. */
if (success) {
output.copy_from_device(0, 1, output.size());
read_output(output);
diff --git a/intern/cycles/integrator/work_balancer.h b/intern/cycles/integrator/work_balancer.h
index 94e20ecf054..fc5e561845e 100644
--- a/intern/cycles/integrator/work_balancer.h
+++ b/intern/cycles/integrator/work_balancer.h
@@ -32,7 +32,7 @@ struct WorkBalanceInfo {
double weight = 1.0;
};
-/* Balance work for an initial render interation, before any statistics is known. */
+/* Balance work for an initial render integration, before any statistics is known. */
void work_balance_do_initial(vector<WorkBalanceInfo> &work_balance_infos);
/* Rebalance work after statistics has been accumulated.
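
Before any statistics exist there is nothing to favor, so the natural initial balance is an equal weight per device. A standalone sketch of that idea; only the `weight` field is visible in the hunk above, so the rest of the struct is assumed:

    /* Sketch: with no statistics yet, give every device an equal share. */
    #include <vector>

    struct WorkBalanceInfo {
      double time_spent = 0.0; /* Assumed field; the hunk only shows weight. */
      double weight = 1.0;
    };

    void work_balance_do_initial_sketch(std::vector<WorkBalanceInfo> &infos)
    {
      if (infos.empty()) {
        return;
      }
      const double weight = 1.0 / static_cast<double>(infos.size());
      for (WorkBalanceInfo &info : infos) {
        info.weight = weight;
      }
    }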
diff --git a/intern/cycles/integrator/work_tile_scheduler.cpp b/intern/cycles/integrator/work_tile_scheduler.cpp
index 3fc99d5b74d..e6ada2f46ee 100644
--- a/intern/cycles/integrator/work_tile_scheduler.cpp
+++ b/intern/cycles/integrator/work_tile_scheduler.cpp
@@ -81,7 +81,7 @@ void WorkTileScheduler::reset_scheduler_state()
bool WorkTileScheduler::get_work(KernelWorkTile *work_tile_, const int max_work_size)
{
/* Note that the `max_work_size` can be higher than the `max_num_path_states_`: this is because
- * the path trace work can decice to use smaller tile sizes and greedily schedule multiple tiles,
+ * the path trace work can decide to use smaller tile sizes and greedily schedule multiple tiles,
* improving overall device occupancy.
* So the `max_num_path_states_` is a "scheduling unit", and the `max_work_size` is a "scheduling
* limit". */
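
The comment above separates a scheduling unit (`max_num_path_states_`) from a scheduling limit (`max_work_size`). A toy sketch of the greedy policy it describes, taking unit-sized batches while they still fit under the limit (names and logic are illustrative, not the file's implementation):

    /* Illustrative greedy scheduling: accumulate unit-sized batches of path
     * states while the total stays within the scheduling limit. */
    int schedule_greedy_sketch(const int max_num_path_states,
                               const int max_work_size)
    {
      int scheduled = 0;
      while (scheduled + max_num_path_states <= max_work_size) {
        scheduled += max_num_path_states; /* One more tile's worth of paths. */
      }
      return scheduled;
    }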
diff --git a/intern/cycles/integrator/work_tile_scheduler.h b/intern/cycles/integrator/work_tile_scheduler.h
index e4c8f701259..85f11b601c7 100644
--- a/intern/cycles/integrator/work_tile_scheduler.h
+++ b/intern/cycles/integrator/work_tile_scheduler.h
@@ -64,7 +64,7 @@ class WorkTileScheduler {
/* dimensions of the currently rendering image in pixels. */
int2 image_size_px_ = make_int2(0, 0);
- /* Offset and stride of the buffer within which scheduing is happenning.
+ /* Offset and stride of the buffer within which scheduling is happening.
* Will be passed over to the KernelWorkTile. */
int offset_, stride_;
@@ -87,7 +87,7 @@ class WorkTileScheduler {
* in the `get_work()`? */
int total_tiles_num_ = 0;
- /* In the case when the number of sam[les in the `tile_size_` is lower than samples_num_ denotes
+ /* In the case when the number of samples in the `tile_size_` is lower than samples_num_ denotes
* how many tiles are to be "stacked" to cover the entire requested range of samples. */
int num_tiles_per_sample_range_ = 0;
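
The final comment describes a ceiling division: when one tile covers fewer samples than requested, the scheduler stacks ceil(samples_num / tile samples) tiles over the same pixels. A tiny worked check with hypothetical numbers:

    /* Worked check of the tile-stacking arithmetic described above. */
    #include <cstdio>

    int main()
    {
      const int samples_num = 10;      /* Requested sample range. */
      const int tile_num_samples = 4;  /* Samples a single tile covers. */

      /* Ceiling division: 10 samples at 4 per tile -> 3 stacked tiles. */
      const int num_tiles_per_sample_range =
          (samples_num + tile_num_samples - 1) / tile_num_samples;

      std::printf("tiles per sample range: %d\n", num_tiles_per_sample_range);
      return 0;
    }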