diff options
Diffstat (limited to 'intern/cycles/integrator/path_trace_work.h')
-rw-r--r-- | intern/cycles/integrator/path_trace_work.h | 194 |
1 file changed, 194 insertions, 0 deletions
diff --git a/intern/cycles/integrator/path_trace_work.h b/intern/cycles/integrator/path_trace_work.h
new file mode 100644
index 00000000000..97b97f3d888
--- /dev/null
+++ b/intern/cycles/integrator/path_trace_work.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2011-2021 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "integrator/pass_accessor.h"
+#include "render/buffers.h"
+#include "render/pass.h"
+#include "util/util_types.h"
+#include "util/util_unique_ptr.h"
+
+CCL_NAMESPACE_BEGIN
+
+class BufferParams;
+class Device;
+class DeviceScene;
+class Film;
+class GPUDisplay;
+class RenderBuffers;
+
+class PathTraceWork {
+ public:
+  struct RenderStatistics {
+    float occupancy = 1.0f;
+  };
+
+  /* Create path trace work which fits best the device.
+   *
+   * The cancel request flag is used for a cheap check whether cancel is to be performed as soon as
+   * possible. This could be, for example, request to cancel rendering on camera navigation in
+   * viewport. */
+  static unique_ptr<PathTraceWork> create(Device *device,
+                                          Film *film,
+                                          DeviceScene *device_scene,
+                                          bool *cancel_requested_flag);
+
+  virtual ~PathTraceWork();
+
+  /* Access the render buffers.
+   *
+   * Is only supposed to be used by the PathTrace to update buffer allocation and slicing to
+   * correspond to the big tile size and relative device performance. */
+  RenderBuffers *get_render_buffers();
+
+  /* Set effective parameters of the big tile and the work itself. */
+  void set_effective_buffer_params(const BufferParams &effective_full_params,
+                                   const BufferParams &effective_big_tile_params,
+                                   const BufferParams &effective_buffer_params);
+
+  /* Check whether the big tile is being worked on by multiple path trace works. */
+  bool has_multiple_works() const;
+
+  /* Allocate working memory for execution. Must be called before init_execution(). */
+  virtual void alloc_work_memory(){};
+
+  /* Initialize execution of kernels.
+   * Will ensure that all device queues are initialized for execution.
+   *
+   * This method is to be called after any change in the scene. It is not needed to call it prior
+   * to an every call of the `render_samples()`. */
+  virtual void init_execution() = 0;
+
+  /* Render given number of samples as a synchronous blocking call.
+   * The samples are added to the render buffer associated with this work. */
+  virtual void render_samples(RenderStatistics &statistics, int start_sample, int samples_num) = 0;
+
+  /* Copy render result from this work to the corresponding place of the GPU display.
+   *
+   * The `pass_mode` indicates whether to access denoised or noisy version of the display pass. The
+   * noisy pass mode will be passed here when it is known that the buffer does not have denoised
+   * passes yet (because denoiser did not run). If the denoised pass is requested and denoiser is
+   * not used then this function will fall-back to the noisy pass instead. */
+  virtual void copy_to_gpu_display(GPUDisplay *gpu_display,
+                                   PassMode pass_mode,
+                                   int num_samples) = 0;
+
+  virtual void destroy_gpu_resources(GPUDisplay *gpu_display) = 0;
+
+  /* Copy data from/to given render buffers.
+   * Will copy pixels from a corresponding place (from multi-device point of view) of the render
+   * buffers, and copy work's render buffers to the corresponding place of the destination. */
+
+  /* Notes:
+   * - Copies work's render buffer from the device.
+   * - Copies CPU-side buffer of the given buffer
+   * - Does not copy the buffer to its device. */
+  void copy_to_render_buffers(RenderBuffers *render_buffers);
+
+  /* Notes:
+   * - Does not copy given render buffers from the device.
+   * - Copies work's render buffer to its device. */
+  void copy_from_render_buffers(const RenderBuffers *render_buffers);
+
+  /* Special version of the `copy_from_render_buffers()` which only copies denoised passes from the
+   * given render buffers, leaving rest of the passes.
+   *
+   * Same notes about device copying applies to this call as well. */
+  void copy_from_denoised_render_buffers(const RenderBuffers *render_buffers);
+
+  /* Copy render buffers to/from device using an appropriate device queue when needed so that
+   * things are executed in order with the `render_samples()`. */
+  virtual bool copy_render_buffers_from_device() = 0;
+  virtual bool copy_render_buffers_to_device() = 0;
+
+  /* Zero render buffers to/from device using an appropriate device queue when needed so that
+   * things are executed in order with the `render_samples()`. */
+  virtual bool zero_render_buffers() = 0;
+
+  /* Access pixels rendered by this work and copy them to the corresponding location in the
+   * destination.
+   *
+   * NOTE: Does not perform copy of buffers from the device. Use `copy_render_tile_from_device()`
+   * to update host-side data. */
+  bool get_render_tile_pixels(const PassAccessor &pass_accessor,
+                              const PassAccessor::Destination &destination);
+
+  /* Set pass data for baking. */
+  bool set_render_tile_pixels(PassAccessor &pass_accessor, const PassAccessor::Source &source);
+
+  /* Perform convergence test on the render buffer, and filter the convergence mask.
+   * Returns number of active pixels (the ones which did not converge yet). */
+  virtual int adaptive_sampling_converge_filter_count_active(float threshold, bool reset) = 0;
+
+  /* Run cryptomatte pass post-processing kernels. */
+  virtual void cryptomatte_postproces() = 0;
+
+  /* Cheap-ish request to see whether rendering is requested and is to be stopped as soon as
+   * possible, without waiting for any samples to be finished. */
+  inline bool is_cancel_requested() const
+  {
+    /* NOTE: Rely on the fact that on x86 CPU reading scalar can happen without atomic even in
+     * threaded environment. */
+    return *cancel_requested_flag_;
+  }
+
+  /* Access to the device which is used to path trace this work on. */
+  Device *get_device() const
+  {
+    return device_;
+  }
+
+ protected:
+  PathTraceWork(Device *device,
+                Film *film,
+                DeviceScene *device_scene,
+                bool *cancel_requested_flag);
+
+  PassAccessor::PassAccessInfo get_display_pass_access_info(PassMode pass_mode) const;
+
+  /* Get destination which offset and stride are configured so that writing to it will write to a
+   * proper location of GPU display texture, taking current tile and device slice into account. */
+  PassAccessor::Destination get_gpu_display_destination_template(
+      const GPUDisplay *gpu_display) const;
+
+  /* Device which will be used for path tracing.
+   * Note that it is an actual render device (and never is a multi-device). */
+  Device *device_;
+
+  /* Film is used to access display pass configuration for GPU display update.
+   * Note that only fields which are not a part of kernel data can be accessed via the Film. */
+  Film *film_;
+
+  /* Device side scene storage, that may be used for integrator logic. */
+  DeviceScene *device_scene_;
+
+  /* Render buffers where sampling is being accumulated into, allocated for a fraction of the big
+   * tile which is being rendered by this work.
+   * It also defines possible subset of a big tile in the case of multi-device rendering. */
+  unique_ptr<RenderBuffers> buffers_;
+
+  /* Effective parameters of the full, big tile, and current work render buffer.
+   * The latter might be different from buffers_->params when there is a resolution divider
+   * involved. */
+  BufferParams effective_full_params_;
+  BufferParams effective_big_tile_params_;
+  BufferParams effective_buffer_params_;
+
+  bool *cancel_requested_flag_ = nullptr;
+};
+
+CCL_NAMESPACE_END