git.blender.org/blender.git

author     Brecht Van Lommel <brecht@blender.org>  2021-09-20 18:59:20 +0300
committer  Brecht Van Lommel <brecht@blender.org>  2021-09-21 15:55:54 +0300
commit     08031197250aeecbaca3803254e6f25b8c7b7b37 (patch)
tree       6fe7ab045f0dc0a423d6557c4073f34309ef4740 /intern/cycles/kernel/kernel_path_state.h
parent     fa6b1007bad065440950cd67deb16a04f368856f (diff)
Cycles: merge of cycles-x branch, a major update to the renderer
This includes much improved GPU rendering performance, viewport interactivity, new shadow catcher, revamped sampling settings, subsurface scattering anisotropy, new GPU volume sampling, improved PMJ sampling pattern, and more.

Some features have also been removed or changed, breaking backwards compatibility, including the removal of the OpenCL backend, for which alternatives are under development.

Release notes and code docs:
https://wiki.blender.org/wiki/Reference/Release_Notes/3.0/Cycles
https://wiki.blender.org/wiki/Source/Render/Cycles

Credits:
* Sergey Sharybin
* Brecht Van Lommel
* Patrick Mours (OptiX backend)
* Christophe Hery (subsurface scattering anisotropy)
* William Leeson (PMJ sampling pattern)
* Alaska (various fixes and tweaks)
* Thomas Dinges (various fixes)

For the full commit history, see the cycles-x branch. This squashes together all the changes, since intermediate changes would often fail building or tests.

Ref T87839, T87837, T87836
Fixes T90734, T89353, T80267, T80267, T77185, T69800
Diffstat (limited to 'intern/cycles/kernel/kernel_path_state.h')
-rw-r--r--  intern/cycles/kernel/kernel_path_state.h | 383
1 file changed, 237 insertions(+), 146 deletions(-)
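For readers skimming the diff below: the central change is that per-path data no longer lives in a PathState struct passed by pointer; it is accessed through INTEGRATOR_STATE / INTEGRATOR_STATE_WRITE macros, which let the same kernel code map onto different memory layouts per device. The following is a deliberately simplified, hypothetical sketch of that indirection, not the actual Cycles macro definitions (those live elsewhere in the kernel and differ between CPU and GPU builds):

#include <stdint.h>

/* Hypothetical illustration only; not the real Cycles macros. */
typedef struct ExamplePathState {
  uint32_t flag;
  int32_t bounce;
  /* ... further per-path fields ... */
} ExamplePathState;

#define INTEGRATOR_STATE_ARGS ExamplePathState *state
#define INTEGRATOR_STATE_PASS state
#define INTEGRATOR_STATE(nested_struct, member) (state->member)
#define INTEGRATOR_STATE_WRITE(nested_struct, member) (state->member)

static inline void example_next_bounce(INTEGRATOR_STATE_ARGS)
{
  /* Reads and writes go through the macros, so a GPU build could expand
   * them to a struct-of-arrays lookup instead of a plain struct access. */
  INTEGRATOR_STATE_WRITE(path, bounce) = INTEGRATOR_STATE(path, bounce) + 1;
}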
diff --git a/intern/cycles/kernel/kernel_path_state.h b/intern/cycles/kernel/kernel_path_state.h
index bf601580cd0..ebb2c0df4f1 100644
--- a/intern/cycles/kernel/kernel_path_state.h
+++ b/intern/cycles/kernel/kernel_path_state.h
@@ -14,99 +14,116 @@
* limitations under the License.
*/
-CCL_NAMESPACE_BEGIN
+#pragma once
-ccl_device_inline void path_state_init(KernelGlobals *kg,
- ShaderData *stack_sd,
- ccl_addr_space PathState *state,
- uint rng_hash,
- int sample,
- ccl_addr_space Ray *ray)
-{
- state->flag = PATH_RAY_CAMERA | PATH_RAY_MIS_SKIP | PATH_RAY_TRANSPARENT_BACKGROUND;
+#include "kernel_random.h"
- state->rng_hash = rng_hash;
- state->rng_offset = PRNG_BASE_NUM;
- state->sample = sample;
- state->num_samples = kernel_data.integrator.aa_samples;
- state->branch_factor = 1.0f;
+CCL_NAMESPACE_BEGIN
- state->bounce = 0;
- state->diffuse_bounce = 0;
- state->glossy_bounce = 0;
- state->transmission_bounce = 0;
- state->transparent_bounce = 0;
+/* Initialize queues, so that this path is considered terminated.
+ * Used for early outputs in the camera ray initialization, as well as initialization of split
+ * states for shadow catcher. */
+ccl_device_inline void path_state_init_queues(INTEGRATOR_STATE_ARGS)
+{
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = 0;
+ INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = 0;
+}
-#ifdef __DENOISING_FEATURES__
- if (kernel_data.film.pass_denoising_data) {
- state->flag |= PATH_RAY_STORE_SHADOW_INFO;
- state->denoising_feature_weight = 1.0f;
- state->denoising_feature_throughput = one_float3();
- }
- else {
- state->denoising_feature_weight = 0.0f;
- state->denoising_feature_throughput = zero_float3();
- }
-#endif /* __DENOISING_FEATURES__ */
+/* Minimalistic initialization of the path state, which is needed for early outputs in the
+ * integrator initialization to work. */
+ccl_device_inline void path_state_init(INTEGRATOR_STATE_ARGS,
+ const ccl_global KernelWorkTile *ccl_restrict tile,
+ const int x,
+ const int y)
+{
+ const uint render_pixel_index = (uint)tile->offset + x + y * tile->stride;
- state->min_ray_pdf = FLT_MAX;
- state->ray_pdf = 0.0f;
-#ifdef __LAMP_MIS__
- state->ray_t = 0.0f;
-#endif
+ INTEGRATOR_STATE_WRITE(path, render_pixel_index) = render_pixel_index;
-#ifdef __VOLUME__
- state->volume_bounce = 0;
- state->volume_bounds_bounce = 0;
+ path_state_init_queues(INTEGRATOR_STATE_PASS);
+}
- if (kernel_data.integrator.use_volumes) {
- /* Initialize volume stack with volume we are inside of. */
- kernel_volume_stack_init(kg, stack_sd, state, ray, state->volume_stack);
+/* Initialize the rest of the path state needed to continue the path integration. */
+ccl_device_inline void path_state_init_integrator(INTEGRATOR_STATE_ARGS,
+ const int sample,
+ const uint rng_hash)
+{
+ INTEGRATOR_STATE_WRITE(path, sample) = sample;
+ INTEGRATOR_STATE_WRITE(path, bounce) = 0;
+ INTEGRATOR_STATE_WRITE(path, diffuse_bounce) = 0;
+ INTEGRATOR_STATE_WRITE(path, glossy_bounce) = 0;
+ INTEGRATOR_STATE_WRITE(path, transmission_bounce) = 0;
+ INTEGRATOR_STATE_WRITE(path, transparent_bounce) = 0;
+ INTEGRATOR_STATE_WRITE(path, volume_bounce) = 0;
+ INTEGRATOR_STATE_WRITE(path, volume_bounds_bounce) = 0;
+ INTEGRATOR_STATE_WRITE(path, rng_hash) = rng_hash;
+ INTEGRATOR_STATE_WRITE(path, rng_offset) = PRNG_BASE_NUM;
+ INTEGRATOR_STATE_WRITE(path, flag) = PATH_RAY_CAMERA | PATH_RAY_MIS_SKIP |
+ PATH_RAY_TRANSPARENT_BACKGROUND;
+ INTEGRATOR_STATE_WRITE(path, mis_ray_pdf) = 0.0f;
+ INTEGRATOR_STATE_WRITE(path, mis_ray_t) = 0.0f;
+ INTEGRATOR_STATE_WRITE(path, min_ray_pdf) = FLT_MAX;
+ INTEGRATOR_STATE_WRITE(path, throughput) = make_float3(1.0f, 1.0f, 1.0f);
+
+ if (kernel_data.kernel_features & KERNEL_FEATURE_VOLUME) {
+ INTEGRATOR_STATE_ARRAY_WRITE(volume_stack, 0, object) = OBJECT_NONE;
+ INTEGRATOR_STATE_ARRAY_WRITE(volume_stack, 0, shader) = kernel_data.background.volume_shader;
+ INTEGRATOR_STATE_ARRAY_WRITE(volume_stack, 1, object) = OBJECT_NONE;
+ INTEGRATOR_STATE_ARRAY_WRITE(volume_stack, 1, shader) = SHADER_NONE;
}
- else {
- state->volume_stack[0].shader = SHADER_NONE;
+
+#ifdef __DENOISING_FEATURES__
+ if (kernel_data.kernel_features & KERNEL_FEATURE_DENOISING) {
+ INTEGRATOR_STATE_WRITE(path, flag) |= PATH_RAY_DENOISING_FEATURES;
+ INTEGRATOR_STATE_WRITE(path, denoising_feature_throughput) = one_float3();
}
#endif
}
-ccl_device_inline void path_state_next(KernelGlobals *kg,
- ccl_addr_space PathState *state,
- int label)
+ccl_device_inline void path_state_next(INTEGRATOR_STATE_ARGS, int label)
{
+ uint32_t flag = INTEGRATOR_STATE(path, flag);
+
/* ray through transparent keeps same flags from previous ray and is
* not counted as a regular bounce, transparent has separate max */
if (label & LABEL_TRANSPARENT) {
- state->flag |= PATH_RAY_TRANSPARENT;
- state->transparent_bounce++;
- if (state->transparent_bounce >= kernel_data.integrator.transparent_max_bounce) {
- state->flag |= PATH_RAY_TERMINATE_IMMEDIATE;
+ uint32_t transparent_bounce = INTEGRATOR_STATE(path, transparent_bounce) + 1;
+
+ flag |= PATH_RAY_TRANSPARENT;
+ if (transparent_bounce >= kernel_data.integrator.transparent_max_bounce) {
+ flag |= PATH_RAY_TERMINATE_ON_NEXT_SURFACE;
}
if (!kernel_data.integrator.transparent_shadows)
- state->flag |= PATH_RAY_MIS_SKIP;
-
- /* random number generator next bounce */
- state->rng_offset += PRNG_BOUNCE_NUM;
+ flag |= PATH_RAY_MIS_SKIP;
+ INTEGRATOR_STATE_WRITE(path, flag) = flag;
+ INTEGRATOR_STATE_WRITE(path, transparent_bounce) = transparent_bounce;
+ /* Random number generator next bounce. */
+ INTEGRATOR_STATE_WRITE(path, rng_offset) += PRNG_BOUNCE_NUM;
return;
}
- state->bounce++;
- if (state->bounce >= kernel_data.integrator.max_bounce) {
- state->flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
+ uint32_t bounce = INTEGRATOR_STATE(path, bounce) + 1;
+ if (bounce >= kernel_data.integrator.max_bounce) {
+ flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
}
- state->flag &= ~(PATH_RAY_ALL_VISIBILITY | PATH_RAY_MIS_SKIP);
+ flag &= ~(PATH_RAY_ALL_VISIBILITY | PATH_RAY_MIS_SKIP);
#ifdef __VOLUME__
if (label & LABEL_VOLUME_SCATTER) {
/* volume scatter */
- state->flag |= PATH_RAY_VOLUME_SCATTER;
- state->flag &= ~PATH_RAY_TRANSPARENT_BACKGROUND;
+ flag |= PATH_RAY_VOLUME_SCATTER;
+ flag &= ~PATH_RAY_TRANSPARENT_BACKGROUND;
+ if (bounce == 1) {
+ flag |= PATH_RAY_VOLUME_PASS;
+ }
- state->volume_bounce++;
- if (state->volume_bounce >= kernel_data.integrator.max_volume_bounce) {
- state->flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
+ const int volume_bounce = INTEGRATOR_STATE(path, volume_bounce) + 1;
+ INTEGRATOR_STATE_WRITE(path, volume_bounce) = volume_bounce;
+ if (volume_bounce >= kernel_data.integrator.max_volume_bounce) {
+ flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
}
}
else
@@ -114,163 +131,237 @@ ccl_device_inline void path_state_next(KernelGlobals *kg,
{
/* surface reflection/transmission */
if (label & LABEL_REFLECT) {
- state->flag |= PATH_RAY_REFLECT;
- state->flag &= ~PATH_RAY_TRANSPARENT_BACKGROUND;
+ flag |= PATH_RAY_REFLECT;
+ flag &= ~PATH_RAY_TRANSPARENT_BACKGROUND;
if (label & LABEL_DIFFUSE) {
- state->diffuse_bounce++;
- if (state->diffuse_bounce >= kernel_data.integrator.max_diffuse_bounce) {
- state->flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
+ const int diffuse_bounce = INTEGRATOR_STATE(path, diffuse_bounce) + 1;
+ INTEGRATOR_STATE_WRITE(path, diffuse_bounce) = diffuse_bounce;
+ if (diffuse_bounce >= kernel_data.integrator.max_diffuse_bounce) {
+ flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
}
}
else {
- state->glossy_bounce++;
- if (state->glossy_bounce >= kernel_data.integrator.max_glossy_bounce) {
- state->flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
+ const int glossy_bounce = INTEGRATOR_STATE(path, glossy_bounce) + 1;
+ INTEGRATOR_STATE_WRITE(path, glossy_bounce) = glossy_bounce;
+ if (glossy_bounce >= kernel_data.integrator.max_glossy_bounce) {
+ flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
}
}
}
else {
kernel_assert(label & LABEL_TRANSMIT);
- state->flag |= PATH_RAY_TRANSMIT;
+ flag |= PATH_RAY_TRANSMIT;
if (!(label & LABEL_TRANSMIT_TRANSPARENT)) {
- state->flag &= ~PATH_RAY_TRANSPARENT_BACKGROUND;
+ flag &= ~PATH_RAY_TRANSPARENT_BACKGROUND;
}
- state->transmission_bounce++;
- if (state->transmission_bounce >= kernel_data.integrator.max_transmission_bounce) {
- state->flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
+ const int transmission_bounce = INTEGRATOR_STATE(path, transmission_bounce) + 1;
+ INTEGRATOR_STATE_WRITE(path, transmission_bounce) = transmission_bounce;
+ if (transmission_bounce >= kernel_data.integrator.max_transmission_bounce) {
+ flag |= PATH_RAY_TERMINATE_AFTER_TRANSPARENT;
}
}
/* diffuse/glossy/singular */
if (label & LABEL_DIFFUSE) {
- state->flag |= PATH_RAY_DIFFUSE | PATH_RAY_DIFFUSE_ANCESTOR;
+ flag |= PATH_RAY_DIFFUSE | PATH_RAY_DIFFUSE_ANCESTOR;
}
else if (label & LABEL_GLOSSY) {
- state->flag |= PATH_RAY_GLOSSY;
+ flag |= PATH_RAY_GLOSSY;
}
else {
kernel_assert(label & LABEL_SINGULAR);
- state->flag |= PATH_RAY_GLOSSY | PATH_RAY_SINGULAR | PATH_RAY_MIS_SKIP;
+ flag |= PATH_RAY_GLOSSY | PATH_RAY_SINGULAR | PATH_RAY_MIS_SKIP;
+ }
+
+ /* Render pass categories. */
+ if (bounce == 1) {
+ flag |= (label & LABEL_TRANSMIT) ? PATH_RAY_TRANSMISSION_PASS : PATH_RAY_REFLECT_PASS;
}
}
- /* random number generator next bounce */
- state->rng_offset += PRNG_BOUNCE_NUM;
+ INTEGRATOR_STATE_WRITE(path, flag) = flag;
+ INTEGRATOR_STATE_WRITE(path, bounce) = bounce;
-#ifdef __DENOISING_FEATURES__
- if ((state->denoising_feature_weight == 0.0f) && !(state->flag & PATH_RAY_SHADOW_CATCHER)) {
- state->flag &= ~PATH_RAY_STORE_SHADOW_INFO;
- }
-#endif
+ /* Random number generator next bounce. */
+ INTEGRATOR_STATE_WRITE(path, rng_offset) += PRNG_BOUNCE_NUM;
}
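A pattern worth noting in path_state_next() above: the flag word is read once into a local variable at the top, mutated locally, and written back once at the end, rather than updating the stored state on every branch as the old code did. A minimal self-contained sketch of that read-modify-write shape, with hypothetical load_flag()/store_flag() helpers standing in for the state macros:

#include <stdint.h>

#define EXAMPLE_BIT_A (1u << 0)
#define EXAMPLE_BIT_B (1u << 1)

static uint32_t g_example_flag;  /* stands in for the stored path flag */
static uint32_t load_flag(void) { return g_example_flag; }
static void store_flag(uint32_t f) { g_example_flag = f; }

static void example_update_flags(int condition)
{
  uint32_t flag = load_flag();  /* single read */
  flag |= EXAMPLE_BIT_A;        /* intermediate updates stay local */
  if (condition) {
    flag &= ~EXAMPLE_BIT_B;
  }
  store_flag(flag);             /* single write */
}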
#ifdef __VOLUME__
-ccl_device_inline bool path_state_volume_next(KernelGlobals *kg, ccl_addr_space PathState *state)
+ccl_device_inline bool path_state_volume_next(INTEGRATOR_STATE_ARGS)
{
/* For volume bounding meshes we pass through without counting transparent
* bounces, only sanity check in case self intersection gets us stuck. */
- state->volume_bounds_bounce++;
- if (state->volume_bounds_bounce > VOLUME_BOUNDS_MAX) {
+ uint32_t volume_bounds_bounce = INTEGRATOR_STATE(path, volume_bounds_bounce) + 1;
+ INTEGRATOR_STATE_WRITE(path, volume_bounds_bounce) = volume_bounds_bounce;
+ if (volume_bounds_bounce > VOLUME_BOUNDS_MAX) {
return false;
}
/* Random number generator next bounce. */
- if (state->volume_bounds_bounce > 1) {
- state->rng_offset += PRNG_BOUNCE_NUM;
+ if (volume_bounds_bounce > 1) {
+ INTEGRATOR_STATE_WRITE(path, rng_offset) += PRNG_BOUNCE_NUM;
}
return true;
}
#endif
-ccl_device_inline uint path_state_ray_visibility(KernelGlobals *kg,
- ccl_addr_space PathState *state)
+ccl_device_inline uint path_state_ray_visibility(INTEGRATOR_STATE_CONST_ARGS)
{
- uint flag = state->flag & PATH_RAY_ALL_VISIBILITY;
+ const uint32_t path_flag = INTEGRATOR_STATE(path, flag);
- /* for visibility, diffuse/glossy are for reflection only */
- if (flag & PATH_RAY_TRANSMIT)
- flag &= ~(PATH_RAY_DIFFUSE | PATH_RAY_GLOSSY);
- /* todo: this is not supported as its own ray visibility yet */
- if (state->flag & PATH_RAY_VOLUME_SCATTER)
- flag |= PATH_RAY_DIFFUSE;
+ uint32_t visibility = path_flag & PATH_RAY_ALL_VISIBILITY;
- return flag;
+ /* For visibility, diffuse/glossy are for reflection only. */
+ if (visibility & PATH_RAY_TRANSMIT) {
+ visibility &= ~(PATH_RAY_DIFFUSE | PATH_RAY_GLOSSY);
+ }
+
+ /* todo: this is not supported as its own ray visibility yet. */
+ if (path_flag & PATH_RAY_VOLUME_SCATTER) {
+ visibility |= PATH_RAY_DIFFUSE;
+ }
+
+ visibility = SHADOW_CATCHER_PATH_VISIBILITY(path_flag, visibility);
+
+ return visibility;
}
-ccl_device_inline float path_state_continuation_probability(KernelGlobals *kg,
- ccl_addr_space PathState *state,
- const float3 throughput)
+ccl_device_inline float path_state_continuation_probability(INTEGRATOR_STATE_CONST_ARGS,
+ const uint32_t path_flag)
{
- if (state->flag & PATH_RAY_TERMINATE_IMMEDIATE) {
- /* Ray is to be terminated immediately. */
- return 0.0f;
- }
- else if (state->flag & PATH_RAY_TRANSPARENT) {
+ if (path_flag & PATH_RAY_TRANSPARENT) {
+ const uint32_t transparent_bounce = INTEGRATOR_STATE(path, transparent_bounce);
/* Do at least specified number of bounces without RR. */
- if (state->transparent_bounce <= kernel_data.integrator.transparent_min_bounce) {
- return 1.0f;
- }
-#ifdef __SHADOW_TRICKS__
- /* Exception for shadow catcher not working correctly with RR. */
- else if ((state->flag & PATH_RAY_SHADOW_CATCHER) && (state->transparent_bounce <= 8)) {
+ if (transparent_bounce <= kernel_data.integrator.transparent_min_bounce) {
return 1.0f;
}
-#endif
}
else {
+ const uint32_t bounce = INTEGRATOR_STATE(path, bounce);
/* Do at least specified number of bounces without RR. */
- if (state->bounce <= kernel_data.integrator.min_bounce) {
+ if (bounce <= kernel_data.integrator.min_bounce) {
return 1.0f;
}
-#ifdef __SHADOW_TRICKS__
- /* Exception for shadow catcher not working correctly with RR. */
- else if ((state->flag & PATH_RAY_SHADOW_CATCHER) && (state->bounce <= 3)) {
- return 1.0f;
- }
-#endif
}
/* Probabilistic termination: use sqrt() to roughly match typical view
* transform and do path termination a bit later on average. */
- return min(sqrtf(max3(fabs(throughput)) * state->branch_factor), 1.0f);
+ return min(sqrtf(max3(fabs(INTEGRATOR_STATE(path, throughput)))), 1.0f);
}
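The sqrt() in the return value above is the whole of the "terminate a bit later" trick: for a path whose throughput has dropped to 0.25, the survival probability becomes sqrt(0.25) = 0.5 rather than 0.25. A standalone sketch of the same rule on a plain float3-like struct, assuming nothing from the kernel headers:

#include <math.h>

typedef struct { float x, y, z; } example_float3;  /* stand-in for Cycles float3 */

/* Russian-roulette continuation probability, mirroring the kernel code above:
 * min(sqrt(max component of |throughput|), 1). */
static float example_continuation_probability(example_float3 t)
{
  const float m = fmaxf(fmaxf(fabsf(t.x), fabsf(t.y)), fabsf(t.z));
  return fminf(sqrtf(m), 1.0f);
}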
-/* TODO(DingTo): Find more meaningful name for this */
-ccl_device_inline void path_state_modify_bounce(ccl_addr_space PathState *state, bool increase)
+ccl_device_inline bool path_state_ao_bounce(INTEGRATOR_STATE_CONST_ARGS)
{
- /* Modify bounce temporarily for shader eval */
- if (increase)
- state->bounce += 1;
- else
- state->bounce -= 1;
-}
-
-ccl_device_inline bool path_state_ao_bounce(KernelGlobals *kg, ccl_addr_space PathState *state)
-{
- if (state->bounce <= kernel_data.integrator.ao_bounces) {
+ if (!kernel_data.integrator.ao_bounces) {
return false;
}
- int bounce = state->bounce - state->transmission_bounce - (state->glossy_bounce > 0);
+ const int bounce = INTEGRATOR_STATE(path, bounce) - INTEGRATOR_STATE(path, transmission_bounce) -
+ (INTEGRATOR_STATE(path, glossy_bounce) > 0) + 1;
return (bounce > kernel_data.integrator.ao_bounces);
}
-ccl_device_inline void path_state_branch(ccl_addr_space PathState *state,
- int branch,
- int num_branches)
+/* Random Number Sampling Utility Functions
+ *
+ * For each random number in each step of the path we must have a unique
+ * dimension to avoid using the same sequence twice.
+ *
+ * For branches in the path we must be careful not to reuse the same number
+ * in a sequence and offset accordingly.
+ */
+
+/* RNG State loaded onto stack. */
+typedef struct RNGState {
+ uint rng_hash;
+ uint rng_offset;
+ int sample;
+} RNGState;
+
+ccl_device_inline void path_state_rng_load(INTEGRATOR_STATE_CONST_ARGS, RNGState *rng_state)
+{
+ rng_state->rng_hash = INTEGRATOR_STATE(path, rng_hash);
+ rng_state->rng_offset = INTEGRATOR_STATE(path, rng_offset);
+ rng_state->sample = INTEGRATOR_STATE(path, sample);
+}
+
+ccl_device_inline void shadow_path_state_rng_load(INTEGRATOR_STATE_CONST_ARGS, RNGState *rng_state)
+{
+ const uint shadow_bounces = INTEGRATOR_STATE(shadow_path, transparent_bounce) -
+ INTEGRATOR_STATE(path, transparent_bounce);
+
+ rng_state->rng_hash = INTEGRATOR_STATE(path, rng_hash);
+ rng_state->rng_offset = INTEGRATOR_STATE(path, rng_offset) + PRNG_BOUNCE_NUM * shadow_bounces;
+ rng_state->sample = INTEGRATOR_STATE(path, sample);
+}
+
+ccl_device_inline float path_state_rng_1D(const KernelGlobals *kg,
+ const RNGState *rng_state,
+ int dimension)
+{
+ return path_rng_1D(
+ kg, rng_state->rng_hash, rng_state->sample, rng_state->rng_offset + dimension);
+}
+
+ccl_device_inline void path_state_rng_2D(
+ const KernelGlobals *kg, const RNGState *rng_state, int dimension, float *fx, float *fy)
+{
+ path_rng_2D(
+ kg, rng_state->rng_hash, rng_state->sample, rng_state->rng_offset + dimension, fx, fy);
+}
+
+ccl_device_inline float path_state_rng_1D_hash(const KernelGlobals *kg,
+ const RNGState *rng_state,
+ uint hash)
+{
+ /* Use a hash instead of a dimension. This is not great, but it avoids adding
+ * more dimensions to each bounce, which would reduce the quality of the
+ * dimensions we are already using. */
+ return path_rng_1D(
+ kg, cmj_hash_simple(rng_state->rng_hash, hash), rng_state->sample, rng_state->rng_offset);
+}
+
+ccl_device_inline float path_branched_rng_1D(const KernelGlobals *kg,
+ const RNGState *rng_state,
+ int branch,
+ int num_branches,
+ int dimension)
+{
+ return path_rng_1D(kg,
+ rng_state->rng_hash,
+ rng_state->sample * num_branches + branch,
+ rng_state->rng_offset + dimension);
+}
+
+ccl_device_inline void path_branched_rng_2D(const KernelGlobals *kg,
+ const RNGState *rng_state,
+ int branch,
+ int num_branches,
+ int dimension,
+ float *fx,
+ float *fy)
+{
+ path_rng_2D(kg,
+ rng_state->rng_hash,
+ rng_state->sample * num_branches + branch,
+ rng_state->rng_offset + dimension,
+ fx,
+ fy);
+}
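The dimension/offset scheme described at the top of this block can be illustrated without any of the kernel machinery: each bounce advances an offset by a fixed stride, and each decision within a bounce uses its own dimension relative to that offset. A self-contained toy version follows; the hash generator is a placeholder, not Cycles' path_rng_1D:

#include <stdint.h>
#include <stdio.h>

enum { EXAMPLE_BOUNCE_NUM = 4 };  /* stride per bounce, playing the role of PRNG_BOUNCE_NUM */

/* Toy hash-based generator standing in for path_rng_1D(). */
static float example_rng_1D(uint32_t rng_hash, int sample, int dimension)
{
  uint32_t x = rng_hash ^ (uint32_t)sample * 0x9e3779b9u ^ (uint32_t)dimension * 0x85ebca6bu;
  x ^= x >> 16; x *= 0x7feb352du; x ^= x >> 15; x *= 0x846ca68bu; x ^= x >> 16;
  return (float)(x >> 8) * (1.0f / 16777216.0f);
}

int main(void)
{
  const uint32_t rng_hash = 0xdecafbadu;
  const int sample = 0;
  int rng_offset = 0;
  for (int bounce = 0; bounce < 3; bounce++) {
    /* Two decisions per bounce, each on its own dimension. */
    const float bsdf_u = example_rng_1D(rng_hash, sample, rng_offset + 0);
    const float terminate = example_rng_1D(rng_hash, sample, rng_offset + 1);
    printf("bounce %d: bsdf_u=%.3f terminate=%.3f\n", bounce, bsdf_u, terminate);
    rng_offset += EXAMPLE_BOUNCE_NUM;  /* like rng_offset += PRNG_BOUNCE_NUM in path_state_next() */
  }
  return 0;
}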
+
+/* Utility function to get the light termination value,
+ * since it might not be needed in many cases.
+ */
+ccl_device_inline float path_state_rng_light_termination(const KernelGlobals *kg,
+ const RNGState *state)
{
- if (num_branches > 1) {
- /* Path is splitting into a branch, adjust so that each branch
- * still gets a unique sample from the same sequence. */
- state->sample = state->sample * num_branches + branch;
- state->num_samples = state->num_samples * num_branches;
- state->branch_factor *= num_branches;
+ if (kernel_data.integrator.light_inv_rr_threshold > 0.0f) {
+ return path_state_rng_1D(kg, state, PRNG_LIGHT_TERMINATE);
}
+ return 0.0f;
}
CCL_NAMESPACE_END