From 409074aae56138f49ce078ce919a6d02e44e521e Mon Sep 17 00:00:00 2001 From: Stefan Werner Date: Mon, 2 Mar 2020 15:12:44 +0100 Subject: Cycles: add Progressive Multi-Jitter sampling pattern This sampling pattern is particularly suited to adaptive sampling, and will be used for that upcoming feature. Based on "Progressive Multi-Jittered Sample Sequences" by Per Christensen, Andrew Kensler and Charlie Kilpatrick. Ref D4686 --- intern/cycles/kernel/kernel_jitter.h | 31 ++++++++++++++++++++++++++++++ intern/cycles/kernel/kernel_random.h | 35 +++++++++++++++++++++++++++++++--- intern/cycles/kernel/kernel_textures.h | 2 +- intern/cycles/kernel/kernel_types.h | 5 +++++ 4 files changed, 69 insertions(+), 4 deletions(-) (limited to 'intern/cycles/kernel') diff --git a/intern/cycles/kernel/kernel_jitter.h b/intern/cycles/kernel/kernel_jitter.h index e59d8946950..b733bb9fee2 100644 --- a/intern/cycles/kernel/kernel_jitter.h +++ b/intern/cycles/kernel/kernel_jitter.h @@ -195,4 +195,35 @@ ccl_device void cmj_sample_2D(int s, int N, int p, float *fx, float *fy) } #endif +ccl_device float pmj_sample_1D(KernelGlobals *kg, int sample, int rng_hash, int dimension) +{ + /* Fallback to random */ + if (sample > NUM_PMJ_SAMPLES) { + int p = rng_hash + dimension; + return cmj_randfloat(sample, p); + } + uint tmp_rng = cmj_hash_simple(dimension, rng_hash); + int index = ((dimension % NUM_PMJ_PATTERNS) * NUM_PMJ_SAMPLES + sample) * 2; + return __uint_as_float(kernel_tex_fetch(__sample_pattern_lut, index) ^ (tmp_rng & 0x007fffff)) - + 1.0f; +} + +ccl_device void pmj_sample_2D( + KernelGlobals *kg, int sample, int rng_hash, int dimension, float *fx, float *fy) +{ + if (sample > NUM_PMJ_SAMPLES) { + int p = rng_hash + dimension; + *fx = cmj_randfloat(sample, p); + *fy = cmj_randfloat(sample, p + 1); + } + uint tmp_rng = cmj_hash_simple(dimension, rng_hash); + int index = ((dimension % NUM_PMJ_PATTERNS) * NUM_PMJ_SAMPLES + sample) * 2; + *fx = 
__uint_as_float(kernel_tex_fetch(__sample_pattern_lut, index) ^ (tmp_rng & 0x007fffff)) - + 1.0f; + tmp_rng = cmj_hash_simple(dimension + 1, rng_hash); + *fy = __uint_as_float(kernel_tex_fetch(__sample_pattern_lut, index + 1) ^ + (tmp_rng & 0x007fffff)) - + 1.0f; +} + CCL_NAMESPACE_END diff --git a/intern/cycles/kernel/kernel_random.h b/intern/cycles/kernel/kernel_random.h index 80738213d2a..dae9c8f930c 100644 --- a/intern/cycles/kernel/kernel_random.h +++ b/intern/cycles/kernel/kernel_random.h @@ -43,7 +43,7 @@ ccl_device uint sobol_dimension(KernelGlobals *kg, int index, int dimension) uint i = index + SOBOL_SKIP; for (int j = 0, x; (x = find_first_set(i)); i >>= x) { j += x; - result ^= kernel_tex_fetch(__sobol_directions, 32 * dimension + j - 1); + result ^= kernel_tex_fetch(__sample_pattern_lut, 32 * dimension + j - 1); } return result; } @@ -56,7 +56,9 @@ ccl_device_forceinline float path_rng_1D( #ifdef __DEBUG_CORRELATION__ return (float)drand48(); #endif - + if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_PMJ) { + return pmj_sample_1D(kg, sample, rng_hash, dimension); + } #ifdef __CMJ__ # ifdef __SOBOL__ if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_CMJ) @@ -99,7 +101,10 @@ ccl_device_forceinline void path_rng_2D(KernelGlobals *kg, *fy = (float)drand48(); return; #endif - + if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_PMJ) { + pmj_sample_2D(kg, sample, rng_hash, dimension, fx, fy); + return; + } #ifdef __CMJ__ # ifdef __SOBOL__ if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_CMJ) @@ -284,4 +289,28 @@ ccl_device float lcg_step_float_addrspace(ccl_addr_space uint *rng) return (float)*rng * (1.0f / (float)0xFFFFFFFF); } +ccl_device_inline bool sample_is_even(int pattern, int sample) +{ + if (pattern == SAMPLING_PATTERN_PMJ) { + /* See Section 10.2.1, "Progressive Multi-Jittered Sample Sequences", Christensen et al. 
+ * We can use this to divide the sample sequence into two classes for easier variance + * estimation. There must be a more elegant way of writing this? */ +#if defined(__GNUC__) && !defined(__KERNEL_GPU__) + return __builtin_popcount(sample & 0xaaaaaaaa) & 1; +#elif defined(__NVCC__) + return __popc(sample & 0xaaaaaaaa) & 1; +#else + int i = sample & 0xaaaaaaaa; + i = i - ((i >> 1) & 0x55555555); + i = (i & 0x33333333) + ((i >> 2) & 0x33333333); + i = (((i + (i >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; + return i & 1; +#endif + } + else { + /* TODO(Stefan): Are there reliable ways of dividing CMJ and Sobol into two classes? */ + return sample & 0x1; + } +} + CCL_NAMESPACE_END diff --git a/intern/cycles/kernel/kernel_textures.h b/intern/cycles/kernel/kernel_textures.h index 9eaa6b5516e..1cae34348c9 100644 --- a/intern/cycles/kernel/kernel_textures.h +++ b/intern/cycles/kernel/kernel_textures.h @@ -77,7 +77,7 @@ KERNEL_TEX(KernelShader, __shaders) KERNEL_TEX(float, __lookup_table) /* sobol */ -KERNEL_TEX(uint, __sobol_directions) +KERNEL_TEX(uint, __sample_pattern_lut) /* image textures */ KERNEL_TEX(TextureInfo, __texture_info) diff --git a/intern/cycles/kernel/kernel_types.h b/intern/cycles/kernel/kernel_types.h index 442b84a4f41..88c2d0d3196 100644 --- a/intern/cycles/kernel/kernel_types.h +++ b/intern/cycles/kernel/kernel_types.h @@ -267,6 +267,7 @@ enum PathTraceDimension { enum SamplingPattern { SAMPLING_PATTERN_SOBOL = 0, SAMPLING_PATTERN_CMJ = 1, + SAMPLING_PATTERN_PMJ = 2, SAMPLING_NUM_PATTERNS, }; @@ -1667,6 +1668,10 @@ typedef struct WorkTile { ccl_global float *buffer; } WorkTile; +/* Precomputed sample table sizes for PMJ02 sampler. */ +#define NUM_PMJ_SAMPLES 64 * 64 +#define NUM_PMJ_PATTERNS 48 + CCL_NAMESPACE_END #endif /* __KERNEL_TYPES_H__ */ -- cgit v1.2.3