git.blender.org/blender.git
author     Stefan Werner <stefan.werner@tangent-animation.com>  2020-03-02 17:12:44 +0300
committer  Brecht Van Lommel <brecht@blender.org>  2020-03-02 18:35:52 +0300
commit     409074aae56138f49ce078ce919a6d02e44e521e (patch)
tree       3c2e57fc9ff715c297c21bd3c888cc4ffc1b2848 /intern/cycles/kernel
parent     7b8db971d42f6d6b7b1c74959758266ce8c859e0 (diff)
Cycles: add Progressive Multi-Jitter sampling pattern
This sampling pattern is particularly suited to adaptive sampling and will be used for that upcoming feature.

Based on "Progressive Multi-Jittered Sample Sequences" by Per Christensen, Andrew Kensler and Charlie Kilpatrick.

Ref D4686
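The property that makes these sequences useful here is that they are progressive: prefixes of the sequence at power-of-two sample counts are themselves stratified, so rendering can stop, or adaptively continue, at any point without losing stratification. As a 1D analogy only (a standalone sketch, not part of this patch and not the paper's 2D construction), the base-2 radical inverse has the same prefix property: every 2^k-point prefix places exactly one point in each interval of width 1/2^k.

/* Standalone C illustration of the "progressive" prefix property. */
#include <stdint.h>
#include <stdio.h>

static float radical_inverse_base2(uint32_t i)
{
  /* Reverse the 32 bits of i, then scale into [0, 1). */
  i = (i << 16) | (i >> 16);
  i = ((i & 0x00ff00ffu) << 8) | ((i & 0xff00ff00u) >> 8);
  i = ((i & 0x0f0f0f0fu) << 4) | ((i & 0xf0f0f0f0u) >> 4);
  i = ((i & 0x33333333u) << 2) | ((i & 0xccccccccu) >> 2);
  i = ((i & 0x55555555u) << 1) | ((i & 0xaaaaaaaau) >> 1);
  return (float)(i * 2.3283064365386963e-10); /* i / 2^32 */
}

int main(void)
{
  /* The first 2 points fall in separate halves, the first 4 in separate
   * quarters, the first 8 in separate eighths, and so on. */
  for (uint32_t i = 0; i < 8; i++)
    printf("sample %u -> %f\n", (unsigned)i, radical_inverse_base2(i));
  return 0;
}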
Diffstat (limited to 'intern/cycles/kernel')
-rw-r--r--  intern/cycles/kernel/kernel_jitter.h   | 33
-rw-r--r--  intern/cycles/kernel/kernel_random.h   | 35
-rw-r--r--  intern/cycles/kernel/kernel_textures.h |  2
-rw-r--r--  intern/cycles/kernel/kernel_types.h    |  5
4 files changed, 71 insertions(+), 4 deletions(-)
diff --git a/intern/cycles/kernel/kernel_jitter.h b/intern/cycles/kernel/kernel_jitter.h
index e59d8946950..b733bb9fee2 100644
--- a/intern/cycles/kernel/kernel_jitter.h
+++ b/intern/cycles/kernel/kernel_jitter.h
@@ -195,4 +195,37 @@ ccl_device void cmj_sample_2D(int s, int N, int p, float *fx, float *fy)
}
#endif
+ccl_device float pmj_sample_1D(KernelGlobals *kg, int sample, int rng_hash, int dimension)
+{
+ /* Fall back to pure random numbers once the precomputed table is exhausted. */
+ if (sample >= NUM_PMJ_SAMPLES) {
+ int p = rng_hash + dimension;
+ return cmj_randfloat(sample, p);
+ }
+ uint tmp_rng = cmj_hash_simple(dimension, rng_hash);
+ int index = ((dimension % NUM_PMJ_PATTERNS) * NUM_PMJ_SAMPLES + sample) * 2;
+ return __uint_as_float(kernel_tex_fetch(__sample_pattern_lut, index) ^ (tmp_rng & 0x007fffff)) -
+ 1.0f;
+}
+
+ccl_device void pmj_sample_2D(
+ KernelGlobals *kg, int sample, int rng_hash, int dimension, float *fx, float *fy)
+{
+ /* Fall back to pure random numbers once the precomputed table is exhausted. */
+ if (sample >= NUM_PMJ_SAMPLES) {
+ int p = rng_hash + dimension;
+ *fx = cmj_randfloat(sample, p);
+ *fy = cmj_randfloat(sample, p + 1);
+ return;
+ }
+ uint tmp_rng = cmj_hash_simple(dimension, rng_hash);
+ int index = ((dimension % NUM_PMJ_PATTERNS) * NUM_PMJ_SAMPLES + sample) * 2;
+ *fx = __uint_as_float(kernel_tex_fetch(__sample_pattern_lut, index) ^ (tmp_rng & 0x007fffff)) -
+ 1.0f;
+ tmp_rng = cmj_hash_simple(dimension + 1, rng_hash);
+ *fy = __uint_as_float(kernel_tex_fetch(__sample_pattern_lut, index + 1) ^
+ (tmp_rng & 0x007fffff)) -
+ 1.0f;
+}
+
CCL_NAMESPACE_END
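A note on the bit trick in pmj_sample_1D()/pmj_sample_2D() above: the lookup table is assumed to store each coordinate as the IEEE-754 bit pattern of 1.0f + x with x in [0, 1), so the exponent bits are fixed and only the 23 mantissa bits vary between samples. XOR-ing those mantissa bits with a per-dimension hash keeps the value inside [1, 2), and subtracting 1.0f maps it back to [0, 1), giving a cheap Cranley-Patterson-style scramble. A standalone sketch of the round trip (the host-side packing shown here is an assumption; it is not part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t float_as_uint(float f)
{
  uint32_t u;
  memcpy(&u, &f, sizeof(u));
  return u;
}

static float uint_as_float(uint32_t u)
{
  float f;
  memcpy(&f, &u, sizeof(f));
  return f;
}

int main(void)
{
  float x = 0.3125f;                         /* a sample coordinate in [0, 1) */
  uint32_t packed = float_as_uint(1.0f + x); /* what the LUT would store */
  uint32_t hash = 0x9e3779b9u;               /* stand-in for cmj_hash_simple() */

  /* XOR only the 23 mantissa bits: the result stays in [1, 2), so
   * subtracting 1.0f yields a scrambled coordinate still in [0, 1). */
  float scrambled = uint_as_float(packed ^ (hash & 0x007fffffu)) - 1.0f;
  printf("x = %f, scrambled = %f\n", x, scrambled);
  return 0;
}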
diff --git a/intern/cycles/kernel/kernel_random.h b/intern/cycles/kernel/kernel_random.h
index 80738213d2a..dae9c8f930c 100644
--- a/intern/cycles/kernel/kernel_random.h
+++ b/intern/cycles/kernel/kernel_random.h
@@ -43,7 +43,7 @@ ccl_device uint sobol_dimension(KernelGlobals *kg, int index, int dimension)
uint i = index + SOBOL_SKIP;
for (int j = 0, x; (x = find_first_set(i)); i >>= x) {
j += x;
- result ^= kernel_tex_fetch(__sobol_directions, 32 * dimension + j - 1);
+ result ^= kernel_tex_fetch(__sample_pattern_lut, 32 * dimension + j - 1);
}
return result;
}
@@ -56,7 +56,9 @@ ccl_device_forceinline float path_rng_1D(
#ifdef __DEBUG_CORRELATION__
return (float)drand48();
#endif
-
+ if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_PMJ) {
+ return pmj_sample_1D(kg, sample, rng_hash, dimension);
+ }
#ifdef __CMJ__
# ifdef __SOBOL__
if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_CMJ)
@@ -99,7 +101,10 @@ ccl_device_forceinline void path_rng_2D(KernelGlobals *kg,
*fy = (float)drand48();
return;
#endif
-
+ if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_PMJ) {
+ pmj_sample_2D(kg, sample, rng_hash, dimension, fx, fy);
+ return;
+ }
#ifdef __CMJ__
# ifdef __SOBOL__
if (kernel_data.integrator.sampling_pattern == SAMPLING_PATTERN_CMJ)
@@ -284,4 +289,28 @@ ccl_device float lcg_step_float_addrspace(ccl_addr_space uint *rng)
return (float)*rng * (1.0f / (float)0xFFFFFFFF);
}
+ccl_device_inline bool sample_is_even(int pattern, int sample)
+{
+ if (pattern == SAMPLING_PATTERN_PMJ) {
+ /* See Section 10.2.1, "Progressive Multi-Jittered Sample Sequences", Christensen et al.
+ * We can use this to divide the sample sequence into two classes for easier variance
+ * estimation. There must be a more elegant way of writing this? */
+#if defined(__GNUC__) && !defined(__KERNEL_GPU__)
+ return __builtin_popcount(sample & 0xaaaaaaaa) & 1;
+#elif defined(__NVCC__)
+ return __popc(sample & 0xaaaaaaaa) & 1;
+#else
+ int i = sample & 0xaaaaaaaa;
+ i = i - ((i >> 1) & 0x55555555);
+ i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
+ i = (((i + (i >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
+ return i & 1;
+#endif
+ }
+ else {
+ /* TODO(Stefan): Are there reliable ways of dividing CMJ and Sobol into two classes? */
+ return sample & 0x1;
+ }
+}
+
CCL_NAMESPACE_END
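The sample_is_even() test above classifies a sample by the parity of the set bits in the odd bit positions of its index (the 0xaaaaaaaa mask); per the comment, this splits the pmj02 sequence into two interleaved classes for variance estimation. A quick standalone check (assuming a GCC/Clang host; not part of the patch) that the portable bit-twiddling fallback agrees with the builtin:

#include <assert.h>
#include <stdio.h>

static int is_even_portable(int sample)
{
  /* Same bit-twiddling popcount as the fallback branch above. */
  int i = sample & 0xaaaaaaaa;
  i = i - ((i >> 1) & 0x55555555);
  i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
  i = (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
  return i & 1;
}

int main(void)
{
  for (int s = 0; s < (1 << 16); s++) {
    int builtin = __builtin_popcount(s & 0xaaaaaaaa) & 1;
    assert(builtin == is_even_portable(s));
  }
  printf("portable popcount matches __builtin_popcount\n");
  return 0;
}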
diff --git a/intern/cycles/kernel/kernel_textures.h b/intern/cycles/kernel/kernel_textures.h
index 9eaa6b5516e..1cae34348c9 100644
--- a/intern/cycles/kernel/kernel_textures.h
+++ b/intern/cycles/kernel/kernel_textures.h
@@ -77,7 +77,7 @@ KERNEL_TEX(KernelShader, __shaders)
KERNEL_TEX(float, __lookup_table)
/* sobol */
-KERNEL_TEX(uint, __sobol_directions)
+KERNEL_TEX(uint, __sample_pattern_lut)
/* image textures */
KERNEL_TEX(TextureInfo, __texture_info)
diff --git a/intern/cycles/kernel/kernel_types.h b/intern/cycles/kernel/kernel_types.h
index 442b84a4f41..88c2d0d3196 100644
--- a/intern/cycles/kernel/kernel_types.h
+++ b/intern/cycles/kernel/kernel_types.h
@@ -267,6 +267,7 @@ enum PathTraceDimension {
enum SamplingPattern {
SAMPLING_PATTERN_SOBOL = 0,
SAMPLING_PATTERN_CMJ = 1,
+ SAMPLING_PATTERN_PMJ = 2,
SAMPLING_NUM_PATTERNS,
};
@@ -1667,6 +1668,10 @@ typedef struct WorkTile {
ccl_global float *buffer;
} WorkTile;
+/* Precomputed sample table sizes for the PMJ02 sampler. */
+#define NUM_PMJ_SAMPLES (64 * 64)
+#define NUM_PMJ_PATTERNS 48
+
CCL_NAMESPACE_END
#endif /* __KERNEL_TYPES_H__ */
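For reference, those constants imply a __sample_pattern_lut of NUM_PMJ_PATTERNS * NUM_PMJ_SAMPLES * 2 uints: 48 patterns of 4096 two-dimensional samples, with the packed x and y of each sample stored as consecutive entries. A minimal sketch of the addressing that pmj_sample_2D() performs (the allocation is an illustration, not Blender's actual host code):

#include <stdint.h>
#include <stdlib.h>

#define NUM_PMJ_SAMPLES (64 * 64) /* 4096 samples per pattern */
#define NUM_PMJ_PATTERNS 48       /* higher dimensions reuse patterns modulo 48 */

int main(void)
{
  /* Two packed uints (x, y) per sample, per pattern. */
  uint32_t *lut = calloc((size_t)NUM_PMJ_PATTERNS * NUM_PMJ_SAMPLES * 2, sizeof(uint32_t));
  if (!lut)
    return 1;

  /* Same addressing as pmj_sample_2D(). */
  int dimension = 5, sample = 100;
  int index = ((dimension % NUM_PMJ_PATTERNS) * NUM_PMJ_SAMPLES + sample) * 2;
  uint32_t x_bits = lut[index];     /* packed x coordinate */
  uint32_t y_bits = lut[index + 1]; /* packed y coordinate */

  (void)x_bits;
  (void)y_bits;
  free(lut);
  return 0;
}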