Diffstat (limited to 'intern/cycles/util')
-rw-r--r--  intern/cycles/util/CMakeLists.txt       |   2
-rw-r--r--  intern/cycles/util/util_atomic.h        |  28
-rw-r--r--  intern/cycles/util/util_murmurhash.cpp  | 125
-rw-r--r--  intern/cycles/util/util_murmurhash.h    |  30
4 files changed, 185 insertions, 0 deletions
diff --git a/intern/cycles/util/CMakeLists.txt b/intern/cycles/util/CMakeLists.txt
index 291f9a9fcae..4f623c5dfb7 100644
--- a/intern/cycles/util/CMakeLists.txt
+++ b/intern/cycles/util/CMakeLists.txt
@@ -15,6 +15,7 @@ set(SRC
util_logging.cpp
util_math_cdf.cpp
util_md5.cpp
+ util_murmurhash.cpp
util_path.cpp
util_string.cpp
util_simd.cpp
@@ -64,6 +65,7 @@ set(SRC_HEADERS
util_math_int4.h
util_math_matrix.h
util_md5.h
+ util_murmurhash.h
util_opengl.h
util_optimization.h
util_param.h
diff --git a/intern/cycles/util/util_atomic.h b/intern/cycles/util/util_atomic.h
index f3c7ae546a0..e17e99d0acd 100644
--- a/intern/cycles/util/util_atomic.h
+++ b/intern/cycles/util/util_atomic.h
@@ -23,6 +23,7 @@
#include "atomic_ops.h"
#define atomic_add_and_fetch_float(p, x) atomic_add_and_fetch_fl((p), (x))
+#define atomic_compare_and_swap_float(p, old_val, new_val) atomic_cas_float((p), (old_val), (new_val))
#define atomic_fetch_and_inc_uint32(p) atomic_fetch_and_add_uint32((p), 1)
#define atomic_fetch_and_dec_uint32(p) atomic_fetch_and_add_uint32((p), -1)
@@ -57,6 +58,20 @@ ccl_device_inline float atomic_add_and_fetch_float(volatile ccl_global float *so
return new_value.float_value;
}
+ccl_device_inline float atomic_compare_and_swap_float(volatile ccl_global float *dest,
+ const float old_val, const float new_val)
+{
+ union {
+ unsigned int int_value;
+ float float_value;
+ } new_value, prev_value, result;
+ prev_value.float_value = old_val;
+ new_value.float_value = new_val;
+ result.int_value = atomic_cmpxchg((volatile ccl_global unsigned int *)dest,
+ prev_value.int_value, new_value.int_value);
+ return result.float_value;
+}
+
#define atomic_fetch_and_add_uint32(p, x) atomic_add((p), (x))
#define atomic_fetch_and_inc_uint32(p) atomic_inc((p))
#define atomic_fetch_and_dec_uint32(p) atomic_dec((p))
@@ -75,6 +90,19 @@ ccl_device_inline float atomic_add_and_fetch_float(volatile ccl_global float *so
#define atomic_fetch_and_inc_uint32(p) atomic_fetch_and_add_uint32((p), 1)
#define atomic_fetch_and_dec_uint32(p) atomic_fetch_and_sub_uint32((p), 1)
+ccl_device_inline float atomic_compare_and_swap_float(volatile float *dest,
+ const float old_val, const float new_val)
+{
+ union {
+ unsigned int int_value;
+ float float_value;
+ } new_value, prev_value, result;
+ prev_value.float_value = old_val;
+ new_value.float_value = new_val;
+ result.int_value = atomicCAS((unsigned int *)dest, prev_value.int_value, new_value.int_value);
+ return result.float_value;
+}
+
#define CCL_LOCAL_MEM_FENCE
#define ccl_barrier(flags) __syncthreads()
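
Note (not part of the patch): atomic_compare_and_swap_float() returns the value that was previously stored, so callers typically wrap it in a retry loop: read the current value, compute the desired result, attempt the swap, and retry if another thread changed the value in the meantime. Below is a minimal host-side sketch of that pattern using std::atomic; the helper name atomic_max_float is hypothetical and chosen only for illustration, device code would follow the same shape with the new macro.

#include <atomic>
#include <cstring>

/* Illustrative CAS retry loop: atomically raise a float stored as raw 32-bit data. */
static float atomic_max_float(std::atomic<unsigned int> *dest, float value)
{
  unsigned int old_bits = dest->load();
  for (;;) {
    float old_val;
    std::memcpy(&old_val, &old_bits, sizeof(float));
    if (old_val >= value) {
      return old_val; /* Current value already at least as large, nothing to do. */
    }
    unsigned int new_bits;
    std::memcpy(&new_bits, &value, sizeof(float));
    /* On failure compare_exchange_weak reloads old_bits, so the loop retries
     * against the freshly observed value. */
    if (dest->compare_exchange_weak(old_bits, new_bits)) {
      return old_val;
    }
  }
}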
diff --git a/intern/cycles/util/util_murmurhash.cpp b/intern/cycles/util/util_murmurhash.cpp
new file mode 100644
index 00000000000..c1f81e61b72
--- /dev/null
+++ b/intern/cycles/util/util_murmurhash.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2018 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This is taken from alShaders/Cryptomatte/MurmurHash3.h:
+ *
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain. The author hereby disclaims copyright to this source code.
+ *
+ */
+
+#include "util/util_algorithm.h"
+#include "util/util_murmurhash.h"
+
+#if defined(_MSC_VER)
+# include <stdlib.h>
+# define ROTL32(x,y) _rotl(x,y)
+# define ROTL64(x,y) _rotl64(x,y)
+# define BIG_CONSTANT(x) (x)
+#else
+ccl_device_inline uint32_t rotl32(uint32_t x, int8_t r)
+{
+ return (x << r) | (x >> (32 - r));
+}
+# define ROTL32(x,y) rotl32(x,y)
+# define BIG_CONSTANT(x) (x##LLU)
+#endif
+
+CCL_NAMESPACE_BEGIN
+
+/* Block read - if your platform needs to do endian-swapping or can only
+ * handle aligned reads, do the conversion here. */
+ccl_device_inline uint32_t mm_hash_getblock32(const uint32_t *p, int i)
+{
+ return p[i];
+}
+
+/* Finalization mix - force all bits of a hash block to avalanche */
+ccl_device_inline uint32_t mm_hash_fmix32 ( uint32_t h )
+{
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+ return h;
+}
+
+uint32_t util_murmur_hash3(const void *key, int len, uint32_t seed)
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 4;
+
+ uint32_t h1 = seed;
+
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
+
+ const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+
+ for(int i = -nblocks; i; i++) {
+ uint32_t k1 = mm_hash_getblock32(blocks,i);
+
+ k1 *= c1;
+ k1 = ROTL32(k1,15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = ROTL32(h1,13);
+ h1 = h1 * 5 + 0xe6546b64;
+ }
+
+ const uint8_t *tail = (const uint8_t*)(data + nblocks*4);
+
+ uint32_t k1 = 0;
+
+ switch(len & 3) {
+ case 3:
+ k1 ^= tail[2] << 16;
+ ATTR_FALLTHROUGH;
+ case 2:
+ k1 ^= tail[1] << 8;
+ ATTR_FALLTHROUGH;
+ case 1:
+ k1 ^= tail[0];
+ k1 *= c1;
+ k1 = ROTL32(k1,15);
+ k1 *= c2;
+ h1 ^= k1;
+ }
+
+ h1 ^= len;
+ h1 = mm_hash_fmix32(h1);
+ return h1;
+}
+
+/* This is taken from the cryptomatte specification 1.0 */
+float util_hash_to_float(uint32_t hash)
+{
+ uint32_t mantissa = hash & (( 1 << 23) - 1);
+ uint32_t exponent = (hash >> 23) & ((1 << 8) - 1);
+ exponent = max(exponent, (uint32_t) 1);
+ exponent = min(exponent, (uint32_t) 254);
+ exponent = exponent << 23;
+ uint32_t sign = (hash >> 31);
+ sign = sign << 31;
+ uint32_t float_bits = sign | exponent | mantissa;
+ float f;
+ memcpy(&f, &float_bits, sizeof(uint32_t));
+ return f;
+}
+
+CCL_NAMESPACE_END
diff --git a/intern/cycles/util/util_murmurhash.h b/intern/cycles/util/util_murmurhash.h
new file mode 100644
index 00000000000..824ed59cb16
--- /dev/null
+++ b/intern/cycles/util/util_murmurhash.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __UTIL_MURMURHASH_H__
+#define __UTIL_MURMURHASH_H__
+
+#include "util/util_types.h"
+
+CCL_NAMESPACE_BEGIN
+
+uint32_t util_murmur_hash3(const void *key, int len, uint32_t seed);
+float util_hash_to_float(uint32_t hash);
+
+CCL_NAMESPACE_END
+
+#endif /* __UTIL_MURMURHASH_H__ */
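
Note (not part of the patch): a minimal sketch of how the two new helpers could combine to derive a Cryptomatte-style float ID from a name. The function name object_cryptomatte_id and the seed value 0 are assumptions made here for illustration; they are not defined by this patch.

#include "util/util_murmurhash.h"

#include <cstring>

CCL_NAMESPACE_BEGIN

/* Hypothetical helper: hash the raw name bytes, then remap the 32-bit hash
 * into a float that avoids NaN/inf bit patterns via util_hash_to_float(). */
float object_cryptomatte_id(const char *name)
{
  uint32_t hash = util_murmur_hash3(name, (int)strlen(name), 0);
  return util_hash_to_float(hash);
}

CCL_NAMESPACE_END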