author | Thomas Dinges <blender@dingto.org> | 2014-06-13 23:13:18 +0400
---|---|---
committer | Thomas Dinges <blender@dingto.org> | 2014-06-13 23:59:12 +0400
commit | cd5e1ff74e4f6443f3e4b836dd23fe46b56cb7ed (patch)
tree | 578ee132eab87d348147e49c91e1929660558c20 /intern/cycles/util
parent | d0573ce9054e325c0ad2fbb943087e0f8b9e159a (diff)
Cycles Refactor: Add SSE Utility code from Embree for cleaner SSE code.
This makes the code a bit easier to understand, and might come in handy
if we want to reuse more Embree code.
Differential Revision: https://developer.blender.org/D482
Code by Brecht, with fixes by Lockal, Sergey and myself.
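To illustrate the flavor of the change (a sketch drawn from the util_color.h hunk below; `ssef` and `madd` are the wrapper type and multiply-add helper this patch introduces):

```cpp
/* Before: nested raw SSE intrinsics. */
__m128 summ = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(4.0f), old_result), t);

/* After: Embree-style wrapper types with overloaded operators;
 * madd(a, b, c) computes a*b + c and can map to FMA where available. */
ssef summ = madd(ssef(4.0f), old_result, t);
```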
Diffstat (limited to 'intern/cycles/util')
-rw-r--r-- | intern/cycles/util/CMakeLists.txt | 4
-rw-r--r-- | intern/cycles/util/util_color.h | 46
-rw-r--r-- | intern/cycles/util/util_half.h | 24
-rw-r--r-- | intern/cycles/util/util_optimization.h | 10
-rw-r--r-- | intern/cycles/util/util_simd.cpp | 42
-rw-r--r-- | intern/cycles/util/util_simd.h | 473
-rw-r--r-- | intern/cycles/util/util_sseb.h | 161
-rw-r--r-- | intern/cycles/util/util_ssef.h | 588
-rw-r--r-- | intern/cycles/util/util_ssei.h | 294
-rw-r--r-- | intern/cycles/util/util_types.h | 2

10 files changed, 1451 insertions(+), 193 deletions(-)
diff --git a/intern/cycles/util/CMakeLists.txt b/intern/cycles/util/CMakeLists.txt
index c1150d226ae..01b5675b9f7 100644
--- a/intern/cycles/util/CMakeLists.txt
+++ b/intern/cycles/util/CMakeLists.txt
@@ -16,6 +16,7 @@ set(SRC
 	util_opencl.cpp
 	util_path.cpp
 	util_string.cpp
+	util_simd.cpp
 	util_system.cpp
 	util_task.cpp
 	util_time.cpp
@@ -53,6 +54,9 @@ set(SRC_HEADERS
 	util_progress.h
 	util_set.h
 	util_simd.h
+	util_sseb.h
+	util_ssef.h
+	util_ssei.h
 	util_stats.h
 	util_string.h
 	util_system.h
diff --git a/intern/cycles/util/util_color.h b/intern/cycles/util/util_color.h
index b72cc6bc873..d566e1bf359 100644
--- a/intern/cycles/util/util_color.h
+++ b/intern/cycles/util/util_color.h
@@ -155,28 +155,28 @@ ccl_device float3 color_srgb_to_scene_linear(float3 c)
  * e2coeff = 2^(127/exponent - 127) * bias_coeff^(1/exponent), encoded as uint32_t */
 template<unsigned exp, unsigned e2coeff>
-ccl_device_inline __m128 fastpow(const __m128 &arg)
+ccl_device_inline ssef fastpow(const ssef &arg)
 {
-	__m128 ret;
-	ret = _mm_mul_ps(arg, _mm_castsi128_ps(_mm_set1_epi32(e2coeff)));
-	ret = _mm_cvtepi32_ps(_mm_castps_si128(ret));
-	ret = _mm_mul_ps(ret, _mm_castsi128_ps(_mm_set1_epi32(exp)));
-	ret = _mm_castsi128_ps(_mm_cvtps_epi32(ret));
+	ssef ret;
+	ret = arg * cast(ssei(e2coeff));
+	ret = ssef(cast(ret));
+	ret = ret * cast(ssei(exp));
+	ret = cast(ssei(ret));
 	return ret;
 }
 
 /* Improve x ^ 1.0f/5.0f solution with Newton-Raphson method */
-ccl_device_inline __m128 improve_5throot_solution(const __m128 &old_result, const __m128 &x)
+ccl_device_inline ssef improve_5throot_solution(const ssef &old_result, const ssef &x)
 {
-	__m128 approx2 = _mm_mul_ps(old_result, old_result);
-	__m128 approx4 = _mm_mul_ps(approx2, approx2);
-	__m128 t = _mm_div_ps(x, approx4);
-	__m128 summ = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(4.0f), old_result), t); /* fma */
-	return _mm_mul_ps(summ, _mm_set1_ps(1.0f/5.0f));
+	ssef approx2 = old_result * old_result;
+	ssef approx4 = approx2 * approx2;
+	ssef t = x / approx4;
+	ssef summ = madd(ssef(4.0f), old_result, t);
+	return summ * ssef(1.0f/5.0f);
 }
 
 /* Calculate powf(x, 2.4). Working domain: 1e-10 < x < 1e+10 */
-ccl_device_inline __m128 fastpow24(const __m128 &arg)
+ccl_device_inline ssef fastpow24(const ssef &arg)
 {
 	/* max, avg and |avg| errors were calculated in gcc without FMA instructions
 	 * The final precision should be better than powf in glibc */
@@ -184,22 +184,22 @@ ccl_device_inline __m128 fastpow24(const __m128 &arg)
 	/* Calculate x^4/5, coefficient 0.994 was constructed manually to minimize avg error */
 	/* 0x3F4CCCCD = 4/5 */
 	/* 0x4F55A7FB = 2^(127/(4/5) - 127) * 0.994^(1/(4/5)) */
-	__m128 x = fastpow<0x3F4CCCCD, 0x4F55A7FB>(arg); // error max = 0.17 avg = 0.0018 |avg| = 0.05
-	__m128 arg2 = _mm_mul_ps(arg, arg);
-	__m128 arg4 = _mm_mul_ps(arg2, arg2);
+	ssef x = fastpow<0x3F4CCCCD, 0x4F55A7FB>(arg); // error max = 0.17 avg = 0.0018 |avg| = 0.05
+	ssef arg2 = arg * arg;
+	ssef arg4 = arg2 * arg2;
 	x = improve_5throot_solution(x, arg4); /* error max = 0.018 avg = 0.0031 |avg| = 0.0031 */
 	x = improve_5throot_solution(x, arg4); /* error max = 0.00021 avg = 1.6e-05 |avg| = 1.6e-05 */
 	x = improve_5throot_solution(x, arg4); /* error max = 6.1e-07 avg = 5.2e-08 |avg| = 1.1e-07 */
-	return _mm_mul_ps(x, _mm_mul_ps(x, x));
+	return x * (x * x);
 }
 
-ccl_device __m128 color_srgb_to_scene_linear(const __m128 &c)
+ccl_device ssef color_srgb_to_scene_linear(const ssef &c)
 {
-	__m128 cmp = _mm_cmplt_ps(c, _mm_set1_ps(0.04045f));
-	__m128 lt = _mm_max_ps(_mm_mul_ps(c, _mm_set1_ps(1.0f/12.92f)), _mm_set1_ps(0.0f));
-	__m128 gtebase = _mm_mul_ps(_mm_add_ps(c, _mm_set1_ps(0.055f)), _mm_set1_ps(1.0f/1.055f)); /* fma */
-	__m128 gte = fastpow24(gtebase);
-	return blend(cmp, lt, gte);
+	sseb cmp = c < ssef(0.04045f);
+	ssef lt = max(c * ssef(1.0f/12.92f), ssef(0.0f));
+	ssef gtebase = (c + ssef(0.055f)) * ssef(1.0f/1.055f); /* fma */
+	ssef gte = fastpow24(gtebase);
+	return select(cmp, lt, gte);
 }
 
 #endif
diff --git a/intern/cycles/util/util_half.h b/intern/cycles/util/util_half.h
index da6fae79bb9..397133618be 100644
--- a/intern/cycles/util/util_half.h
+++ b/intern/cycles/util/util_half.h
@@ -68,18 +68,18 @@ ccl_device_inline void float4_store_half(half *h, float4 f, float scale)
 }
 #else
 	/* same as above with SSE */
-	const __m128 mm_scale = _mm_set_ps1(scale);
-	const __m128i mm_38800000 = _mm_set1_epi32(0x38800000);
-	const __m128i mm_7FFF = _mm_set1_epi32(0x7FFF);
-	const __m128i mm_7FFFFFFF = _mm_set1_epi32(0x7FFFFFFF);
-	const __m128i mm_C8000000 = _mm_set1_epi32(0xC8000000);
-
-	__m128 mm_fscale = _mm_mul_ps(load_m128(f), mm_scale);
-	__m128i x = _mm_castps_si128(_mm_min_ps(_mm_max_ps(mm_fscale, _mm_set_ps1(0.0f)), _mm_set_ps1(65500.0f)));
-	__m128i absolute = _mm_and_si128(x, mm_7FFFFFFF);
-	__m128i Z = _mm_add_epi32(absolute, mm_C8000000);
-	__m128i result = _mm_andnot_si128(_mm_cmplt_epi32(absolute, mm_38800000), Z);
-	__m128i rh = _mm_and_si128(_mm_srai_epi32(result, 13), mm_7FFF);
+	const ssef mm_scale = ssef(scale);
+	const ssei mm_38800000 = ssei(0x38800000);
+	const ssei mm_7FFF = ssei(0x7FFF);
+	const ssei mm_7FFFFFFF = ssei(0x7FFFFFFF);
+	const ssei mm_C8000000 = ssei(0xC8000000);
+
+	ssef mm_fscale = load4f(f) * mm_scale;
+	ssei x = cast(min(max(mm_fscale, ssef(0.0f)), ssef(65500.0f)));
+	ssei absolute = x & mm_7FFFFFFF;
+	ssei Z = absolute + mm_C8000000;
+	ssei result = andnot(absolute < mm_38800000, Z);
+	ssei rh = (result >> 13) & mm_7FFF;
 
 	_mm_storel_pi((__m64*)h, _mm_castsi128_ps(_mm_packs_epi32(rh, rh)));
 #endif
diff --git a/intern/cycles/util/util_optimization.h b/intern/cycles/util/util_optimization.h
index f901513ec4b..0a6013cddd4 100644
--- a/intern/cycles/util/util_optimization.h
+++ b/intern/cycles/util/util_optimization.h
@@ -101,6 +101,10 @@
 /* SSE intrinsics headers */
 #ifndef FREE_WINDOWS64
 
+#ifdef _MSC_VER
+#include <intrin.h>
+#else
+
 #ifdef __KERNEL_SSE2__
 #include <xmmintrin.h> /* SSE 1 */
 #include <emmintrin.h> /* SSE 2 */
@@ -118,6 +122,12 @@
 #include <smmintrin.h> /* SSE 4.1 */
 #endif
 
+#ifdef __KERNEL_AVX__
+#include <immintrin.h> /* AVX */
+#endif
+
+#endif
+
 #else
 
 /* MinGW64 has conflicting declarations for these SSE headers in <windows.h>.
diff --git a/intern/cycles/util/util_simd.cpp b/intern/cycles/util/util_simd.cpp
new file mode 100644
index 00000000000..8c34f6600d3
--- /dev/null
+++ b/intern/cycles/util/util_simd.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+#include "util_simd.h"
+
+CCL_NAMESPACE_BEGIN
+
+const __m128 _mm_lookupmask_ps[16] = {
+	_mm_castsi128_ps(_mm_set_epi32( 0, 0, 0, 0)),
+	_mm_castsi128_ps(_mm_set_epi32( 0, 0, 0,-1)),
+	_mm_castsi128_ps(_mm_set_epi32( 0, 0,-1, 0)),
+	_mm_castsi128_ps(_mm_set_epi32( 0, 0,-1,-1)),
+	_mm_castsi128_ps(_mm_set_epi32( 0,-1, 0, 0)),
+	_mm_castsi128_ps(_mm_set_epi32( 0,-1, 0,-1)),
+	_mm_castsi128_ps(_mm_set_epi32( 0,-1,-1, 0)),
+	_mm_castsi128_ps(_mm_set_epi32( 0,-1,-1,-1)),
+	_mm_castsi128_ps(_mm_set_epi32(-1, 0, 0, 0)),
+	_mm_castsi128_ps(_mm_set_epi32(-1, 0, 0,-1)),
+	_mm_castsi128_ps(_mm_set_epi32(-1, 0,-1, 0)),
+	_mm_castsi128_ps(_mm_set_epi32(-1, 0,-1,-1)),
+	_mm_castsi128_ps(_mm_set_epi32(-1,-1, 0, 0)),
+	_mm_castsi128_ps(_mm_set_epi32(-1,-1, 0,-1)),
+	_mm_castsi128_ps(_mm_set_epi32(-1,-1,-1, 0)),
+	_mm_castsi128_ps(_mm_set_epi32(-1,-1,-1,-1))
+};
+
+CCL_NAMESPACE_END
+
diff --git a/intern/cycles/util/util_simd.h b/intern/cycles/util/util_simd.h
index f0f37fa57aa..0f65fab58b6 100644
--- a/intern/cycles/util/util_simd.h
+++ b/intern/cycles/util/util_simd.h
@@ -1,7 +1,8 @@
 /*
- * Copyright 2011-2013 Blender Foundation
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
+ * Licensed under the Apache License, Version 2.0(the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
@@ -14,263 +15,419 @@
 * limitations under the License
 */
 
-#ifndef __UTIL_SIMD_H__
-#define __UTIL_SIMD_H__
+#ifndef __UTIL_SIMD_TYPES_H__
+#define __UTIL_SIMD_TYPES_H__
+
+#include <limits>
+
+#include "util_debug.h"
+#include "util_types.h"
 
 CCL_NAMESPACE_BEGIN
 
 #ifdef __KERNEL_SSE2__
 
-/* SSE shuffle utility functions */
+struct sseb;
+struct ssei;
+struct ssef;
+
+extern const __m128 _mm_lookupmask_ps[16];
+
+/* Special Types */
 
-#ifdef __KERNEL_SSSE3__
+static struct TrueTy {
+__forceinline operator bool( ) const { return true; }
+} True ccl_maybe_unused;
 
-/* faster version for SSSE3 */
-typedef __m128i shuffle_swap_t;
+static struct FalseTy {
+__forceinline operator bool( ) const { return false; }
+} False ccl_maybe_unused;
 
-ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void)
+static struct NegInfTy
 {
-	return _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-}
+__forceinline operator float ( ) const { return -std::numeric_limits<float>::infinity(); }
+__forceinline operator int ( ) const { return std::numeric_limits<int>::min(); }
+} neg_inf ccl_maybe_unused;
 
-ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void)
+static struct PosInfTy
 {
-	return _mm_set_epi8(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+__forceinline operator float ( ) const { return std::numeric_limits<float>::infinity(); }
+__forceinline operator int ( ) const { return std::numeric_limits<int>::max(); }
+} inf ccl_maybe_unused, pos_inf ccl_maybe_unused;
+
+/* Intrinsics Functions */
+
+#if defined(__BMI__) && defined(__GNUC__)
+#define _tzcnt_u32 __tzcnt_u32
+#define _tzcnt_u64 __tzcnt_u64
+#endif
+
+#if defined(__LZCNT__)
+#define _lzcnt_u32 __lzcnt32
+#define _lzcnt_u64 __lzcnt64
+#endif
+
+#if defined(_WIN32)
+
+__forceinline int __popcnt(int in) {
+  return _mm_popcnt_u32(in);
 }
 
-ccl_device_inline const __m128 shuffle_swap(const __m128& a, const shuffle_swap_t& shuf)
-{
-	return _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(a), shuf));
+#if !defined(_MSC_VER)
+__forceinline unsigned int __popcnt(unsigned int in) {
+  return _mm_popcnt_u32(in);
+}
+#endif
+
+#if defined(__KERNEL_64_BIT__)
+__forceinline long long __popcnt(long long in) {
+  return _mm_popcnt_u64(in);
+}
+__forceinline size_t __popcnt(size_t in) {
+  return _mm_popcnt_u64(in);
+}
+#endif
+
+__forceinline int __bsf(int v) {
+#if defined(__KERNEL_AVX2__)
+  return _tzcnt_u32(v);
+#else
+  unsigned long r = 0; _BitScanForward(&r,v); return r;
+#endif
 }
 
+__forceinline unsigned int __bsf(unsigned int v) {
+#if defined(__KERNEL_AVX2__)
+  return _tzcnt_u32(v);
 #else
+  unsigned long r = 0; _BitScanForward(&r,v); return r;
+#endif
+}
 
-/* somewhat slower version for SSE2 */
-typedef int shuffle_swap_t;
+__forceinline int __bsr(int v) {
+  unsigned long r = 0; _BitScanReverse(&r,v); return r;
+}
 
-ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void)
-{
-	return 0;
+__forceinline int __btc(int v, int i) {
+  long r = v; _bittestandcomplement(&r,i); return r;
 }
 
-ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void)
-{
-	return 1;
+__forceinline int __bts(int v, int i) {
+  long r = v; _bittestandset(&r,i); return r;
 }
 
-ccl_device_inline const __m128 shuffle_swap(const __m128& a, shuffle_swap_t shuf)
-{
-	/* shuffle value must be a constant, so we need to branch */
-	if(shuf)
-		return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
-	else
-		return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0));
+__forceinline int __btr(int v, int i) {
+  long r = v; _bittestandreset(&r,i); return r;
 }
 
+__forceinline int bitscan(int v) {
+#if defined(__KERNEL_AVX2__)
+  return _tzcnt_u32(v);
+#else
+  return __bsf(v);
 #endif
+}
 
-#ifdef __KERNEL_SSE41__
-ccl_device_inline void gen_idirsplat_swap(const __m128 &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap,
-                                          const float3& idir, __m128 idirsplat[3], shuffle_swap_t shufflexyz[3])
+__forceinline int clz(const int x)
 {
-	const __m128 idirsplat_raw[] = { _mm_set_ps1(idir.x), _mm_set_ps1(idir.y), _mm_set_ps1(idir.z) };
-	idirsplat[0] = _mm_xor_ps(idirsplat_raw[0], pn);
-	idirsplat[1] = _mm_xor_ps(idirsplat_raw[1], pn);
-	idirsplat[2] = _mm_xor_ps(idirsplat_raw[2], pn);
-
-	const __m128 signmask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
-	const __m128 shuf_identity_f = _mm_castsi128_ps(shuf_identity);
-	const __m128 shuf_swap_f = _mm_castsi128_ps(shuf_swap);
-	shufflexyz[0] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[0], signmask)));
-	shufflexyz[1] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[1], signmask)));
-	shufflexyz[2] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[2], signmask)));
-}
+#if defined(__KERNEL_AVX2__)
+  return _lzcnt_u32(x);
 #else
-ccl_device_inline void gen_idirsplat_swap(const __m128 &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap,
-                                          const float3& idir, __m128 idirsplat[3], shuffle_swap_t shufflexyz[3])
-{
-	idirsplat[0] = _mm_xor_ps(_mm_set_ps1(idir.x), pn);
-	idirsplat[1] = _mm_xor_ps(_mm_set_ps1(idir.y), pn);
-	idirsplat[2] = _mm_xor_ps(_mm_set_ps1(idir.z), pn);
-
-	shufflexyz[0] = (idir.x >= 0)? shuf_identity: shuf_swap;
-	shufflexyz[1] = (idir.y >= 0)? shuf_identity: shuf_swap;
-	shufflexyz[2] = (idir.z >= 0)? shuf_identity: shuf_swap;
-}
+  if (UNLIKELY(x == 0)) return 32;
+  return 31 - __bsr(x);
 #endif
+}
 
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128 shuffle(const __m128& a, const __m128& b)
+__forceinline int __bscf(int& v)
 {
-	return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
+  int i = __bsf(v);
+  v &= v-1;
+  return i;
 }
 
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128 shuffle(const __m128& a)
+__forceinline unsigned int __bscf(unsigned int& v)
 {
-	return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(a), _MM_SHUFFLE(i3, i2, i1, i0)));
+  unsigned int i = __bsf(v);
+  v &= v-1;
+  return i;
 }
 
-template<> __forceinline const __m128 shuffle<0, 1, 0, 1>(const __m128& a)
-{
-	return _mm_movelh_ps(a, a);
+#if defined(__KERNEL_64_BIT__)
+
+__forceinline size_t __bsf(size_t v) {
+#if defined(__KERNEL_AVX2__)
+  return _tzcnt_u64(v);
+#else
+  unsigned long r = 0; _BitScanForward64(&r,v); return r;
+#endif
 }
 
-template<> __forceinline const __m128 shuffle<2, 3, 2, 3>(const __m128& a)
-{
-	return _mm_movehl_ps(a, a);
+__forceinline size_t __bsr(size_t v) {
+  unsigned long r = 0; _BitScanReverse64(&r,v); return r;
 }
 
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128i shuffle(const __m128i& a)
-{
-	return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0));
+__forceinline size_t __btc(size_t v, size_t i) {
+  size_t r = v; _bittestandcomplement64((__int64*)&r,i); return r;
 }
 
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128i shuffle(const __m128i& a, const __m128i& b)
-{
-	return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
+__forceinline size_t __bts(size_t v, size_t i) {
+  __int64 r = v; _bittestandset64(&r,i); return r;
 }
 
-/* Blend 2 vectors based on mask: (a[i] & mask[i]) | (b[i] & ~mask[i]) */
-#ifdef __KERNEL_SSE41__
-ccl_device_inline const __m128 blend(const __m128& mask, const __m128& a, const __m128& b)
-{
-	return _mm_blendv_ps(b, a, mask);
+__forceinline size_t __btr(size_t v, size_t i) {
+  __int64 r = v; _bittestandreset64(&r,i); return r;
 }
+
+__forceinline size_t bitscan(size_t v) {
+#if defined(__KERNEL_AVX2__)
+#if defined(__KERNEL_64_BIT__)
+  return _tzcnt_u64(v);
 #else
-ccl_device_inline const __m128 blend(const __m128& mask, const __m128& a, const __m128& b)
-{
-	return _mm_or_ps(_mm_and_ps(mask, a), _mm_andnot_ps(mask, b));
-}
+  return _tzcnt_u32(v);
+#endif
+#else
+  return __bsf(v);
+#endif
+}
 
-/* calculate a*b+c (replacement for fused multiply-add on SSE CPUs) */
-ccl_device_inline const __m128 fma(const __m128& a, const __m128& b, const __m128& c)
+__forceinline size_t __bscf(size_t& v)
 {
-	return _mm_add_ps(_mm_mul_ps(a, b), c);
+  size_t i = __bsf(v);
+  v &= v-1;
+  return i;
 }
 
-/* calculate a*b-c (replacement for fused multiply-subtract on SSE CPUs) */
-ccl_device_inline const __m128 fms(const __m128& a, const __m128& b, const __m128& c)
-{
-	return _mm_sub_ps(_mm_mul_ps(a, b), c);
+#endif /* __KERNEL_64_BIT__ */
+
+#else /* _WIN32 */
+
+__forceinline unsigned int __popcnt(unsigned int in) {
+  int r = 0; asm ("popcnt %1,%0" : "=r"(r) : "r"(in)); return r;
 }
 
-/* calculate -a*b+c (replacement for fused negated-multiply-subtract on SSE CPUs) */
-ccl_device_inline const __m128 fnma(const __m128& a, const __m128& b, const __m128& c)
-{
-	return _mm_sub_ps(c, _mm_mul_ps(a, b));
+__forceinline int __bsf(int v) {
+  int r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
 }
-template<size_t N> ccl_device_inline const __m128 broadcast(const __m128& a)
-{
-	return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(a), _MM_SHUFFLE(N, N, N, N)));
+__forceinline int __bsr(int v) {
+  int r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
 }
 
-template<size_t N> ccl_device_inline const __m128i broadcast(const __m128i& a)
-{
-	return _mm_shuffle_epi32(a, _MM_SHUFFLE(N, N, N, N));
+__forceinline int __btc(int v, int i) {
+  int r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
 }
 
-ccl_device_inline const __m128 uint32_to_float(const __m128i &in)
-{
-	__m128i a = _mm_srli_epi32(in, 16);
-	__m128i b = _mm_and_si128(in, _mm_set1_epi32(0x0000ffff));
-	__m128i c = _mm_or_si128(a, _mm_set1_epi32(0x53000000));
-	__m128 d = _mm_cvtepi32_ps(b);
-	__m128 e = _mm_sub_ps(_mm_castsi128_ps(c), _mm_castsi128_ps(_mm_set1_epi32(0x53000000)));
-	return _mm_add_ps(e, d);
+__forceinline int __bts(int v, int i) {
+  int r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
 }
 
-template<size_t S1, size_t S2, size_t S3, size_t S4>
-ccl_device_inline const __m128 set_sign_bit(const __m128 &a)
-{
-	return _mm_xor_ps(a, _mm_castsi128_ps(_mm_setr_epi32(S1 << 31, S2 << 31, S3 << 31, S4 << 31)));
+__forceinline int __btr(int v, int i) {
+  int r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
 }
 
-#ifdef __KERNEL_WITH_SSE_ALIGN__
-ccl_device_inline const __m128 load_m128(const float4 &vec)
-{
-	return _mm_load_ps(&vec.x);
+__forceinline size_t __bsf(size_t v) {
+  size_t r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
 }
 
-ccl_device_inline const __m128 load_m128(const float3 &vec)
-{
-	return _mm_load_ps(&vec.x);
+__forceinline unsigned int __bsf(unsigned int v) {
+  unsigned int r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
 }
 
-#else
+__forceinline size_t __bsr(size_t v) {
+  size_t r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
+}
 
-ccl_device_inline const __m128 load_m128(const float4 &vec)
-{
-	return _mm_loadu_ps(&vec.x);
+__forceinline size_t __btc(size_t v, size_t i) {
+  size_t r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
 }
 
-ccl_device_inline const __m128 load_m128(const float3 &vec)
-{
-	return _mm_loadu_ps(&vec.x);
+__forceinline size_t __bts(size_t v, size_t i) {
+  size_t r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
 }
-#endif /* __KERNEL_WITH_SSE_ALIGN__ */
 
-ccl_device_inline const __m128 dot3_splat(const __m128& a, const __m128& b)
-{
-#ifdef __KERNEL_SSE41__
-	return _mm_dp_ps(a, b, 0x7f);
+__forceinline size_t __btr(size_t v, size_t i) {
+  size_t r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
+}
+
+__forceinline int bitscan(int v) {
+#if defined(__KERNEL_AVX2__)
+  return _tzcnt_u32(v);
 #else
-	__m128 t = _mm_mul_ps(a, b);
-	return _mm_set1_ps(((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2]);
+  return __bsf(v);
 #endif
 }
 
-/* squared length taking only specified axes into account */
-template<size_t X, size_t Y, size_t Z, size_t W>
-ccl_device_inline float len_squared(const __m128& a)
-{
-#ifndef __KERNEL_SSE41__
-	float4& t = (float4 &)a;
-	return (X ? t.x * t.x : 0.0f) + (Y ? t.y * t.y : 0.0f) + (Z ? t.z * t.z : 0.0f) + (W ? t.w * t.w : 0.0f);
+__forceinline unsigned int bitscan(unsigned int v) {
+#if defined(__KERNEL_AVX2__)
+  return _tzcnt_u32(v);
 #else
-	return _mm_cvtss_f32(_mm_dp_ps(a, a, (X << 4) | (Y << 5) | (Z << 6) | (W << 7) | 0xf));
+  return __bsf(v);
 #endif
 }
 
-ccl_device_inline float dot3(const __m128& a, const __m128& b)
-{
-#ifdef __KERNEL_SSE41__
-	return _mm_cvtss_f32(_mm_dp_ps(a, b, 0x7f));
+__forceinline size_t bitscan(size_t v) {
+#if defined(__KERNEL_AVX2__)
+#if defined(__KERNEL_64_BIT__)
+  return _tzcnt_u64(v);
 #else
-	__m128 t = _mm_mul_ps(a, b);
-	return ((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2];
+  return _tzcnt_u32(v);
+#endif
+#else
+  return __bsf(v);
 #endif
 }
 
-ccl_device_inline const __m128 len3_squared_splat(const __m128& a)
+__forceinline int clz(const int x)
 {
-	return dot3_splat(a, a);
+#if defined(__KERNEL_AVX2__)
+  return _lzcnt_u32(x);
+#else
+  if (UNLIKELY(x == 0)) return 32;
+  return 31 - __bsr(x);
+#endif
 }
 
-ccl_device_inline float len3_squared(const __m128& a)
+__forceinline int __bscf(int& v)
 {
-	return dot3(a, a);
+  int i = bitscan(v);
+#if defined(__KERNEL_AVX2__)
+  v &= v-1;
+#else
+  v = __btc(v,i);
+#endif
+  return i;
 }
 
-ccl_device_inline float len3(const __m128& a)
+__forceinline unsigned int __bscf(unsigned int& v)
 {
-	return _mm_cvtss_f32(_mm_sqrt_ss(dot3_splat(a, a)));
+  unsigned int i = bitscan(v);
+  v &= v-1;
+  return i;
 }
 
-/* calculate shuffled cross product, useful when order of components does not matter */
-ccl_device_inline const __m128 cross_zxy(const __m128& a, const __m128& b)
+__forceinline size_t __bscf(size_t& v)
 {
-	return fms(a, shuffle<1, 2, 0, 3>(b), _mm_mul_ps(b, shuffle<1, 2, 0, 3>(a)));
+  size_t i = bitscan(v);
+#if defined(__KERNEL_AVX2__)
+  v &= v-1;
+#else
+  v = __btc(v,i);
+#endif
+  return i;
+}
+
+#endif /* _WIN32 */
+
+static const unsigned int BITSCAN_NO_BIT_SET_32 = 32;
+static const size_t BITSCAN_NO_BIT_SET_64 = 64;
+
+/* Emulation of SSE4 functions with SSE3 */
+
+#if defined(__KERNEL_SSE3) && !defined(__KERNEL_SSE4__)
+
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _mm_blendv_ps __emu_mm_blendv_ps
+__forceinline __m128 _mm_blendv_ps( __m128 value, __m128 input, __m128 mask ) {
+  return _mm_or_ps(_mm_and_ps(mask, input), _mm_andnot_ps(mask, value));
+}
+
+#define _mm_blend_ps __emu_mm_blend_ps
+__forceinline __m128 _mm_blend_ps( __m128 value, __m128 input, const int mask ) {
+  assert(mask < 0x10); return _mm_blendv_ps(value, input, _mm_lookupmask_ps[mask]);
+}
+
+#define _mm_blendv_epi8 __emu_mm_blendv_epi8
+__forceinline __m128i _mm_blendv_epi8( __m128i value, __m128i input, __m128i mask ) {
+  return _mm_or_si128(_mm_and_si128(mask, input), _mm_andnot_si128(mask, value));
+}
+
+#define _mm_mullo_epi32 __emu_mm_mullo_epi32
+__forceinline __m128i _mm_mullo_epi32( __m128i value, __m128i input ) {
+  __m128i rvalue;
+  char* _r = (char*)(&rvalue + 1);
+  char* _v = (char*)(& value + 1);
+  char* _i = (char*)(& input + 1);
+  for ( ssize_t i = -16 ; i != 0 ; i += 4 ) *((int32*)(_r + i)) = *((int32*)(_v + i))* *((int32*)(_i + i));
+  return rvalue;
+}
+
+
+#define _mm_min_epi32 __emu_mm_min_epi32
+__forceinline __m128i _mm_min_epi32( __m128i value, __m128i input ) {
+  return _mm_blendv_epi8(input, value, _mm_cmplt_epi32(value, input));
+}
+
+#define _mm_max_epi32 __emu_mm_max_epi32
+__forceinline __m128i _mm_max_epi32( __m128i value, __m128i input ) {
+  return _mm_blendv_epi8(value, input, _mm_cmplt_epi32(value, input));
+}
+
+#define _mm_extract_epi32 __emu_mm_extract_epi32
+__forceinline int _mm_extract_epi32( __m128i input, const int index ) {
+  switch ( index ) {
+  case 0: return _mm_cvtsi128_si32(input);
+  case 1: return _mm_cvtsi128_si32(_mm_shuffle_epi32(input, _MM_SHUFFLE(1, 1, 1, 1)));
+  case 2: return _mm_cvtsi128_si32(_mm_shuffle_epi32(input, _MM_SHUFFLE(2, 2, 2, 2)));
+  case 3: return _mm_cvtsi128_si32(_mm_shuffle_epi32(input, _MM_SHUFFLE(3, 3, 3, 3)));
+  default: assert(false); return 0;
+  }
 }
 
-ccl_device_inline const __m128 cross(const __m128& a, const __m128& b)
+-ccl_device_inline const __m128 cross(const __m128& a, const __m128& b)
+#define _mm_insert_epi32 __emu_mm_insert_epi32
+__forceinline __m128i _mm_insert_epi32( __m128i value, int input, const int index ) {
+  assert(index >= 0 && index < 4); ((int*)&value)[index] = input; return value;
+}
+
+#define _mm_extract_ps __emu_mm_extract_ps
+__forceinline int _mm_extract_ps( __m128 input, const int index ) {
+  int32* ptr = (int32*)&input; return ptr[index];
+}
+
+#define _mm_insert_ps __emu_mm_insert_ps
+__forceinline __m128 _mm_insert_ps( __m128 value, __m128 input, const int index )
+{ assert(index < 0x100); ((float*)&value)[(index >> 4)&0x3] = ((float*)&input)[index >> 6]; return _mm_andnot_ps(_mm_lookupmask_ps[index&0xf], value); }
+
+#define _mm_round_ps __emu_mm_round_ps
+__forceinline __m128 _mm_round_ps( __m128 value, const int flags )
 {
-	return shuffle<1, 2, 0, 3>(cross_zxy(a, b));
+-	return shuffle<1, 2, 0, 3>(cross_zxy(a, b));
+  switch ( flags )
+  {
+  case _MM_FROUND_TO_NEAREST_INT: return _mm_cvtepi32_ps(_mm_cvtps_epi32(value));
+  case _MM_FROUND_TO_NEG_INF    : return _mm_cvtepi32_ps(_mm_cvtps_epi32(_mm_add_ps(value, _mm_set1_ps(-0.5f))));
+  case _MM_FROUND_TO_POS_INF    : return _mm_cvtepi32_ps(_mm_cvtps_epi32(_mm_add_ps(value, _mm_set1_ps( 0.5f))));
+  case _MM_FROUND_TO_ZERO       : return _mm_cvtepi32_ps(_mm_cvttps_epi32(value));
+  }
+  return value;
+}
+
+#ifdef _M_X64
+#define _mm_insert_epi64 __emu_mm_insert_epi64
+__forceinline __m128i _mm_insert_epi64( __m128i value, __int64 input, const int index ) {
+  assert(size_t(index) < 4); ((__int64*)&value)[index] = input; return value;
 }
+#define _mm_extract_epi64 __emu_mm_extract_epi64
+__forceinline __int64 _mm_extract_epi64( __m128i input, const int index ) {
+  assert(size_t(index) < 2);
+  return index == 0 ? _mm_cvtsi128_si64x(input) : _mm_cvtsi128_si64x(_mm_unpackhi_epi64(input, input));
+}
+#endif
+
+#endif
 
 #endif /* __KERNEL_SSE2__ */
 
 CCL_NAMESPACE_END
 
-#endif /* __UTIL_SIMD_H__ */
+#include "util_math.h"
+#include "util_sseb.h"
+#include "util_ssei.h"
+#include "util_ssef.h"
+
+#endif /* __UTIL_SIMD_TYPES_H__ */
diff --git a/intern/cycles/util/util_sseb.h b/intern/cycles/util/util_sseb.h
new file mode 100644
index 00000000000..be510256dd3
--- /dev/null
+++ b/intern/cycles/util/util_sseb.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0(the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+#ifndef __UTIL_SSEB_H__
+#define __UTIL_SSEB_H__
+
+CCL_NAMESPACE_BEGIN
+
+#ifdef __KERNEL_SSE2__
+
+/*! 4-wide SSE bool type. */
+struct sseb
+{
+	typedef sseb Mask;   // mask type
+	typedef ssei Int;    // int type
+	typedef ssef Float;  // float type
+
+	enum { size = 4 };                       // number of SIMD elements
+	union { __m128 m128; int32_t v[4]; };    // data
+
+	////////////////////////////////////////////////////////////////////////////////
+	/// Constructors, Assignment & Cast Operators
+	////////////////////////////////////////////////////////////////////////////////
+
+	__forceinline sseb ( ) {}
+	__forceinline sseb ( const sseb& other ) { m128 = other.m128; }
+	__forceinline sseb& operator=( const sseb& other ) { m128 = other.m128; return *this; }
+
+	__forceinline sseb( const __m128 input ) : m128(input) {}
+	__forceinline operator const __m128&( void ) const { return m128; }
+	__forceinline operator const __m128i( void ) const { return _mm_castps_si128(m128); }
+	__forceinline operator const __m128d( void ) const { return _mm_castps_pd(m128); }
+
+	__forceinline sseb ( bool a )
+		: m128(_mm_lookupmask_ps[(size_t(a) << 3) | (size_t(a) << 2) | (size_t(a) << 1) | size_t(a)]) {}
+	__forceinline sseb ( bool a, bool b)
+		: m128(_mm_lookupmask_ps[(size_t(b) << 3) | (size_t(a) << 2) | (size_t(b) << 1) | size_t(a)]) {}
+	__forceinline sseb ( bool a, bool b, bool c, bool d)
+		: m128(_mm_lookupmask_ps[(size_t(d) << 3) | (size_t(c) << 2) | (size_t(b) << 1) | size_t(a)]) {}
+	__forceinline sseb(int mask) {
+		assert(mask >= 0 && mask < 16);
+		m128 = _mm_lookupmask_ps[mask];
+	}
+
+	////////////////////////////////////////////////////////////////////////////////
+	/// Constants
+	////////////////////////////////////////////////////////////////////////////////
+
+	__forceinline sseb( FalseTy ) : m128(_mm_setzero_ps()) {}
+	__forceinline sseb( TrueTy  ) : m128(_mm_castsi128_ps(_mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()))) {}
+
+	////////////////////////////////////////////////////////////////////////////////
+	/// Array Access
+	////////////////////////////////////////////////////////////////////////////////
+
+	__forceinline bool operator []( const size_t i ) const { assert(i < 4); return (_mm_movemask_ps(m128) >> i) & 1; }
+	__forceinline int32_t& operator []( const size_t i ) { assert(i < 4); return v[i]; }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+/// Unary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator !( const sseb& a ) { return _mm_xor_ps(a, sseb(True)); }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Binary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator &( const sseb& a, const sseb& b ) { return _mm_and_ps(a, b); }
+__forceinline const sseb operator |( const sseb& a, const sseb& b ) { return _mm_or_ps (a, b); }
+__forceinline const sseb operator ^( const sseb& a, const sseb& b ) { return _mm_xor_ps(a, b); }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Assignment Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator &=( sseb& a, const sseb& b ) { return a = a & b; }
+__forceinline const sseb operator |=( sseb& a, const sseb& b ) { return a = a | b; }
+__forceinline const sseb operator ^=( sseb& a, const sseb& b ) { return a = a ^ b; }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Comparison Operators + Select
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator !=( const sseb& a, const sseb& b ) { return _mm_xor_ps(a, b); }
+__forceinline const sseb operator ==( const sseb& a, const sseb& b ) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
+
+__forceinline const sseb select( const sseb& m, const sseb& t, const sseb& f ) {
+#if defined(__KERNEL_SSE41__)
+	return _mm_blendv_ps(f, t, m);
+#else
+	return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Movement/Shifting/Shuffling Functions
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb unpacklo( const sseb& a, const sseb& b ) { return _mm_unpacklo_ps(a, b); }
+__forceinline const sseb unpackhi( const sseb& a, const sseb& b ) { return _mm_unpackhi_ps(a, b); }
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const sseb shuffle( const sseb& a ) {
+	return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0));
+}
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const sseb shuffle( const sseb& a, const sseb& b ) {
+	return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
+}
+
+#if defined(__KERNEL_SSE3__)
+template<> __forceinline const sseb shuffle<0, 0, 2, 2>( const sseb& a ) { return _mm_moveldup_ps(a); }
+template<> __forceinline const sseb shuffle<1, 1, 3, 3>( const sseb& a ) { return _mm_movehdup_ps(a); }
+template<> __forceinline const sseb shuffle<0, 1, 0, 1>( const sseb& a ) { return _mm_castpd_ps(_mm_movedup_pd (a)); }
+#endif
+
+#if defined(__KERNEL_SSE41__)
+template<size_t dst, size_t src, size_t clr> __forceinline const sseb insert( const sseb& a, const sseb& b ) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
+template<size_t dst, size_t src> __forceinline const sseb insert( const sseb& a, const sseb& b ) { return insert<dst, src, 0>(a, b); }
+template<size_t dst> __forceinline const sseb insert( const sseb& a, const bool b ) { return insert<dst,0>(a, sseb(b)); }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Reduction Operations
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_SSE41__)
+__forceinline size_t popcnt( const sseb& a ) { return __popcnt(_mm_movemask_ps(a)); }
+#else
+__forceinline size_t popcnt( const sseb& a ) { return bool(a[0])+bool(a[1])+bool(a[2])+bool(a[3]); }
+#endif
+
+__forceinline bool reduce_and( const sseb& a ) { return _mm_movemask_ps(a) == 0xf; }
+__forceinline bool reduce_or ( const sseb& a ) { return _mm_movemask_ps(a) != 0x0; }
+__forceinline bool all       ( const sseb& b ) { return _mm_movemask_ps(b) == 0xf; }
+__forceinline bool any       ( const sseb& b ) { return _mm_movemask_ps(b) != 0x0; }
+__forceinline bool none      ( const sseb& b ) { return _mm_movemask_ps(b) == 0x0; }
+
+__forceinline size_t movemask( const sseb& a ) { return _mm_movemask_ps(a); }
+
+#endif
+
+CCL_NAMESPACE_END
+
+#endif
+
diff --git a/intern/cycles/util/util_ssef.h b/intern/cycles/util/util_ssef.h
new file mode 100644
index 00000000000..f4236cc616e
--- /dev/null
+++ b/intern/cycles/util/util_ssef.h
@@ -0,0 +1,588 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0(the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+#ifndef __UTIL_SSEF_H__
+#define __UTIL_SSEF_H__
+
+CCL_NAMESPACE_BEGIN
+
+#ifdef __KERNEL_SSE2__
+
+/*! 4-wide SSE float type. */
+struct ssef
+{
+	typedef sseb Mask;   // mask type
+	typedef ssei Int;    // int type
+	typedef ssef Float;  // float type
+
+	enum { size = 4 };                              // number of SIMD elements
+	union { __m128 m128; float f[4]; int i[4]; };   // data
+
+	////////////////////////////////////////////////////////////////////////////////
+	/// Constructors, Assignment & Cast Operators
+	////////////////////////////////////////////////////////////////////////////////
+
+	__forceinline ssef () {}
+	__forceinline ssef (const ssef& other) { m128 = other.m128; }
+	__forceinline ssef& operator=(const ssef& other) { m128 = other.m128; return *this; }
+
+	__forceinline ssef(const __m128 a) : m128(a) {}
+	__forceinline operator const __m128&(void) const { return m128; }
+	__forceinline operator __m128&(void) { return m128; }
+
+	__forceinline ssef (float a) : m128(_mm_set1_ps(a)) {}
+	__forceinline ssef (float a, float b, float c, float d) : m128(_mm_setr_ps(a, b, c, d)) {}
+
+	__forceinline explicit ssef(const __m128i a) : m128(_mm_cvtepi32_ps(a)) {}
+
+	////////////////////////////////////////////////////////////////////////////////
+	/// Loads and Stores
+	////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_AVX__)
+	static __forceinline ssef broadcast(const void* const a) { return _mm_broadcast_ss((float*)a); }
+#else
+	static __forceinline ssef broadcast(const void* const a) { return _mm_set1_ps(*(float*)a); }
+#endif
+
+	////////////////////////////////////////////////////////////////////////////////
+	/// Array Access
+	////////////////////////////////////////////////////////////////////////////////
+
+	__forceinline const float& operator [](const size_t i) const { assert(i < 4); return f[i]; }
+	__forceinline float& operator [](const size_t i) { assert(i < 4); return f[i]; }
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+/// Unary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssef cast      (const __m128i& a) { return _mm_castsi128_ps(a); }
+__forceinline const ssef operator +(const ssef& a) { return a; }
+__forceinline const ssef operator -(const ssef& a) { return _mm_xor_ps(a.m128, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
+__forceinline const ssef abs       (const ssef& a) { return _mm_and_ps(a.m128, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); }
+#if defined(__KERNEL_SSE41__)
+__forceinline const ssef sign      (const ssef& a) { return _mm_blendv_ps(ssef(1.0f), -ssef(1.0f), _mm_cmplt_ps(a,ssef(0.0f))); }
+#endif
+__forceinline const ssef signmsk   (const ssef& a) { return _mm_and_ps(a.m128,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
+
+__forceinline const ssef rcp (const ssef& a) {
+	const ssef r = _mm_rcp_ps(a.m128);
+	return _mm_sub_ps(_mm_add_ps(r, r), _mm_mul_ps(_mm_mul_ps(r, r), a));
+}
+__forceinline const ssef sqr    (const ssef& a) { return _mm_mul_ps(a,a); }
+__forceinline const ssef mm_sqrt(const ssef& a) { return _mm_sqrt_ps(a.m128); }
+__forceinline const ssef rsqrt  (const ssef& a) {
+	const ssef r = _mm_rsqrt_ps(a.m128);
+	return _mm_add_ps(_mm_mul_ps(_mm_set_ps(1.5f, 1.5f, 1.5f, 1.5f), r),
+	                  _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set_ps(-0.5f, -0.5f, -0.5f, -0.5f)), r), _mm_mul_ps(r, r)));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Binary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssef operator +(const ssef& a, const ssef& b) { return _mm_add_ps(a.m128, b.m128); }
+__forceinline const ssef operator +(const ssef& a, const float& b) { return a + ssef(b); }
+__forceinline const ssef operator +(const float& a, const ssef& b) { return ssef(a) + b; }
+
+__forceinline const ssef operator -(const ssef& a, const ssef& b) { return _mm_sub_ps(a.m128, b.m128); }
+__forceinline const ssef operator -(const ssef& a, const float& b) { return a - ssef(b); }
+__forceinline const ssef operator -(const float& a, const ssef& b) { return ssef(a) - b; }
+
+__forceinline const ssef operator *(const ssef& a, const ssef& b) { return _mm_mul_ps(a.m128, b.m128); }
+__forceinline const ssef operator *(const ssef& a, const float& b) { return a * ssef(b); }
+__forceinline const ssef operator *(const float& a, const ssef& b) { return ssef(a) * b; }
+
+__forceinline const ssef operator /(const ssef& a, const ssef& b) { return _mm_div_ps(a.m128,b.m128); }
+__forceinline const ssef operator /(const ssef& a, const float& b) { return a/ssef(b); }
+__forceinline const ssef operator /(const float& a, const ssef& b) { return ssef(a)/b; }
+
+__forceinline const ssef operator^(const ssef& a, const ssef& b) { return _mm_xor_ps(a.m128,b.m128); }
+__forceinline const ssef operator^(const ssef& a, const ssei& b) { return _mm_xor_ps(a.m128,_mm_castsi128_ps(b.m128)); }
+
+__forceinline const ssef operator&(const ssef& a, const ssef& b) { return _mm_and_ps(a.m128,b.m128); }
+__forceinline const ssef operator&(const ssef& a, const ssei& b) { return _mm_and_ps(a.m128,_mm_castsi128_ps(b.m128)); }
+
+__forceinline const ssef andnot(const ssef& a, const ssef& b) { return _mm_andnot_ps(a.m128,b.m128); }
+
+__forceinline const ssef min(const ssef& a, const ssef& b) { return _mm_min_ps(a.m128,b.m128); }
+__forceinline const ssef min(const ssef& a, const float& b) { return _mm_min_ps(a.m128,ssef(b)); }
+__forceinline const ssef min(const float& a, const ssef& b) { return _mm_min_ps(ssef(a),b.m128); }
+
+__forceinline const ssef max(const ssef& a, const ssef& b) { return _mm_max_ps(a.m128,b.m128); }
+__forceinline const ssef max(const ssef& a, const float& b) { return _mm_max_ps(a.m128,ssef(b)); }
+__forceinline const ssef max(const float& a, const ssef& b) { return _mm_max_ps(ssef(a),b.m128); }
+
+#if defined(__KERNEL_SSE41__)
+__forceinline ssef mini(const ssef& a, const ssef& b) {
+	const ssei ai = _mm_castps_si128(a);
+	const ssei bi = _mm_castps_si128(b);
+	const ssei ci = _mm_min_epi32(ai,bi);
+	return _mm_castsi128_ps(ci);
+}
+#endif
+
+#if defined(__KERNEL_SSE41__)
+__forceinline ssef maxi(const ssef& a, const ssef& b) {
+	const ssei ai = _mm_castps_si128(a);
+	const ssei bi = _mm_castps_si128(b);
+	const ssei ci = _mm_max_epi32(ai,bi);
+	return _mm_castsi128_ps(ci);
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Ternary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_AVX2__)
+__forceinline const ssef madd (const ssef& a, const ssef& b, const ssef& c) { return _mm_fmadd_ps(a,b,c); }
+__forceinline const ssef msub (const ssef& a, const ssef& b, const ssef& c) { return _mm_fmsub_ps(a,b,c); }
+__forceinline const ssef nmadd(const ssef& a, const ssef& b, const ssef& c) { return _mm_fnmadd_ps(a,b,c); }
+__forceinline const ssef nmsub(const ssef& a, const ssef& b, const ssef& c) { return _mm_fnmsub_ps(a,b,c); }
+#else
+__forceinline const ssef madd (const ssef& a, const ssef& b, const ssef& c) { return a*b+c; }
+__forceinline const ssef msub (const ssef& a, const ssef& b, const ssef& c) { return a*b-c; }
+__forceinline const ssef nmadd(const ssef& a, const ssef& b, const ssef& c) { return -a*b-c;}
+__forceinline const ssef nmsub(const ssef& a, const ssef& b, const ssef& c) { return c-a*b; }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Assignment Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssef& operator +=(ssef& a, const ssef& b) { return a = a + b; }
+__forceinline ssef& operator +=(ssef& a, const float& b) { return a = a + b; }
+
+__forceinline ssef& operator -=(ssef& a, const ssef& b) { return a = a - b; }
+__forceinline ssef& operator -=(ssef& a, const float& b) { return a = a - b; }
+
+__forceinline ssef& operator *=(ssef& a, const ssef& b) { return a = a * b; }
+__forceinline ssef& operator *=(ssef& a, const float& b) { return a = a * b; }
+
+__forceinline ssef& operator /=(ssef& a, const ssef& b) { return a = a / b; }
+__forceinline ssef& operator /=(ssef& a, const float& b) { return a = a / b; }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Comparison Operators + Select
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator ==(const ssef& a, const ssef& b) { return _mm_cmpeq_ps(a.m128, b.m128); }
+__forceinline const sseb operator ==(const ssef& a, const float& b) { return a == ssef(b); }
+__forceinline const sseb operator ==(const float& a, const ssef& b) { return ssef(a) == b; }
+
+__forceinline const sseb operator !=(const ssef& a, const ssef& b) { return _mm_cmpneq_ps(a.m128, b.m128); }
+__forceinline const sseb operator !=(const ssef& a, const float& b) { return a != ssef(b); }
+__forceinline const sseb operator !=(const float& a, const ssef& b) { return ssef(a) != b; }
+
+__forceinline const sseb operator <(const ssef& a, const ssef& b) { return _mm_cmplt_ps(a.m128, b.m128); }
+__forceinline const sseb operator <(const ssef& a, const float& b) { return a < ssef(b); }
+__forceinline const sseb operator <(const float& a, const ssef& b) { return ssef(a) < b; }
+
+__forceinline const sseb operator >=(const ssef& a, const ssef& b) { return _mm_cmpnlt_ps(a.m128, b.m128); }
+__forceinline const sseb operator >=(const ssef& a, const float& b) { return a >= ssef(b); }
+__forceinline const sseb operator >=(const float& a, const ssef& b) { return ssef(a) >= b; }
+
+__forceinline const sseb operator >(const ssef& a, const ssef& b) { return _mm_cmpnle_ps(a.m128, b.m128); }
+__forceinline const sseb operator >(const ssef& a, const float& b) { return a > ssef(b); }
+__forceinline const sseb operator >(const float& a, const ssef& b) { return ssef(a) > b; }
+
+__forceinline const sseb operator <=(const ssef& a, const ssef& b) { return _mm_cmple_ps(a.m128, b.m128); }
+__forceinline const sseb operator <=(const ssef& a, const float& b) { return a <= ssef(b); }
+__forceinline const sseb operator <=(const float& a, const ssef& b) { return ssef(a) <= b; }
+
+__forceinline const ssef select(const sseb& m, const ssef& t, const ssef& f) {
+#ifdef __KERNEL_SSE41__
+	return _mm_blendv_ps(f, t, m);
+#else
+	return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
+#endif
+}
+
+__forceinline const ssef select(const ssef& m, const ssef& t, const ssef& f) {
+#ifdef __KERNEL_SSE41__
+	return _mm_blendv_ps(f, t, m);
+#else
+	return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
+#endif
+}
+
+__forceinline const ssef select(const int mask, const ssef& t, const ssef& f) {
+#if defined(__KERNEL_SSE41__) && ((!defined(__clang__) && !defined(_MSC_VER)) || defined(__INTEL_COMPILER))
+	return _mm_blend_ps(f, t, mask);
+#else
+	return select(sseb(mask),t,f);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Rounding Functions
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_SSE41__)
+__forceinline const ssef round_even(const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
+__forceinline const ssef round_down(const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF    ); }
+__forceinline const ssef round_up  (const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF    ); }
+__forceinline const ssef round_zero(const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO       ); }
+__forceinline const ssef floor     (const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF    ); }
+__forceinline const ssef ceil      (const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF    ); }
+#endif
+
+__forceinline ssei truncatei(const ssef& a) {
+	return _mm_cvttps_epi32(a.m128);
+}
+
+__forceinline ssei floori(const ssef& a) {
+#if defined(__KERNEL_SSE41__)
+	return ssei(floor(a));
+#else
+	return ssei(a-ssef(0.5f));
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Movement/Shifting/Shuffling Functions
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssef unpacklo(const ssef& a, const ssef& b) { return _mm_unpacklo_ps(a.m128, b.m128); }
+__forceinline ssef unpackhi(const ssef& a, const ssef& b) { return _mm_unpackhi_ps(a.m128, b.m128); }
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssef shuffle(const ssef& b) {
+	return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(b), _MM_SHUFFLE(i3, i2, i1, i0)));
+}
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssef shuffle(const ssef& a, const ssef& b) {
+	return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
+}
+
+#if defined(__KERNEL_SSSE3__)
+__forceinline const ssef shuffle8(const ssef& a, const ssei& shuf) {
+	return _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(a), shuf));
+}
+#endif
+
+#if defined(__KERNEL_SSE3__)
+template<> __forceinline const ssef shuffle<0, 0, 2, 2>(const ssef& b) { return _mm_moveldup_ps(b); }
+template<> __forceinline const ssef shuffle<1, 1, 3, 3>(const ssef& b) { return _mm_movehdup_ps(b); }
+template<> __forceinline const ssef shuffle<0, 1, 0, 1>(const ssef& b) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(b))); }
+#endif
+
+template<size_t i0> __forceinline const ssef shuffle(const ssef& b) {
+	return shuffle<i0,i0,i0,i0>(b);
+}
+
+#if defined(__KERNEL_SSE41__) && !defined(__GNUC__)
+template<size_t i> __forceinline float extract   (const ssef& a) { return _mm_cvtss_f32(_mm_extract_ps(a,i)); }
+#else
+template<size_t i> __forceinline float extract   (const ssef& a) { return _mm_cvtss_f32(shuffle<i,i,i,i>(a)); }
+#endif
+template<>         __forceinline float extract<0>(const ssef& a) { return _mm_cvtss_f32(a); }
+
+#if defined(__KERNEL_SSE41__)
+template<size_t dst, size_t src, size_t clr> __forceinline const ssef insert(const ssef& a, const ssef& b) { return _mm_insert_ps(a, b,(dst << 4) |(src << 6) | clr); }
+template<size_t dst, size_t src> __forceinline const ssef insert(const ssef& a, const ssef& b) { return insert<dst, src, 0>(a, b); }
+template<size_t dst> __forceinline const ssef insert(const ssef& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); }
+#else
+template<size_t dst> __forceinline const ssef insert(const ssef& a, const float b) { ssef c = a; c[dst] = b; return c; }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Transpose
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline void transpose(const ssef& r0, const ssef& r1, const ssef& r2, const ssef& r3, ssef& c0, ssef& c1, ssef& c2, ssef& c3)
+{
+	ssef l02 = unpacklo(r0,r2);
+	ssef h02 = unpackhi(r0,r2);
+	ssef l13 = unpacklo(r1,r3);
+	ssef h13 = unpackhi(r1,r3);
+	c0 = unpacklo(l02,l13);
+	c1 = unpackhi(l02,l13);
+	c2 = unpacklo(h02,h13);
+	c3 = unpackhi(h02,h13);
+}
+
+__forceinline void transpose(const ssef& r0, const ssef& r1, const ssef& r2, const ssef& r3, ssef& c0, ssef& c1, ssef& c2)
+{
+	ssef l02 = unpacklo(r0,r2);
+	ssef h02 = unpackhi(r0,r2);
+	ssef l13 = unpacklo(r1,r3);
+	ssef h13 = unpackhi(r1,r3);
+	c0 = unpacklo(l02,l13);
+	c1 = unpackhi(l02,l13);
+	c2 = unpacklo(h02,h13);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Reductions
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssef vreduce_min(const ssef& v) { ssef h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
+__forceinline const ssef vreduce_max(const ssef& v) { ssef h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
+__forceinline const ssef vreduce_add(const ssef& v) { ssef h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }
+
+__forceinline float reduce_min(const ssef& v) { return _mm_cvtss_f32(vreduce_min(v)); }
+__forceinline float reduce_max(const ssef& v) { return _mm_cvtss_f32(vreduce_max(v)); }
+__forceinline float reduce_add(const ssef& v) { return _mm_cvtss_f32(vreduce_add(v)); }
+
+__forceinline size_t select_min(const ssef& v) { return __bsf(movemask(v == vreduce_min(v))); }
+__forceinline size_t select_max(const ssef& v) { return __bsf(movemask(v == vreduce_max(v))); }
+
+__forceinline size_t select_min(const sseb& valid, const ssef& v) { const ssef a = select(valid,v,ssef(pos_inf)); return __bsf(movemask(valid &(a == vreduce_min(a)))); }
+__forceinline size_t select_max(const sseb& valid, const ssef& v) { const ssef a = select(valid,v,ssef(neg_inf)); return __bsf(movemask(valid &(a == vreduce_max(a)))); }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Memory load and store operations
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssef load4f(const float4& a) {
+#ifdef __KERNEL_WITH_SSE_ALIGN__ + return _mm_load_ps(&a.x); +#else + return _mm_loadu_ps(&a.x); +#endif +} + +__forceinline ssef load4f(const float3& a) { +#ifdef __KERNEL_WITH_SSE_ALIGN__ + return _mm_load_ps(&a.x); +#else + return _mm_loadu_ps(&a.x); +#endif +} + +__forceinline ssef load4f(const void* const a) { + return _mm_load_ps((float*)a); +} + +__forceinline ssef load1f_first(const float a) { + return _mm_set_ss(a); +} + +__forceinline void store4f(void* ptr, const ssef& v) { + _mm_store_ps((float*)ptr,v); +} + +__forceinline ssef loadu4f(const void* const a) { + return _mm_loadu_ps((float*)a); +} + +__forceinline void storeu4f(void* ptr, const ssef& v) { + _mm_storeu_ps((float*)ptr,v); +} + +__forceinline void store4f(const sseb& mask, void* ptr, const ssef& f) { +#if defined(__KERNEL_AVX__) + _mm_maskstore_ps((float*)ptr,(__m128i)mask,f); +#else + *(ssef*)ptr = select(mask,f,*(ssef*)ptr); +#endif +} + +__forceinline ssef load4f_nt(void* ptr) { +#if defined(__KERNEL_SSE41__) + return _mm_castsi128_ps(_mm_stream_load_si128((__m128i*)ptr)); +#else + return _mm_load_ps((float*)ptr); +#endif +} + +__forceinline void store4f_nt(void* ptr, const ssef& v) { +#if defined(__KERNEL_SSE41__) + _mm_stream_ps((float*)ptr,v); +#else + _mm_store_ps((float*)ptr,v); +#endif +} + +//////////////////////////////////////////////////////////////////////////////// +/// Euclidian Space Operators +//////////////////////////////////////////////////////////////////////////////// + +__forceinline float dot(const ssef& a, const ssef& b) { + return reduce_add(a*b); +} + +/* calculate shuffled cross product, useful when order of components does not matter */ +__forceinline ssef cross_zxy(const ssef& a, const ssef& b) +{ + const ssef a0 = a; + const ssef b0 = shuffle<1,2,0,3>(b); + const ssef a1 = shuffle<1,2,0,3>(a); + const ssef b1 = b; + return msub(a0,b0,a1*b1); +} + +__forceinline ssef cross(const ssef& a, const ssef& b) +{ + return shuffle<1,2,0,3>(cross_zxy(a, b)); +} + +ccl_device_inline const ssef dot3_splat(const ssef& a, const ssef& b) +{ +#ifdef __KERNEL_SSE41__ + return _mm_dp_ps(a.m128, b.m128, 0x7f); +#else + ssef t = a * b; + return ssef(((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2]); +#endif +} + +/* squared length taking only specified axes into account */ +template<size_t X, size_t Y, size_t Z, size_t W> +ccl_device_inline float len_squared(const ssef& a) +{ +#ifndef __KERNEL_SSE41__ + float4& t = (float4 &)a; + return (X ? t.x * t.x : 0.0f) + (Y ? t.y * t.y : 0.0f) + (Z ? t.z * t.z : 0.0f) + (W ? 
t.w * t.w : 0.0f); +#else + return extract<0>(ssef(_mm_dp_ps(a.m128, a.m128, (X << 4) | (Y << 5) | (Z << 6) | (W << 7) | 0xf))); +#endif +} + +ccl_device_inline float dot3(const ssef& a, const ssef& b) +{ +#ifdef __KERNEL_SSE41__ + return extract<0>(ssef(_mm_dp_ps(a.m128, b.m128, 0x7f))); +#else + ssef t = a * b; + return ((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2]; +#endif +} + +ccl_device_inline const ssef len3_squared_splat(const ssef& a) +{ + return dot3_splat(a, a); +} + +ccl_device_inline float len3_squared(const ssef& a) +{ + return dot3(a, a); +} + +ccl_device_inline float len3(const ssef& a) +{ + return extract<0>(mm_sqrt(dot3_splat(a, a))); +} + +/* SSE shuffle utility functions */ + +#ifdef __KERNEL_SSSE3__ + +/* faster version for SSSE3 */ +typedef ssei shuffle_swap_t; + +ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void) +{ + return _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +} + +ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void) +{ + return _mm_set_epi8(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); +} + +ccl_device_inline const ssef shuffle_swap(const ssef& a, const shuffle_swap_t& shuf) +{ + return cast(_mm_shuffle_epi8(cast(a), shuf)); +} + +#else + +/* somewhat slower version for SSE2 */ +typedef int shuffle_swap_t; + +ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void) +{ + return 0; +} + +ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void) +{ + return 1; +} + +ccl_device_inline const ssef shuffle_swap(const ssef& a, shuffle_swap_t shuf) +{ + /* shuffle value must be a constant, so we need to branch */ + if(shuf) + return ssef(_mm_shuffle_ps(a.m128, a.m128, _MM_SHUFFLE(1, 0, 3, 2))); + else + return ssef(_mm_shuffle_ps(a.m128, a.m128, _MM_SHUFFLE(3, 2, 1, 0))); +} + +#endif + +#ifdef __KERNEL_SSE41__ + +ccl_device_inline void gen_idirsplat_swap(const ssef &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap, + const float3& idir, ssef idirsplat[3], shuffle_swap_t shufflexyz[3]) +{ + const __m128 idirsplat_raw[] = { _mm_set_ps1(idir.x), _mm_set_ps1(idir.y), _mm_set_ps1(idir.z) }; + idirsplat[0] = _mm_xor_ps(idirsplat_raw[0], pn); + idirsplat[1] = _mm_xor_ps(idirsplat_raw[1], pn); + idirsplat[2] = _mm_xor_ps(idirsplat_raw[2], pn); + + const ssef signmask = cast(ssei(0x80000000)); + const ssef shuf_identity_f = cast(shuf_identity); + const ssef shuf_swap_f = cast(shuf_swap); + + shufflexyz[0] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[0], signmask))); + shufflexyz[1] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[1], signmask))); + shufflexyz[2] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[2], signmask))); +} + +#else + +ccl_device_inline void gen_idirsplat_swap(const ssef &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap, + const float3& idir, ssef idirsplat[3], shuffle_swap_t shufflexyz[3]) +{ + idirsplat[0] = ssef(idir.x) ^ pn; + idirsplat[1] = ssef(idir.y) ^ pn; + idirsplat[2] = ssef(idir.z) ^ pn; + + shufflexyz[0] = (idir.x >= 0)? shuf_identity: shuf_swap; + shufflexyz[1] = (idir.y >= 0)? shuf_identity: shuf_swap; + shufflexyz[2] = (idir.z >= 0)? 
shuf_identity: shuf_swap;
+}
+
+#endif
+
+/* Convert unsigned 32-bit integers to float exactly: the low and high 16-bit
+ * halves are converted separately (the high half via an exponent-bias trick,
+ * scaled by 2^16) and then summed. */
+ccl_device_inline const ssef uint32_to_float(const ssei &in)
+{
+ ssei a = _mm_srli_epi32(in, 16);
+ ssei b = _mm_and_si128(in, _mm_set1_epi32(0x0000ffff));
+ ssei c = _mm_or_si128(a, _mm_set1_epi32(0x53000000));
+ ssef d = _mm_cvtepi32_ps(b);
+ ssef e = _mm_sub_ps(_mm_castsi128_ps(c), _mm_castsi128_ps(_mm_set1_epi32(0x53000000)));
+ return _mm_add_ps(e, d);
+}
+
+/* Flip (by XOR) the sign bit of each component whose template argument is non-zero. */
+template<size_t S1, size_t S2, size_t S3, size_t S4>
+ccl_device_inline const ssef set_sign_bit(const ssef &a)
+{
+ return a ^ cast(ssei(S1 << 31, S2 << 31, S3 << 31, S4 << 31));
+}
+
+#endif
+
+CCL_NAMESPACE_END
+
+#endif
+
diff --git a/intern/cycles/util/util_ssei.h b/intern/cycles/util/util_ssei.h
new file mode 100644
index 00000000000..5f5a8686e35
--- /dev/null
+++ b/intern/cycles/util/util_ssei.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UTIL_SSEI_H__
+#define __UTIL_SSEI_H__
+
+CCL_NAMESPACE_BEGIN
+
+#ifdef __KERNEL_SSE2__
+
+/*! 4-wide SSE integer type. */
+struct ssei
+{
+ typedef sseb Mask; // mask type
+ typedef ssei Int; // int type
+ typedef ssef Float; // float type
+
+ enum { size = 4 }; // number of SIMD elements
+ union { __m128i m128; int32_t i[4]; }; // data
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Constructors, Assignment & Cast Operators
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline ssei ( ) {}
+ __forceinline ssei ( const ssei& a ) { m128 = a.m128; }
+ __forceinline ssei& operator=( const ssei& a ) { m128 = a.m128; return *this; }
+
+ __forceinline ssei( const __m128i a ) : m128(a) {}
+ __forceinline operator const __m128i&( void ) const { return m128; }
+ __forceinline operator __m128i&( void ) { return m128; }
+
+ __forceinline ssei ( const int a ) : m128(_mm_set1_epi32(a)) {}
+ __forceinline ssei ( int a, int b, int c, int d ) : m128(_mm_setr_epi32(a, b, c, d)) {}
+
+ __forceinline explicit ssei( const __m128 a ) : m128(_mm_cvtps_epi32(a)) {}
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Array Access
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline const int32_t& operator []( const size_t index ) const { assert(index < 4); return i[index]; }
+ __forceinline int32_t& operator []( const size_t index ) { assert(index < 4); return i[index]; }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+/// Unary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssei cast ( const __m128& a ) { return _mm_castps_si128(a); }
+__forceinline const ssei operator +( const ssei& a ) { return a; }
+__forceinline const ssei operator -( const ssei& a ) { return _mm_sub_epi32(_mm_setzero_si128(), a.m128); }
+#if defined(__KERNEL_SSSE3__)
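+/* Note: _mm_abs_epi32 is SSSE3-only; under plain SSE2 an abs() could be
+ * emulated with the operators defined below, e.g. select(a < ssei(0), -a, a)
+ * (a hypothetical sketch, not provided by this header). */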
+__forceinline const ssei abs ( const ssei& a ) { return _mm_abs_epi32(a.m128); } +#endif + +//////////////////////////////////////////////////////////////////////////////// +/// Binary Operators +//////////////////////////////////////////////////////////////////////////////// + +__forceinline const ssei operator +( const ssei& a, const ssei& b ) { return _mm_add_epi32(a.m128, b.m128); } +__forceinline const ssei operator +( const ssei& a, const int32_t& b ) { return a + ssei(b); } +__forceinline const ssei operator +( const int32_t& a, const ssei& b ) { return ssei(a) + b; } + +__forceinline const ssei operator -( const ssei& a, const ssei& b ) { return _mm_sub_epi32(a.m128, b.m128); } +__forceinline const ssei operator -( const ssei& a, const int32_t& b ) { return a - ssei(b); } +__forceinline const ssei operator -( const int32_t& a, const ssei& b ) { return ssei(a) - b; } + +#if defined(__KERNEL_SSE41__) +__forceinline const ssei operator *( const ssei& a, const ssei& b ) { return _mm_mullo_epi32(a.m128, b.m128); } +__forceinline const ssei operator *( const ssei& a, const int32_t& b ) { return a * ssei(b); } +__forceinline const ssei operator *( const int32_t& a, const ssei& b ) { return ssei(a) * b; } +#endif + +__forceinline const ssei operator &( const ssei& a, const ssei& b ) { return _mm_and_si128(a.m128, b.m128); } +__forceinline const ssei operator &( const ssei& a, const int32_t& b ) { return a & ssei(b); } +__forceinline const ssei operator &( const int32_t& a, const ssei& b ) { return ssei(a) & b; } + +__forceinline const ssei operator |( const ssei& a, const ssei& b ) { return _mm_or_si128(a.m128, b.m128); } +__forceinline const ssei operator |( const ssei& a, const int32_t& b ) { return a | ssei(b); } +__forceinline const ssei operator |( const int32_t& a, const ssei& b ) { return ssei(a) | b; } + +__forceinline const ssei operator ^( const ssei& a, const ssei& b ) { return _mm_xor_si128(a.m128, b.m128); } +__forceinline const ssei operator ^( const ssei& a, const int32_t& b ) { return a ^ ssei(b); } +__forceinline const ssei operator ^( const int32_t& a, const ssei& b ) { return ssei(a) ^ b; } + +__forceinline const ssei operator <<( const ssei& a, const int32_t& n ) { return _mm_slli_epi32(a.m128, n); } +__forceinline const ssei operator >>( const ssei& a, const int32_t& n ) { return _mm_srai_epi32(a.m128, n); } + +__forceinline const ssei andnot(const ssei& a, const ssei& b) { return _mm_andnot_si128(a.m128,b.m128); } +__forceinline const ssei andnot(const sseb& a, const ssei& b) { return _mm_andnot_si128(cast(a.m128),b.m128); } +__forceinline const ssei andnot(const ssei& a, const sseb& b) { return _mm_andnot_si128(a.m128,cast(b.m128)); } + +__forceinline const ssei sra ( const ssei& a, const int32_t& b ) { return _mm_srai_epi32(a.m128, b); } +__forceinline const ssei srl ( const ssei& a, const int32_t& b ) { return _mm_srli_epi32(a.m128, b); } + +#if defined(__KERNEL_SSE41__) +__forceinline const ssei min( const ssei& a, const ssei& b ) { return _mm_min_epi32(a.m128, b.m128); } +__forceinline const ssei min( const ssei& a, const int32_t& b ) { return min(a,ssei(b)); } +__forceinline const ssei min( const int32_t& a, const ssei& b ) { return min(ssei(a),b); } + +__forceinline const ssei max( const ssei& a, const ssei& b ) { return _mm_max_epi32(a.m128, b.m128); } +__forceinline const ssei max( const ssei& a, const int32_t& b ) { return max(a,ssei(b)); } +__forceinline const ssei max( const int32_t& a, const ssei& b ) { return max(ssei(a),b); } +#endif + 
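+/* Note: packed 32-bit integer min/max intrinsics (_mm_min_epi32/_mm_max_epi32)
+ * only exist from SSE4.1 on, hence the guard above. A hypothetical SSE2
+ * fallback could be built from the comparison and select operators defined
+ * further down in this file:
+ *
+ *   __forceinline const ssei min( const ssei& a, const ssei& b ) { return select(a < b, a, b); }
+ *   __forceinline const ssei max( const ssei& a, const ssei& b ) { return select(a > b, a, b); }
+ */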
+//////////////////////////////////////////////////////////////////////////////// +/// Assignment Operators +//////////////////////////////////////////////////////////////////////////////// + +__forceinline ssei& operator +=( ssei& a, const ssei& b ) { return a = a + b; } +__forceinline ssei& operator +=( ssei& a, const int32_t& b ) { return a = a + b; } + +__forceinline ssei& operator -=( ssei& a, const ssei& b ) { return a = a - b; } +__forceinline ssei& operator -=( ssei& a, const int32_t& b ) { return a = a - b; } + +#if defined(__KERNEL_SSE41__) +__forceinline ssei& operator *=( ssei& a, const ssei& b ) { return a = a * b; } +__forceinline ssei& operator *=( ssei& a, const int32_t& b ) { return a = a * b; } +#endif + +__forceinline ssei& operator &=( ssei& a, const ssei& b ) { return a = a & b; } +__forceinline ssei& operator &=( ssei& a, const int32_t& b ) { return a = a & b; } + +__forceinline ssei& operator |=( ssei& a, const ssei& b ) { return a = a | b; } +__forceinline ssei& operator |=( ssei& a, const int32_t& b ) { return a = a | b; } + +__forceinline ssei& operator <<=( ssei& a, const int32_t& b ) { return a = a << b; } +__forceinline ssei& operator >>=( ssei& a, const int32_t& b ) { return a = a >> b; } + +//////////////////////////////////////////////////////////////////////////////// +/// Comparison Operators + Select +//////////////////////////////////////////////////////////////////////////////// + +__forceinline const sseb operator ==( const ssei& a, const ssei& b ) { return _mm_castsi128_ps(_mm_cmpeq_epi32 (a.m128, b.m128)); } +__forceinline const sseb operator ==( const ssei& a, const int32_t& b ) { return a == ssei(b); } +__forceinline const sseb operator ==( const int32_t& a, const ssei& b ) { return ssei(a) == b; } + +__forceinline const sseb operator !=( const ssei& a, const ssei& b ) { return !(a == b); } +__forceinline const sseb operator !=( const ssei& a, const int32_t& b ) { return a != ssei(b); } +__forceinline const sseb operator !=( const int32_t& a, const ssei& b ) { return ssei(a) != b; } + +__forceinline const sseb operator < ( const ssei& a, const ssei& b ) { return _mm_castsi128_ps(_mm_cmplt_epi32 (a.m128, b.m128)); } +__forceinline const sseb operator < ( const ssei& a, const int32_t& b ) { return a < ssei(b); } +__forceinline const sseb operator < ( const int32_t& a, const ssei& b ) { return ssei(a) < b; } + +__forceinline const sseb operator >=( const ssei& a, const ssei& b ) { return !(a < b); } +__forceinline const sseb operator >=( const ssei& a, const int32_t& b ) { return a >= ssei(b); } +__forceinline const sseb operator >=( const int32_t& a, const ssei& b ) { return ssei(a) >= b; } + +__forceinline const sseb operator > ( const ssei& a, const ssei& b ) { return _mm_castsi128_ps(_mm_cmpgt_epi32 (a.m128, b.m128)); } +__forceinline const sseb operator > ( const ssei& a, const int32_t& b ) { return a > ssei(b); } +__forceinline const sseb operator > ( const int32_t& a, const ssei& b ) { return ssei(a) > b; } + +__forceinline const sseb operator <=( const ssei& a, const ssei& b ) { return !(a > b); } +__forceinline const sseb operator <=( const ssei& a, const int32_t& b ) { return a <= ssei(b); } +__forceinline const sseb operator <=( const int32_t& a, const ssei& b ) { return ssei(a) <= b; } + +__forceinline const ssei select( const sseb& m, const ssei& t, const ssei& f ) { +#ifdef __KERNEL_SSE41__ + return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m)); +#else + return _mm_or_si128(_mm_and_si128(m, t), 
_mm_andnot_si128(m, f)); +#endif +} + +__forceinline const ssei select( const int mask, const ssei& t, const ssei& f ) { +#if defined(__KERNEL_SSE41__) && ((!defined(__clang__) && !defined(_MSC_VER)) || defined(__INTEL_COMPILER)) + return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask)); +#else + return select(sseb(mask),t,f); +#endif +} + +//////////////////////////////////////////////////////////////////////////////// +// Movement/Shifting/Shuffling Functions +//////////////////////////////////////////////////////////////////////////////// + +__forceinline ssei unpacklo( const ssei& a, const ssei& b ) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a.m128), _mm_castsi128_ps(b.m128))); } +__forceinline ssei unpackhi( const ssei& a, const ssei& b ) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a.m128), _mm_castsi128_ps(b.m128))); } + +template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssei shuffle( const ssei& a ) { + return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0)); +} + +template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssei shuffle( const ssei& a, const ssei& b ) { + return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0))); +} + +#if defined(__KERNEL_SSE3__) +template<> __forceinline const ssei shuffle<0, 0, 2, 2>( const ssei& a ) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(a))); } +template<> __forceinline const ssei shuffle<1, 1, 3, 3>( const ssei& a ) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(a))); } +template<> __forceinline const ssei shuffle<0, 1, 0, 1>( const ssei& a ) { return _mm_castpd_si128(_mm_movedup_pd (_mm_castsi128_pd(a))); } +#endif + +template<size_t i0> __forceinline const ssei shuffle( const ssei& b ) { + return shuffle<i0,i0,i0,i0>(b); +} + +#if defined(__KERNEL_SSE41__) +template<size_t src> __forceinline int extract( const ssei& b ) { return _mm_extract_epi32(b, src); } +template<size_t dst> __forceinline const ssei insert( const ssei& a, const int32_t b ) { return _mm_insert_epi32(a, b, dst); } +#else +template<size_t src> __forceinline int extract( const ssei& b ) { return b[src]; } +template<size_t dst> __forceinline const ssei insert( const ssei& a, const int32_t b ) { ssei c = a; c[dst] = b; return c; } +#endif + +//////////////////////////////////////////////////////////////////////////////// +/// Reductions +//////////////////////////////////////////////////////////////////////////////// + +#if defined(__KERNEL_SSE41__) +__forceinline const ssei vreduce_min(const ssei& v) { ssei h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); } +__forceinline const ssei vreduce_max(const ssei& v) { ssei h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); } +__forceinline const ssei vreduce_add(const ssei& v) { ssei h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; } + +__forceinline int reduce_min(const ssei& v) { return extract<0>(vreduce_min(v)); } +__forceinline int reduce_max(const ssei& v) { return extract<0>(vreduce_max(v)); } +__forceinline int reduce_add(const ssei& v) { return extract<0>(vreduce_add(v)); } + +__forceinline size_t select_min(const ssei& v) { return __bsf(movemask(v == vreduce_min(v))); } +__forceinline size_t select_max(const ssei& v) { return __bsf(movemask(v == vreduce_max(v))); } + +__forceinline size_t select_min(const sseb& valid, const ssei& v) { const ssei a = select(valid,v,ssei((int)pos_inf)); return 
__bsf(movemask(valid & (a == vreduce_min(a)))); }
+__forceinline size_t select_max(const sseb& valid, const ssei& v) { const ssei a = select(valid,v,ssei((int)neg_inf)); return __bsf(movemask(valid & (a == vreduce_max(a)))); }
+
+#else
+
+__forceinline int reduce_min(const ssei& v) { return min(min(v[0],v[1]),min(v[2],v[3])); }
+__forceinline int reduce_max(const ssei& v) { return max(max(v[0],v[1]),max(v[2],v[3])); }
+__forceinline int reduce_add(const ssei& v) { return v[0]+v[1]+v[2]+v[3]; }
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Memory load and store operations
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssei load4i( const void* const a ) {
+ return _mm_load_si128((__m128i*)a);
+}
+
+__forceinline void store4i(void* ptr, const ssei& v) {
+ _mm_store_si128((__m128i*)ptr,v);
+}
+
+__forceinline void storeu4i(void* ptr, const ssei& v) {
+ _mm_storeu_si128((__m128i*)ptr,v);
+}
+
+__forceinline void store4i( const sseb& mask, void* ptr, const ssei& i ) {
+#if defined(__KERNEL_AVX__)
+ _mm_maskstore_ps((float*)ptr,(__m128i)mask,_mm_castsi128_ps(i));
+#else
+ *(ssei*)ptr = select(mask,i,*(ssei*)ptr);
+#endif
+}
+
+__forceinline ssei load4i_nt(void* ptr) {
+#if defined(__KERNEL_SSE41__)
+ return _mm_stream_load_si128((__m128i*)ptr);
+#else
+ return _mm_load_si128((__m128i*)ptr);
+#endif
+}
+
+__forceinline void store4i_nt(void* ptr, const ssei& v) {
+#if defined(__KERNEL_SSE41__)
+ _mm_stream_ps((float*)ptr,_mm_castsi128_ps(v));
+#else
+ _mm_store_si128((__m128i*)ptr,v);
+#endif
+}
+
+#endif
+
+CCL_NAMESPACE_END
+
+#endif
+
diff --git a/intern/cycles/util/util_types.h b/intern/cycles/util/util_types.h
index b1319011936..98d70786d44 100644
--- a/intern/cycles/util/util_types.h
+++ b/intern/cycles/util/util_types.h
@@ -51,6 +51,7 @@
 #endif
 #define ccl_may_alias
 #define ccl_always_inline __forceinline
+#define ccl_maybe_unused
 
 #else
 
@@ -62,6 +63,7 @@
 #define ccl_try_align(...) __attribute__((aligned(__VA_ARGS__)))
 #define ccl_may_alias __attribute__((__may_alias__))
 #define ccl_always_inline __attribute__((always_inline))
+#define ccl_maybe_unused __attribute__((used))
 
 #endif
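For reference, a minimal usage sketch of the ssei wrappers added by this patch. This is hypothetical caller code, not part of the commit; it assumes a kernel build with __KERNEL_SSE2__ defined and the headers above available (e.g. through util_simd.h):

/* Sum the non-negative lanes of a 4-wide integer vector. */
ccl_device_inline int sum_nonnegative(const ssei& v)
{
 const ssei masked = select(v >= ssei(0), v, ssei(0)); /* zero out negative lanes */
 return reduce_add(masked); /* horizontal add of the four lanes */
}

For example, sum_nonnegative(ssei(1, -2, 3, -4)) returns 4: only the first and third lanes pass the mask, and reduce_add sums them via either the SSE4.1 shuffle path or the scalar SSE2 fallback shown above.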