git.blender.org/blender.git
Diffstat:
 intern/cycles/kernel/geom/geom_bvh_shadow.h     |  54
 intern/cycles/kernel/geom/geom_bvh_subsurface.h |  54
 intern/cycles/kernel/geom/geom_bvh_traversal.h  |  56
 intern/cycles/kernel/geom/geom_curve.h          | 122
 intern/cycles/kernel/kernel_avx.cpp             |   1
 intern/cycles/kernel/kernel_compat_cpu.h        |  12
 intern/cycles/kernel/svm/svm_image.h            |  10
 intern/cycles/kernel/svm/svm_noise.h            | 144
 intern/cycles/kernel/svm/svm_texture.h          |  22
 intern/cycles/render/curves.cpp                 |  16
 intern/cycles/render/tile.cpp                   |   2
 intern/cycles/util/CMakeLists.txt               |   4
 intern/cycles/util/util_color.h                 |  46
 intern/cycles/util/util_half.h                  |  24
 intern/cycles/util/util_optimization.h          |  10
 intern/cycles/util/util_simd.cpp                |  42
 intern/cycles/util/util_simd.h                  | 473
 intern/cycles/util/util_sseb.h                  | 161
 intern/cycles/util/util_ssef.h                  | 588
 intern/cycles/util/util_ssei.h                  | 294
 intern/cycles/util/util_types.h                 |   2
21 files changed, 1697 insertions(+), 440 deletions(-)
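This commit replaces raw __m128/__m128i intrinsics across the Cycles kernel with the Embree-derived SIMD wrapper types ssef (4 floats), ssei (4 int32s) and sseb (a 4-lane boolean mask), introduced in the new util_ssef.h, util_ssei.h and util_sseb.h headers. The wrappers carry operator overloads, so chains of intrinsics become ordinary arithmetic expressions with identical code generation. A minimal sketch of the idea, using a hypothetical name (the real classes are far more complete):

#include <xmmintrin.h>

struct my_ssef {                 /* hypothetical stand-in for ssef */
	__m128 m128;
	my_ssef(__m128 v) : m128(v) {}
	explicit my_ssef(float f) : m128(_mm_set1_ps(f)) {}
};

inline my_ssef operator-(const my_ssef &a, const my_ssef &b)
{ return my_ssef(_mm_sub_ps(a.m128, b.m128)); }
inline my_ssef operator*(const my_ssef &a, const my_ssef &b)
{ return my_ssef(_mm_mul_ps(a.m128, b.m128)); }

/* chains like _mm_mul_ps(_mm_sub_ps(node, P), idir) now read as
 * (node - P) * idir, with no change in the generated code */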
diff --git a/intern/cycles/kernel/geom/geom_bvh_shadow.h b/intern/cycles/kernel/geom/geom_bvh_shadow.h
index 98bf82b3b2d..48876da049e 100644
--- a/intern/cycles/kernel/geom/geom_bvh_shadow.h
+++ b/intern/cycles/kernel/geom/geom_bvh_shadow.h
@@ -68,15 +68,15 @@ ccl_device bool BVH_FUNCTION_NAME
const shuffle_swap_t shuf_identity = shuffle_swap_identity();
const shuffle_swap_t shuf_swap = shuffle_swap_swap();
- const __m128 pn = _mm_castsi128_ps(_mm_set_epi32(0x80000000, 0x80000000, 0, 0));
- __m128 Psplat[3], idirsplat[3];
+ const ssef pn = cast(ssei(0, 0, 0x80000000, 0x80000000));
+ ssef Psplat[3], idirsplat[3];
shuffle_swap_t shufflexyz[3];
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
- __m128 tsplat = _mm_set_ps(-isect_t, -isect_t, 0.0f, 0.0f);
+ ssef tsplat(0.0f, 0.0f, -isect_t, -isect_t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
@@ -132,27 +132,27 @@ ccl_device bool BVH_FUNCTION_NAME
/* Intersect two child bounding boxes, SSE3 version adapted from Embree */
/* fetch node data */
- const __m128 *bvh_nodes = (__m128*)kg->__bvh_nodes.data + nodeAddr*BVH_NODE_SIZE;
+ const ssef *bvh_nodes = (ssef*)kg->__bvh_nodes.data + nodeAddr*BVH_NODE_SIZE;
const float4 cnodes = ((float4*)bvh_nodes)[3];
/* intersect ray against child nodes */
- const __m128 tminmaxx = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[0], shufflexyz[0]), Psplat[0]), idirsplat[0]);
- const __m128 tminmaxy = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[1], shufflexyz[1]), Psplat[1]), idirsplat[1]);
- const __m128 tminmaxz = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[2], shufflexyz[2]), Psplat[2]), idirsplat[2]);
+ const ssef tminmaxx = (shuffle_swap(bvh_nodes[0], shufflexyz[0]) - Psplat[0]) * idirsplat[0];
+ const ssef tminmaxy = (shuffle_swap(bvh_nodes[1], shufflexyz[1]) - Psplat[1]) * idirsplat[1];
+ const ssef tminmaxz = (shuffle_swap(bvh_nodes[2], shufflexyz[2]) - Psplat[2]) * idirsplat[2];
/* calculate { c0min, c1min, -c0max, -c1max} */
- __m128 minmax = _mm_max_ps(_mm_max_ps(tminmaxx, tminmaxy), _mm_max_ps(tminmaxz, tsplat));
- const __m128 tminmax = _mm_xor_ps(minmax, pn);
- const __m128 lrhit = _mm_cmple_ps(tminmax, shuffle<2, 3, 0, 1>(tminmax));
+ const ssef minmax = max(max(tminmaxx, tminmaxy), max(tminmaxz, tsplat));
+ const ssef tminmax = minmax ^ pn;
+ const sseb lrhit = tminmax <= shuffle<2, 3, 0, 1>(tminmax);
/* decide which nodes to traverse next */
#ifdef __VISIBILITY_FLAG__
/* this visibility test gives a 5% performance hit, how to solve? */
- traverseChild0 = (_mm_movemask_ps(lrhit) & 1) && (__float_as_uint(cnodes.z) & PATH_RAY_SHADOW);
- traverseChild1 = (_mm_movemask_ps(lrhit) & 2) && (__float_as_uint(cnodes.w) & PATH_RAY_SHADOW);
+ traverseChild0 = (movemask(lrhit) & 1) && (__float_as_uint(cnodes.z) & PATH_RAY_SHADOW);
+ traverseChild1 = (movemask(lrhit) & 2) && (__float_as_uint(cnodes.w) & PATH_RAY_SHADOW);
#else
- traverseChild0 = (_mm_movemask_ps(lrhit) & 1);
- traverseChild1 = (_mm_movemask_ps(lrhit) & 2);
+ traverseChild0 = (movemask(lrhit) & 1);
+ traverseChild1 = (movemask(lrhit) & 2);
#endif
#endif // __KERNEL_SSE2__
@@ -164,9 +164,7 @@ ccl_device bool BVH_FUNCTION_NAME
#if !defined(__KERNEL_SSE2__)
bool closestChild1 = (c1min < c0min);
#else
- union { __m128 m128; float v[4]; } uminmax;
- uminmax.m128 = tminmax;
- bool closestChild1 = uminmax.v[1] < uminmax.v[0];
+ bool closestChild1 = tminmax[1] < tminmax[0];
#endif
if(closestChild1) {
@@ -301,12 +299,12 @@ ccl_device bool BVH_FUNCTION_NAME
num_hits_in_instance = 0;
#if defined(__KERNEL_SSE2__)
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
isect_array->t = isect_t;
- tsplat = _mm_set_ps(-isect_t, -isect_t, 0.0f, 0.0f);
+ tsplat = ssef(0.0f, 0.0f, -isect_t, -isect_t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
@@ -348,13 +346,13 @@ ccl_device bool BVH_FUNCTION_NAME
}
#if defined(__KERNEL_SSE2__)
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
isect_t = tmax;
isect_array->t = isect_t;
- tsplat = _mm_set_ps(-isect_t, -isect_t, 0.0f, 0.0f);
+ tsplat = ssef(0.0f, 0.0f, -isect_t, -isect_t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
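A note on the packed slab test above: each tminmax* vector holds {c0lo, c1lo, c0hi, c1hi} for one axis, with the two "hi" lanes pre-negated because idirsplat was xored with pn (sign bits in lanes 2 and 3) and tsplat holds {0, 0, -t, -t}. That lets a single max() reduce entry and exit distances at once, since min(a,b) == -max(-a,-b); the final minmax ^ pn restores the exit signs, and lrhit compares each child's entry lane against its exit lane via the <2, 3, 0, 1> shuffle. A scalar sketch of the same test for one child (hypothetical helper; the SSE code handles both children at once, two lanes per child):

#include <algorithm>

bool slab_hit(const float lo[3], const float hi[3],
              const float P[3], const float idir[3], float t_max)
{
	float t_near = 0.0f, t_far = t_max;
	for(int k = 0; k < 3; k++) {
		float d0 = (lo[k] - P[k]) * idir[k];
		float d1 = (hi[k] - P[k]) * idir[k];
		if(idir[k] < 0.0f) std::swap(d0, d1);  /* what shuffle_swap does */
		t_near = std::max(t_near, d0);         /* latest entry */
		t_far = std::min(t_far, d1);           /* earliest exit */
	}
	return t_near <= t_far;                    /* the lrhit compare */
}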
diff --git a/intern/cycles/kernel/geom/geom_bvh_subsurface.h b/intern/cycles/kernel/geom/geom_bvh_subsurface.h
index a19f05dd371..a8f57cffa78 100644
--- a/intern/cycles/kernel/geom/geom_bvh_subsurface.h
+++ b/intern/cycles/kernel/geom/geom_bvh_subsurface.h
@@ -65,15 +65,15 @@ ccl_device uint BVH_FUNCTION_NAME(KernelGlobals *kg, const Ray *ray, Intersectio
const shuffle_swap_t shuf_identity = shuffle_swap_identity();
const shuffle_swap_t shuf_swap = shuffle_swap_swap();
- const __m128 pn = _mm_castsi128_ps(_mm_set_epi32(0x80000000, 0x80000000, 0, 0));
- __m128 Psplat[3], idirsplat[3];
+ const ssef pn = cast(ssei(0, 0, 0x80000000, 0x80000000));
+ ssef Psplat[3], idirsplat[3];
shuffle_swap_t shufflexyz[3];
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
- __m128 tsplat = _mm_set_ps(-isect_t, -isect_t, 0.0f, 0.0f);
+ ssef tsplat(0.0f, 0.0f, -isect_t, -isect_t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
@@ -131,25 +131,27 @@ ccl_device uint BVH_FUNCTION_NAME(KernelGlobals *kg, const Ray *ray, Intersectio
/* Intersect two child bounding boxes, SSE3 version adapted from Embree */
/* fetch node data */
- const __m128 *bvh_nodes = (__m128*)kg->__bvh_nodes.data + nodeAddr*BVH_NODE_SIZE;
+ const ssef *bvh_nodes = (ssef*)kg->__bvh_nodes.data + nodeAddr*BVH_NODE_SIZE;
const float4 cnodes = ((float4*)bvh_nodes)[3];
/* intersect ray against child nodes */
- const __m128 tminmaxx = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[0], shufflexyz[0]), Psplat[0]), idirsplat[0]);
- const __m128 tminmaxy = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[1], shufflexyz[1]), Psplat[1]), idirsplat[1]);
- const __m128 tminmaxz = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[2], shufflexyz[2]), Psplat[2]), idirsplat[2]);
+ const ssef tminmaxx = (shuffle_swap(bvh_nodes[0], shufflexyz[0]) - Psplat[0]) * idirsplat[0];
+ const ssef tminmaxy = (shuffle_swap(bvh_nodes[1], shufflexyz[1]) - Psplat[1]) * idirsplat[1];
+ const ssef tminmaxz = (shuffle_swap(bvh_nodes[2], shufflexyz[2]) - Psplat[2]) * idirsplat[2];
- const __m128 tminmax = _mm_xor_ps(_mm_max_ps(_mm_max_ps(tminmaxx, tminmaxy), _mm_max_ps(tminmaxz, tsplat)), pn);
- const __m128 lrhit = _mm_cmple_ps(tminmax, shuffle<2, 3, 0, 1>(tminmax));
+ /* calculate { c0min, c1min, -c0max, -c1max} */
+ const ssef minmax = max(max(tminmaxx, tminmaxy), max(tminmaxz, tsplat));
+ const ssef tminmax = minmax ^ pn;
+ const sseb lrhit = tminmax <= shuffle<2, 3, 0, 1>(tminmax);
/* decide which nodes to traverse next */
#ifdef __VISIBILITY_FLAG__
/* this visibility test gives a 5% performance hit, how to solve? */
- traverseChild0 = (_mm_movemask_ps(lrhit) & 1) && (__float_as_uint(cnodes.z) & visibility);
- traverseChild1 = (_mm_movemask_ps(lrhit) & 2) && (__float_as_uint(cnodes.w) & visibility);
+ traverseChild0 = (movemask(lrhit) & 1) && (__float_as_uint(cnodes.z) & visibility);
+ traverseChild1 = (movemask(lrhit) & 2) && (__float_as_uint(cnodes.w) & visibility);
#else
- traverseChild0 = (_mm_movemask_ps(lrhit) & 1);
- traverseChild1 = (_mm_movemask_ps(lrhit) & 2);
+ traverseChild0 = (movemask(lrhit) & 1);
+ traverseChild1 = (movemask(lrhit) & 2);
#endif
#endif // __KERNEL_SSE2__
@@ -161,9 +163,7 @@ ccl_device uint BVH_FUNCTION_NAME(KernelGlobals *kg, const Ray *ray, Intersectio
#if !defined(__KERNEL_SSE2__)
bool closestChild1 = (c1min < c0min);
#else
- union { __m128 m128; float v[4]; } uminmax;
- uminmax.m128 = tminmax;
- bool closestChild1 = uminmax.v[1] < uminmax.v[0];
+ bool closestChild1 = tminmax[1] < tminmax[0];
#endif
if(closestChild1) {
@@ -243,11 +243,11 @@ ccl_device uint BVH_FUNCTION_NAME(KernelGlobals *kg, const Ray *ray, Intersectio
#endif
#if defined(__KERNEL_SSE2__)
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
- tsplat = _mm_set_ps(-isect_t, -isect_t, 0.0f, 0.0f);
+ tsplat = ssef(0.0f, 0.0f, -isect_t, -isect_t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
@@ -279,11 +279,11 @@ ccl_device uint BVH_FUNCTION_NAME(KernelGlobals *kg, const Ray *ray, Intersectio
#endif
#if defined(__KERNEL_SSE2__)
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
- tsplat = _mm_set_ps(-isect_t, -isect_t, 0.0f, 0.0f);
+ tsplat = ssef(0.0f, 0.0f, -isect_t, -isect_t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
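movemask packs the sign bit of each lane into the low bits of an int, so bit 0 of movemask(lrhit) is the child-0 hit flag and bit 1 the child-1 flag, exactly as with _mm_movemask_ps before the rename. A self-contained sketch of the semantics:

#include <xmmintrin.h>
#include <cstdio>

int main()
{
	__m128 a = _mm_setr_ps(1.0f, 5.0f, 2.0f, 7.0f);
	__m128 b = _mm_setr_ps(2.0f, 3.0f, 4.0f, 5.0f);
	int mask = _mm_movemask_ps(_mm_cmple_ps(a, b)); /* lanes 0,2 pass -> 5 */
	printf("child0=%d child1=%d\n", mask & 1, (mask & 2) ? 1 : 0);
	return 0;
}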
diff --git a/intern/cycles/kernel/geom/geom_bvh_traversal.h b/intern/cycles/kernel/geom/geom_bvh_traversal.h
index 9fd40f91471..e39228c33de 100644
--- a/intern/cycles/kernel/geom/geom_bvh_traversal.h
+++ b/intern/cycles/kernel/geom/geom_bvh_traversal.h
@@ -72,15 +72,15 @@ ccl_device bool BVH_FUNCTION_NAME
const shuffle_swap_t shuf_identity = shuffle_swap_identity();
const shuffle_swap_t shuf_swap = shuffle_swap_swap();
- const __m128 pn = _mm_castsi128_ps(_mm_set_epi32(0x80000000, 0x80000000, 0, 0));
- __m128 Psplat[3], idirsplat[3];
+ const ssef pn = cast(ssei(0, 0, 0x80000000, 0x80000000));
+ ssef Psplat[3], idirsplat[3];
shuffle_swap_t shufflexyz[3];
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
- __m128 tsplat = _mm_set_ps(-isect->t, -isect->t, 0.0f, 0.0f);
+ ssef tsplat(0.0f, 0.0f, -isect->t, -isect->t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
@@ -151,17 +151,17 @@ ccl_device bool BVH_FUNCTION_NAME
/* Intersect two child bounding boxes, SSE3 version adapted from Embree */
/* fetch node data */
- const __m128 *bvh_nodes = (__m128*)kg->__bvh_nodes.data + nodeAddr*BVH_NODE_SIZE;
+ const ssef *bvh_nodes = (ssef*)kg->__bvh_nodes.data + nodeAddr*BVH_NODE_SIZE;
const float4 cnodes = ((float4*)bvh_nodes)[3];
/* intersect ray against child nodes */
- const __m128 tminmaxx = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[0], shufflexyz[0]), Psplat[0]), idirsplat[0]);
- const __m128 tminmaxy = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[1], shufflexyz[1]), Psplat[1]), idirsplat[1]);
- const __m128 tminmaxz = _mm_mul_ps(_mm_sub_ps(shuffle_swap(bvh_nodes[2], shufflexyz[2]), Psplat[2]), idirsplat[2]);
+ const ssef tminmaxx = (shuffle_swap(bvh_nodes[0], shufflexyz[0]) - Psplat[0]) * idirsplat[0];
+ const ssef tminmaxy = (shuffle_swap(bvh_nodes[1], shufflexyz[1]) - Psplat[1]) * idirsplat[1];
+ const ssef tminmaxz = (shuffle_swap(bvh_nodes[2], shufflexyz[2]) - Psplat[2]) * idirsplat[2];
/* calculate { c0min, c1min, -c0max, -c1max} */
- __m128 minmax = _mm_max_ps(_mm_max_ps(tminmaxx, tminmaxy), _mm_max_ps(tminmaxz, tsplat));
- const __m128 tminmax = _mm_xor_ps(minmax, pn);
+ ssef minmax = max(max(tminmaxx, tminmaxy), max(tminmaxz, tsplat));
+ const ssef tminmax = minmax ^ pn;
#if FEATURE(BVH_HAIR_MINIMUM_WIDTH)
if(difl != 0.0f) {
@@ -182,16 +182,16 @@ ccl_device bool BVH_FUNCTION_NAME
}
#endif
- const __m128 lrhit = _mm_cmple_ps(tminmax, shuffle<2, 3, 0, 1>(tminmax));
+ const sseb lrhit = tminmax <= shuffle<2, 3, 0, 1>(tminmax);
/* decide which nodes to traverse next */
#ifdef __VISIBILITY_FLAG__
/* this visibility test gives a 5% performance hit, how to solve? */
- traverseChild0 = (_mm_movemask_ps(lrhit) & 1) && (__float_as_uint(cnodes.z) & visibility);
- traverseChild1 = (_mm_movemask_ps(lrhit) & 2) && (__float_as_uint(cnodes.w) & visibility);
+ traverseChild0 = (movemask(lrhit) & 1) && (__float_as_uint(cnodes.z) & visibility);
+ traverseChild1 = (movemask(lrhit) & 2) && (__float_as_uint(cnodes.w) & visibility);
#else
- traverseChild0 = (_mm_movemask_ps(lrhit) & 1);
- traverseChild1 = (_mm_movemask_ps(lrhit) & 2);
+ traverseChild0 = (movemask(lrhit) & 1);
+ traverseChild1 = (movemask(lrhit) & 2);
#endif
#endif // __KERNEL_SSE2__
@@ -203,9 +203,7 @@ ccl_device bool BVH_FUNCTION_NAME
#if !defined(__KERNEL_SSE2__)
bool closestChild1 = (c1min < c0min);
#else
- union { __m128 m128; float v[4]; } uminmax;
- uminmax.m128 = tminmax;
- bool closestChild1 = uminmax.v[1] < uminmax.v[0];
+ bool closestChild1 = tminmax[1] < tminmax[0];
#endif
if(closestChild1) {
@@ -282,7 +280,7 @@ ccl_device bool BVH_FUNCTION_NAME
if(visibility == PATH_RAY_SHADOW_OPAQUE)
return true;
- tsplat = _mm_set_ps(-isect->t, -isect->t, 0.0f, 0.0f);
+ tsplat = ssef(0.0f, 0.0f, -isect->t, -isect->t);
}
#else
if(hit && visibility == PATH_RAY_SHADOW_OPAQUE)
@@ -304,11 +302,11 @@ ccl_device bool BVH_FUNCTION_NAME
#endif
#if defined(__KERNEL_SSE2__)
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
- tsplat = _mm_set_ps(-isect->t, -isect->t, 0.0f, 0.0f);
+ tsplat = ssef(0.0f, 0.0f, -isect->t, -isect->t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
@@ -334,11 +332,11 @@ ccl_device bool BVH_FUNCTION_NAME
#endif
#if defined(__KERNEL_SSE2__)
- Psplat[0] = _mm_set_ps1(P.x);
- Psplat[1] = _mm_set_ps1(P.y);
- Psplat[2] = _mm_set_ps1(P.z);
+ Psplat[0] = ssef(P.x);
+ Psplat[1] = ssef(P.y);
+ Psplat[2] = ssef(P.z);
- tsplat = _mm_set_ps(-isect->t, -isect->t, 0.0f, 0.0f);
+ tsplat = ssef(0.0f, 0.0f, -isect->t, -isect->t);
gen_idirsplat_swap(pn, shuf_identity, shuf_swap, idir, idirsplat, shufflexyz);
#endif
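The closestChild1 change is worth noting: the old code had to type-pun through an explicit union to read individual lanes, while ssef exposes operator[] directly. A sketch of the accessor shape the new code relies on (assumed layout; the real definition lives in util_ssef.h):

#include <xmmintrin.h>
#include <cstddef>

struct my_ssef {
	union { __m128 m128; float f[4]; };
	my_ssef(const __m128 &v) : m128(v) {}
	const float &operator[](size_t i) const { return f[i]; }
};

/* so `uminmax.v[1] < uminmax.v[0]` becomes `tminmax[1] < tminmax[0]` */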
diff --git a/intern/cycles/kernel/geom/geom_curve.h b/intern/cycles/kernel/geom/geom_curve.h
index dabfb0c72c8..863836ffcea 100644
--- a/intern/cycles/kernel/geom/geom_curve.h
+++ b/intern/cycles/kernel/geom/geom_curve.h
@@ -214,9 +214,9 @@ ccl_device_inline void curvebounds(float *lower, float *upper, float *extremta,
}
#ifdef __KERNEL_SSE2__
-ccl_device_inline __m128 transform_point_T3(const __m128 t[3], const __m128 &a)
+ccl_device_inline ssef transform_point_T3(const ssef t[3], const ssef &a)
{
- return fma(broadcast<0>(a), t[0], fma(broadcast<1>(a), t[1], _mm_mul_ps(broadcast<2>(a), t[2])));
+ return madd(shuffle<0>(a), t[0], madd(shuffle<1>(a), t[1], shuffle<2>(a) * t[2]));
}
#endif
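transform_point_T3 forms a linear combination of the rows of t weighted by the lanes of a, i.e. it multiplies a by the transpose of t (presumably the _T3 suffix), now via madd instead of the old fma helper. Scalar equivalent (a sketch, assuming the usual float3 operators):

ccl_device_inline float3 transform_point_T3_scalar(const float3 t[3], const float3 &a)
{
	return a.x * t[0] + a.y * t[1] + a.z * t[2];
}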
@@ -238,16 +238,16 @@ ccl_device_inline bool bvh_cardinal_curve_intersect(KernelGlobals *kg, Intersect
int prim = kernel_tex_fetch(__prim_index, curveAddr);
#ifdef __KERNEL_SSE2__
- __m128 vdir = load_m128(dir);
- __m128 vcurve_coef[4];
+ ssef vdir = load4f(dir);
+ ssef vcurve_coef[4];
const float3 *curve_coef = (float3 *)vcurve_coef;
{
- __m128 dtmp = _mm_mul_ps(vdir, vdir);
- __m128 d_ss = _mm_sqrt_ss(_mm_add_ss(dtmp, broadcast<2>(dtmp)));
- __m128 rd_ss = _mm_div_ss(_mm_set_ss(1.0f), d_ss);
+ ssef dtmp = vdir * vdir;
+ ssef d_ss = mm_sqrt(dtmp + shuffle<2>(dtmp));
+ ssef rd_ss = load1f_first(1.0f) / d_ss;
- __m128i v00vec = _mm_load_si128((__m128i *)&kg->__curves.data[prim]);
+ ssei v00vec = load4i((ssei *)&kg->__curves.data[prim]);
int2 &v00 = (int2 &)v00vec;
int k0 = v00.x + segment;
@@ -255,44 +255,44 @@ ccl_device_inline bool bvh_cardinal_curve_intersect(KernelGlobals *kg, Intersect
int ka = max(k0 - 1, v00.x);
int kb = min(k1 + 1, v00.x + v00.y - 1);
- __m128 P_curve[4];
+ ssef P_curve[4];
if(type & PRIMITIVE_CURVE) {
- P_curve[0] = _mm_load_ps(&kg->__curve_keys.data[ka].x);
- P_curve[1] = _mm_load_ps(&kg->__curve_keys.data[k0].x);
- P_curve[2] = _mm_load_ps(&kg->__curve_keys.data[k1].x);
- P_curve[3] = _mm_load_ps(&kg->__curve_keys.data[kb].x);
+ P_curve[0] = load4f(&kg->__curve_keys.data[ka].x);
+ P_curve[1] = load4f(&kg->__curve_keys.data[k0].x);
+ P_curve[2] = load4f(&kg->__curve_keys.data[k1].x);
+ P_curve[3] = load4f(&kg->__curve_keys.data[kb].x);
}
else {
int fobject = (object == OBJECT_NONE)? kernel_tex_fetch(__prim_object, curveAddr): object;
motion_cardinal_curve_keys(kg, fobject, prim, time, ka, k0, k1, kb, (float4*)&P_curve);
}
- __m128 rd_sgn = set_sign_bit<0, 1, 1, 1>(broadcast<0>(rd_ss));
- __m128 mul_zxxy = _mm_mul_ps(shuffle<2, 0, 0, 1>(vdir), rd_sgn);
- __m128 mul_yz = _mm_mul_ps(shuffle<1, 2, 1, 2>(vdir), mul_zxxy);
- __m128 mul_shuf = shuffle<0, 1, 2, 3>(mul_zxxy, mul_yz);
- __m128 vdir0 = _mm_and_ps(vdir, _mm_castsi128_ps(_mm_setr_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0)));
+ ssef rd_sgn = set_sign_bit<0, 1, 1, 1>(shuffle<0>(rd_ss));
+ ssef mul_zxxy = shuffle<2, 0, 0, 1>(vdir) * rd_sgn;
+ ssef mul_yz = shuffle<1, 2, 1, 2>(vdir) * mul_zxxy;
+ ssef mul_shuf = shuffle<0, 1, 2, 3>(mul_zxxy, mul_yz);
+ ssef vdir0 = vdir & cast(ssei(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0));
- __m128 htfm0 = shuffle<0, 2, 0, 3>(mul_shuf, vdir0);
- __m128 htfm1 = shuffle<1, 0, 1, 3>(_mm_set_ss(_mm_cvtss_f32(d_ss)), vdir0);
- __m128 htfm2 = shuffle<1, 3, 2, 3>(mul_shuf, vdir0);
+ ssef htfm0 = shuffle<0, 2, 0, 3>(mul_shuf, vdir0);
+ ssef htfm1 = shuffle<1, 0, 1, 3>(load1f_first(extract<0>(d_ss)), vdir0);
+ ssef htfm2 = shuffle<1, 3, 2, 3>(mul_shuf, vdir0);
- __m128 htfm[] = { htfm0, htfm1, htfm2 };
- __m128 vP = load_m128(P);
- __m128 p0 = transform_point_T3(htfm, _mm_sub_ps(P_curve[0], vP));
- __m128 p1 = transform_point_T3(htfm, _mm_sub_ps(P_curve[1], vP));
- __m128 p2 = transform_point_T3(htfm, _mm_sub_ps(P_curve[2], vP));
- __m128 p3 = transform_point_T3(htfm, _mm_sub_ps(P_curve[3], vP));
+ ssef htfm[] = { htfm0, htfm1, htfm2 };
+ ssef vP = load4f(P);
+ ssef p0 = transform_point_T3(htfm, P_curve[0] - vP);
+ ssef p1 = transform_point_T3(htfm, P_curve[1] - vP);
+ ssef p2 = transform_point_T3(htfm, P_curve[2] - vP);
+ ssef p3 = transform_point_T3(htfm, P_curve[3] - vP);
float fc = 0.71f;
- __m128 vfc = _mm_set1_ps(fc);
- __m128 vfcxp3 = _mm_mul_ps(vfc, p3);
+ ssef vfc = ssef(fc);
+ ssef vfcxp3 = vfc * p3;
vcurve_coef[0] = p1;
- vcurve_coef[1] = _mm_mul_ps(vfc, _mm_sub_ps(p2, p0));
- vcurve_coef[2] = fma(_mm_set1_ps(fc * 2.0f), p0, fma(_mm_set1_ps(fc - 3.0f), p1, fms(_mm_set1_ps(3.0f - 2.0f * fc), p2, vfcxp3)));
- vcurve_coef[3] = fms(_mm_set1_ps(fc - 2.0f), _mm_sub_ps(p2, p1), fms(vfc, p0, vfcxp3));
+ vcurve_coef[1] = vfc * (p2 - p0);
+ vcurve_coef[2] = madd(ssef(fc * 2.0f), p0, madd(ssef(fc - 3.0f), p1, msub(ssef(3.0f - 2.0f * fc), p2, vfcxp3)));
+ vcurve_coef[3] = msub(ssef(fc - 2.0f), p2 - p1, msub(vfc, p0, vfcxp3));
r_st = ((float4 &)P_curve[1]).w;
r_en = ((float4 &)P_curve[2]).w;
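For context on the vcurve_coef setup above: the curve is a cardinal spline through p1 and p2 with tangents m1 = fc*(p2 - p0) and m2 = fc*(p3 - p1), where fc = 0.71 (plain Catmull-Rom would use 0.5). Expanding the Hermite basis gives exactly the coefficients built with madd/msub:

    c0 = p1
    c1 = fc*(p2 - p0)
    c2 = 2*fc*p0 + (fc - 3)*p1 + (3 - 2*fc)*p2 - fc*p3
    c3 = (fc - 2)*(p2 - p1) - fc*p0 + fc*p3

The curve is then evaluated in Horner form, ((c3*t + c2)*t + c1)*t + c0, which is the madd chain used for vp_st and vp_en further down.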
@@ -386,12 +386,12 @@ ccl_device_inline bool bvh_cardinal_curve_intersect(KernelGlobals *kg, Intersect
float i_st = tree * resol;
float i_en = i_st + (level * resol);
#ifdef __KERNEL_SSE2__
- __m128 vi_st = _mm_set1_ps(i_st), vi_en = _mm_set1_ps(i_en);
- __m128 vp_st = fma(fma(fma(vcurve_coef[3], vi_st, vcurve_coef[2]), vi_st, vcurve_coef[1]), vi_st, vcurve_coef[0]);
- __m128 vp_en = fma(fma(fma(vcurve_coef[3], vi_en, vcurve_coef[2]), vi_en, vcurve_coef[1]), vi_en, vcurve_coef[0]);
+ ssef vi_st = ssef(i_st), vi_en = ssef(i_en);
+ ssef vp_st = madd(madd(madd(vcurve_coef[3], vi_st, vcurve_coef[2]), vi_st, vcurve_coef[1]), vi_st, vcurve_coef[0]);
+ ssef vp_en = madd(madd(madd(vcurve_coef[3], vi_en, vcurve_coef[2]), vi_en, vcurve_coef[1]), vi_en, vcurve_coef[0]);
- __m128 vbmin = _mm_min_ps(vp_st, vp_en);
- __m128 vbmax = _mm_max_ps(vp_st, vp_en);
+ ssef vbmin = min(vp_st, vp_en);
+ ssef vbmax = max(vp_st, vp_en);
float3 &bmin = (float3 &)vbmin, &bmax = (float3 &)vbmax;
float &bminx = bmin.x, &bminy = bmin.y, &bminz = bmin.z;
@@ -678,38 +678,38 @@ ccl_device_inline bool bvh_curve_intersect(KernelGlobals *kg, Intersection *isec
float sphere_b_tmp = dot3(dir, sphere_dif1);
float3 sphere_dif2 = sphere_dif1 - sphere_b_tmp * dir;
#else
- __m128 P_curve[2];
+ ssef P_curve[2];
if(type & PRIMITIVE_CURVE) {
- P_curve[0] = _mm_load_ps(&kg->__curve_keys.data[k0].x);
- P_curve[1] = _mm_load_ps(&kg->__curve_keys.data[k1].x);
+ P_curve[0] = load4f(&kg->__curve_keys.data[k0].x);
+ P_curve[1] = load4f(&kg->__curve_keys.data[k1].x);
}
else {
int fobject = (object == OBJECT_NONE)? kernel_tex_fetch(__prim_object, curveAddr): object;
motion_curve_keys(kg, fobject, prim, time, k0, k1, (float4*)&P_curve);
}
- const __m128 or12 = shuffle<3, 3, 3, 3>(P_curve[0], P_curve[1]);
+ const ssef or12 = shuffle<3, 3, 3, 3>(P_curve[0], P_curve[1]);
- __m128 r12 = or12;
- const __m128 vP = load_m128(P);
- const __m128 dif = _mm_sub_ps(vP, P_curve[0]);
- const __m128 dif_second = _mm_sub_ps(vP, P_curve[1]);
+ ssef r12 = or12;
+ const ssef vP = load4f(P);
+ const ssef dif = vP - P_curve[0];
+ const ssef dif_second = vP - P_curve[1];
if(difl != 0.0f) {
- const __m128 len1_sq = len3_squared_splat(dif);
- const __m128 len2_sq = len3_squared_splat(dif_second);
- const __m128 len12 = _mm_sqrt_ps(shuffle<0, 0, 0, 0>(len1_sq, len2_sq));
- const __m128 pixelsize12 = _mm_min_ps(_mm_mul_ps(len12, _mm_set1_ps(difl)), _mm_set1_ps(extmax));
- r12 = _mm_max_ps(or12, pixelsize12);
+ const ssef len1_sq = len3_squared_splat(dif);
+ const ssef len2_sq = len3_squared_splat(dif_second);
+ const ssef len12 = mm_sqrt(shuffle<0, 0, 0, 0>(len1_sq, len2_sq));
+ const ssef pixelsize12 = min(len12 * difl, ssef(extmax));
+ r12 = max(or12, pixelsize12);
}
- float or1 = _mm_cvtss_f32(or12), or2 = _mm_cvtss_f32(broadcast<2>(or12));
- float r1 = _mm_cvtss_f32(r12), r2 = _mm_cvtss_f32(broadcast<2>(r12));
-
- const __m128 p21_diff = _mm_sub_ps(P_curve[1], P_curve[0]);
- const __m128 sphere_dif1 = _mm_mul_ps(_mm_add_ps(dif, dif_second), _mm_set1_ps(0.5f));
- const __m128 dir = load_m128(direction);
- const __m128 sphere_b_tmp = dot3_splat(dir, sphere_dif1);
- const __m128 sphere_dif2 = fnma(sphere_b_tmp, dir, sphere_dif1);
+ float or1 = extract<0>(or12), or2 = extract<0>(shuffle<2>(or12));
+ float r1 = extract<0>(r12), r2 = extract<0>(shuffle<2>(r12));
+
+ const ssef p21_diff = P_curve[1] - P_curve[0];
+ const ssef sphere_dif1 = (dif + dif_second) * 0.5f;
+ const ssef dir = load4f(direction);
+ const ssef sphere_b_tmp = dot3_splat(dir, sphere_dif1);
+ const ssef sphere_dif2 = nmsub(sphere_b_tmp, dir, sphere_dif1);
#endif
float mr = max(r1, r2);
@@ -727,7 +727,7 @@ ccl_device_inline bool bvh_curve_intersect(KernelGlobals *kg, Intersection *isec
#ifndef __KERNEL_SSE2__
float3 tg = p21_diff * invl;
#else
- const __m128 tg = _mm_mul_ps(p21_diff, _mm_set1_ps(invl));
+ const ssef tg = p21_diff * invl;
#endif
float gd = (r2 - r1) * invl;
@@ -751,7 +751,7 @@ ccl_device_inline bool bvh_curve_intersect(KernelGlobals *kg, Intersection *isec
float3 cprod = cross(tg, dir);
float cprod2sq = len3_squared(cross(tg, dif));
#else
- const __m128 cprod = cross(tg, dir);
+ const ssef cprod = cross(tg, dir);
float cprod2sq = len3_squared(cross_zxy(tg, dif));
#endif
float cprodsq = len3_squared(cprod);
@@ -769,7 +769,7 @@ ccl_device_inline bool bvh_curve_intersect(KernelGlobals *kg, Intersection *isec
#ifndef __KERNEL_SSE2__
float3 tdif = dif + tcentre * dir;
#else
- const __m128 tdif = fma(_mm_set1_ps(tcentre), dir, dif);
+ const ssef tdif = madd(ssef(tcentre), dir, dif);
#endif
float tdifz = dot3(tdif, tg);
float tdifma = tdifz*gd + r1;
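A cheat sheet for the fused-multiply family used throughout this hunk, with the names taken from the new headers: madd(a, b, c) = a*b + c, msub(a, b, c) = a*b - c, and nmsub(a, b, c) = c - a*b, replacing the old fma/fms/fnma helpers. So sphere_dif2 = nmsub(sphere_b_tmp, dir, sphere_dif1) is sphere_dif1 - sphere_b_tmp*dir: the midpoint offset with its along-ray component projected out.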
diff --git a/intern/cycles/kernel/kernel_avx.cpp b/intern/cycles/kernel/kernel_avx.cpp
index f5e1b8a7bb7..d612a82b785 100644
--- a/intern/cycles/kernel/kernel_avx.cpp
+++ b/intern/cycles/kernel/kernel_avx.cpp
@@ -24,6 +24,7 @@
#define __KERNEL_SSE3__
#define __KERNEL_SSSE3__
#define __KERNEL_SSE41__
+#define __KERNEL_AVX__
#endif
#include "util_optimization.h"
diff --git a/intern/cycles/kernel/kernel_compat_cpu.h b/intern/cycles/kernel/kernel_compat_cpu.h
index d027bb62ebe..c2aab93c87b 100644
--- a/intern/cycles/kernel/kernel_compat_cpu.h
+++ b/intern/cycles/kernel/kernel_compat_cpu.h
@@ -44,16 +44,16 @@ template<typename T> struct texture {
}
#if 0
- ccl_always_inline __m128 fetch_m128(int index)
+ ccl_always_inline ssef fetch_ssef(int index)
{
kernel_assert(index >= 0 && index < width);
- return ((__m128*)data)[index];
+ return ((ssef*)data)[index];
}
- ccl_always_inline __m128i fetch_m128i(int index)
+ ccl_always_inline ssei fetch_ssei(int index)
{
kernel_assert(index >= 0 && index < width);
- return ((__m128i*)data)[index];
+ return ((ssei*)data)[index];
}
#endif
@@ -232,8 +232,8 @@ typedef texture_image<uchar4> texture_image_uchar4;
/* Macros to handle different memory storage on different devices */
#define kernel_tex_fetch(tex, index) (kg->tex.fetch(index))
-#define kernel_tex_fetch_m128(tex, index) (kg->tex.fetch_m128(index))
-#define kernel_tex_fetch_m128i(tex, index) (kg->tex.fetch_m128i(index))
+#define kernel_tex_fetch_ssef(tex, index) (kg->tex.fetch_ssef(index))
+#define kernel_tex_fetch_ssei(tex, index) (kg->tex.fetch_ssei(index))
#define kernel_tex_lookup(tex, t, offset, size) (kg->tex.lookup(t, offset, size))
#define kernel_tex_image_interp(tex, x, y) ((tex < MAX_FLOAT_IMAGES) ? kg->texture_float_images[tex].interp(x, y) : kg->texture_byte_images[tex - MAX_FLOAT_IMAGES].interp(x, y))
#define kernel_tex_image_interp_3d(tex, x, y, z) ((tex < MAX_FLOAT_IMAGES) ? kg->texture_float_images[tex].interp_3d(x, y, z) : kg->texture_byte_images[tex - MAX_FLOAT_IMAGES].interp_3d(x, y, z))
diff --git a/intern/cycles/kernel/svm/svm_image.h b/intern/cycles/kernel/svm/svm_image.h
index daf7c6652d2..b34c101f5e7 100644
--- a/intern/cycles/kernel/svm/svm_image.h
+++ b/intern/cycles/kernel/svm/svm_image.h
@@ -134,8 +134,8 @@ ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y,
{
#ifdef __KERNEL_CPU__
#ifdef __KERNEL_SSE2__
- __m128 r_m128;
- float4 &r = (float4 &)r_m128;
+ ssef r_ssef;
+ float4 &r = (float4 &)r_ssef;
r = kernel_tex_image_interp(id, x, y);
#else
float4 r = kernel_tex_image_interp(id, x, y);
@@ -318,14 +318,14 @@ ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y,
float alpha = r.w;
if(use_alpha && alpha != 1.0f && alpha != 0.0f) {
- r_m128 = _mm_div_ps(r_m128, _mm_set1_ps(alpha));
+ r_ssef = r_ssef / ssef(alpha);
if(id >= TEX_NUM_FLOAT_IMAGES)
- r_m128 = _mm_min_ps(r_m128, _mm_set1_ps(1.0f));
+ r_ssef = min(r_ssef, ssef(1.0f));
r.w = alpha;
}
if(srgb) {
- r_m128 = color_srgb_to_scene_linear(r_m128);
+ r_ssef = color_srgb_to_scene_linear(r_ssef);
r.w = alpha;
}
#else
diff --git a/intern/cycles/kernel/svm/svm_noise.h b/intern/cycles/kernel/svm/svm_noise.h
index 91dda8972f9..869341c81f4 100644
--- a/intern/cycles/kernel/svm/svm_noise.h
+++ b/intern/cycles/kernel/svm/svm_noise.h
@@ -38,11 +38,11 @@ ccl_device int quick_floor(float x)
return float_to_int(x) - ((x < 0) ? 1 : 0);
}
#else
-ccl_device_inline __m128i quick_floor_sse(const __m128& x)
+ccl_device_inline ssei quick_floor_sse(const ssef& x)
{
- __m128i b = _mm_cvttps_epi32(x);
- __m128i isneg = _mm_castps_si128(_mm_cmplt_ps(x, _mm_set1_ps(0.0f)));
- return _mm_add_epi32(b, isneg); // unsaturated add 0xffffffff is the same as subtract -1
+ ssei b = truncatei(x);
+ ssei isneg = cast((x < ssef(0.0f)).m128);
+ return b + isneg; // unsaturated add 0xffffffff is the same as subtract -1
}
#endif
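Why the add in quick_floor_sse works: truncatei() rounds toward zero while floor() rounds toward -inf, and the two differ by 1 for negative x. The compare mask (x < 0.0f) is all ones, i.e. integer -1, so the "unsaturated add" is the needed subtraction. Like the scalar quick_floor above, this is off by one at exact negative integers (-2.0 -> -3), which is acceptable for noise lookups. Scalar sketch:

int quick_floor_scalar(float x)
{
	int b = (int)x;         /* truncation: -1.5f -> -1 */
	return b - (x < 0.0f);  /* -1.5f -> -2,  1.5f -> 1 */
}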
@@ -52,9 +52,9 @@ ccl_device float bits_to_01(uint bits)
return bits * (1.0f/(float)0xFFFFFFFF);
}
#else
-ccl_device_inline __m128 bits_to_01_sse(const __m128i& bits)
+ccl_device_inline ssef bits_to_01_sse(const ssei& bits)
{
- return _mm_mul_ps(uint32_to_float(bits), _mm_set1_ps(1.0f/(float)0xFFFFFFFF));
+ return uint32_to_float(bits) * ssef(1.0f/(float)0xFFFFFFFF);
}
#endif
@@ -88,16 +88,16 @@ ccl_device uint hash(uint kx, uint ky, uint kz)
}
#ifdef __KERNEL_SSE2__
-ccl_device_inline __m128i hash_sse(const __m128i& kx, const __m128i& ky, const __m128i& kz)
+ccl_device_inline ssei hash_sse(const ssei& kx, const ssei& ky, const ssei& kz)
{
-#define rot(x,k) _mm_or_si128(_mm_slli_epi32((x), (k)), _mm_srli_epi32((x), 32-(k)))
-#define xor_rot(a, b, c) do {a = _mm_xor_si128(a, b); a = _mm_sub_epi32(a, rot(b, c));} while(0)
+#define rot(x,k) (((x)<<(k)) | (srl(x, 32-(k))))
+#define xor_rot(a, b, c) do {a = a^b; a = a - rot(b, c);} while(0)
uint len = 3;
- __m128i magic = _mm_set1_epi32(0xdeadbeef + (len << 2) + 13);
- __m128i a = _mm_add_epi32(magic, kx);
- __m128i b = _mm_add_epi32(magic, ky);
- __m128i c = _mm_add_epi32(magic, kz);
+ ssei magic = ssei(0xdeadbeef + (len << 2) + 13);
+ ssei a = magic + kx;
+ ssei b = magic + ky;
+ ssei c = magic + kz;
xor_rot(c, b, 14);
xor_rot(a, c, 11);
@@ -133,10 +133,10 @@ ccl_device float floorfrac(float x, int* i)
return x - *i;
}
#else
-ccl_device_inline __m128 floorfrac_sse(const __m128& x, __m128i *i)
+ccl_device_inline ssef floorfrac_sse(const ssef& x, ssei *i)
{
*i = quick_floor_sse(x);
- return _mm_sub_ps(x, _mm_cvtepi32_ps(*i));
+ return x - ssef(*i);
}
#endif
@@ -146,11 +146,11 @@ ccl_device float fade(float t)
return t * t * t * (t * (t * 6.0f - 15.0f) + 10.0f);
}
#else
-ccl_device_inline __m128 fade_sse(const __m128 *t)
+ccl_device_inline ssef fade_sse(const ssef *t)
{
- __m128 a = fma(*t, _mm_set1_ps(6.0f), _mm_set1_ps(-15.0f));
- __m128 b = fma(*t, a, _mm_set1_ps(10.0f));
- return _mm_mul_ps(_mm_mul_ps(*t, *t), _mm_mul_ps(*t, b));
+ ssef a = madd(*t, ssef(6.0f), ssef(-15.0f));
+ ssef b = madd(*t, a, ssef(10.0f));
+ return ((*t) * (*t)) * ((*t) * b);
}
#endif
@@ -160,10 +160,10 @@ ccl_device float nerp(float t, float a, float b)
return (1.0f - t) * a + t * b;
}
#else
-ccl_device_inline __m128 nerp_sse(const __m128& t, const __m128& a, const __m128& b)
+ccl_device_inline ssef nerp_sse(const ssef& t, const ssef& a, const ssef& b)
{
- __m128 x1 = _mm_mul_ps(_mm_sub_ps(_mm_set1_ps(1.0f), t), a);
- return fma(t, b, x1);
+ ssef x1 = (ssef(1.0f) - t) * a;
+ return madd(t, b, x1);
}
#endif
@@ -178,35 +178,35 @@ ccl_device float grad(int hash, float x, float y, float z)
return ((h&1) ? -u : u) + ((h&2) ? -v : v);
}
#else
-ccl_device_inline __m128 grad_sse(const __m128i& hash, const __m128& x, const __m128& y, const __m128& z)
+ccl_device_inline ssef grad_sse(const ssei& hash, const ssef& x, const ssef& y, const ssef& z)
{
- __m128i c1 = _mm_set1_epi32(1);
- __m128i c2 = _mm_set1_epi32(2);
+ ssei c1 = ssei(1);
+ ssei c2 = ssei(2);
- __m128i h = _mm_and_si128(hash, _mm_set1_epi32(15)); // h = hash & 15
+ ssei h = hash & ssei(15); // h = hash & 15
- __m128i case_ux = _mm_cmplt_epi32(h, _mm_set1_epi32(8)); // 0xffffffff if h < 8 else 0
+ sseb case_ux = h < ssei(8); // 0xffffffff if h < 8 else 0
- __m128 u = blend(_mm_castsi128_ps(case_ux), x, y); // u = h<8 ? x : y
+ ssef u = select(case_ux, x, y); // u = h<8 ? x : y
- __m128i case_vy = _mm_cmplt_epi32(h, _mm_set1_epi32(4)); // 0xffffffff if h < 4 else 0
+ sseb case_vy = h < ssei(4); // 0xffffffff if h < 4 else 0
- __m128i case_h12 = _mm_cmpeq_epi32(h, _mm_set1_epi32(12)); // 0xffffffff if h == 12 else 0
- __m128i case_h14 = _mm_cmpeq_epi32(h, _mm_set1_epi32(14)); // 0xffffffff if h == 14 else 0
+ sseb case_h12 = h == ssei(12); // 0xffffffff if h == 12 else 0
+ sseb case_h14 = h == ssei(14); // 0xffffffff if h == 14 else 0
- __m128i case_vx = _mm_or_si128(case_h12, case_h14); // 0xffffffff if h == 12 or h == 14 else 0
+ sseb case_vx = case_h12 | case_h14; // 0xffffffff if h == 12 or h == 14 else 0
- __m128 v = blend(_mm_castsi128_ps(case_vy), y, blend(_mm_castsi128_ps(case_vx), x, z)); // v = h<4 ? y : h == 12 || h == 14 ? x : z
+ ssef v = select(case_vy, y, select(case_vx, x, z)); // v = h<4 ? y : h == 12 || h == 14 ? x : z
- __m128i case_uneg = _mm_slli_epi32(_mm_and_si128(h, c1), 31); // 1<<31 if h&1 else 0
- __m128 case_uneg_mask = _mm_castsi128_ps(case_uneg); // -0.0 if h&1 else +0.0
- __m128 ru = _mm_xor_ps(u, case_uneg_mask); // -u if h&1 else u (copy float sign)
+ ssei case_uneg = (h & c1) << 31; // 1<<31 if h&1 else 0
+ ssef case_uneg_mask = cast(case_uneg); // -0.0 if h&1 else +0.0
+ ssef ru = u ^ case_uneg_mask; // -u if h&1 else u (copy float sign)
- __m128i case_vneg = _mm_slli_epi32(_mm_and_si128(h, c2), 30); // 2<<30 if h&2 else 0
- __m128 case_vneg_mask = _mm_castsi128_ps(case_vneg); // -0.0 if h&2 else +0.0
- __m128 rv = _mm_xor_ps(v, case_vneg_mask); // -v if h&2 else v (copy float sign)
+ ssei case_vneg = (h & c2) << 30; // 2<<30 if h&2 else 0
+ ssef case_vneg_mask = cast(case_vneg); // -0.0 if h&2 else +0.0
+ ssef rv = v ^ case_vneg_mask; // -v if h&2 else v (copy float sign)
- __m128 r = _mm_add_ps(ru, rv); // ((h&1) ? -u : u) + ((h&2) ? -v : v)
+ ssef r = ru + rv; // ((h&1) ? -u : u) + ((h&2) ? -v : v)
return r;
}
#endif
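The sign trick in grad_sse: (h & 1) << 31 moves the low hash bit into the float sign position, giving a mask of -0.0f or +0.0f, and xor with it flips u's sign exactly when the bit is set, with no branch. Scalar sketch of the same idea:

#include <cstdint>
#include <cstring>

float flip_sign_if(float u, uint32_t bit)  /* bit is 0 or 1 */
{
	uint32_t ub;
	std::memcpy(&ub, &u, 4);
	ub ^= bit << 31;                       /* toggle the sign bit */
	float r;
	std::memcpy(&r, &ub, 4);
	return r;                              /* bit ? -u : u */
}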
@@ -217,9 +217,9 @@ ccl_device float scale3(float result)
return 0.9820f * result;
}
#else
-ccl_device_inline __m128 scale3_sse(const __m128& result)
+ccl_device_inline ssef scale3_sse(const ssef& result)
{
- return _mm_mul_ps(_mm_set1_ps(0.9820f), result);
+ return ssef(0.9820f) * result;
}
#endif
@@ -252,41 +252,41 @@ ccl_device_noinline float perlin(float x, float y, float z)
#else
ccl_device_noinline float perlin(float x, float y, float z)
{
- __m128 xyz = _mm_setr_ps(x, y, z, 0.0f);
- __m128i XYZ;
+ ssef xyz = ssef(x, y, z, 0.0f);
+ ssei XYZ;
- __m128 fxyz = floorfrac_sse(xyz, &XYZ);
+ ssef fxyz = floorfrac_sse(xyz, &XYZ);
- __m128 uvw = fade_sse(&fxyz);
- __m128 u = broadcast<0>(uvw), v = broadcast<1>(uvw), w = broadcast<2>(uvw);
+ ssef uvw = fade_sse(&fxyz);
+ ssef u = shuffle<0>(uvw), v = shuffle<1>(uvw), w = shuffle<2>(uvw);
- __m128i XYZ_ofc = _mm_add_epi32(XYZ, _mm_set1_epi32(1));
- __m128i vdy = shuffle<1, 1, 1, 1>(XYZ, XYZ_ofc); // +0, +0, +1, +1
- __m128i vdz = shuffle<0, 2, 0, 2>(shuffle<2, 2, 2, 2>(XYZ, XYZ_ofc)); // +0, +1, +0, +1
+ ssei XYZ_ofc = XYZ + ssei(1);
+ ssei vdy = shuffle<1, 1, 1, 1>(XYZ, XYZ_ofc); // +0, +0, +1, +1
+ ssei vdz = shuffle<0, 2, 0, 2>(shuffle<2, 2, 2, 2>(XYZ, XYZ_ofc)); // +0, +1, +0, +1
- __m128i h1 = hash_sse(broadcast<0>(XYZ), vdy, vdz); // hash directions 000, 001, 010, 011
- __m128i h2 = hash_sse(broadcast<0>(XYZ_ofc), vdy, vdz); // hash directions 100, 101, 110, 111
+ ssei h1 = hash_sse(shuffle<0>(XYZ), vdy, vdz); // hash directions 000, 001, 010, 011
+ ssei h2 = hash_sse(shuffle<0>(XYZ_ofc), vdy, vdz); // hash directions 100, 101, 110, 111
- __m128 fxyz_ofc = _mm_sub_ps(fxyz, _mm_set1_ps(1.0f));
- __m128 vfy = shuffle<1, 1, 1, 1>(fxyz, fxyz_ofc);
- __m128 vfz = shuffle<0, 2, 0, 2>(shuffle<2, 2, 2, 2>(fxyz, fxyz_ofc));
+ ssef fxyz_ofc = fxyz - ssef(1.0f);
+ ssef vfy = shuffle<1, 1, 1, 1>(fxyz, fxyz_ofc);
+ ssef vfz = shuffle<0, 2, 0, 2>(shuffle<2, 2, 2, 2>(fxyz, fxyz_ofc));
- __m128 g1 = grad_sse(h1, broadcast<0>(fxyz), vfy, vfz);
- __m128 g2 = grad_sse(h2, broadcast<0>(fxyz_ofc), vfy, vfz);
- __m128 n1 = nerp_sse(u, g1, g2);
+ ssef g1 = grad_sse(h1, shuffle<0>(fxyz), vfy, vfz);
+ ssef g2 = grad_sse(h2, shuffle<0>(fxyz_ofc), vfy, vfz);
+ ssef n1 = nerp_sse(u, g1, g2);
- __m128 n1_half = shuffle<2, 3, 2, 3>(n1); // extract 2 floats to a separate vector
- __m128 n2 = nerp_sse(v, n1, n1_half); // process nerp([a, b, _, _], [c, d, _, _]) -> [a', b', _, _]
+ ssef n1_half = shuffle<2, 3, 2, 3>(n1); // extract 2 floats to a separate vector
+ ssef n2 = nerp_sse(v, n1, n1_half); // process nerp([a, b, _, _], [c, d, _, _]) -> [a', b', _, _]
- __m128 n2_second = broadcast<1>(n2); // extract b to a separate vector
- __m128 result = nerp_sse(w, n2, n2_second); // process nerp([a', _, _, _], [b', _, _, _]) -> [a'', _, _, _]
+ ssef n2_second = shuffle<1>(n2); // extract b to a separate vector
+ ssef result = nerp_sse(w, n2, n2_second); // process nerp([a', _, _, _], [b', _, _, _]) -> [a'', _, _, _]
- __m128 r = scale3_sse(result);
+ ssef r = scale3_sse(result);
- __m128 infmask = _mm_castsi128_ps(_mm_set1_epi32(0x7f800000));
- __m128 rinfmask = _mm_cmpeq_ps(_mm_and_ps(r, infmask), infmask); // 0xffffffff if r is inf/-inf/nan else 0
- __m128 rfinite = _mm_andnot_ps(rinfmask, r); // 0 if r is inf/-inf/nan else r
- return _mm_cvtss_f32(rfinite);
+ ssef infmask = cast(ssei(0x7f800000));
+ ssef rinfmask = ((r & infmask) == infmask).m128; // 0xffffffff if r is inf/-inf/nan else 0
+ ssef rfinite = andnot(rinfmask, r); // 0 if r is inf/-inf/nan else r
+ return extract<0>(rfinite);
}
#endif
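The last lines of the SSE perlin() strip non-finite results: a float is inf or NaN exactly when all exponent bits (0x7f800000) are set, so (r & infmask) == infmask builds a lane mask and andnot zeroes those lanes. Scalar equivalent:

#include <cstdint>
#include <cstring>

float finite_or_zero(float r)
{
	uint32_t bits;
	std::memcpy(&bits, &r, 4);
	/* exponent all ones <=> inf, -inf or nan */
	return ((bits & 0x7f800000u) != 0x7f800000u) ? r : 0.0f;
}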
@@ -357,12 +357,12 @@ ccl_device float3 cellnoise_color(float3 p)
return make_float3(r, g, b);
}
#else
-ccl_device __m128 cellnoise_color(const __m128& p)
+ccl_device ssef cellnoise_color(const ssef& p)
{
- __m128i ip = quick_floor_sse(p);
- __m128i ip_yxz = shuffle<1, 0, 2, 3>(ip);
- __m128i ip_xyy = shuffle<0, 1, 1, 3>(ip);
- __m128i ip_zzx = shuffle<2, 2, 0, 3>(ip);
+ ssei ip = quick_floor_sse(p);
+ ssei ip_yxz = shuffle<1, 0, 2, 3>(ip);
+ ssei ip_xyy = shuffle<0, 1, 1, 3>(ip);
+ ssei ip_zzx = shuffle<2, 2, 0, 3>(ip);
return bits_to_01_sse(hash_sse(ip_xyy, ip_yxz, ip_zzx));
}
#endif
diff --git a/intern/cycles/kernel/svm/svm_texture.h b/intern/cycles/kernel/svm/svm_texture.h
index 5fd9204cbf6..d97c85db36a 100644
--- a/intern/cycles/kernel/svm/svm_texture.h
+++ b/intern/cycles/kernel/svm/svm_texture.h
@@ -140,15 +140,15 @@ ccl_device float voronoi_F1_distance(float3 p)
}
}
#else
- __m128 vec_p = load_m128(p);
- __m128i xyzi = quick_floor_sse(vec_p);
+ ssef vec_p = load4f(p);
+ ssei xyzi = quick_floor_sse(vec_p);
for (int xx = -1; xx <= 1; xx++) {
for (int yy = -1; yy <= 1; yy++) {
for (int zz = -1; zz <= 1; zz++) {
- __m128 ip = _mm_cvtepi32_ps(_mm_add_epi32(xyzi, _mm_setr_epi32(xx, yy, zz, 0)));
- __m128 vp = _mm_add_ps(ip, cellnoise_color(ip));
- float d = len_squared<1, 1, 1, 0>(_mm_sub_ps(vec_p, vp));
+ ssef ip = ssef(xyzi + ssei(xx, yy, zz, 0));
+ ssef vp = ip + cellnoise_color(ip);
+ float d = len_squared<1, 1, 1, 0>(vec_p - vp);
da = min(d, da);
}
}
@@ -184,15 +184,15 @@ ccl_device float3 voronoi_F1_color(float3 p)
return cellnoise_color(pa);
#else
- __m128 pa, vec_p = load_m128(p);
- __m128i xyzi = quick_floor_sse(vec_p);
+ ssef pa, vec_p = load4f(p);
+ ssei xyzi = quick_floor_sse(vec_p);
for (int xx = -1; xx <= 1; xx++) {
for (int yy = -1; yy <= 1; yy++) {
for (int zz = -1; zz <= 1; zz++) {
- __m128 ip = _mm_cvtepi32_ps(_mm_add_epi32(xyzi, _mm_setr_epi32(xx, yy, zz, 0)));
- __m128 vp = _mm_add_ps(ip, cellnoise_color(ip));
- float d = len_squared<1, 1, 1, 0>(_mm_sub_ps(vec_p, vp));
+ ssef ip = ssef(xyzi + ssei(xx, yy, zz, 0));
+ ssef vp = ip + cellnoise_color(ip);
+ float d = len_squared<1, 1, 1, 0>(vec_p - vp);
if(d < da) {
da = d;
@@ -202,7 +202,7 @@ ccl_device float3 voronoi_F1_color(float3 p)
}
}
- __m128 color = cellnoise_color(pa);
+ ssef color = cellnoise_color(pa);
return (float3 &)color;
#endif
}
diff --git a/intern/cycles/render/curves.cpp b/intern/cycles/render/curves.cpp
index 2c96ffa655e..dc7665fe144 100644
--- a/intern/cycles/render/curves.cpp
+++ b/intern/cycles/render/curves.cpp
@@ -46,8 +46,9 @@ void curvebounds(float *lower, float *upper, float3 *p, int dim)
float discroot = curve_coef[2] * curve_coef[2] - 3 * curve_coef[3] * curve_coef[1];
float ta = -1.0f;
float tb = -1.0f;
+
if(discroot >= 0) {
- discroot = sqrt(discroot);
+ discroot = sqrtf(discroot);
ta = (-curve_coef[2] - discroot) / (3 * curve_coef[3]);
tb = (-curve_coef[2] + discroot) / (3 * curve_coef[3]);
ta = (ta > 1.0f || ta < 0.0f) ? -1.0f : ta;
@@ -56,20 +57,21 @@ void curvebounds(float *lower, float *upper, float3 *p, int dim)
*upper = max(p1[dim],p2[dim]);
*lower = min(p1[dim],p2[dim]);
+
float exa = p1[dim];
float exb = p2[dim];
- float t2;
- float t3;
+
if(ta >= 0.0f) {
- t2 = ta * ta;
- t3 = t2 * ta;
+ float t2 = ta * ta;
+ float t3 = t2 * ta;
exa = curve_coef[3] * t3 + curve_coef[2] * t2 + curve_coef[1] * ta + curve_coef[0];
}
if(tb >= 0.0f) {
- t2 = tb * tb;
- t3 = t2 * tb;
+ float t2 = tb * tb;
+ float t3 = t2 * tb;
exb = curve_coef[3] * t3 + curve_coef[2] * t2 + curve_coef[1] * tb + curve_coef[0];
}
+
*upper = max(*upper, max(exa,exb));
*lower = min(*lower, min(exa,exb));
}
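What the hunk above computes: the extrema of the cubic f(t) = c3*t^3 + c2*t^2 + c1*t + c0 on [0,1] occur at the endpoints or where f'(t) = 3*c3*t^2 + 2*c2*t + c1 = 0, i.e. at

    t = (-c2 +/- sqrt(c2^2 - 3*c3*c1)) / (3*c3)

discroot is that radicand, and ta/tb are the two roots, reset to -1 when they fall outside [0,1] so the endpoint values win. The patch itself only switches sqrt to sqrtf for the float operand and scopes t2/t3 where they are used.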
diff --git a/intern/cycles/render/tile.cpp b/intern/cycles/render/tile.cpp
index 72bcdf966b5..d6094a4fa0a 100644
--- a/intern/cycles/render/tile.cpp
+++ b/intern/cycles/render/tile.cpp
@@ -202,7 +202,7 @@ list<Tile>::iterator TileManager::next_background_tile(int device, TileOrder til
case TILE_CENTER:
distx = centx - (cur_tile.x + cur_tile.w);
disty = centy - (cur_tile.y + cur_tile.h);
- distx = (int64_t) sqrt((double)distx * distx + disty * disty);
+ distx = (int64_t)sqrt((double)(distx * distx + disty * disty));
break;
case TILE_RIGHT_TO_LEFT:
distx = cordx - cur_tile.x;
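The tile.cpp change is about where the conversion to double happens:

    sqrt((double)distx * distx + disty * disty)

parses as ((double)distx * (double)distx) + (double)(disty * disty), mixing a floating-point square with an int64 one, while

    sqrt((double)(distx * distx + disty * disty))

keeps the whole sum in int64_t and converts once, which is clearer and keeps both squares in the same arithmetic.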
diff --git a/intern/cycles/util/CMakeLists.txt b/intern/cycles/util/CMakeLists.txt
index c1150d226ae..01b5675b9f7 100644
--- a/intern/cycles/util/CMakeLists.txt
+++ b/intern/cycles/util/CMakeLists.txt
@@ -16,6 +16,7 @@ set(SRC
util_opencl.cpp
util_path.cpp
util_string.cpp
+ util_simd.cpp
util_system.cpp
util_task.cpp
util_time.cpp
@@ -53,6 +54,9 @@ set(SRC_HEADERS
util_progress.h
util_set.h
util_simd.h
+ util_sseb.h
+ util_ssef.h
+ util_ssei.h
util_stats.h
util_string.h
util_system.h
diff --git a/intern/cycles/util/util_color.h b/intern/cycles/util/util_color.h
index b72cc6bc873..d566e1bf359 100644
--- a/intern/cycles/util/util_color.h
+++ b/intern/cycles/util/util_color.h
@@ -155,28 +155,28 @@ ccl_device float3 color_srgb_to_scene_linear(float3 c)
* e2coeff = 2^(127/exponent - 127) * bias_coeff^(1/exponent), encoded as uint32_t
*/
template<unsigned exp, unsigned e2coeff>
-ccl_device_inline __m128 fastpow(const __m128 &arg)
+ccl_device_inline ssef fastpow(const ssef &arg)
{
- __m128 ret;
- ret = _mm_mul_ps(arg, _mm_castsi128_ps(_mm_set1_epi32(e2coeff)));
- ret = _mm_cvtepi32_ps(_mm_castps_si128(ret));
- ret = _mm_mul_ps(ret, _mm_castsi128_ps(_mm_set1_epi32(exp)));
- ret = _mm_castsi128_ps(_mm_cvtps_epi32(ret));
+ ssef ret;
+ ret = arg * cast(ssei(e2coeff));
+ ret = ssef(cast(ret));
+ ret = ret * cast(ssei(exp));
+ ret = cast(ssei(ret));
return ret;
}
/* Improve x ^ 1.0f/5.0f solution with Newton-Raphson method */
-ccl_device_inline __m128 improve_5throot_solution(const __m128 &old_result, const __m128 &x)
+ccl_device_inline ssef improve_5throot_solution(const ssef &old_result, const ssef &x)
{
- __m128 approx2 = _mm_mul_ps(old_result, old_result);
- __m128 approx4 = _mm_mul_ps(approx2, approx2);
- __m128 t = _mm_div_ps(x, approx4);
- __m128 summ = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(4.0f), old_result), t); /* fma */
- return _mm_mul_ps(summ, _mm_set1_ps(1.0f/5.0f));
+ ssef approx2 = old_result * old_result;
+ ssef approx4 = approx2 * approx2;
+ ssef t = x / approx4;
+ ssef summ = madd(ssef(4.0f), old_result, t);
+ return summ * ssef(1.0f/5.0f);
}
/* Calculate powf(x, 2.4). Working domain: 1e-10 < x < 1e+10 */
-ccl_device_inline __m128 fastpow24(const __m128 &arg)
+ccl_device_inline ssef fastpow24(const ssef &arg)
{
/* max, avg and |avg| errors were calculated in gcc without FMA instructions
* The final precision should be better than powf in glibc */
@@ -184,22 +184,22 @@ ccl_device_inline __m128 fastpow24(const __m128 &arg)
/* Calculate x^4/5, coefficient 0.994 was constructed manually to minimize avg error */
/* 0x3F4CCCCD = 4/5 */
/* 0x4F55A7FB = 2^(127/(4/5) - 127) * 0.994^(1/(4/5)) */
- __m128 x = fastpow<0x3F4CCCCD, 0x4F55A7FB>(arg); // error max = 0.17 avg = 0.0018 |avg| = 0.05
- __m128 arg2 = _mm_mul_ps(arg, arg);
- __m128 arg4 = _mm_mul_ps(arg2, arg2);
+ ssef x = fastpow<0x3F4CCCCD, 0x4F55A7FB>(arg); // error max = 0.17 avg = 0.0018 |avg| = 0.05
+ ssef arg2 = arg * arg;
+ ssef arg4 = arg2 * arg2;
x = improve_5throot_solution(x, arg4); /* error max = 0.018 avg = 0.0031 |avg| = 0.0031 */
x = improve_5throot_solution(x, arg4); /* error max = 0.00021 avg = 1.6e-05 |avg| = 1.6e-05 */
x = improve_5throot_solution(x, arg4); /* error max = 6.1e-07 avg = 5.2e-08 |avg| = 1.1e-07 */
- return _mm_mul_ps(x, _mm_mul_ps(x, x));
+ return x * (x * x);
}
-ccl_device __m128 color_srgb_to_scene_linear(const __m128 &c)
+ccl_device ssef color_srgb_to_scene_linear(const ssef &c)
{
- __m128 cmp = _mm_cmplt_ps(c, _mm_set1_ps(0.04045f));
- __m128 lt = _mm_max_ps(_mm_mul_ps(c, _mm_set1_ps(1.0f/12.92f)), _mm_set1_ps(0.0f));
- __m128 gtebase = _mm_mul_ps(_mm_add_ps(c, _mm_set1_ps(0.055f)), _mm_set1_ps(1.0f/1.055f)); /* fma */
- __m128 gte = fastpow24(gtebase);
- return blend(cmp, lt, gte);
+ sseb cmp = c < ssef(0.04045f);
+ ssef lt = max(c * ssef(1.0f/12.92f), ssef(0.0f));
+ ssef gtebase = (c + ssef(0.055f)) * ssef(1.0f/1.055f); /* fma */
+ ssef gte = fastpow24(gtebase);
+ return select(cmp, lt, gte);
}
#endif
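A math check on fastpow24: arg^2.4 = (arg^(4/5))^3. fastpow<0x3F4CCCCD, 0x4F55A7FB> produces a rough arg^(4/5), and improve_5throot_solution is one Newton step for y^5 = x:

    y_next = y - (y^5 - x) / (5*y^4) = (4*y + x/y^4) / 5

which is exactly summ * (1/5) with summ = madd(ssef(4.0f), y, x/y^4). Feeding it x = arg^4 three times converges y to arg^(4/5), and the final x * (x * x) cubes it.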
diff --git a/intern/cycles/util/util_half.h b/intern/cycles/util/util_half.h
index da6fae79bb9..397133618be 100644
--- a/intern/cycles/util/util_half.h
+++ b/intern/cycles/util/util_half.h
@@ -68,18 +68,18 @@ ccl_device_inline void float4_store_half(half *h, float4 f, float scale)
}
#else
/* same as above with SSE */
- const __m128 mm_scale = _mm_set_ps1(scale);
- const __m128i mm_38800000 = _mm_set1_epi32(0x38800000);
- const __m128i mm_7FFF = _mm_set1_epi32(0x7FFF);
- const __m128i mm_7FFFFFFF = _mm_set1_epi32(0x7FFFFFFF);
- const __m128i mm_C8000000 = _mm_set1_epi32(0xC8000000);
-
- __m128 mm_fscale = _mm_mul_ps(load_m128(f), mm_scale);
- __m128i x = _mm_castps_si128(_mm_min_ps(_mm_max_ps(mm_fscale, _mm_set_ps1(0.0f)), _mm_set_ps1(65500.0f)));
- __m128i absolute = _mm_and_si128(x, mm_7FFFFFFF);
- __m128i Z = _mm_add_epi32(absolute, mm_C8000000);
- __m128i result = _mm_andnot_si128(_mm_cmplt_epi32(absolute, mm_38800000), Z);
- __m128i rh = _mm_and_si128(_mm_srai_epi32(result, 13), mm_7FFF);
+ const ssef mm_scale = ssef(scale);
+ const ssei mm_38800000 = ssei(0x38800000);
+ const ssei mm_7FFF = ssei(0x7FFF);
+ const ssei mm_7FFFFFFF = ssei(0x7FFFFFFF);
+ const ssei mm_C8000000 = ssei(0xC8000000);
+
+ ssef mm_fscale = load4f(f) * mm_scale;
+ ssei x = cast(min(max(mm_fscale, ssef(0.0f)), ssef(65500.0f)));
+ ssei absolute = x & mm_7FFFFFFF;
+ ssei Z = absolute + mm_C8000000;
+ ssei result = andnot(absolute < mm_38800000, Z);
+ ssei rh = (result >> 13) & mm_7FFF;
_mm_storel_pi((__m64*)h, _mm_castsi128_ps(_mm_packs_epi32(rh, rh)));
#endif
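The float-to-half bit trick above, lane by lane: after clamping to [0, 65500] (the half range), absolute holds the float's magnitude bits. Adding 0xC8000000 is a wrapping subtract of 0x38000000 = (127 - 15) << 23, which rebiases the exponent from float's 127 to half's 15. Any input below 0x38800000 (2^-14, the smallest normal half) is flushed to zero by the andnot, and the >> 13 drops 23 - 10 = 13 mantissa bits before _mm_packs_epi32 narrows the four results and the low 64 bits are stored.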
diff --git a/intern/cycles/util/util_optimization.h b/intern/cycles/util/util_optimization.h
index f901513ec4b..0a6013cddd4 100644
--- a/intern/cycles/util/util_optimization.h
+++ b/intern/cycles/util/util_optimization.h
@@ -101,6 +101,10 @@
/* SSE intrinsics headers */
#ifndef FREE_WINDOWS64
+#ifdef _MSC_VER
+#include <intrin.h>
+#else
+
#ifdef __KERNEL_SSE2__
#include <xmmintrin.h> /* SSE 1 */
#include <emmintrin.h> /* SSE 2 */
@@ -118,6 +122,12 @@
#include <smmintrin.h> /* SSE 4.1 */
#endif
+#ifdef __KERNEL_AVX__
+#include <immintrin.h> /* AVX */
+#endif
+
+#endif
+
#else
/* MinGW64 has conflicting declarations for these SSE headers in <windows.h>.
diff --git a/intern/cycles/util/util_simd.cpp b/intern/cycles/util/util_simd.cpp
new file mode 100644
index 00000000000..8c34f6600d3
--- /dev/null
+++ b/intern/cycles/util/util_simd.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+#include "util_simd.h"
+
+CCL_NAMESPACE_BEGIN
+
+const __m128 _mm_lookupmask_ps[16] = {
+ _mm_castsi128_ps(_mm_set_epi32( 0, 0, 0, 0)),
+ _mm_castsi128_ps(_mm_set_epi32( 0, 0, 0,-1)),
+ _mm_castsi128_ps(_mm_set_epi32( 0, 0,-1, 0)),
+ _mm_castsi128_ps(_mm_set_epi32( 0, 0,-1,-1)),
+ _mm_castsi128_ps(_mm_set_epi32( 0,-1, 0, 0)),
+ _mm_castsi128_ps(_mm_set_epi32( 0,-1, 0,-1)),
+ _mm_castsi128_ps(_mm_set_epi32( 0,-1,-1, 0)),
+ _mm_castsi128_ps(_mm_set_epi32( 0,-1,-1,-1)),
+ _mm_castsi128_ps(_mm_set_epi32(-1, 0, 0, 0)),
+ _mm_castsi128_ps(_mm_set_epi32(-1, 0, 0,-1)),
+ _mm_castsi128_ps(_mm_set_epi32(-1, 0,-1, 0)),
+ _mm_castsi128_ps(_mm_set_epi32(-1, 0,-1,-1)),
+ _mm_castsi128_ps(_mm_set_epi32(-1,-1, 0, 0)),
+ _mm_castsi128_ps(_mm_set_epi32(-1,-1, 0,-1)),
+ _mm_castsi128_ps(_mm_set_epi32(-1,-1,-1, 0)),
+ _mm_castsi128_ps(_mm_set_epi32(-1,-1,-1,-1))
+};
+
+CCL_NAMESPACE_END
+
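The new _mm_lookupmask_ps table maps a 4-bit pattern to a per-lane all-ones/zero mask, e.g. index 5 (binary 0101) selects lanes 0 and 2; in Embree it backs constructions like sseb(bool, bool, bool, bool). A sketch of one typical use, assuming SSE4.1 blendv is available:

#include <smmintrin.h>  /* SSE4.1 */

__m128 keep_lanes(const __m128 &a, int which /* 0..15 */)
{
	/* zero the lanes whose bit in `which` is clear */
	return _mm_blendv_ps(_mm_setzero_ps(), a, _mm_lookupmask_ps[which]);
}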
diff --git a/intern/cycles/util/util_simd.h b/intern/cycles/util/util_simd.h
index f0f37fa57aa..0f65fab58b6 100644
--- a/intern/cycles/util/util_simd.h
+++ b/intern/cycles/util/util_simd.h
@@ -1,7 +1,8 @@
/*
- * Copyright 2011-2013 Blender Foundation
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
*
- * Licensed under the Apache License, Version 2.0 (the "License");
+ * Licensed under the Apache License, Version 2.0(the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
@@ -14,263 +15,419 @@
* limitations under the License
*/
-#ifndef __UTIL_SIMD_H__
-#define __UTIL_SIMD_H__
+#ifndef __UTIL_SIMD_TYPES_H__
+#define __UTIL_SIMD_TYPES_H__
+
+#include <limits>
+
+#include "util_debug.h"
+#include "util_types.h"
CCL_NAMESPACE_BEGIN
#ifdef __KERNEL_SSE2__
-/* SSE shuffle utility functions */
+struct sseb;
+struct ssei;
+struct ssef;
+
+extern const __m128 _mm_lookupmask_ps[16];
+
+/* Special Types */
-#ifdef __KERNEL_SSSE3__
+static struct TrueTy {
+__forceinline operator bool( ) const { return true; }
+} True ccl_maybe_unused;
-/* faster version for SSSE3 */
-typedef __m128i shuffle_swap_t;
+static struct FalseTy {
+__forceinline operator bool( ) const { return false; }
+} False ccl_maybe_unused;
-ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void)
+static struct NegInfTy
{
- return _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-}
+__forceinline operator float ( ) const { return -std::numeric_limits<float>::infinity(); }
+__forceinline operator int ( ) const { return std::numeric_limits<int>::min(); }
+} neg_inf ccl_maybe_unused;
-ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void)
+static struct PosInfTy
{
- return _mm_set_epi8(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+__forceinline operator float ( ) const { return std::numeric_limits<float>::infinity(); }
+__forceinline operator int ( ) const { return std::numeric_limits<int>::max(); }
+} inf ccl_maybe_unused, pos_inf ccl_maybe_unused;
+
+/* Intrinsics Functions */
+
+#if defined(__BMI__) && defined(__GNUC__)
+#define _tzcnt_u32 __tzcnt_u32
+#define _tzcnt_u64 __tzcnt_u64
+#endif
+
+#if defined(__LZCNT__)
+#define _lzcnt_u32 __lzcnt32
+#define _lzcnt_u64 __lzcnt64
+#endif
+
+#if defined(_WIN32)
+
+__forceinline int __popcnt(int in) {
+ return _mm_popcnt_u32(in);
}
-ccl_device_inline const __m128 shuffle_swap(const __m128& a, const shuffle_swap_t& shuf)
-{
- return _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(a), shuf));
+#if !defined(_MSC_VER)
+__forceinline unsigned int __popcnt(unsigned int in) {
+ return _mm_popcnt_u32(in);
+}
+#endif
+
+#if defined(__KERNEL_64_BIT__)
+__forceinline long long __popcnt(long long in) {
+ return _mm_popcnt_u64(in);
+}
+__forceinline size_t __popcnt(size_t in) {
+ return _mm_popcnt_u64(in);
+}
+#endif
+
+__forceinline int __bsf(int v) {
+#if defined(__KERNEL_AVX2__)
+ return _tzcnt_u32(v);
+#else
+ unsigned long r = 0; _BitScanForward(&r,v); return r;
+#endif
}
+__forceinline unsigned int __bsf(unsigned int v) {
+#if defined(__KERNEL_AVX2__)
+ return _tzcnt_u32(v);
#else
+ unsigned long r = 0; _BitScanForward(&r,v); return r;
+#endif
+}
-/* somewhat slower version for SSE2 */
-typedef int shuffle_swap_t;
+__forceinline int __bsr(int v) {
+ unsigned long r = 0; _BitScanReverse(&r,v); return r;
+}
-ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void)
-{
- return 0;
+__forceinline int __btc(int v, int i) {
+ long r = v; _bittestandcomplement(&r,i); return r;
}
-ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void)
-{
- return 1;
+__forceinline int __bts(int v, int i) {
+ long r = v; _bittestandset(&r,i); return r;
}
-ccl_device_inline const __m128 shuffle_swap(const __m128& a, shuffle_swap_t shuf)
-{
- /* shuffle value must be a constant, so we need to branch */
- if(shuf)
- return _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
- else
- return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0));
+__forceinline int __btr(int v, int i) {
+ long r = v; _bittestandreset(&r,i); return r;
}
+__forceinline int bitscan(int v) {
+#if defined(__KERNEL_AVX2__)
+ return _tzcnt_u32(v);
+#else
+ return __bsf(v);
#endif
+}
-#ifdef __KERNEL_SSE41__
-ccl_device_inline void gen_idirsplat_swap(const __m128 &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap,
- const float3& idir, __m128 idirsplat[3], shuffle_swap_t shufflexyz[3])
+__forceinline int clz(const int x)
{
- const __m128 idirsplat_raw[] = { _mm_set_ps1(idir.x), _mm_set_ps1(idir.y), _mm_set_ps1(idir.z) };
- idirsplat[0] = _mm_xor_ps(idirsplat_raw[0], pn);
- idirsplat[1] = _mm_xor_ps(idirsplat_raw[1], pn);
- idirsplat[2] = _mm_xor_ps(idirsplat_raw[2], pn);
-
- const __m128 signmask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
- const __m128 shuf_identity_f = _mm_castsi128_ps(shuf_identity);
- const __m128 shuf_swap_f = _mm_castsi128_ps(shuf_swap);
- shufflexyz[0] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[0], signmask)));
- shufflexyz[1] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[1], signmask)));
- shufflexyz[2] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[2], signmask)));
-}
+#if defined(__KERNEL_AVX2__)
+ return _lzcnt_u32(x);
#else
-ccl_device_inline void gen_idirsplat_swap(const __m128 &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap,
- const float3& idir, __m128 idirsplat[3], shuffle_swap_t shufflexyz[3])
-{
- idirsplat[0] = _mm_xor_ps(_mm_set_ps1(idir.x), pn);
- idirsplat[1] = _mm_xor_ps(_mm_set_ps1(idir.y), pn);
- idirsplat[2] = _mm_xor_ps(_mm_set_ps1(idir.z), pn);
-
- shufflexyz[0] = (idir.x >= 0)? shuf_identity: shuf_swap;
- shufflexyz[1] = (idir.y >= 0)? shuf_identity: shuf_swap;
- shufflexyz[2] = (idir.z >= 0)? shuf_identity: shuf_swap;
-}
+ if (UNLIKELY(x == 0)) return 32;
+ return 31 - __bsr(x);
#endif
+}
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128 shuffle(const __m128& a, const __m128& b)
+__forceinline int __bscf(int& v)
{
- return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
+ int i = __bsf(v);
+ v &= v-1;
+ return i;
}
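__bscf is "bit scan and clear first": it returns the index of the lowest set bit and clears it (v &= v-1 drops exactly that bit). The usual pattern, e.g. walking the lanes of a movemask result (sketch):

int mask = 6;                  /* binary 0110: lanes 1 and 2 set */
while(mask) {
	int lane = __bscf(mask);   /* yields 1, then 2 */
	/* ... process that lane ... */
}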
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128 shuffle(const __m128& a)
+__forceinline unsigned int __bscf(unsigned int& v)
{
- return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(a), _MM_SHUFFLE(i3, i2, i1, i0)));
+ unsigned int i = __bsf(v);
+ v &= v-1;
+ return i;
}
-template<> __forceinline const __m128 shuffle<0, 1, 0, 1>(const __m128& a)
-{
- return _mm_movelh_ps(a, a);
+#if defined(__KERNEL_64_BIT__)
+
+__forceinline size_t __bsf(size_t v) {
+#if defined(__KERNEL_AVX2__)
+ return _tzcnt_u64(v);
+#else
+ unsigned long r = 0; _BitScanForward64(&r,v); return r;
+#endif
}
-template<> __forceinline const __m128 shuffle<2, 3, 2, 3>(const __m128& a)
-{
- return _mm_movehl_ps(a, a);
+__forceinline size_t __bsr(size_t v) {
+ unsigned long r = 0; _BitScanReverse64(&r,v); return r;
}
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128i shuffle(const __m128i& a)
-{
- return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0));
+__forceinline size_t __btc(size_t v, size_t i) {
+ size_t r = v; _bittestandcomplement64((__int64*)&r,i); return r;
}
-template<size_t i0, size_t i1, size_t i2, size_t i3> ccl_device_inline const __m128i shuffle(const __m128i& a, const __m128i& b)
-{
- return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
+__forceinline size_t __bts(size_t v, size_t i) {
+ __int64 r = v; _bittestandset64(&r,i); return r;
}
-/* Blend 2 vectors based on mask: (a[i] & mask[i]) | (b[i] & ~mask[i]) */
-#ifdef __KERNEL_SSE41__
-ccl_device_inline const __m128 blend(const __m128& mask, const __m128& a, const __m128& b)
-{
- return _mm_blendv_ps(b, a, mask);
+__forceinline size_t __btr(size_t v, size_t i) {
+ __int64 r = v; _bittestandreset64(&r,i); return r;
}
+
+__forceinline size_t bitscan(size_t v) {
+#if defined(__KERNEL_AVX2__)
+#if defined(__KERNEL_64_BIT__)
+ return _tzcnt_u64(v);
#else
-ccl_device_inline const __m128 blend(const __m128& mask, const __m128& a, const __m128& b)
-{
- return _mm_or_ps(_mm_and_ps(mask, a), _mm_andnot_ps(mask, b));
-}
+ return _tzcnt_u32(v);
#endif
+#else
+ return __bsf(v);
+#endif
+}
-/* calculate a*b+c (replacement for fused multiply-add on SSE CPUs) */
-ccl_device_inline const __m128 fma(const __m128& a, const __m128& b, const __m128& c)
+__forceinline size_t __bscf(size_t& v)
{
- return _mm_add_ps(_mm_mul_ps(a, b), c);
+ size_t i = __bsf(v);
+ v &= v-1;
+ return i;
}
-/* calculate a*b-c (replacement for fused multiply-subtract on SSE CPUs) */
-ccl_device_inline const __m128 fms(const __m128& a, const __m128& b, const __m128& c)
-{
- return _mm_sub_ps(_mm_mul_ps(a, b), c);
+#endif /* __KERNEL_64_BIT__ */
+
+#else /* _WIN32 */
+
+__forceinline unsigned int __popcnt(unsigned int in) {
+ int r = 0; asm ("popcnt %1,%0" : "=r"(r) : "r"(in)); return r;
}
-/* calculate -a*b+c (replacement for fused negated-multiply-subtract on SSE CPUs) */
-ccl_device_inline const __m128 fnma(const __m128& a, const __m128& b, const __m128& c)
-{
- return _mm_sub_ps(c, _mm_mul_ps(a, b));
+__forceinline int __bsf(int v) {
+ int r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
}
-template<size_t N> ccl_device_inline const __m128 broadcast(const __m128& a)
-{
- return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(a), _MM_SHUFFLE(N, N, N, N)));
+__forceinline int __bsr(int v) {
+ int r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
}
-template<size_t N> ccl_device_inline const __m128i broadcast(const __m128i& a)
-{
- return _mm_shuffle_epi32(a, _MM_SHUFFLE(N, N, N, N));
+__forceinline int __btc(int v, int i) {
+ int r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
}
-ccl_device_inline const __m128 uint32_to_float(const __m128i &in)
-{
- __m128i a = _mm_srli_epi32(in, 16);
- __m128i b = _mm_and_si128(in, _mm_set1_epi32(0x0000ffff));
- __m128i c = _mm_or_si128(a, _mm_set1_epi32(0x53000000));
- __m128 d = _mm_cvtepi32_ps(b);
- __m128 e = _mm_sub_ps(_mm_castsi128_ps(c), _mm_castsi128_ps(_mm_set1_epi32(0x53000000)));
- return _mm_add_ps(e, d);
+__forceinline int __bts(int v, int i) {
+ int r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
}
-template<size_t S1, size_t S2, size_t S3, size_t S4>
-ccl_device_inline const __m128 set_sign_bit(const __m128 &a)
-{
- return _mm_xor_ps(a, _mm_castsi128_ps(_mm_setr_epi32(S1 << 31, S2 << 31, S3 << 31, S4 << 31)));
+__forceinline int __btr(int v, int i) {
+ int r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
}
-#ifdef __KERNEL_WITH_SSE_ALIGN__
-ccl_device_inline const __m128 load_m128(const float4 &vec)
-{
- return _mm_load_ps(&vec.x);
+__forceinline size_t __bsf(size_t v) {
+ size_t r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
}
-ccl_device_inline const __m128 load_m128(const float3 &vec)
-{
- return _mm_load_ps(&vec.x);
+__forceinline unsigned int __bsf(unsigned int v) {
+ unsigned int r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
}
-#else
+__forceinline size_t __bsr(size_t v) {
+ size_t r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
+}
-ccl_device_inline const __m128 load_m128(const float4 &vec)
-{
- return _mm_loadu_ps(&vec.x);
+__forceinline size_t __btc(size_t v, size_t i) {
+ size_t r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
}
-ccl_device_inline const __m128 load_m128(const float3 &vec)
-{
- return _mm_loadu_ps(&vec.x);
+__forceinline size_t __bts(size_t v, size_t i) {
+ size_t r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
}
-#endif /* __KERNEL_WITH_SSE_ALIGN__ */
-ccl_device_inline const __m128 dot3_splat(const __m128& a, const __m128& b)
-{
-#ifdef __KERNEL_SSE41__
- return _mm_dp_ps(a, b, 0x7f);
+__forceinline size_t __btr(size_t v, size_t i) {
+ size_t r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
+}
+
+__forceinline int bitscan(int v) {
+#if defined(__KERNEL_AVX2__)
+ return _tzcnt_u32(v);
#else
- __m128 t = _mm_mul_ps(a, b);
- return _mm_set1_ps(((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2]);
+ return __bsf(v);
#endif
}
-/* squared length taking only specified axes into account */
-template<size_t X, size_t Y, size_t Z, size_t W>
-ccl_device_inline float len_squared(const __m128& a)
-{
-#ifndef __KERNEL_SSE41__
- float4& t = (float4 &)a;
- return (X ? t.x * t.x : 0.0f) + (Y ? t.y * t.y : 0.0f) + (Z ? t.z * t.z : 0.0f) + (W ? t.w * t.w : 0.0f);
+__forceinline unsigned int bitscan(unsigned int v) {
+#if defined(__KERNEL_AVX2__)
+ return _tzcnt_u32(v);
#else
- return _mm_cvtss_f32(_mm_dp_ps(a, a, (X << 4) | (Y << 5) | (Z << 6) | (W << 7) | 0xf));
+ return __bsf(v);
#endif
}
-ccl_device_inline float dot3(const __m128& a, const __m128& b)
-{
-#ifdef __KERNEL_SSE41__
- return _mm_cvtss_f32(_mm_dp_ps(a, b, 0x7f));
+__forceinline size_t bitscan(size_t v) {
+#if defined(__KERNEL_AVX2__)
+#if defined(__KERNEL_64_BIT__)
+ return _tzcnt_u64(v);
#else
- __m128 t = _mm_mul_ps(a, b);
- return ((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2];
+ return _tzcnt_u32(v);
+#endif
+#else
+ return __bsf(v);
#endif
}
-ccl_device_inline const __m128 len3_squared_splat(const __m128& a)
+__forceinline int clz(const int x)
{
- return dot3_splat(a, a);
+#if defined(__KERNEL_AVX2__)
+ return _lzcnt_u32(x);
+#else
+ if (UNLIKELY(x == 0)) return 32;
+ return 31 - __bsr(x);
+#endif
}
-ccl_device_inline float len3_squared(const __m128& a)
+__forceinline int __bscf(int& v)
{
- return dot3(a, a);
+ int i = bitscan(v);
+#if defined(__KERNEL_AVX2__)
+ v &= v-1;
+#else
+ v = __btc(v,i);
+#endif
+ return i;
}
-ccl_device_inline float len3(const __m128& a)
+__forceinline unsigned int __bscf(unsigned int& v)
{
- return _mm_cvtss_f32(_mm_sqrt_ss(dot3_splat(a, a)));
+ unsigned int i = bitscan(v);
+ v &= v-1;
+ return i;
}
-/* calculate shuffled cross product, useful when order of components does not matter */
-ccl_device_inline const __m128 cross_zxy(const __m128& a, const __m128& b)
+__forceinline size_t __bscf(size_t& v)
{
- return fms(a, shuffle<1, 2, 0, 3>(b), _mm_mul_ps(b, shuffle<1, 2, 0, 3>(a)));
+ size_t i = bitscan(v);
+#if defined(__KERNEL_AVX2__)
+ v &= v-1;
+#else
+ v = __btc(v,i);
+#endif
+ return i;
+}
+
+#endif /* _WIN32 */
+
+static const unsigned int BITSCAN_NO_BIT_SET_32 = 32;
+static const size_t BITSCAN_NO_BIT_SET_64 = 64;
+
+/* Emulation of SSE4.1 functions with SSE3 */
+
+#if defined(__KERNEL_SSE3__) && !defined(__KERNEL_SSE41__)
+
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _mm_blendv_ps __emu_mm_blendv_ps
+__forceinline __m128 _mm_blendv_ps( __m128 value, __m128 input, __m128 mask ) {
+ return _mm_or_ps(_mm_and_ps(mask, input), _mm_andnot_ps(mask, value));
+}
+
+#define _mm_blend_ps __emu_mm_blend_ps
+__forceinline __m128 _mm_blend_ps( __m128 value, __m128 input, const int mask ) {
+ assert(mask < 0x10); return _mm_blendv_ps(value, input, _mm_lookupmask_ps[mask]);
+}
+
+#define _mm_blendv_epi8 __emu_mm_blendv_epi8
+__forceinline __m128i _mm_blendv_epi8( __m128i value, __m128i input, __m128i mask ) {
+ return _mm_or_si128(_mm_and_si128(mask, input), _mm_andnot_si128(mask, value));
+}
+
+#define _mm_mullo_epi32 __emu_mm_mullo_epi32
+__forceinline __m128i _mm_mullo_epi32( __m128i value, __m128i input ) {
+ __m128i rvalue;
+ char* _r = (char*)(&rvalue + 1);
+ char* _v = (char*)(& value + 1);
+ char* _i = (char*)(& input + 1);
+ for ( ssize_t i = -16 ; i != 0 ; i += 4 ) *((int32_t*)(_r + i)) = *((int32_t*)(_v + i)) * *((int32_t*)(_i + i));
+ return rvalue;
+}
+
+#define _mm_min_epi32 __emu_mm_min_epi32
+__forceinline __m128i _mm_min_epi32( __m128i value, __m128i input ) {
+ return _mm_blendv_epi8(input, value, _mm_cmplt_epi32(value, input));
+}
+
+#define _mm_max_epi32 __emu_mm_max_epi32
+__forceinline __m128i _mm_max_epi32( __m128i value, __m128i input ) {
+ return _mm_blendv_epi8(value, input, _mm_cmplt_epi32(value, input));
+}
+
+#define _mm_extract_epi32 __emu_mm_extract_epi32
+__forceinline int _mm_extract_epi32( __m128i input, const int index ) {
+ switch ( index ) {
+ case 0: return _mm_cvtsi128_si32(input);
+ case 1: return _mm_cvtsi128_si32(_mm_shuffle_epi32(input, _MM_SHUFFLE(1, 1, 1, 1)));
+ case 2: return _mm_cvtsi128_si32(_mm_shuffle_epi32(input, _MM_SHUFFLE(2, 2, 2, 2)));
+ case 3: return _mm_cvtsi128_si32(_mm_shuffle_epi32(input, _MM_SHUFFLE(3, 3, 3, 3)));
+ default: assert(false); return 0;
+ }
}
-ccl_device_inline const __m128 cross(const __m128& a, const __m128& b)
+#define _mm_insert_epi32 __emu_mm_insert_epi32
+__forceinline __m128i _mm_insert_epi32( __m128i value, int input, const int index ) {
+ assert(index >= 0 && index < 4); ((int*)&value)[index] = input; return value;
+}
+
+#define _mm_extract_ps __emu_mm_extract_ps
+__forceinline int _mm_extract_ps( __m128 input, const int index ) {
+ int32_t* ptr = (int32_t*)&input; return ptr[index];
+}
+
+#define _mm_insert_ps __emu_mm_insert_ps
+__forceinline __m128 _mm_insert_ps( __m128 value, __m128 input, const int index )
+{ assert(index < 0x100); ((float*)&value)[(index >> 4)&0x3] = ((float*)&input)[index >> 6]; return _mm_andnot_ps(_mm_lookupmask_ps[index&0xf], value); }
+
+#define _mm_round_ps __emu_mm_round_ps
+__forceinline __m128 _mm_round_ps( __m128 value, const int flags )
{
- return shuffle<1, 2, 0, 3>(cross_zxy(a, b));
+ switch ( flags )
+ {
+ case _MM_FROUND_TO_NEAREST_INT: return _mm_cvtepi32_ps(_mm_cvtps_epi32(value));
+ case _MM_FROUND_TO_NEG_INF : return _mm_cvtepi32_ps(_mm_cvtps_epi32(_mm_add_ps(value, _mm_set1_ps(-0.5f))));
+ case _MM_FROUND_TO_POS_INF : return _mm_cvtepi32_ps(_mm_cvtps_epi32(_mm_add_ps(value, _mm_set1_ps( 0.5f))));
+ case _MM_FROUND_TO_ZERO : return _mm_cvtepi32_ps(_mm_cvttps_epi32(value));
+ }
+ return value;
+}
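
A caveat on the emulated _mm_round_ps above (editorial sketch, not in the commit): the _MM_FROUND_TO_NEG_INF and _MM_FROUND_TO_POS_INF cases add -0.5f/+0.5f and convert in the default round-to-nearest-even mode, which disagrees with a true floor/ceil at exact integers and at half-integer ties. In scalar form:

    /* Sketch only: scalar equivalent of the round-down emulation above;
     * nearbyint() honours the current rounding mode (round-to-nearest-even
     * by default), like _mm_cvtps_epi32. */
    #include <cmath>
    static float emu_round_down(float x) { return (float)std::nearbyint(x - 0.5f); }
    /* emu_round_down(1.0f) gives 0.0f, because 1.0f - 0.5f = 0.5f ties to
     * the even value 0.0f, whereas a true floor(1.0f) is 1.0f. */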
+
+#ifdef _M_X64
+#define _mm_insert_epi64 __emu_mm_insert_epi64
+__forceinline __m128i _mm_insert_epi64( __m128i value, __int64 input, const int index ) {
+ assert(size_t(index) < 4); ((__int64*)&value)[index] = input; return value;
}
+#define _mm_extract_epi64 __emu_mm_extract_epi64
+__forceinline __int64 _mm_extract_epi64( __m128i input, const int index ) {
+ assert(size_t(index) < 2);
+ return index == 0 ? _mm_cvtsi128_si64x(input) : _mm_cvtsi128_si64x(_mm_unpackhi_epi64(input, input));
+}
+#endif
+
+#endif
+
#endif /* __KERNEL_SSE2__ */
CCL_NAMESPACE_END
-#endif /* __UTIL_SIMD_H__ */
+#include "util_math.h"
+#include "util_sseb.h"
+#include "util_ssei.h"
+#include "util_ssef.h"
+
+#endif /* __UTIL_SIMD_TYPES_H__ */
diff --git a/intern/cycles/util/util_sseb.h b/intern/cycles/util/util_sseb.h
new file mode 100644
index 00000000000..be510256dd3
--- /dev/null
+++ b/intern/cycles/util/util_sseb.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+#ifndef __UTIL_SSEB_H__
+#define __UTIL_SSEB_H__
+
+CCL_NAMESPACE_BEGIN
+
+#ifdef __KERNEL_SSE2__
+
+/*! 4-wide SSE bool type. */
+struct sseb
+{
+ typedef sseb Mask; // mask type
+ typedef ssei Int; // int type
+ typedef ssef Float; // float type
+
+ enum { size = 4 }; // number of SIMD elements
+ union { __m128 m128; int32_t v[4]; }; // data
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Constructors, Assignment & Cast Operators
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline sseb ( ) {}
+ __forceinline sseb ( const sseb& other ) { m128 = other.m128; }
+ __forceinline sseb& operator=( const sseb& other ) { m128 = other.m128; return *this; }
+
+ __forceinline sseb( const __m128 input ) : m128(input) {}
+ __forceinline operator const __m128&( void ) const { return m128; }
+ __forceinline operator const __m128i( void ) const { return _mm_castps_si128(m128); }
+ __forceinline operator const __m128d( void ) const { return _mm_castps_pd(m128); }
+
+ __forceinline sseb ( bool a )
+ : m128(_mm_lookupmask_ps[(size_t(a) << 3) | (size_t(a) << 2) | (size_t(a) << 1) | size_t(a)]) {}
+ __forceinline sseb ( bool a, bool b)
+ : m128(_mm_lookupmask_ps[(size_t(b) << 3) | (size_t(a) << 2) | (size_t(b) << 1) | size_t(a)]) {}
+ __forceinline sseb ( bool a, bool b, bool c, bool d)
+ : m128(_mm_lookupmask_ps[(size_t(d) << 3) | (size_t(c) << 2) | (size_t(b) << 1) | size_t(a)]) {}
+ __forceinline sseb(int mask) {
+ assert(mask >= 0 && mask < 16);
+ m128 = _mm_lookupmask_ps[mask];
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Constants
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline sseb( FalseTy ) : m128(_mm_setzero_ps()) {}
+ __forceinline sseb( TrueTy ) : m128(_mm_castsi128_ps(_mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()))) {}
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Array Access
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline bool operator []( const size_t i ) const { assert(i < 4); return (_mm_movemask_ps(m128) >> i) & 1; }
+ __forceinline int32_t& operator []( const size_t i ) { assert(i < 4); return v[i]; }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+/// Unary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator !( const sseb& a ) { return _mm_xor_ps(a, sseb(True)); }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Binary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator &( const sseb& a, const sseb& b ) { return _mm_and_ps(a, b); }
+__forceinline const sseb operator |( const sseb& a, const sseb& b ) { return _mm_or_ps (a, b); }
+__forceinline const sseb operator ^( const sseb& a, const sseb& b ) { return _mm_xor_ps(a, b); }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Assignment Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator &=( sseb& a, const sseb& b ) { return a = a & b; }
+__forceinline const sseb operator |=( sseb& a, const sseb& b ) { return a = a | b; }
+__forceinline const sseb operator ^=( sseb& a, const sseb& b ) { return a = a ^ b; }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Comparison Operators + Select
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator !=( const sseb& a, const sseb& b ) { return _mm_xor_ps(a, b); }
+__forceinline const sseb operator ==( const sseb& a, const sseb& b ) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
+
+__forceinline const sseb select( const sseb& m, const sseb& t, const sseb& f ) {
+#if defined(__KERNEL_SSE41__)
+ return _mm_blendv_ps(f, t, m);
+#else
+ return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Movement/Shifting/Shuffling Functions
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb unpacklo( const sseb& a, const sseb& b ) { return _mm_unpacklo_ps(a, b); }
+__forceinline const sseb unpackhi( const sseb& a, const sseb& b ) { return _mm_unpackhi_ps(a, b); }
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const sseb shuffle( const sseb& a ) {
+ return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0));
+}
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const sseb shuffle( const sseb& a, const sseb& b ) {
+ return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
+}
+
+#if defined(__KERNEL_SSE3__)
+template<> __forceinline const sseb shuffle<0, 0, 2, 2>( const sseb& a ) { return _mm_moveldup_ps(a); }
+template<> __forceinline const sseb shuffle<1, 1, 3, 3>( const sseb& a ) { return _mm_movehdup_ps(a); }
+template<> __forceinline const sseb shuffle<0, 1, 0, 1>( const sseb& a ) { return _mm_castpd_ps(_mm_movedup_pd (a)); }
+#endif
+
+#if defined(__KERNEL_SSE41__)
+template<size_t dst, size_t src, size_t clr> __forceinline const sseb insert( const sseb& a, const sseb& b ) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
+template<size_t dst, size_t src> __forceinline const sseb insert( const sseb& a, const sseb& b ) { return insert<dst, src, 0>(a, b); }
+template<size_t dst> __forceinline const sseb insert( const sseb& a, const bool b ) { return insert<dst,0>(a, sseb(b)); }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Reduction Operations
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_SSE41__)
+__forceinline size_t popcnt( const sseb& a ) { return __popcnt(_mm_movemask_ps(a)); }
+#else
+__forceinline size_t popcnt( const sseb& a ) { return bool(a[0])+bool(a[1])+bool(a[2])+bool(a[3]); }
+#endif
+
+__forceinline bool reduce_and( const sseb& a ) { return _mm_movemask_ps(a) == 0xf; }
+__forceinline bool reduce_or ( const sseb& a ) { return _mm_movemask_ps(a) != 0x0; }
+__forceinline bool all ( const sseb& b ) { return _mm_movemask_ps(b) == 0xf; }
+__forceinline bool any ( const sseb& b ) { return _mm_movemask_ps(b) != 0x0; }
+__forceinline bool none ( const sseb& b ) { return _mm_movemask_ps(b) == 0x0; }
+
+__forceinline size_t movemask( const sseb& a ) { return _mm_movemask_ps(a); }
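
For illustration, a short sketch (not part of this commit) of typical sseb usage; it assumes the ssef type with its comparison operators and select(), declared in util_ssef.h below.

    /* Sketch only: per-lane clamp of negative values to zero, with a
     * scalar early-out based on a single movemask test. */
    __forceinline ssef clamp_negative_to_zero(const ssef& x)
    {
        const sseb mask = x < ssef(0.0f);   /* per-lane comparison -> mask */
        if(none(mask))
            return x;
        return select(mask, ssef(0.0f), x); /* branchless per-lane blend */
    }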
+
+#endif
+
+CCL_NAMESPACE_END
+
+#endif
+
diff --git a/intern/cycles/util/util_ssef.h b/intern/cycles/util/util_ssef.h
new file mode 100644
index 00000000000..f4236cc616e
--- /dev/null
+++ b/intern/cycles/util/util_ssef.h
@@ -0,0 +1,588 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+#ifndef __UTIL_SSEF_H__
+#define __UTIL_SSEF_H__
+
+CCL_NAMESPACE_BEGIN
+
+#ifdef __KERNEL_SSE2__
+
+/*! 4-wide SSE float type. */
+struct ssef
+{
+ typedef sseb Mask; // mask type
+ typedef ssei Int; // int type
+ typedef ssef Float; // float type
+
+ enum { size = 4 }; // number of SIMD elements
+ union { __m128 m128; float f[4]; int i[4]; }; // data
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Constructors, Assignment & Cast Operators
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline ssef () {}
+ __forceinline ssef (const ssef& other) { m128 = other.m128; }
+ __forceinline ssef& operator=(const ssef& other) { m128 = other.m128; return *this; }
+
+ __forceinline ssef(const __m128 a) : m128(a) {}
+ __forceinline operator const __m128&(void) const { return m128; }
+ __forceinline operator __m128&(void) { return m128; }
+
+ __forceinline ssef (float a) : m128(_mm_set1_ps(a)) {}
+ __forceinline ssef (float a, float b, float c, float d) : m128(_mm_setr_ps(a, b, c, d)) {}
+
+ __forceinline explicit ssef(const __m128i a) : m128(_mm_cvtepi32_ps(a)) {}
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Loads and Stores
+ ////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_AVX__)
+ static __forceinline ssef broadcast(const void* const a) { return _mm_broadcast_ss((float*)a); }
+#else
+ static __forceinline ssef broadcast(const void* const a) { return _mm_set1_ps(*(float*)a); }
+#endif
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Array Access
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline const float& operator [](const size_t i) const { assert(i < 4); return f[i]; }
+ __forceinline float& operator [](const size_t i) { assert(i < 4); return f[i]; }
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+/// Unary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssef cast (const __m128i& a) { return _mm_castsi128_ps(a); }
+__forceinline const ssef operator +(const ssef& a) { return a; }
+__forceinline const ssef operator -(const ssef& a) { return _mm_xor_ps(a.m128, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
+__forceinline const ssef abs (const ssef& a) { return _mm_and_ps(a.m128, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); }
+#if defined(__KERNEL_SSE41__)
+__forceinline const ssef sign (const ssef& a) { return _mm_blendv_ps(ssef(1.0f), -ssef(1.0f), _mm_cmplt_ps(a,ssef(0.0f))); }
+#endif
+__forceinline const ssef signmsk (const ssef& a) { return _mm_and_ps(a.m128,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
+
+__forceinline const ssef rcp (const ssef& a) {
+ const ssef r = _mm_rcp_ps(a.m128);
+ return _mm_sub_ps(_mm_add_ps(r, r), _mm_mul_ps(_mm_mul_ps(r, r), a));
+}
+__forceinline const ssef sqr (const ssef& a) { return _mm_mul_ps(a,a); }
+__forceinline const ssef mm_sqrt(const ssef& a) { return _mm_sqrt_ps(a.m128); }
+__forceinline const ssef rsqrt(const ssef& a) {
+ const ssef r = _mm_rsqrt_ps(a.m128);
+ return _mm_add_ps(_mm_mul_ps(_mm_set_ps(1.5f, 1.5f, 1.5f, 1.5f), r),
+ _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set_ps(-0.5f, -0.5f, -0.5f, -0.5f)), r), _mm_mul_ps(r, r)));
+}
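
Both helpers above refine the hardware estimate with one Newton-Raphson step: rcpps/rsqrtps are only accurate to roughly 12 bits, and a single iteration approximately doubles the number of correct bits. The scalar form of the same refinement (a sketch, not in the commit), with y0 standing for the hardware estimate:

    /* Sketch only: scalar Newton-Raphson steps matching rcp() and rsqrt(). */
    static float refine_rcp  (float a, float y0) { return 2.0f*y0 - a*y0*y0; }          /* y1 ~ 1/a */
    static float refine_rsqrt(float a, float y0) { return 1.5f*y0 - 0.5f*a*y0*y0*y0; }  /* y1 ~ 1/sqrt(a) */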
+
+////////////////////////////////////////////////////////////////////////////////
+/// Binary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssef operator +(const ssef& a, const ssef& b) { return _mm_add_ps(a.m128, b.m128); }
+__forceinline const ssef operator +(const ssef& a, const float& b) { return a + ssef(b); }
+__forceinline const ssef operator +(const float& a, const ssef& b) { return ssef(a) + b; }
+
+__forceinline const ssef operator -(const ssef& a, const ssef& b) { return _mm_sub_ps(a.m128, b.m128); }
+__forceinline const ssef operator -(const ssef& a, const float& b) { return a - ssef(b); }
+__forceinline const ssef operator -(const float& a, const ssef& b) { return ssef(a) - b; }
+
+__forceinline const ssef operator *(const ssef& a, const ssef& b) { return _mm_mul_ps(a.m128, b.m128); }
+__forceinline const ssef operator *(const ssef& a, const float& b) { return a * ssef(b); }
+__forceinline const ssef operator *(const float& a, const ssef& b) { return ssef(a) * b; }
+
+__forceinline const ssef operator /(const ssef& a, const ssef& b) { return _mm_div_ps(a.m128,b.m128); }
+__forceinline const ssef operator /(const ssef& a, const float& b) { return a/ssef(b); }
+__forceinline const ssef operator /(const float& a, const ssef& b) { return ssef(a)/b; }
+
+__forceinline const ssef operator^(const ssef& a, const ssef& b) { return _mm_xor_ps(a.m128,b.m128); }
+__forceinline const ssef operator^(const ssef& a, const ssei& b) { return _mm_xor_ps(a.m128,_mm_castsi128_ps(b.m128)); }
+
+__forceinline const ssef operator&(const ssef& a, const ssef& b) { return _mm_and_ps(a.m128,b.m128); }
+__forceinline const ssef operator&(const ssef& a, const ssei& b) { return _mm_and_ps(a.m128,_mm_castsi128_ps(b.m128)); }
+
+__forceinline const ssef andnot(const ssef& a, const ssef& b) { return _mm_andnot_ps(a.m128,b.m128); }
+
+__forceinline const ssef min(const ssef& a, const ssef& b) { return _mm_min_ps(a.m128,b.m128); }
+__forceinline const ssef min(const ssef& a, const float& b) { return _mm_min_ps(a.m128,ssef(b)); }
+__forceinline const ssef min(const float& a, const ssef& b) { return _mm_min_ps(ssef(a),b.m128); }
+
+__forceinline const ssef max(const ssef& a, const ssef& b) { return _mm_max_ps(a.m128,b.m128); }
+__forceinline const ssef max(const ssef& a, const float& b) { return _mm_max_ps(a.m128,ssef(b)); }
+__forceinline const ssef max(const float& a, const ssef& b) { return _mm_max_ps(ssef(a),b.m128); }
+
+#if defined(__KERNEL_SSE41__)
+__forceinline ssef mini(const ssef& a, const ssef& b) {
+ const ssei ai = _mm_castps_si128(a);
+ const ssei bi = _mm_castps_si128(b);
+ const ssei ci = _mm_min_epi32(ai,bi);
+ return _mm_castsi128_ps(ci);
+}
+#endif
+
+#if defined(__KERNEL_SSE41__)
+__forceinline ssef maxi(const ssef& a, const ssef& b) {
+ const ssei ai = _mm_castps_si128(a);
+ const ssei bi = _mm_castps_si128(b);
+ const ssei ci = _mm_max_epi32(ai,bi);
+ return _mm_castsi128_ps(ci);
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Ternary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_AVX2__)
+__forceinline const ssef madd (const ssef& a, const ssef& b, const ssef& c) { return _mm_fmadd_ps(a,b,c); }
+__forceinline const ssef msub (const ssef& a, const ssef& b, const ssef& c) { return _mm_fmsub_ps(a,b,c); }
+__forceinline const ssef nmadd(const ssef& a, const ssef& b, const ssef& c) { return _mm_fnmadd_ps(a,b,c); }
+__forceinline const ssef nmsub(const ssef& a, const ssef& b, const ssef& c) { return _mm_fnmsub_ps(a,b,c); }
+#else
+__forceinline const ssef madd (const ssef& a, const ssef& b, const ssef& c) { return a*b+c; }
+__forceinline const ssef msub (const ssef& a, const ssef& b, const ssef& c) { return a*b-c; }
+__forceinline const ssef nmadd(const ssef& a, const ssef& b, const ssef& c) { return c-a*b; } /* matches _mm_fnmadd_ps: -(a*b)+c */
+__forceinline const ssef nmsub(const ssef& a, const ssef& b, const ssef& c) { return -a*b-c; } /* matches _mm_fnmsub_ps: -(a*b)-c */
+#endif
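
For reference (editorial note): the scalar fallbacks follow the sign conventions of the FMA intrinsics they replace, with _mm_fnmadd_ps computing -(a*b)+c and _mm_fnmsub_ps computing -(a*b)-c. A scalar sketch of all four forms:

    /* Sketch only: scalar reference for the four fused multiply forms. */
    static float ref_madd (float a, float b, float c) { return  a*b + c; }
    static float ref_msub (float a, float b, float c) { return  a*b - c; }
    static float ref_nmadd(float a, float b, float c) { return -a*b + c; }
    static float ref_nmsub(float a, float b, float c) { return -a*b - c; }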
+
+////////////////////////////////////////////////////////////////////////////////
+/// Assignment Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssef& operator +=(ssef& a, const ssef& b) { return a = a + b; }
+__forceinline ssef& operator +=(ssef& a, const float& b) { return a = a + b; }
+
+__forceinline ssef& operator -=(ssef& a, const ssef& b) { return a = a - b; }
+__forceinline ssef& operator -=(ssef& a, const float& b) { return a = a - b; }
+
+__forceinline ssef& operator *=(ssef& a, const ssef& b) { return a = a * b; }
+__forceinline ssef& operator *=(ssef& a, const float& b) { return a = a * b; }
+
+__forceinline ssef& operator /=(ssef& a, const ssef& b) { return a = a / b; }
+__forceinline ssef& operator /=(ssef& a, const float& b) { return a = a / b; }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Comparison Operators + Select
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator ==(const ssef& a, const ssef& b) { return _mm_cmpeq_ps(a.m128, b.m128); }
+__forceinline const sseb operator ==(const ssef& a, const float& b) { return a == ssef(b); }
+__forceinline const sseb operator ==(const float& a, const ssef& b) { return ssef(a) == b; }
+
+__forceinline const sseb operator !=(const ssef& a, const ssef& b) { return _mm_cmpneq_ps(a.m128, b.m128); }
+__forceinline const sseb operator !=(const ssef& a, const float& b) { return a != ssef(b); }
+__forceinline const sseb operator !=(const float& a, const ssef& b) { return ssef(a) != b; }
+
+__forceinline const sseb operator <(const ssef& a, const ssef& b) { return _mm_cmplt_ps(a.m128, b.m128); }
+__forceinline const sseb operator <(const ssef& a, const float& b) { return a < ssef(b); }
+__forceinline const sseb operator <(const float& a, const ssef& b) { return ssef(a) < b; }
+
+__forceinline const sseb operator >=(const ssef& a, const ssef& b) { return _mm_cmpnlt_ps(a.m128, b.m128); }
+__forceinline const sseb operator >=(const ssef& a, const float& b) { return a >= ssef(b); }
+__forceinline const sseb operator >=(const float& a, const ssef& b) { return ssef(a) >= b; }
+
+__forceinline const sseb operator >(const ssef& a, const ssef& b) { return _mm_cmpnle_ps(a.m128, b.m128); }
+__forceinline const sseb operator >(const ssef& a, const float& b) { return a > ssef(b); }
+__forceinline const sseb operator >(const float& a, const ssef& b) { return ssef(a) > b; }
+
+__forceinline const sseb operator <=(const ssef& a, const ssef& b) { return _mm_cmple_ps(a.m128, b.m128); }
+__forceinline const sseb operator <=(const ssef& a, const float& b) { return a <= ssef(b); }
+__forceinline const sseb operator <=(const float& a, const ssef& b) { return ssef(a) <= b; }
+
+__forceinline const ssef select(const sseb& m, const ssef& t, const ssef& f) {
+#ifdef __KERNEL_SSE41__
+ return _mm_blendv_ps(f, t, m);
+#else
+ return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
+#endif
+}
+
+__forceinline const ssef select(const ssef& m, const ssef& t, const ssef& f) {
+#ifdef __KERNEL_SSE41__
+ return _mm_blendv_ps(f, t, m);
+#else
+ return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
+#endif
+}
+
+__forceinline const ssef select(const int mask, const ssef& t, const ssef& f) {
+#if defined(__KERNEL_SSE41__) && ((!defined(__clang__) && !defined(_MSC_VER)) || defined(__INTEL_COMPILER))
+ return _mm_blend_ps(f, t, mask);
+#else
+ return select(sseb(mask),t,f);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Rounding Functions
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_SSE41__)
+__forceinline const ssef round_even(const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
+__forceinline const ssef round_down(const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
+__forceinline const ssef round_up (const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); }
+__forceinline const ssef round_zero(const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO ); }
+__forceinline const ssef floor (const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
+__forceinline const ssef ceil (const ssef& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); }
+#endif
+
+__forceinline ssei truncatei(const ssef& a) {
+ return _mm_cvttps_epi32(a.m128);
+}
+
+__forceinline ssei floori(const ssef& a) {
+#if defined(__KERNEL_SSE41__)
+ return ssei(floor(a));
+#else
+ return ssei(a-ssef(0.5f));
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Movement/Shifting/Shuffling Functions
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssef unpacklo(const ssef& a, const ssef& b) { return _mm_unpacklo_ps(a.m128, b.m128); }
+__forceinline ssef unpackhi(const ssef& a, const ssef& b) { return _mm_unpackhi_ps(a.m128, b.m128); }
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssef shuffle(const ssef& b) {
+ return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(b), _MM_SHUFFLE(i3, i2, i1, i0)));
+}
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssef shuffle(const ssef& a, const ssef& b) {
+ return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
+}
+
+#if defined(__KERNEL_SSSE3__)
+__forceinline const ssef shuffle8(const ssef& a, const ssei& shuf) {
+ return _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(a), shuf));
+}
+#endif
+
+#if defined(__KERNEL_SSE3__)
+template<> __forceinline const ssef shuffle<0, 0, 2, 2>(const ssef& b) { return _mm_moveldup_ps(b); }
+template<> __forceinline const ssef shuffle<1, 1, 3, 3>(const ssef& b) { return _mm_movehdup_ps(b); }
+template<> __forceinline const ssef shuffle<0, 1, 0, 1>(const ssef& b) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(b))); }
+#endif
+
+template<size_t i0> __forceinline const ssef shuffle(const ssef& b) {
+ return shuffle<i0,i0,i0,i0>(b);
+}
+
+#if defined(__KERNEL_SSE41__) && !defined(__GNUC__)
+template<size_t i> __forceinline float extract (const ssef& a) { return _mm_cvtss_f32(_mm_extract_ps(a,i)); }
+#else
+template<size_t i> __forceinline float extract (const ssef& a) { return _mm_cvtss_f32(shuffle<i,i,i,i>(a)); }
+#endif
+template<> __forceinline float extract<0>(const ssef& a) { return _mm_cvtss_f32(a); }
+
+#if defined(__KERNEL_SSE41__)
+template<size_t dst, size_t src, size_t clr> __forceinline const ssef insert(const ssef& a, const ssef& b) { return _mm_insert_ps(a, b,(dst << 4) |(src << 6) | clr); }
+template<size_t dst, size_t src> __forceinline const ssef insert(const ssef& a, const ssef& b) { return insert<dst, src, 0>(a, b); }
+template<size_t dst> __forceinline const ssef insert(const ssef& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); }
+#else
+template<size_t dst> __forceinline const ssef insert(const ssef& a, const float b) { ssef c = a; c[dst] = b; return c; }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Transpose
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline void transpose(const ssef& r0, const ssef& r1, const ssef& r2, const ssef& r3, ssef& c0, ssef& c1, ssef& c2, ssef& c3)
+{
+ ssef l02 = unpacklo(r0,r2);
+ ssef h02 = unpackhi(r0,r2);
+ ssef l13 = unpacklo(r1,r3);
+ ssef h13 = unpackhi(r1,r3);
+ c0 = unpacklo(l02,l13);
+ c1 = unpackhi(l02,l13);
+ c2 = unpacklo(h02,h13);
+ c3 = unpackhi(h02,h13);
+}
+
+__forceinline void transpose(const ssef& r0, const ssef& r1, const ssef& r2, const ssef& r3, ssef& c0, ssef& c1, ssef& c2)
+{
+ ssef l02 = unpacklo(r0,r2);
+ ssef h02 = unpackhi(r0,r2);
+ ssef l13 = unpacklo(r1,r3);
+ ssef h13 = unpackhi(r1,r3);
+ c0 = unpacklo(l02,l13);
+ c1 = unpackhi(l02,l13);
+ c2 = unpacklo(h02,h13);
+}
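
For illustration (a sketch, not part of the commit): transpose() turns four xyzw registers in array-of-structures layout into structure-of-arrays form, one register per component, which is the layout SIMD math over four points at a time wants.

    /* Sketch only: AoS -> SoA conversion for four points, assuming the
     * float4 type from util_types.h and load4f() from this header. */
    __forceinline void points_to_soa(const float4 p[4], ssef& x, ssef& y, ssef& z)
    {
        transpose(load4f(p[0]), load4f(p[1]), load4f(p[2]), load4f(p[3]), x, y, z);
    }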
+
+////////////////////////////////////////////////////////////////////////////////
+/// Reductions
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssef vreduce_min(const ssef& v) { ssef h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
+__forceinline const ssef vreduce_max(const ssef& v) { ssef h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
+__forceinline const ssef vreduce_add(const ssef& v) { ssef h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }
+
+__forceinline float reduce_min(const ssef& v) { return _mm_cvtss_f32(vreduce_min(v)); }
+__forceinline float reduce_max(const ssef& v) { return _mm_cvtss_f32(vreduce_max(v)); }
+__forceinline float reduce_add(const ssef& v) { return _mm_cvtss_f32(vreduce_add(v)); }
+
+__forceinline size_t select_min(const ssef& v) { return __bsf(movemask(v == vreduce_min(v))); }
+__forceinline size_t select_max(const ssef& v) { return __bsf(movemask(v == vreduce_max(v))); }
+
+__forceinline size_t select_min(const sseb& valid, const ssef& v) { const ssef a = select(valid,v,ssef(pos_inf)); return __bsf(movemask(valid &(a == vreduce_min(a)))); }
+__forceinline size_t select_max(const sseb& valid, const ssef& v) { const ssef a = select(valid,v,ssef(neg_inf)); return __bsf(movemask(valid &(a == vreduce_max(a)))); }
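
select_min() and select_max() combine the pieces above: reduce the register to a uniform extreme, compare every lane against it, and bit-scan the movemask for the first matching lane. A usage sketch (not in the commit):

    /* Sketch only: index 0..3 of the smallest of four values. */
    __forceinline size_t argmin4(float a, float b, float c, float d)
    {
        return select_min(ssef(a, b, c, d));
    }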
+
+////////////////////////////////////////////////////////////////////////////////
+/// Memory load and store operations
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssef load4f(const float4& a) {
+#ifdef __KERNEL_WITH_SSE_ALIGN__
+ return _mm_load_ps(&a.x);
+#else
+ return _mm_loadu_ps(&a.x);
+#endif
+}
+
+__forceinline ssef load4f(const float3& a) {
+#ifdef __KERNEL_WITH_SSE_ALIGN__
+ return _mm_load_ps(&a.x);
+#else
+ return _mm_loadu_ps(&a.x);
+#endif
+}
+
+__forceinline ssef load4f(const void* const a) {
+ return _mm_load_ps((float*)a);
+}
+
+__forceinline ssef load1f_first(const float a) {
+ return _mm_set_ss(a);
+}
+
+__forceinline void store4f(void* ptr, const ssef& v) {
+ _mm_store_ps((float*)ptr,v);
+}
+
+__forceinline ssef loadu4f(const void* const a) {
+ return _mm_loadu_ps((float*)a);
+}
+
+__forceinline void storeu4f(void* ptr, const ssef& v) {
+ _mm_storeu_ps((float*)ptr,v);
+}
+
+__forceinline void store4f(const sseb& mask, void* ptr, const ssef& f) {
+#if defined(__KERNEL_AVX__)
+ _mm_maskstore_ps((float*)ptr,(__m128i)mask,f);
+#else
+ *(ssef*)ptr = select(mask,f,*(ssef*)ptr);
+#endif
+}
+
+__forceinline ssef load4f_nt(void* ptr) {
+#if defined(__KERNEL_SSE41__)
+ return _mm_castsi128_ps(_mm_stream_load_si128((__m128i*)ptr));
+#else
+ return _mm_load_ps((float*)ptr);
+#endif
+}
+
+__forceinline void store4f_nt(void* ptr, const ssef& v) {
+#if defined(__KERNEL_SSE41__)
+ _mm_stream_ps((float*)ptr,v);
+#else
+ _mm_store_ps((float*)ptr,v);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Euclidean Space Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline float dot(const ssef& a, const ssef& b) {
+ return reduce_add(a*b);
+}
+
+/* calculate shuffled cross product, useful when order of components does not matter */
+__forceinline ssef cross_zxy(const ssef& a, const ssef& b)
+{
+ const ssef a0 = a;
+ const ssef b0 = shuffle<1,2,0,3>(b);
+ const ssef a1 = shuffle<1,2,0,3>(a);
+ const ssef b1 = b;
+ return msub(a0,b0,a1*b1);
+}
+
+__forceinline ssef cross(const ssef& a, const ssef& b)
+{
+ return shuffle<1,2,0,3>(cross_zxy(a, b));
+}
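
Why the shuffles work (editorial note): with s = shuffle<1,2,0,3>, cross_zxy() computes a*s(b) - s(a)*b, whose lanes hold the z, x and y components of the cross product; cross() applies s once more to restore xyzw order, for three shuffles in total where a direct formulation needs four. Scalar reference (a sketch):

    /* Sketch only: lane i of cross_zxy() holds component (z, x, y)[i]
     * of the conventional cross product. */
    static void ref_cross_zxy(const float a[3], const float b[3], float r_zxy[3])
    {
        r_zxy[0] = a[0]*b[1] - a[1]*b[0]; /* z */
        r_zxy[1] = a[1]*b[2] - a[2]*b[1]; /* x */
        r_zxy[2] = a[2]*b[0] - a[0]*b[2]; /* y */
    }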
+
+ccl_device_inline const ssef dot3_splat(const ssef& a, const ssef& b)
+{
+#ifdef __KERNEL_SSE41__
+ return _mm_dp_ps(a.m128, b.m128, 0x7f);
+#else
+ ssef t = a * b;
+ return ssef(((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2]);
+#endif
+}
+
+/* squared length taking only specified axes into account */
+template<size_t X, size_t Y, size_t Z, size_t W>
+ccl_device_inline float len_squared(const ssef& a)
+{
+#ifndef __KERNEL_SSE41__
+ float4& t = (float4 &)a;
+ return (X ? t.x * t.x : 0.0f) + (Y ? t.y * t.y : 0.0f) + (Z ? t.z * t.z : 0.0f) + (W ? t.w * t.w : 0.0f);
+#else
+ return extract<0>(ssef(_mm_dp_ps(a.m128, a.m128, (X << 4) | (Y << 5) | (Z << 6) | (W << 7) | 0xf)));
+#endif
+}
+
+ccl_device_inline float dot3(const ssef& a, const ssef& b)
+{
+#ifdef __KERNEL_SSE41__
+ return extract<0>(ssef(_mm_dp_ps(a.m128, b.m128, 0x7f)));
+#else
+ ssef t = a * b;
+ return ((float*)&t)[0] + ((float*)&t)[1] + ((float*)&t)[2];
+#endif
+}
+
+ccl_device_inline const ssef len3_squared_splat(const ssef& a)
+{
+ return dot3_splat(a, a);
+}
+
+ccl_device_inline float len3_squared(const ssef& a)
+{
+ return dot3(a, a);
+}
+
+ccl_device_inline float len3(const ssef& a)
+{
+ return extract<0>(mm_sqrt(dot3_splat(a, a)));
+}
+
+/* SSE shuffle utility functions */
+
+#ifdef __KERNEL_SSSE3__
+
+/* faster version for SSSE3 */
+typedef ssei shuffle_swap_t;
+
+ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void)
+{
+ return _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void)
+{
+ return _mm_set_epi8(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+}
+
+ccl_device_inline const ssef shuffle_swap(const ssef& a, const shuffle_swap_t& shuf)
+{
+ return cast(_mm_shuffle_epi8(cast(a), shuf));
+}
+
+#else
+
+/* somewhat slower version for SSE2 */
+typedef int shuffle_swap_t;
+
+ccl_device_inline const shuffle_swap_t shuffle_swap_identity(void)
+{
+ return 0;
+}
+
+ccl_device_inline const shuffle_swap_t shuffle_swap_swap(void)
+{
+ return 1;
+}
+
+ccl_device_inline const ssef shuffle_swap(const ssef& a, shuffle_swap_t shuf)
+{
+ /* shuffle value must be a constant, so we need to branch */
+ if(shuf)
+ return ssef(_mm_shuffle_ps(a.m128, a.m128, _MM_SHUFFLE(1, 0, 3, 2)));
+ else
+ return ssef(_mm_shuffle_ps(a.m128, a.m128, _MM_SHUFFLE(3, 2, 1, 0)));
+}
+
+#endif
+
+#ifdef __KERNEL_SSE41__
+
+ccl_device_inline void gen_idirsplat_swap(const ssef &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap,
+ const float3& idir, ssef idirsplat[3], shuffle_swap_t shufflexyz[3])
+{
+ const __m128 idirsplat_raw[] = { _mm_set_ps1(idir.x), _mm_set_ps1(idir.y), _mm_set_ps1(idir.z) };
+ idirsplat[0] = _mm_xor_ps(idirsplat_raw[0], pn);
+ idirsplat[1] = _mm_xor_ps(idirsplat_raw[1], pn);
+ idirsplat[2] = _mm_xor_ps(idirsplat_raw[2], pn);
+
+ const ssef signmask = cast(ssei(0x80000000));
+ const ssef shuf_identity_f = cast(shuf_identity);
+ const ssef shuf_swap_f = cast(shuf_swap);
+
+ shufflexyz[0] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[0], signmask)));
+ shufflexyz[1] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[1], signmask)));
+ shufflexyz[2] = _mm_castps_si128(_mm_blendv_ps(shuf_identity_f, shuf_swap_f, _mm_and_ps(idirsplat_raw[2], signmask)));
+}
+
+#else
+
+ccl_device_inline void gen_idirsplat_swap(const ssef &pn, const shuffle_swap_t &shuf_identity, const shuffle_swap_t &shuf_swap,
+ const float3& idir, ssef idirsplat[3], shuffle_swap_t shufflexyz[3])
+{
+ idirsplat[0] = ssef(idir.x) ^ pn;
+ idirsplat[1] = ssef(idir.y) ^ pn;
+ idirsplat[2] = ssef(idir.z) ^ pn;
+
+ shufflexyz[0] = (idir.x >= 0)? shuf_identity: shuf_swap;
+ shufflexyz[1] = (idir.y >= 0)? shuf_identity: shuf_swap;
+ shufflexyz[2] = (idir.z >= 0)? shuf_identity: shuf_swap;
+}
+
+#endif
+
+ccl_device_inline const ssef uint32_to_float(const ssei &in)
+{
+ ssei a = _mm_srli_epi32(in, 16);
+ ssei b = _mm_and_si128(in, _mm_set1_epi32(0x0000ffff));
+ ssei c = _mm_or_si128(a, _mm_set1_epi32(0x53000000));
+ ssef d = _mm_cvtepi32_ps(b);
+ ssef e = _mm_sub_ps(_mm_castsi128_ps(c), _mm_castsi128_ps(_mm_set1_epi32(0x53000000)));
+ return _mm_add_ps(e, d);
+}
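
How uint32_to_float() works (editorial note): _mm_cvtepi32_ps is a signed conversion, so inputs of 2^31 and above would come out negative. The routine therefore splits each value into 16-bit halves: the low half converts exactly, while the high half is planted in the mantissa of the constant 0x53000000 (the float 2^39), so that subtracting 2^39 leaves exactly high*65536. A scalar sketch:

    /* Sketch only: scalar equivalent of uint32_to_float() above. */
    #include <cstdint>
    #include <cstring>
    static float ref_uint32_to_float(uint32_t v)
    {
        uint32_t hi_bits = (v >> 16) | 0x53000000u; /* bits of 2^39 + high*2^16 */
        float hi;
        std::memcpy(&hi, &hi_bits, sizeof(hi));
        return (hi - 549755813888.0f)  /* subtract 2^39, leaving high*65536 */
             + (float)(v & 0xffffu);   /* low 16 bits convert exactly */
    }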
+
+template<size_t S1, size_t S2, size_t S3, size_t S4>
+ccl_device_inline const ssef set_sign_bit(const ssef &a)
+{
+ return a ^ cast(ssei(S1 << 31, S2 << 31, S3 << 31, S4 << 31));
+}
+
+#endif
+
+CCL_NAMESPACE_END
+
+#endif
+
diff --git a/intern/cycles/util/util_ssei.h b/intern/cycles/util/util_ssei.h
new file mode 100644
index 00000000000..5f5a8686e35
--- /dev/null
+++ b/intern/cycles/util/util_ssei.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2011-2013 Intel Corporation
+ * Modifications Copyright 2014, Blender Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+#ifndef __UTIL_SSEI_H__
+#define __UTIL_SSEI_H__
+
+CCL_NAMESPACE_BEGIN
+
+#ifdef __KERNEL_SSE2__
+
+/*! 4-wide SSE integer type. */
+struct ssei
+{
+ typedef sseb Mask; // mask type
+ typedef ssei Int; // int type
+ typedef ssef Float; // float type
+
+ enum { size = 4 }; // number of SIMD elements
+ union { __m128i m128; int32_t i[4]; }; // data
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Constructors, Assignment & Cast Operators
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline ssei ( ) {}
+ __forceinline ssei ( const ssei& a ) { m128 = a.m128; }
+ __forceinline ssei& operator=( const ssei& a ) { m128 = a.m128; return *this; }
+
+ __forceinline ssei( const __m128i a ) : m128(a) {}
+ __forceinline operator const __m128i&( void ) const { return m128; }
+ __forceinline operator __m128i&( void ) { return m128; }
+
+ __forceinline ssei ( const int a ) : m128(_mm_set1_epi32(a)) {}
+ __forceinline ssei ( int a, int b, int c, int d ) : m128(_mm_setr_epi32(a, b, c, d)) {}
+
+ __forceinline explicit ssei( const __m128 a ) : m128(_mm_cvtps_epi32(a)) {}
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Array Access
+ ////////////////////////////////////////////////////////////////////////////////
+
+ __forceinline const int32_t& operator []( const size_t index ) const { assert(index < 4); return i[index]; }
+ __forceinline int32_t& operator []( const size_t index ) { assert(index < 4); return i[index]; }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+/// Unary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssei cast ( const __m128& a ) { return _mm_castps_si128(a); }
+__forceinline const ssei operator +( const ssei& a ) { return a; }
+__forceinline const ssei operator -( const ssei& a ) { return _mm_sub_epi32(_mm_setzero_si128(), a.m128); }
+#if defined(__KERNEL_SSSE3__)
+__forceinline const ssei abs ( const ssei& a ) { return _mm_abs_epi32(a.m128); }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Binary Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const ssei operator +( const ssei& a, const ssei& b ) { return _mm_add_epi32(a.m128, b.m128); }
+__forceinline const ssei operator +( const ssei& a, const int32_t& b ) { return a + ssei(b); }
+__forceinline const ssei operator +( const int32_t& a, const ssei& b ) { return ssei(a) + b; }
+
+__forceinline const ssei operator -( const ssei& a, const ssei& b ) { return _mm_sub_epi32(a.m128, b.m128); }
+__forceinline const ssei operator -( const ssei& a, const int32_t& b ) { return a - ssei(b); }
+__forceinline const ssei operator -( const int32_t& a, const ssei& b ) { return ssei(a) - b; }
+
+#if defined(__KERNEL_SSE41__)
+__forceinline const ssei operator *( const ssei& a, const ssei& b ) { return _mm_mullo_epi32(a.m128, b.m128); }
+__forceinline const ssei operator *( const ssei& a, const int32_t& b ) { return a * ssei(b); }
+__forceinline const ssei operator *( const int32_t& a, const ssei& b ) { return ssei(a) * b; }
+#endif
+
+__forceinline const ssei operator &( const ssei& a, const ssei& b ) { return _mm_and_si128(a.m128, b.m128); }
+__forceinline const ssei operator &( const ssei& a, const int32_t& b ) { return a & ssei(b); }
+__forceinline const ssei operator &( const int32_t& a, const ssei& b ) { return ssei(a) & b; }
+
+__forceinline const ssei operator |( const ssei& a, const ssei& b ) { return _mm_or_si128(a.m128, b.m128); }
+__forceinline const ssei operator |( const ssei& a, const int32_t& b ) { return a | ssei(b); }
+__forceinline const ssei operator |( const int32_t& a, const ssei& b ) { return ssei(a) | b; }
+
+__forceinline const ssei operator ^( const ssei& a, const ssei& b ) { return _mm_xor_si128(a.m128, b.m128); }
+__forceinline const ssei operator ^( const ssei& a, const int32_t& b ) { return a ^ ssei(b); }
+__forceinline const ssei operator ^( const int32_t& a, const ssei& b ) { return ssei(a) ^ b; }
+
+__forceinline const ssei operator <<( const ssei& a, const int32_t& n ) { return _mm_slli_epi32(a.m128, n); }
+__forceinline const ssei operator >>( const ssei& a, const int32_t& n ) { return _mm_srai_epi32(a.m128, n); }
+
+__forceinline const ssei andnot(const ssei& a, const ssei& b) { return _mm_andnot_si128(a.m128,b.m128); }
+__forceinline const ssei andnot(const sseb& a, const ssei& b) { return _mm_andnot_si128(cast(a.m128),b.m128); }
+__forceinline const ssei andnot(const ssei& a, const sseb& b) { return _mm_andnot_si128(a.m128,cast(b.m128)); }
+
+__forceinline const ssei sra ( const ssei& a, const int32_t& b ) { return _mm_srai_epi32(a.m128, b); }
+__forceinline const ssei srl ( const ssei& a, const int32_t& b ) { return _mm_srli_epi32(a.m128, b); }
+
+#if defined(__KERNEL_SSE41__)
+__forceinline const ssei min( const ssei& a, const ssei& b ) { return _mm_min_epi32(a.m128, b.m128); }
+__forceinline const ssei min( const ssei& a, const int32_t& b ) { return min(a,ssei(b)); }
+__forceinline const ssei min( const int32_t& a, const ssei& b ) { return min(ssei(a),b); }
+
+__forceinline const ssei max( const ssei& a, const ssei& b ) { return _mm_max_epi32(a.m128, b.m128); }
+__forceinline const ssei max( const ssei& a, const int32_t& b ) { return max(a,ssei(b)); }
+__forceinline const ssei max( const int32_t& a, const ssei& b ) { return max(ssei(a),b); }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Assignment Operators
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssei& operator +=( ssei& a, const ssei& b ) { return a = a + b; }
+__forceinline ssei& operator +=( ssei& a, const int32_t& b ) { return a = a + b; }
+
+__forceinline ssei& operator -=( ssei& a, const ssei& b ) { return a = a - b; }
+__forceinline ssei& operator -=( ssei& a, const int32_t& b ) { return a = a - b; }
+
+#if defined(__KERNEL_SSE41__)
+__forceinline ssei& operator *=( ssei& a, const ssei& b ) { return a = a * b; }
+__forceinline ssei& operator *=( ssei& a, const int32_t& b ) { return a = a * b; }
+#endif
+
+__forceinline ssei& operator &=( ssei& a, const ssei& b ) { return a = a & b; }
+__forceinline ssei& operator &=( ssei& a, const int32_t& b ) { return a = a & b; }
+
+__forceinline ssei& operator |=( ssei& a, const ssei& b ) { return a = a | b; }
+__forceinline ssei& operator |=( ssei& a, const int32_t& b ) { return a = a | b; }
+
+__forceinline ssei& operator <<=( ssei& a, const int32_t& b ) { return a = a << b; }
+__forceinline ssei& operator >>=( ssei& a, const int32_t& b ) { return a = a >> b; }
+
+////////////////////////////////////////////////////////////////////////////////
+/// Comparison Operators + Select
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline const sseb operator ==( const ssei& a, const ssei& b ) { return _mm_castsi128_ps(_mm_cmpeq_epi32 (a.m128, b.m128)); }
+__forceinline const sseb operator ==( const ssei& a, const int32_t& b ) { return a == ssei(b); }
+__forceinline const sseb operator ==( const int32_t& a, const ssei& b ) { return ssei(a) == b; }
+
+__forceinline const sseb operator !=( const ssei& a, const ssei& b ) { return !(a == b); }
+__forceinline const sseb operator !=( const ssei& a, const int32_t& b ) { return a != ssei(b); }
+__forceinline const sseb operator !=( const int32_t& a, const ssei& b ) { return ssei(a) != b; }
+
+__forceinline const sseb operator < ( const ssei& a, const ssei& b ) { return _mm_castsi128_ps(_mm_cmplt_epi32 (a.m128, b.m128)); }
+__forceinline const sseb operator < ( const ssei& a, const int32_t& b ) { return a < ssei(b); }
+__forceinline const sseb operator < ( const int32_t& a, const ssei& b ) { return ssei(a) < b; }
+
+__forceinline const sseb operator >=( const ssei& a, const ssei& b ) { return !(a < b); }
+__forceinline const sseb operator >=( const ssei& a, const int32_t& b ) { return a >= ssei(b); }
+__forceinline const sseb operator >=( const int32_t& a, const ssei& b ) { return ssei(a) >= b; }
+
+__forceinline const sseb operator > ( const ssei& a, const ssei& b ) { return _mm_castsi128_ps(_mm_cmpgt_epi32 (a.m128, b.m128)); }
+__forceinline const sseb operator > ( const ssei& a, const int32_t& b ) { return a > ssei(b); }
+__forceinline const sseb operator > ( const int32_t& a, const ssei& b ) { return ssei(a) > b; }
+
+__forceinline const sseb operator <=( const ssei& a, const ssei& b ) { return !(a > b); }
+__forceinline const sseb operator <=( const ssei& a, const int32_t& b ) { return a <= ssei(b); }
+__forceinline const sseb operator <=( const int32_t& a, const ssei& b ) { return ssei(a) <= b; }
+
+__forceinline const ssei select( const sseb& m, const ssei& t, const ssei& f ) {
+#ifdef __KERNEL_SSE41__
+ return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
+#else
+ return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
+#endif
+}
+
+__forceinline const ssei select( const int mask, const ssei& t, const ssei& f ) {
+#if defined(__KERNEL_SSE41__) && ((!defined(__clang__) && !defined(_MSC_VER)) || defined(__INTEL_COMPILER))
+ return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
+#else
+ return select(sseb(mask),t,f);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Movement/Shifting/Shuffling Functions
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssei unpacklo( const ssei& a, const ssei& b ) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a.m128), _mm_castsi128_ps(b.m128))); }
+__forceinline ssei unpackhi( const ssei& a, const ssei& b ) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a.m128), _mm_castsi128_ps(b.m128))); }
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssei shuffle( const ssei& a ) {
+ return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0));
+}
+
+template<size_t i0, size_t i1, size_t i2, size_t i3> __forceinline const ssei shuffle( const ssei& a, const ssei& b ) {
+ return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
+}
+
+#if defined(__KERNEL_SSE3__)
+template<> __forceinline const ssei shuffle<0, 0, 2, 2>( const ssei& a ) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(a))); }
+template<> __forceinline const ssei shuffle<1, 1, 3, 3>( const ssei& a ) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(a))); }
+template<> __forceinline const ssei shuffle<0, 1, 0, 1>( const ssei& a ) { return _mm_castpd_si128(_mm_movedup_pd (_mm_castsi128_pd(a))); }
+#endif
+
+template<size_t i0> __forceinline const ssei shuffle( const ssei& b ) {
+ return shuffle<i0,i0,i0,i0>(b);
+}
+
+#if defined(__KERNEL_SSE41__)
+template<size_t src> __forceinline int extract( const ssei& b ) { return _mm_extract_epi32(b, src); }
+template<size_t dst> __forceinline const ssei insert( const ssei& a, const int32_t b ) { return _mm_insert_epi32(a, b, dst); }
+#else
+template<size_t src> __forceinline int extract( const ssei& b ) { return b[src]; }
+template<size_t dst> __forceinline const ssei insert( const ssei& a, const int32_t b ) { ssei c = a; c[dst] = b; return c; }
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Reductions
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__KERNEL_SSE41__)
+__forceinline const ssei vreduce_min(const ssei& v) { ssei h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
+__forceinline const ssei vreduce_max(const ssei& v) { ssei h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
+__forceinline const ssei vreduce_add(const ssei& v) { ssei h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }
+
+__forceinline int reduce_min(const ssei& v) { return extract<0>(vreduce_min(v)); }
+__forceinline int reduce_max(const ssei& v) { return extract<0>(vreduce_max(v)); }
+__forceinline int reduce_add(const ssei& v) { return extract<0>(vreduce_add(v)); }
+
+__forceinline size_t select_min(const ssei& v) { return __bsf(movemask(v == vreduce_min(v))); }
+__forceinline size_t select_max(const ssei& v) { return __bsf(movemask(v == vreduce_max(v))); }
+
+__forceinline size_t select_min(const sseb& valid, const ssei& v) { const ssei a = select(valid,v,ssei((int)pos_inf)); return __bsf(movemask(valid & (a == vreduce_min(a)))); }
+__forceinline size_t select_max(const sseb& valid, const ssei& v) { const ssei a = select(valid,v,ssei((int)neg_inf)); return __bsf(movemask(valid & (a == vreduce_max(a)))); }
+
+#else
+
+__forceinline int reduce_min(const ssei& v) { return min(min(v[0],v[1]),min(v[2],v[3])); }
+__forceinline int reduce_max(const ssei& v) { return max(max(v[0],v[1]),max(v[2],v[3])); }
+__forceinline int reduce_add(const ssei& v) { return v[0]+v[1]+v[2]+v[3]; }
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+/// Memory load and store operations
+////////////////////////////////////////////////////////////////////////////////
+
+__forceinline ssei load4i( const void* const a ) {
+ return _mm_load_si128((__m128i*)a);
+}
+
+__forceinline void store4i(void* ptr, const ssei& v) {
+ _mm_store_si128((__m128i*)ptr,v);
+}
+
+__forceinline void storeu4i(void* ptr, const ssei& v) {
+ _mm_storeu_si128((__m128i*)ptr,v);
+}
+
+__forceinline void store4i( const sseb& mask, void* ptr, const ssei& i ) {
+#if defined (__KERNEL_AVX__)
+ _mm_maskstore_ps((float*)ptr,(__m128i)mask,_mm_castsi128_ps(i));
+#else
+ *(ssei*)ptr = select(mask,i,*(ssei*)ptr);
+#endif
+}
+
+__forceinline ssei load4i_nt (void* ptr) {
+#if defined(__KERNEL_SSE41__)
+ return _mm_stream_load_si128((__m128i*)ptr);
+#else
+ return _mm_load_si128((__m128i*)ptr);
+#endif
+}
+
+__forceinline void store4i_nt(void* ptr, const ssei& v) {
+#if defined(__KERNEL_SSE41__)
+ _mm_stream_ps((float*)ptr,_mm_castsi128_ps(v));
+#else
+ _mm_store_si128((__m128i*)ptr,v);
+#endif
+}
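
Editorial note: the _nt variants issue non-temporal (streaming) memory operations that bypass the cache hierarchy, which suits large buffers written once and not read back soon; like load4i()/store4i() they require 16-byte aligned pointers. A usage sketch (not in the commit):

    /* Sketch only: fill a large 16-byte aligned buffer without polluting
     * the cache; count is assumed to be a multiple of 4. */
    __forceinline void fill4i_nt(int32_t* ptr, const ssei& value, size_t count)
    {
        for(size_t i = 0; i < count; i += 4)
            store4i_nt(ptr + i, value);
    }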
+
+#endif
+
+CCL_NAMESPACE_END
+
+#endif
+
diff --git a/intern/cycles/util/util_types.h b/intern/cycles/util/util_types.h
index b1319011936..98d70786d44 100644
--- a/intern/cycles/util/util_types.h
+++ b/intern/cycles/util/util_types.h
@@ -51,6 +51,7 @@
#endif
#define ccl_may_alias
#define ccl_always_inline __forceinline
+#define ccl_maybe_unused
#else
@@ -62,6 +63,7 @@
#define ccl_try_align(...) __attribute__((aligned(__VA_ARGS__)))
#define ccl_may_alias __attribute__((__may_alias__))
#define ccl_always_inline __attribute__((always_inline))
+#define ccl_maybe_unused __attribute__((used))
#endif