git.blender.org/blender.git
Diffstat (limited to 'extern/Eigen2/Eigen/src/Core/arch')
-rw-r--r--  extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h  354
-rw-r--r--  extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h      321
2 files changed, 0 insertions, 675 deletions
diff --git a/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h b/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h
deleted file mode 100644
index 4de3b5e2e0b..00000000000
--- a/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ /dev/null
@@ -1,354 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Konstantinos Margaritis <markos@codex.gr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_PACKET_MATH_ALTIVEC_H
-#define EIGEN_PACKET_MATH_ALTIVEC_H
-
-#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
-#endif
-
-typedef __vector float v4f;
-typedef __vector int v4i;
-typedef __vector unsigned int v4ui;
-typedef __vector __bool int v4bi;
-
-// We want to reuse these constants without writing the same code every time,
-// and declaring them as globals doesn't really work, so we define macros instead
-
-#define USE_CONST_v0i const v4i v0i = vec_splat_s32(0)
-#define USE_CONST_v1i const v4i v1i = vec_splat_s32(1)
-#define USE_CONST_v16i_ const v4i v16i_ = vec_splat_s32(-16)
-#define USE_CONST_v0f USE_CONST_v0i; const v4f v0f = (v4f) v0i
-#define USE_CONST_v1f USE_CONST_v1i; const v4f v1f = vec_ctf(v1i, 0)
-#define USE_CONST_v1i_ const v4ui v1i_ = vec_splat_u32(-1)
-#define USE_CONST_v0f_ USE_CONST_v1i_; const v4f v0f_ = (v4f) vec_sl(v1i_, v1i_)
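-// For example, a function that needs the float zero vector writes USE_CONST_v0f;
-// at its top and can then refer to v0f, as ei_pmul() below does.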
-
-template<> struct ei_packet_traits<float> { typedef v4f type; enum {size=4}; };
-template<> struct ei_packet_traits<int> { typedef v4i type; enum {size=4}; };
-
-template<> struct ei_unpacket_traits<v4f> { typedef float type; enum {size=4}; };
-template<> struct ei_unpacket_traits<v4i> { typedef int type; enum {size=4}; };
-
-inline std::ostream & operator <<(std::ostream & s, const v4f & v)
-{
- union {
- v4f v;
- float n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-inline std::ostream & operator <<(std::ostream & s, const v4i & v)
-{
- union {
- v4i v;
- int n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-inline std::ostream & operator <<(std::ostream & s, const v4ui & v)
-{
- union {
- v4ui v;
- unsigned int n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-inline std::ostream & operator <<(std::ostream & s, const v4bi & v)
-{
- union {
- __vector __bool int v;
- unsigned int n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-template<> inline v4f ei_padd(const v4f& a, const v4f& b) { return vec_add(a,b); }
-template<> inline v4i ei_padd(const v4i& a, const v4i& b) { return vec_add(a,b); }
-
-template<> inline v4f ei_psub(const v4f& a, const v4f& b) { return vec_sub(a,b); }
-template<> inline v4i ei_psub(const v4i& a, const v4i& b) { return vec_sub(a,b); }
-
-template<> inline v4f ei_pmul(const v4f& a, const v4f& b) { USE_CONST_v0f; return vec_madd(a,b, v0f); }
-template<> inline v4i ei_pmul(const v4i& a, const v4i& b)
-{
- // Detailed in: http://freevec.org/content/32bit_signed_integer_multiplication_altivec
- // Set up constants and variables
- v4i a1, b1, bswap, low_prod, high_prod, prod, prod_, v1sel;
- USE_CONST_v0i;
- USE_CONST_v1i;
- USE_CONST_v16i_;
-
- // Get the absolute values
- a1 = vec_abs(a);
- b1 = vec_abs(b);
-
- // Get the signs using xor
- v4bi sgn = (v4bi) vec_cmplt(vec_xor(a, b), v0i);
-
- // Do the multiplication for the absolute values.
- bswap = (v4i) vec_rl((v4ui) b1, (v4ui) v16i_ );
- low_prod = vec_mulo((__vector short)a1, (__vector short)b1);
- high_prod = vec_msum((__vector short)a1, (__vector short)bswap, v0i);
- high_prod = (v4i) vec_sl((v4ui) high_prod, (v4ui) v16i_);
- prod = vec_add( low_prod, high_prod );
-
- // NOR the product and select only the negative elements according to the sign mask
- prod_ = vec_nor(prod, prod);
- prod_ = vec_sel(v0i, prod_, sgn);
-
- // Add 1 to complete the two's-complement negation of those elements
- v1sel = vec_sel(v0i, v1i, sgn);
- prod_ = vec_add(prod_, v1sel);
-
- // Merge the results back to the final vector.
- prod = vec_sel(prod, prod_, sgn);
-
- return prod;
-}
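-// The intent above, per 32-bit lane: compute |a|*|b| mod 2^32 from 16-bit halves
-// (low = al*bl; the cross terms al*bh + ah*bl are shifted up by 16) and restore
-// the sign by two's complement (NOR gives ~x; the selected +1 completes -x = ~x + 1).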
-
-template<> inline v4f ei_pdiv(const v4f& a, const v4f& b) {
- v4f t, y_0, y_1, res;
- USE_CONST_v0f;
- USE_CONST_v1f;
-
- // AltiVec offers no divide instruction, so we compute a reciprocal approximation instead
- y_0 = vec_re(b);
-
- // Do one Newton-Raphson iteration to get the needed accuracy
- t = vec_nmsub(y_0, b, v1f);
- y_1 = vec_madd(y_0, t, y_0);
-
- res = vec_madd(a, y_1, v0f);
- return res;
-}
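-// The iteration above is the standard Newton-Raphson step for 1/b: starting from
-// the ~12-bit estimate y_0 = vec_re(b), y_1 = y_0 + y_0*(1 - b*y_0) roughly
-// doubles the number of correct bits, so one step suffices; then a/b = a * y_1.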
-
-template<> inline v4f ei_pmadd(const v4f& a, const v4f& b, const v4f& c) { return vec_madd(a, b, c); }
-
-template<> inline v4f ei_pmin(const v4f& a, const v4f& b) { return vec_min(a,b); }
-template<> inline v4i ei_pmin(const v4i& a, const v4i& b) { return vec_min(a,b); }
-
-template<> inline v4f ei_pmax(const v4f& a, const v4f& b) { return vec_max(a,b); }
-template<> inline v4i ei_pmax(const v4i& a, const v4i& b) { return vec_max(a,b); }
-
-template<> inline v4f ei_pload(const float* from) { return vec_ld(0, from); }
-template<> inline v4i ei_pload(const int* from) { return vec_ld(0, from); }
-
-template<> inline v4f ei_ploadu(const float* from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- __vector unsigned char MSQ, LSQ;
- __vector unsigned char mask;
- MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
- mask = vec_lvsl(0, from); // create the permute mask
- return (v4f) vec_perm(MSQ, LSQ, mask); // align the data
-}
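-// (vec_lvsl encodes the pointer's misalignment as a permute vector; permuting the
-// two aligned quadwords straddling 'from' reassembles the unaligned packet. The
-// int load and the unaligned stores below use the same scheme.)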
-
-template<> inline v4i ei_ploadu(const int* from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- __vector unsigned char MSQ, LSQ;
- __vector unsigned char mask;
- MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
- mask = vec_lvsl(0, from); // create the permute mask
- return (v4i) vec_perm(MSQ, LSQ, mask); // align the data
-}
-
-template<> inline v4f ei_pset1(const float& from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- float __attribute__((aligned(16))) af[4];
- af[0] = from;
- v4f vc = vec_ld(0, af);
- vc = vec_splat(vc, 0);
- return vc;
-}
-
-template<> inline v4i ei_pset1(const int& from)
-{
- int __attribute__((aligned(16))) ai[4];
- ai[0] = from;
- v4i vc = vec_ld(0, ai);
- vc = vec_splat(vc, 0);
- return vc;
-}
-
-template<> inline void ei_pstore(float* to, const v4f& from) { vec_st(from, 0, to); }
-template<> inline void ei_pstore(int* to, const v4i& from) { vec_st(from, 0, to); }
-
-template<> inline void ei_pstoreu(float* to, const v4f& from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- // Warning: not thread safe!
- __vector unsigned char MSQ, LSQ, edges;
- __vector unsigned char edgeAlign, align;
-
- MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword
- edgeAlign = vec_lvsl(0, to); // permute map to extract edges
- edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges
- align = vec_lvsr( 0, to ); // permute map to misalign data
- MSQ = vec_perm(edges,(__vector unsigned char)from,align); // misalign the data (MSQ)
- LSQ = vec_perm((__vector unsigned char)from,edges,align); // misalign the data (LSQ)
- vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
- vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
-}
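-// (Why not thread safe: this is a read-modify-write of the two aligned quadwords
-// around 'to', so bytes outside the stored packet are read and written back; a
-// concurrent write to those edge bytes can be silently undone.)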
-
-template<> inline void ei_pstoreu(int* to, const v4i& from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- // Warning: not thread safe!
- __vector unsigned char MSQ, LSQ, edges;
- __vector unsigned char edgeAlign, align;
-
- MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword
- edgeAlign = vec_lvsl(0, to); // permute map to extract edges
- edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges
- align = vec_lvsr( 0, to ); // permute map to misalign data
- MSQ = vec_perm(edges,(__vector unsigned char)from,align); // misalign the data (MSQ)
- LSQ = vec_perm((__vector unsigned char)from,edges,align); // misalign the data (LSQ)
- vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
- vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
-}
-
-template<> inline float ei_pfirst(const v4f& a)
-{
- float __attribute__((aligned(16))) af[4];
- vec_st(a, 0, af);
- return af[0];
-}
-
-template<> inline int ei_pfirst(const v4i& a)
-{
- int __attribute__((aligned(16))) ai[4];
- vec_st(a, 0, ai);
- return ai[0];
-}
-
-inline v4f ei_preduxp(const v4f* vecs)
-{
- v4f v[4], sum[4];
-
- // It's easier and faster to transpose first and then add the columns
- // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
- // Do the transpose, first set of moves
- v[0] = vec_mergeh(vecs[0], vecs[2]);
- v[1] = vec_mergel(vecs[0], vecs[2]);
- v[2] = vec_mergeh(vecs[1], vecs[3]);
- v[3] = vec_mergel(vecs[1], vecs[3]);
- // Get the resulting vectors
- sum[0] = vec_mergeh(v[0], v[2]);
- sum[1] = vec_mergel(v[0], v[2]);
- sum[2] = vec_mergeh(v[1], v[3]);
- sum[3] = vec_mergel(v[1], v[3]);
-
- // Now do the summation:
- // Lines 0+1
- sum[0] = vec_add(sum[0], sum[1]);
- // Lines 2+3
- sum[1] = vec_add(sum[2], sum[3]);
- // Add the results
- sum[0] = vec_add(sum[0], sum[1]);
- return sum[0];
-}
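-// (After the transpose, sum[k] holds element k of every input, so the final adds
-// leave lane i of the result equal to the horizontal sum of vecs[i].)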
-
-inline float ei_predux(const v4f& a)
-{
- v4f b, sum;
- b = (v4f)vec_sld(a, a, 8);
- sum = vec_add(a, b);
- b = (v4f)vec_sld(sum, sum, 4);
- sum = vec_add(sum, b);
- return ei_pfirst(sum);
-}
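-// (Two rotate-and-add steps: {a0,a1,a2,a3} + {a2,a3,a0,a1} forms pairwise sums,
-// and one more 4-byte rotation and add leaves a0+a1+a2+a3 in every lane.)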
-
-inline v4i ei_preduxp(const v4i* vecs)
-{
- v4i v[4], sum[4];
-
- // It's easier and faster to transpose first and then add the columns
- // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
- // Do the transpose, first set of moves
- v[0] = vec_mergeh(vecs[0], vecs[2]);
- v[1] = vec_mergel(vecs[0], vecs[2]);
- v[2] = vec_mergeh(vecs[1], vecs[3]);
- v[3] = vec_mergel(vecs[1], vecs[3]);
- // Get the resulting vectors
- sum[0] = vec_mergeh(v[0], v[2]);
- sum[1] = vec_mergel(v[0], v[2]);
- sum[2] = vec_mergeh(v[1], v[3]);
- sum[3] = vec_mergel(v[1], v[3]);
-
- // Now do the summation:
- // Lines 0+1
- sum[0] = vec_add(sum[0], sum[1]);
- // Lines 2+3
- sum[1] = vec_add(sum[2], sum[3]);
- // Add the results
- sum[0] = vec_add(sum[0], sum[1]);
- return sum[0];
-}
-
-inline int ei_predux(const v4i& a)
-{
- USE_CONST_v0i;
- v4i sum;
- sum = vec_sums(a, v0i);
- sum = vec_sld(sum, v0i, 12);
- return ei_pfirst(sum);
-}
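-// (vec_sums accumulates all four lanes into the last element; the 12-byte shift
-// moves that element to lane 0, where ei_pfirst reads it.)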
-
-template<int Offset>
-struct ei_palign_impl<Offset, v4f>
-{
- inline static void run(v4f& first, const v4f& second)
- {
- first = vec_sld(first, second, Offset*4);
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset, v4i>
-{
- inline static void run(v4i& first, const v4i& second)
- {
- first = vec_sld(first, second, Offset*4);
- }
-};
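-// (In both specializations, vec_sld(first, second, n) takes 16 bytes starting at
-// byte n of the concatenation first:second, i.e. the packet shifted left by
-// Offset elements.)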
-
-#endif // EIGEN_PACKET_MATH_ALTIVEC_H
diff --git a/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h b/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h
deleted file mode 100644
index 9ca65b9be5b..00000000000
--- a/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h
+++ /dev/null
@@ -1,321 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_PACKET_MATH_SSE_H
-#define EIGEN_PACKET_MATH_SSE_H
-
-#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
-#endif
-
-template<> struct ei_packet_traits<float> { typedef __m128 type; enum {size=4}; };
-template<> struct ei_packet_traits<double> { typedef __m128d type; enum {size=2}; };
-template<> struct ei_packet_traits<int> { typedef __m128i type; enum {size=4}; };
-
-template<> struct ei_unpacket_traits<__m128> { typedef float type; enum {size=4}; };
-template<> struct ei_unpacket_traits<__m128d> { typedef double type; enum {size=2}; };
-template<> struct ei_unpacket_traits<__m128i> { typedef int type; enum {size=4}; };
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pset1<float>(const float& from) { return _mm_set1_ps(from); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pset1<double>(const double& from) { return _mm_set1_pd(from); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pset1<int>(const int& from) { return _mm_set1_epi32(from); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_padd<__m128>(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_padd<__m128d>(const __m128d& a, const __m128d& b) { return _mm_add_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_padd<__m128i>(const __m128i& a, const __m128i& b) { return _mm_add_epi32(a,b); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_psub<__m128>(const __m128& a, const __m128& b) { return _mm_sub_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_psub<__m128d>(const __m128d& a, const __m128d& b) { return _mm_sub_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_psub<__m128i>(const __m128i& a, const __m128i& b) { return _mm_sub_epi32(a,b); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pmul<__m128>(const __m128& a, const __m128& b) { return _mm_mul_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pmul<__m128d>(const __m128d& a, const __m128d& b) { return _mm_mul_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pmul<__m128i>(const __m128i& a, const __m128i& b)
-{
- return _mm_or_si128(
- _mm_and_si128(
- _mm_mul_epu32(a,b),
- _mm_setr_epi32(0xffffffff,0,0xffffffff,0)),
- _mm_slli_si128(
- _mm_and_si128(
- _mm_mul_epu32(_mm_srli_si128(a,4),_mm_srli_si128(b,4)),
- _mm_setr_epi32(0xffffffff,0,0xffffffff,0)), 4));
-}
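-// (SSE2 has no packed 32-bit multiply; _mm_mul_epu32 only multiplies lanes 0 and
-// 2, so a second call covers lanes 1 and 3 after a 4-byte shift, and the
-// mask/shift/or sequence stitches the four low 32-bit products back together.)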
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pdiv<__m128>(const __m128& a, const __m128& b) { return _mm_div_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pdiv<__m128d>(const __m128d& a, const __m128d& b) { return _mm_div_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pdiv<__m128i>(const __m128i& /*a*/, const __m128i& /*b*/)
-{ ei_assert(false && "packet integer division is not supported by SSE");
- __m128i dummy = ei_pset1<int>(0);
- return dummy;
-}
-
-// For some weird reason, this has to be overloaded for integer packets
-template<> EIGEN_STRONG_INLINE __m128i ei_pmadd(const __m128i& a, const __m128i& b, const __m128i& c) { return ei_padd(ei_pmul(a,b), c); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pmin<__m128>(const __m128& a, const __m128& b) { return _mm_min_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pmin<__m128d>(const __m128d& a, const __m128d& b) { return _mm_min_pd(a,b); }
-// FIXME this vectorized min operator is likely to be slower than the standard one
-template<> EIGEN_STRONG_INLINE __m128i ei_pmin<__m128i>(const __m128i& a, const __m128i& b)
-{
- __m128i mask = _mm_cmplt_epi32(a,b);
- return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
-}
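-// (Branchless select: (mask & a) | (~mask & b) keeps a where a < b and b
-// elsewhere; a native packed-int min, _mm_min_epi32, only arrived with SSE4.1.)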
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pmax<__m128>(const __m128& a, const __m128& b) { return _mm_max_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pmax<__m128d>(const __m128d& a, const __m128d& b) { return _mm_max_pd(a,b); }
-// FIXME this vectorized max operator is likely to be slower than the standard one
-template<> EIGEN_STRONG_INLINE __m128i ei_pmax<__m128i>(const __m128i& a, const __m128i& b)
-{
- __m128i mask = _mm_cmpgt_epi32(a,b);
- return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
-}
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pload<float>(const float* from) { return _mm_load_ps(from); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pload<double>(const double* from) { return _mm_load_pd(from); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pload<int>(const int* from) { return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_ploadu<float>(const float* from) { return _mm_loadu_ps(from); }
-// template<> EIGEN_STRONG_INLINE __m128 ei_ploadu(const float* from) {
-// if (size_t(from)&0xF)
-// return _mm_loadu_ps(from);
-// else
-// return _mm_loadu_ps(from);
-// }
-template<> EIGEN_STRONG_INLINE __m128d ei_ploadu<double>(const double* from) { return _mm_loadu_pd(from); }
-template<> EIGEN_STRONG_INLINE __m128i ei_ploadu<int>(const int* from) { return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from)); }
-
-template<> EIGEN_STRONG_INLINE void ei_pstore<float>(float* to, const __m128& from) { _mm_store_ps(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstore<double>(double* to, const __m128d& from) { _mm_store_pd(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstore<int>(int* to, const __m128i& from) { _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
-
-template<> EIGEN_STRONG_INLINE void ei_pstoreu<float>(float* to, const __m128& from) { _mm_storeu_ps(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstoreu<double>(double* to, const __m128d& from) { _mm_storeu_pd(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstoreu<int>(int* to, const __m128i& from) { _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
-
-#ifdef _MSC_VER
-// this fixes an internal compilation error with MSVC
-template<> EIGEN_STRONG_INLINE float ei_pfirst<__m128>(const __m128& a) { float x = _mm_cvtss_f32(a); return x; }
-template<> EIGEN_STRONG_INLINE double ei_pfirst<__m128d>(const __m128d& a) { double x = _mm_cvtsd_f64(a); return x; }
-template<> EIGEN_STRONG_INLINE int ei_pfirst<__m128i>(const __m128i& a) { int x = _mm_cvtsi128_si32(a); return x; }
-#else
-template<> EIGEN_STRONG_INLINE float ei_pfirst<__m128>(const __m128& a) { return _mm_cvtss_f32(a); }
-template<> EIGEN_STRONG_INLINE double ei_pfirst<__m128d>(const __m128d& a) { return _mm_cvtsd_f64(a); }
-template<> EIGEN_STRONG_INLINE int ei_pfirst<__m128i>(const __m128i& a) { return _mm_cvtsi128_si32(a); }
-#endif
-
-#ifdef __SSE3__
-// TODO implement SSE2 versions as well as integer versions
-template<> EIGEN_STRONG_INLINE __m128 ei_preduxp<__m128>(const __m128* vecs)
-{
- return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
-}
-template<> EIGEN_STRONG_INLINE __m128d ei_preduxp<__m128d>(const __m128d* vecs)
-{
- return _mm_hadd_pd(vecs[0], vecs[1]);
-}
-// SSSE3 version:
-// EIGEN_STRONG_INLINE __m128i ei_preduxp(const __m128i* vecs)
-// {
-// return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
-// }
-
-template<> EIGEN_STRONG_INLINE float ei_predux<__m128>(const __m128& a)
-{
- __m128 tmp0 = _mm_hadd_ps(a,a);
- return ei_pfirst(_mm_hadd_ps(tmp0, tmp0));
-}
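-// (_mm_hadd_ps(a,a) yields {a0+a1, a2+a3, a0+a1, a2+a3}, so the second hadd
-// leaves the full sum in every lane and ei_pfirst reads lane 0.)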
-
-template<> EIGEN_STRONG_INLINE double ei_predux<__m128d>(const __m128d& a) { return ei_pfirst(_mm_hadd_pd(a, a)); }
-
-// SSSE3 version:
-// EIGEN_STRONG_INLINE float ei_predux(const __m128i& a)
-// {
-// __m128i tmp0 = _mm_hadd_epi32(a,a);
-// return ei_pfirst(_mm_hadd_epi32(tmp0, tmp0));
-// }
-#else
-// SSE2 versions
-template<> EIGEN_STRONG_INLINE float ei_predux<__m128>(const __m128& a)
-{
- __m128 tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
- return ei_pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
-}
-template<> EIGEN_STRONG_INLINE double ei_predux<__m128d>(const __m128d& a)
-{
- return ei_pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
-}
-
-template<> EIGEN_STRONG_INLINE __m128 ei_preduxp<__m128>(const __m128* vecs)
-{
- __m128 tmp0, tmp1, tmp2;
- tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
- tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
- tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
- tmp0 = _mm_add_ps(tmp0, tmp1);
- tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
- tmp1 = _mm_add_ps(tmp1, tmp2);
- tmp2 = _mm_movehl_ps(tmp1, tmp0);
- tmp0 = _mm_movelh_ps(tmp0, tmp1);
- return _mm_add_ps(tmp0, tmp2);
-}
-
-template<> EIGEN_STRONG_INLINE __m128d ei_preduxp<__m128d>(const __m128d* vecs)
-{
- return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
-}
-#endif // SSE3
-
-template<> EIGEN_STRONG_INLINE int ei_predux<__m128i>(const __m128i& a)
-{
- __m128i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
- return ei_pfirst(tmp) + ei_pfirst(_mm_shuffle_epi32(tmp, 1));
-}
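-// (The unpackhi/add folds the high 64 bits onto the low ones, giving
-// {a0+a2, a1+a3, ...}; the two extracted lanes then sum to a0+a1+a2+a3.)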
-
-template<> EIGEN_STRONG_INLINE __m128i ei_preduxp<__m128i>(const __m128i* vecs)
-{
- __m128i tmp0, tmp1, tmp2;
- tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
- tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
- tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
- tmp0 = _mm_add_epi32(tmp0, tmp1);
- tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
- tmp1 = _mm_add_epi32(tmp1, tmp2);
- tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
- tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
- return _mm_add_epi32(tmp0, tmp2);
-}
-
-#if (defined __GNUC__)
-// template <> EIGEN_STRONG_INLINE __m128 ei_pmadd(const __m128& a, const __m128& b, const __m128& c)
-// {
-// __m128 res = b;
-// asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
-// return res;
-// }
-// EIGEN_STRONG_INLINE __m128i _mm_alignr_epi8(const __m128i& a, const __m128i& b, const int i)
-// {
-// __m128i res = a;
-// asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
-// return res;
-// }
-#endif
-
-#ifdef __SSSE3__
-// SSSE3 versions
-template<int Offset>
-struct ei_palign_impl<Offset,__m128>
-{
- EIGEN_STRONG_INLINE static void run(__m128& first, const __m128& second)
- {
- if (Offset!=0)
- first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128i>
-{
- EIGEN_STRONG_INLINE static void run(__m128i& first, const __m128i& second)
- {
- if (Offset!=0)
- first = _mm_alignr_epi8(second,first, Offset*4);
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128d>
-{
- EIGEN_STRONG_INLINE static void run(__m128d& first, const __m128d& second)
- {
- if (Offset==1)
- first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
- }
-};
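-// (_mm_alignr_epi8(hi, lo, n) extracts 16 bytes starting at byte n of the
-// concatenation hi:lo, which is exactly the shift ei_palign needs; the SSE2
-// fallbacks below emulate it with per-Offset moves and shuffles.)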
-#else
-// SSE2 versions
-template<int Offset>
-struct ei_palign_impl<Offset,__m128>
-{
- EIGEN_STRONG_INLINE static void run(__m128& first, const __m128& second)
- {
- if (Offset==1)
- {
- first = _mm_move_ss(first,second);
- first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
- }
- else if (Offset==2)
- {
- first = _mm_movehl_ps(first,first);
- first = _mm_movelh_ps(first,second);
- }
- else if (Offset==3)
- {
- first = _mm_move_ss(first,second);
- first = _mm_shuffle_ps(first,second,0x93);
- }
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128i>
-{
- EIGEN_STRONG_INLINE static void run(__m128i& first, const __m128i& second)
- {
- if (Offset==1)
- {
- first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- first = _mm_shuffle_epi32(first,0x39);
- }
- else if (Offset==2)
- {
- first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
- first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- }
- else if (Offset==3)
- {
- first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
- }
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128d>
-{
- EIGEN_STRONG_INLINE static void run(__m128d& first, const __m128d& second)
- {
- if (Offset==1)
- {
- first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
- first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
- }
- }
-};
-#endif
-
-#define ei_vec4f_swizzle1(v,p,q,r,s) \
- (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
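-// (Routes source lane p to lane 0, q to 1, r to 2, s to 3; e.g.
-// ei_vec4f_swizzle1(v,0,0,0,0) broadcasts lane 0 and (v,3,2,1,0) reverses v.)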
-
-#endif // EIGEN_PACKET_MATH_SSE_H