Diffstat (limited to 'extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h')
-rw-r--r-- | extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h | 2251
1 file changed, 1533 insertions, 718 deletions
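The patch below reworks Eigen's cache-size bookkeeping (manage_caching_sizes now tracks L1/L2/L3) and the GEBP blocking heuristic (computeProductBlockingSizes gains a num_threads parameter and can defer to evaluateProductBlockingSizesHeuristic). A minimal usage sketch of that entry point follows; it assumes the three-argument Eigen::setCpuCacheSizes(l1,l2,l3) helper that accompanies this change elsewhere in the file, and the internal computeProductBlockingSizes template shown in the diff, so treat it as illustrative rather than authoritative for other Eigen versions.

#include <Eigen/Core>
#include <cstdio>

int main()
{
  // Override the detected cache sizes, in bytes (illustrative values only):
  // 32 KB L1, 256 KB L2, 8 MB shared L3.
  Eigen::setCpuCacheSizes(32*1024, 256*1024, 8*1024*1024);

  // Ask the blocking heuristic for a 4096 x 4096 x 4096 single-precision
  // product; k, m, n are updated in place to the blocking sizes kc, mc, nc.
  std::ptrdiff_t k = 4096, m = 4096, n = 4096;
  Eigen::internal::computeProductBlockingSizes<float, float>(k, m, n);

  std::printf("kc=%td  mc=%td  nc=%td\n", k, m, n);
  return 0;
}
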
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h index bcdca5b0d23..e3980f6ffd4 100644 --- a/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -10,8 +10,9 @@ #ifndef EIGEN_GENERAL_BLOCK_PANEL_H #define EIGEN_GENERAL_BLOCK_PANEL_H -namespace Eigen { - + +namespace Eigen { + namespace internal { template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false> @@ -24,29 +25,51 @@ inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff return a<=0 ? b : a; } +#if EIGEN_ARCH_i386_OR_x86_64 +const std::ptrdiff_t defaultL1CacheSize = 32*1024; +const std::ptrdiff_t defaultL2CacheSize = 256*1024; +const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024; +#else +const std::ptrdiff_t defaultL1CacheSize = 16*1024; +const std::ptrdiff_t defaultL2CacheSize = 512*1024; +const std::ptrdiff_t defaultL3CacheSize = 512*1024; +#endif + /** \internal */ -inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0) -{ - static std::ptrdiff_t m_l1CacheSize = 0; - static std::ptrdiff_t m_l2CacheSize = 0; - if(m_l2CacheSize==0) - { - m_l1CacheSize = manage_caching_sizes_helper(queryL1CacheSize(),8 * 1024); - m_l2CacheSize = manage_caching_sizes_helper(queryTopLevelCacheSize(),1*1024*1024); +struct CacheSizes { + CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) { + int l1CacheSize, l2CacheSize, l3CacheSize; + queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize); + m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize); + m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize); + m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize); } - + + std::ptrdiff_t m_l1; + std::ptrdiff_t m_l2; + std::ptrdiff_t m_l3; +}; + + +/** \internal */ +inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3) +{ + static CacheSizes m_cacheSizes; + if(action==SetAction) { // set the cpu cache size and cache all block sizes from a global cache size in byte eigen_internal_assert(l1!=0 && l2!=0); - m_l1CacheSize = *l1; - m_l2CacheSize = *l2; + m_cacheSizes.m_l1 = *l1; + m_cacheSizes.m_l2 = *l2; + m_cacheSizes.m_l3 = *l3; } else if(action==GetAction) { eigen_internal_assert(l1!=0 && l2!=0); - *l1 = m_l1CacheSize; - *l2 = m_l2CacheSize; + *l1 = m_cacheSizes.m_l1; + *l2 = m_cacheSizes.m_l2; + *l3 = m_cacheSizes.m_l3; } else { @@ -54,6 +77,206 @@ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdi } } +/* Helper for computeProductBlockingSizes. + * + * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar, + * this function computes the blocking size parameters along the respective dimensions + * for matrix products and related algorithms. The blocking sizes depends on various + * parameters: + * - the L1 and L2 cache sizes, + * - the register level blocking sizes defined by gebp_traits, + * - the number of scalars that fit into a packet (when vectorization is enabled). 
+ * + * \sa setCpuCacheSizes */ + +template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index> +void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1) +{ + typedef gebp_traits<LhsScalar,RhsScalar> Traits; + + // Explanations: + // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and + // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed + // per mr x kc horizontal small panels where mr is the blocking size along the m dimension + // at the register level. This small horizontal panel has to stay within L1 cache. + std::ptrdiff_t l1, l2, l3; + manage_caching_sizes(GetAction, &l1, &l2, &l3); + + if (num_threads > 1) { + typedef typename Traits::ResScalar ResScalar; + enum { + kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)), + ksub = Traits::mr * Traits::nr * sizeof(ResScalar), + kr = 8, + mr = Traits::mr, + nr = Traits::nr + }; + // Increasing k gives us more time to prefetch the content of the "C" + // registers. However once the latency is hidden there is no point in + // increasing the value of k, so we'll cap it at 320 (value determined + // experimentally). + const Index k_cache = (numext::mini<Index>)((l1-ksub)/kdiv, 320); + if (k_cache < k) { + k = k_cache - (k_cache % kr); + eigen_internal_assert(k > 0); + } + + const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k); + const Index n_per_thread = numext::div_ceil(n, num_threads); + if (n_cache <= n_per_thread) { + // Don't exceed the capacity of the l2 cache. + eigen_internal_assert(n_cache >= static_cast<Index>(nr)); + n = n_cache - (n_cache % nr); + eigen_internal_assert(n > 0); + } else { + n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr)); + } + + if (l3 > l2) { + // l3 is shared between all cores, so we'll give each thread its own chunk of l3. + const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads); + const Index m_per_thread = numext::div_ceil(m, num_threads); + if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) { + m = m_cache - (m_cache % mr); + eigen_internal_assert(m > 0); + } else { + m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr)); + } + } + } + else { + // In unit tests we do not want to use extra large matrices, + // so we reduce the cache size to check the blocking strategy is not flawed +#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS + l1 = 9*1024; + l2 = 32*1024; + l3 = 512*1024; +#endif + + // Early return for small problems because the computation below are time consuming for small problems. + // Perhaps it would make more sense to consider k*n*m?? + // Note that for very tiny problem, this function should be bypassed anyway + // because we use the coefficient-based implementation for them. + if((numext::maxi)(k,(numext::maxi)(m,n))<48) + return; + + typedef typename Traits::ResScalar ResScalar; + enum { + k_peeling = 8, + k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)), + k_sub = Traits::mr * Traits::nr * sizeof(ResScalar) + }; + + // ---- 1st level of blocking on L1, yields kc ---- + + // Blocking on the third dimension (i.e., k) is chosen so that an horizontal panel + // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fits within L1 cache. + // We also include a register-level block of the result (mx x nr). 
+ // (In an ideal world only the lhs panel would stay in L1) + // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of: + const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1); + const Index old_k = k; + if(k>max_kc) + { + // We are really blocking on the third dimension: + // -> reduce blocking size to make sure the last block is as large as possible + // while keeping the same number of sweeps over the result. + k = (k%max_kc)==0 ? max_kc + : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1))); + + eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same"); + } + + // ---- 2nd level of blocking on max(L2,L3), yields nc ---- + + // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is: + // actual_l2 = max(l2, l3/nb_core_sharing_l3) + // The number below is quite conservative: it is better to underestimate the cache size rather than overestimating it) + // For instance, it corresponds to 6MB of L3 shared among 4 cores. + #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS + const Index actual_l2 = l3; + #else + const Index actual_l2 = 1572864; // == 1.5 MB + #endif + + // Here, nc is chosen such that a block of kc x nc of the rhs fit within half of L2. + // The second half is implicitly reserved to access the result and lhs coefficients. + // When k<max_kc, then nc can arbitrarily growth. In practice, it seems to be fruitful + // to limit this growth: we bound nc to growth by a factor x1.5. + // However, if the entire lhs block fit within L1, then we are not going to block on the rows at all, + // and it becomes fruitful to keep the packed rhs blocks in L1 if there is enough remaining space. + Index max_nc; + const Index lhs_bytes = m * k * sizeof(LhsScalar); + const Index remaining_l1 = l1- k_sub - lhs_bytes; + if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k) + { + // L1 blocking + max_nc = remaining_l1 / (k*sizeof(RhsScalar)); + } + else + { + // L2 blocking + max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar)); + } + // WARNING Below, we assume that Traits::nr is a power of two. + Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1)); + if(n>nc) + { + // We are really blocking over the columns: + // -> reduce blocking size to make sure the last block is as large as possible + // while keeping the same number of sweeps over the packed lhs. + // Here we allow one more sweep if this gives us a perfect match, thus the commented "-1" + n = (n%nc)==0 ? nc + : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1)))); + } + else if(old_k==k) + { + // So far, no blocking at all, i.e., kc==k, and nc==n. + // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2 + // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete. 
+ Index problem_size = k*n*sizeof(LhsScalar); + Index actual_lm = actual_l2; + Index max_mc = m; + if(problem_size<=1024) + { + // problem is small enough to keep in L1 + // Let's choose m such that lhs's block fit in 1/3 of L1 + actual_lm = l1; + } + else if(l3!=0 && problem_size<=32768) + { + // we have both L2 and L3, and problem is small enough to be kept in L2 + // Let's choose m such that lhs's block fit in 1/3 of L2 + actual_lm = l2; + max_mc = (numext::mini<Index>)(576,max_mc); + } + Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc); + if (mc > Traits::mr) mc -= mc % Traits::mr; + else if (mc==0) return; + m = (m%mc)==0 ? mc + : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1)))); + } + } +} + +template <typename Index> +inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n) +{ +#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES + if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) { + k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K); + m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M); + n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N); + return true; + } +#else + EIGEN_UNUSED_VARIABLE(k) + EIGEN_UNUSED_VARIABLE(m) + EIGEN_UNUSED_VARIABLE(n) +#endif + return false; +} + /** \brief Computes the blocking parameters for a m x k times k x n matrix product * * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension. @@ -62,48 +285,30 @@ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdi * * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar, * this function computes the blocking size parameters along the respective dimensions - * for matrix products and related algorithms. The blocking sizes depends on various - * parameters: - * - the L1 and L2 cache sizes, - * - the register level blocking sizes defined by gebp_traits, - * - the number of scalars that fit into a packet (when vectorization is enabled). + * for matrix products and related algorithms. + * + * The blocking size parameters may be evaluated: + * - either by a heuristic based on cache sizes; + * - or using fixed prescribed values (for testing purposes). * * \sa setCpuCacheSizes */ -template<typename LhsScalar, typename RhsScalar, int KcFactor, typename SizeType> -void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n) -{ - EIGEN_UNUSED_VARIABLE(n); - // Explanations: - // Let's recall the product algorithms form kc x nc horizontal panels B' on the rhs and - // mc x kc blocks A' on the lhs. A' has to fit into L2 cache. Moreover, B' is processed - // per kc x nr vertical small panels where nr is the blocking size along the n dimension - // at the register level. For vectorization purpose, these small vertical panels are unpacked, - // e.g., each coefficient is replicated to fit a packet. This small vertical panel has to - // stay in L1 cache. - std::ptrdiff_t l1, l2; - typedef gebp_traits<LhsScalar,RhsScalar> Traits; - enum { - kdiv = KcFactor * 2 * Traits::nr - * Traits::RhsProgress * sizeof(RhsScalar), - mr = gebp_traits<LhsScalar,RhsScalar>::mr, - mr_mask = (0xffffffff/mr)*mr - }; - - manage_caching_sizes(GetAction, &l1, &l2); - k = std::min<SizeType>(k, l1/kdiv); - SizeType _m = k>0 ? 
l2/(4 * sizeof(LhsScalar) * k) : 0; - if(_m<m) m = _m & mr_mask; +template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index> +void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1) +{ + if (!useSpecificBlockingSizes(k, m, n)) { + evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads); + } } -template<typename LhsScalar, typename RhsScalar, typename SizeType> -inline void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n) +template<typename LhsScalar, typename RhsScalar, typename Index> +inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1) { - computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n); + computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads); } -#ifdef EIGEN_HAS_FUSE_CJMADD - #define MADD(CJ,A,B,C,T) C = CJ.pmadd(A,B,C); +#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD + #define CJMADD(CJ,A,B,C,T) C = CJ.pmadd(A,B,C); #else // FIXME (a bit overkill maybe ?) @@ -128,8 +333,8 @@ inline void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n) gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t); } - #define MADD(CJ,A,B,C,T) gebp_madd(CJ,A,B,C,T); -// #define MADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = padd(C,T); + #define CJMADD(CJ,A,B,C,T) gebp_madd(CJ,A,B,C,T); +// #define CJMADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = padd(C,T); #endif /* Vectorization logic @@ -148,7 +353,7 @@ class gebp_traits public: typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; - typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; + typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; enum { ConjLhs = _ConjLhs, @@ -160,16 +365,22 @@ public: NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, - // register block size along the N direction (must be either 2 or 4) - nr = NumberOfRegisters/4, + // register block size along the N direction must be 1 or 4 + nr = 4, // register block size along the M direction (currently, this one cannot be modified) - mr = 2 * LhsPacketSize, + default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize, +#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX) + // we assume 16 registers + // See bug 992, if the scalar type is not vectorizable but that EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined, + // then using 3*LhsPacketSize triggers non-implemented paths in syrk. + mr = Vectorizable ? 
3*LhsPacketSize : default_mr, +#else + mr = default_mr, +#endif - WorkSpaceFactor = nr * RhsPacketSize, - LhsProgress = LhsPacketSize, - RhsProgress = RhsPacketSize + RhsProgress = 1 }; typedef typename packet_traits<LhsScalar>::type _LhsPacket; @@ -186,36 +397,67 @@ public: { p = pset1<ResPacket>(ResScalar(0)); } - - EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b) + + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) + { + pbroadcast4(b, b0, b1, b2, b3); + } + +// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1) +// { +// pbroadcast2(b, b0, b1); +// } + + template<typename RhsPacketType> + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const + { + dest = pset1<RhsPacketType>(*b); + } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const { - for(DenseIndex k=0; k<n; k++) - pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]); + dest = ploadquad<RhsPacket>(b); } - EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + template<typename LhsPacketType> + EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const { - dest = pload<RhsPacket>(b); + dest = pload<LhsPacketType>(a); } - EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const + template<typename LhsPacketType> + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const { - dest = pload<LhsPacket>(a); + dest = ploadu<LhsPacketType>(a); } - EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const + template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType> + EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const { - tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp); + conj_helper<LhsPacketType,RhsPacketType,ConjLhs,ConjRhs> cj; + // It would be a lot cleaner to call pmadd all the time. Unfortunately if we + // let gcc allocate the register in which to store the result of the pmul + // (in the case where there is no FMA) gcc fails to figure out how to avoid + // spilling register. +#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD + EIGEN_UNUSED_VARIABLE(tmp); + c = cj.pmadd(a,b,c); +#else + tmp = b; tmp = cj.pmul(a,tmp); c = padd(c,tmp); +#endif } EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const { r = pmadd(c,alpha,r); } + + template<typename ResPacketHalf> + EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const + { + r = pmadd(c,alpha,r); + } -protected: -// conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj; -// conj_helper<LhsPacket,RhsPacket,ConjLhs,ConjRhs> pcj; }; template<typename RealScalar, bool _ConjLhs> @@ -224,7 +466,7 @@ class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false> public: typedef std::complex<RealScalar> LhsScalar; typedef RealScalar RhsScalar; - typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; + typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; enum { ConjLhs = _ConjLhs, @@ -235,12 +477,16 @@ public: ResPacketSize = Vectorizable ? 
packet_traits<ResScalar>::size : 1, NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, - nr = NumberOfRegisters/4, - mr = 2 * LhsPacketSize, - WorkSpaceFactor = nr*RhsPacketSize, + nr = 4, +#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX) + // we assume 16 registers + mr = 3*LhsPacketSize, +#else + mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize, +#endif LhsProgress = LhsPacketSize, - RhsProgress = RhsPacketSize + RhsProgress = 1 }; typedef typename packet_traits<LhsScalar>::type _LhsPacket; @@ -258,15 +504,14 @@ public: p = pset1<ResPacket>(ResScalar(0)); } - EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b) + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const { - for(DenseIndex k=0; k<n; k++) - pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]); + dest = pset1<RhsPacket>(*b); } - - EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const { - dest = pload<RhsPacket>(b); + dest = pset1<RhsPacket>(*b); } EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const @@ -274,6 +519,21 @@ public: dest = pload<LhsPacket>(a); } + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const + { + dest = ploadu<LhsPacket>(a); + } + + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) + { + pbroadcast4(b, b0, b1, b2, b3); + } + +// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1) +// { +// pbroadcast2(b, b0, b1); +// } + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const { madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type()); @@ -281,7 +541,12 @@ public: EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const { +#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD + EIGEN_UNUSED_VARIABLE(tmp); + c.v = pmadd(a.v,b,c.v); +#else tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp); +#endif } EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const @@ -298,6 +563,38 @@ protected: conj_helper<ResPacket,ResPacket,ConjLhs,false> cj; }; +template<typename Packet> +struct DoublePacket +{ + Packet first; + Packet second; +}; + +template<typename Packet> +DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b) +{ + DoublePacket<Packet> res; + res.first = padd(a.first, b.first); + res.second = padd(a.second,b.second); + return res; +} + +template<typename Packet> +const DoublePacket<Packet>& predux_downto4(const DoublePacket<Packet> &a) +{ + return a; +} + +template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; }; +// template<typename Packet> +// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b) +// { +// DoublePacket<Packet> res; +// res.first = padd(a.first, b.first); +// res.second = padd(a.second,b.second); +// return res; +// } + template<typename RealScalar, bool _ConjLhs, bool _ConjRhs> class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs > { @@ -314,60 +611,80 @@ public: && packet_traits<Scalar>::Vectorizable, RealPacketSize = 
Vectorizable ? packet_traits<RealScalar>::size : 1, ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1, - - nr = 2, - mr = 2 * ResPacketSize, - WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr, + LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1, + RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1, + + // FIXME: should depend on NumberOfRegisters + nr = 4, + mr = ResPacketSize, LhsProgress = ResPacketSize, - RhsProgress = Vectorizable ? 2*ResPacketSize : 1 + RhsProgress = 1 }; typedef typename packet_traits<RealScalar>::type RealPacket; typedef typename packet_traits<Scalar>::type ScalarPacket; - struct DoublePacket - { - RealPacket first; - RealPacket second; - }; + typedef DoublePacket<RealPacket> DoublePacketType; typedef typename conditional<Vectorizable,RealPacket, Scalar>::type LhsPacket; - typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type RhsPacket; + typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket; typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket; - typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type AccPacket; + typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket; EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); } - EIGEN_STRONG_INLINE void initAcc(DoublePacket& p) + EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p) { p.first = pset1<RealPacket>(RealScalar(0)); p.second = pset1<RealPacket>(RealScalar(0)); } - /* Unpack the rhs coeff such that each complex coefficient is spread into - * two packects containing respectively the real and imaginary coefficient - * duplicated as many time as needed: (x+iy) => [x, ..., x] [y, ..., y] - */ - EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const Scalar* rhs, Scalar* b) + // Scalar path + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { - for(DenseIndex k=0; k<n; k++) - { - if(Vectorizable) - { - pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+0], real(rhs[k])); - pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k])); - } - else - b[k] = rhs[k]; - } + dest = pset1<ResPacket>(*b); } - EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { dest = *b; } - - EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const + // Vectorized path + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const + { + dest.first = pset1<RealPacket>(real(*b)); + dest.second = pset1<RealPacket>(imag(*b)); + } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const + { + loadRhs(b,dest); + } + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const + { + eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4); + loadRhs(b,dest); + } + + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) { - dest.first = pload<RealPacket>((const RealScalar*)b); - dest.second = pload<RealPacket>((const RealScalar*)(b+ResPacketSize)); + // FIXME not sure that's the best way to implement it! + loadRhs(b+0, b0); + loadRhs(b+1, b1); + loadRhs(b+2, b2); + loadRhs(b+3, b3); + } + + // Vectorized path + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1) + { + // FIXME not sure that's the best way to implement it! 
+ loadRhs(b+0, b0); + loadRhs(b+1, b1); + } + + // Scalar path + EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1) + { + // FIXME not sure that's the best way to implement it! + loadRhs(b+0, b0); + loadRhs(b+1, b1); } // nothing special here @@ -376,7 +693,12 @@ public: dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a)); } - EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacket& c, RhsPacket& /*tmp*/) const + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const + { + dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a)); + } + + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const { c.first = padd(pmul(a,b.first), c.first); c.second = padd(pmul(a,b.second),c.second); @@ -389,7 +711,7 @@ public: EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; } - EIGEN_STRONG_INLINE void acc(const DoublePacket& c, const ResPacket& alpha, ResPacket& r) const + EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const { // assemble c ResPacket tmp; @@ -440,12 +762,12 @@ public: ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1, NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, + // FIXME: should depend on NumberOfRegisters nr = 4, - mr = 2*ResPacketSize, - WorkSpaceFactor = nr*RhsPacketSize, + mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize, LhsProgress = ResPacketSize, - RhsProgress = ResPacketSize + RhsProgress = 1 }; typedef typename packet_traits<LhsScalar>::type _LhsPacket; @@ -463,21 +785,38 @@ public: p = pset1<ResPacket>(ResScalar(0)); } - EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b) + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const { - for(DenseIndex k=0; k<n; k++) - pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]); + dest = pset1<RhsPacket>(*b); } - - EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + + void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) { - dest = pload<RhsPacket>(b); + pbroadcast4(b, b0, b1, b2, b3); } + +// EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1) +// { +// // FIXME not sure that's the best way to implement it! 
+// b0 = pload1<RhsPacket>(b+0); +// b1 = pload1<RhsPacket>(b+1); +// } EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { dest = ploaddup<LhsPacket>(a); } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4); + loadRhs(b,dest); + } + + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const + { + dest = ploaddup<LhsPacket>(a); + } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const { @@ -486,7 +825,13 @@ public: EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const { +#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD + EIGEN_UNUSED_VARIABLE(tmp); + c.v = pmadd(a,b.v,c.v); +#else tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp); +#endif + } EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const @@ -510,7 +855,7 @@ protected: * |real |cplx | no vectorization yet, would require to pack A with duplication * |cplx |real | easy vectorization */ -template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs> +template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs> struct gebp_kernel { typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits; @@ -520,6 +865,15 @@ struct gebp_kernel typedef typename Traits::ResPacket ResPacket; typedef typename Traits::AccPacket AccPacket; + typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits; + typedef typename SwappedTraits::ResScalar SResScalar; + typedef typename SwappedTraits::LhsPacket SLhsPacket; + typedef typename SwappedTraits::RhsPacket SRhsPacket; + typedef typename SwappedTraits::ResPacket SResPacket; + typedef typename SwappedTraits::AccPacket SAccPacket; + + typedef typename DataMapper::LinearMapper LinearMapper; + enum { Vectorizable = Traits::Vectorizable, LhsProgress = Traits::LhsProgress, @@ -528,571 +882,795 @@ struct gebp_kernel }; EIGEN_DONT_INLINE - void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha, - Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB=0); + void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, + Index rows, Index depth, Index cols, ResScalar alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; -template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs> +template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs> EIGEN_DONT_INLINE -void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs> - ::operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha, - Index strideA, Index strideB, Index offsetA, Index offsetB, RhsScalar* unpackedB) +void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs> + ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, + Index rows, Index depth, Index cols, ResScalar 
alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) { Traits traits; + SwappedTraits straits; if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj; -// conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj; - Index packet_cols = (cols/nr) * nr; - const Index peeled_mc = (rows/mr)*mr; - // FIXME: - const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= LhsProgress ? LhsProgress : 0); - const Index peeled_kc = (depth/4)*4; - - if(unpackedB==0) - unpackedB = const_cast<RhsScalar*>(blockB - strideB * nr * RhsProgress); - - // loops on each micro vertical panel of rhs (depth x nr) - for(Index j2=0; j2<packet_cols; j2+=nr) + Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0; + const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0; + const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0; + const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0; + enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell) + const Index peeled_kc = depth & ~(pk-1); + const Index prefetch_res_offset = 32/sizeof(ResScalar); +// const Index depth2 = depth & ~1; + + //---------- Process 3 * LhsProgress rows at once ---------- + // This corresponds to 3*LhsProgress x nr register blocks. + // Usually, make sense only with FMA + if(mr>=3*Traits::LhsProgress) { - traits.unpackRhs(depth*nr,&blockB[j2*strideB+offsetB*nr],unpackedB); - - // loops on each largest micro horizontal panel of lhs (mr x depth) - // => we select a mr x nr micro block of res which is entirely - // stored into mr/packet_size x nr registers. - for(Index i=0; i<peeled_mc; i+=mr) + // Here, the general idea is to loop on each largest micro horizontal panel of the lhs (3*Traits::LhsProgress x depth) + // and on each largest micro vertical panel of the rhs (depth * nr). + // Blocking sizes, i.e., 'depth' has been computed so that the micro horizontal panel of the lhs fit in L1. + // However, if depth is too small, we can extend the number of rows of these horizontal panels. + // This actual number of rows is computed as follow: + const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function. + // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size + // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess), + // or because we are testing specific blocking sizes. 
+ const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) )); + for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows) { - const LhsScalar* blA = &blockA[i*strideA+offsetA*mr]; - prefetch(&blA[0]); - - // gets res block as register - AccPacket C0, C1, C2, C3, C4, C5, C6, C7; - traits.initAcc(C0); - traits.initAcc(C1); - if(nr==4) traits.initAcc(C2); - if(nr==4) traits.initAcc(C3); - traits.initAcc(C4); - traits.initAcc(C5); - if(nr==4) traits.initAcc(C6); - if(nr==4) traits.initAcc(C7); - - ResScalar* r0 = &res[(j2+0)*resStride + i]; - ResScalar* r1 = r0 + resStride; - ResScalar* r2 = r1 + resStride; - ResScalar* r3 = r2 + resStride; - - prefetch(r0+16); - prefetch(r1+16); - prefetch(r2+16); - prefetch(r3+16); - - // performs "inner" product - // TODO let's check wether the folowing peeled loop could not be - // optimized via optimal prefetching from one loop to the other - const RhsScalar* blB = unpackedB; - for(Index k=0; k<peeled_kc; k+=4) + const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3); + for(Index j2=0; j2<packet_cols4; j2+=nr) { - if(nr==2) - { - LhsPacket A0, A1; - RhsPacket B_0; - RhsPacket T0; - -EIGEN_ASM_COMMENT("mybegin2"); - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[1*RhsProgress], B_0); - traits.madd(A0,B_0,C1,T0); - traits.madd(A1,B_0,C5,B_0); - - traits.loadLhs(&blA[2*LhsProgress], A0); - traits.loadLhs(&blA[3*LhsProgress], A1); - traits.loadRhs(&blB[2*RhsProgress], B_0); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[3*RhsProgress], B_0); - traits.madd(A0,B_0,C1,T0); - traits.madd(A1,B_0,C5,B_0); - - traits.loadLhs(&blA[4*LhsProgress], A0); - traits.loadLhs(&blA[5*LhsProgress], A1); - traits.loadRhs(&blB[4*RhsProgress], B_0); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[5*RhsProgress], B_0); - traits.madd(A0,B_0,C1,T0); - traits.madd(A1,B_0,C5,B_0); - - traits.loadLhs(&blA[6*LhsProgress], A0); - traits.loadLhs(&blA[7*LhsProgress], A1); - traits.loadRhs(&blB[6*RhsProgress], B_0); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[7*RhsProgress], B_0); - traits.madd(A0,B_0,C1,T0); - traits.madd(A1,B_0,C5,B_0); -EIGEN_ASM_COMMENT("myend"); - } - else + for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress) { -EIGEN_ASM_COMMENT("mybegin4"); - LhsPacket A0, A1; - RhsPacket B_0, B1, B2, B3; - RhsPacket T0; - - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.loadRhs(&blB[1*RhsProgress], B1); - - traits.madd(A0,B_0,C0,T0); - traits.loadRhs(&blB[2*RhsProgress], B2); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[3*RhsProgress], B3); - traits.loadRhs(&blB[4*RhsProgress], B_0); - traits.madd(A0,B1,C1,T0); - traits.madd(A1,B1,C5,B1); - traits.loadRhs(&blB[5*RhsProgress], B1); - traits.madd(A0,B2,C2,T0); - traits.madd(A1,B2,C6,B2); - traits.loadRhs(&blB[6*RhsProgress], B2); - traits.madd(A0,B3,C3,T0); - traits.loadLhs(&blA[2*LhsProgress], A0); - traits.madd(A1,B3,C7,B3); - traits.loadLhs(&blA[3*LhsProgress], A1); - traits.loadRhs(&blB[7*RhsProgress], B3); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[8*RhsProgress], B_0); - traits.madd(A0,B1,C1,T0); - 
traits.madd(A1,B1,C5,B1); - traits.loadRhs(&blB[9*RhsProgress], B1); - traits.madd(A0,B2,C2,T0); - traits.madd(A1,B2,C6,B2); - traits.loadRhs(&blB[10*RhsProgress], B2); - traits.madd(A0,B3,C3,T0); - traits.loadLhs(&blA[4*LhsProgress], A0); - traits.madd(A1,B3,C7,B3); - traits.loadLhs(&blA[5*LhsProgress], A1); - traits.loadRhs(&blB[11*RhsProgress], B3); - - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[12*RhsProgress], B_0); - traits.madd(A0,B1,C1,T0); - traits.madd(A1,B1,C5,B1); - traits.loadRhs(&blB[13*RhsProgress], B1); - traits.madd(A0,B2,C2,T0); - traits.madd(A1,B2,C6,B2); - traits.loadRhs(&blB[14*RhsProgress], B2); - traits.madd(A0,B3,C3,T0); - traits.loadLhs(&blA[6*LhsProgress], A0); - traits.madd(A1,B3,C7,B3); - traits.loadLhs(&blA[7*LhsProgress], A1); - traits.loadRhs(&blB[15*RhsProgress], B3); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.madd(A0,B1,C1,T0); - traits.madd(A1,B1,C5,B1); - traits.madd(A0,B2,C2,T0); - traits.madd(A1,B2,C6,B2); - traits.madd(A0,B3,C3,T0); - traits.madd(A1,B3,C7,B3); - } + + // We selected a 3*Traits::LhsProgress x nr micro block of res which is entirely + // stored into 3 x nr registers. + + const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)]; + prefetch(&blA[0]); + + // gets res block as register + AccPacket C0, C1, C2, C3, + C4, C5, C6, C7, + C8, C9, C10, C11; + traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3); + traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7); + traits.initAcc(C8); traits.initAcc(C9); traits.initAcc(C10); traits.initAcc(C11); + + LinearMapper r0 = res.getLinearMapper(i, j2 + 0); + LinearMapper r1 = res.getLinearMapper(i, j2 + 1); + LinearMapper r2 = res.getLinearMapper(i, j2 + 2); + LinearMapper r3 = res.getLinearMapper(i, j2 + 3); + + r0.prefetch(0); + r1.prefetch(0); + r2.prefetch(0); + r3.prefetch(0); + + // performs "inner" products + const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr]; + prefetch(&blB[0]); + LhsPacket A0, A1; - blB += 4*nr*RhsProgress; - blA += 4*mr; - } - // process remaining peeled loop - for(Index k=peeled_kc; k<depth; k++) - { - if(nr==2) + for(Index k=0; k<peeled_kc; k+=pk) { - LhsPacket A0, A1; - RhsPacket B_0; - RhsPacket T0; - - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[1*RhsProgress], B_0); - traits.madd(A0,B_0,C1,T0); - traits.madd(A1,B_0,C5,B_0); + EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4"); + RhsPacket B_0, T0; + LhsPacket A2; + +#define EIGEN_GEBP_ONESTEP(K) \ + do { \ + EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \ + EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \ + internal::prefetch(blA+(3*K+16)*LhsProgress); \ + if (EIGEN_ARCH_ARM) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \ + traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \ + traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \ + traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \ + traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, B_0); \ + traits.madd(A0, B_0, C0, T0); \ + traits.madd(A1, B_0, C4, T0); \ + traits.madd(A2, B_0, C8, B_0); \ + traits.loadRhs(blB + (1+4*K)*Traits::RhsProgress, B_0); \ + traits.madd(A0, B_0, C1, T0); \ + traits.madd(A1, B_0, C5, T0); \ + traits.madd(A2, B_0, C9, B_0); \ + traits.loadRhs(blB + (2+4*K)*Traits::RhsProgress, B_0); \ + traits.madd(A0, B_0, C2, T0); \ 
+ traits.madd(A1, B_0, C6, T0); \ + traits.madd(A2, B_0, C10, B_0); \ + traits.loadRhs(blB + (3+4*K)*Traits::RhsProgress, B_0); \ + traits.madd(A0, B_0, C3 , T0); \ + traits.madd(A1, B_0, C7, T0); \ + traits.madd(A2, B_0, C11, B_0); \ + EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \ + } while(false) + + internal::prefetch(blB); + EIGEN_GEBP_ONESTEP(0); + EIGEN_GEBP_ONESTEP(1); + EIGEN_GEBP_ONESTEP(2); + EIGEN_GEBP_ONESTEP(3); + EIGEN_GEBP_ONESTEP(4); + EIGEN_GEBP_ONESTEP(5); + EIGEN_GEBP_ONESTEP(6); + EIGEN_GEBP_ONESTEP(7); + + blB += pk*4*RhsProgress; + blA += pk*3*Traits::LhsProgress; + + EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4"); } - else + // process remaining peeled loop + for(Index k=peeled_kc; k<depth; k++) { - LhsPacket A0, A1; - RhsPacket B_0, B1, B2, B3; - RhsPacket T0; - - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.loadRhs(&blB[1*RhsProgress], B1); - - traits.madd(A0,B_0,C0,T0); - traits.loadRhs(&blB[2*RhsProgress], B2); - traits.madd(A1,B_0,C4,B_0); - traits.loadRhs(&blB[3*RhsProgress], B3); - traits.madd(A0,B1,C1,T0); - traits.madd(A1,B1,C5,B1); - traits.madd(A0,B2,C2,T0); - traits.madd(A1,B2,C6,B2); - traits.madd(A0,B3,C3,T0); - traits.madd(A1,B3,C7,B3); + RhsPacket B_0, T0; + LhsPacket A2; + EIGEN_GEBP_ONESTEP(0); + blB += 4*RhsProgress; + blA += 3*Traits::LhsProgress; } - blB += nr*RhsProgress; - blA += mr; - } +#undef EIGEN_GEBP_ONESTEP - if(nr==4) - { - ResPacket R0, R1, R2, R3, R4, R5, R6; + ResPacket R0, R1, R2; ResPacket alphav = pset1<ResPacket>(alpha); - R0 = ploadu<ResPacket>(r0); - R1 = ploadu<ResPacket>(r1); - R2 = ploadu<ResPacket>(r2); - R3 = ploadu<ResPacket>(r3); - R4 = ploadu<ResPacket>(r0 + ResPacketSize); - R5 = ploadu<ResPacket>(r1 + ResPacketSize); - R6 = ploadu<ResPacket>(r2 + ResPacketSize); + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + R2 = r0.loadPacket(2 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); - pstoreu(r0, R0); - R0 = ploadu<ResPacket>(r3 + ResPacketSize); - - traits.acc(C1, alphav, R1); - traits.acc(C2, alphav, R2); - traits.acc(C3, alphav, R3); - traits.acc(C4, alphav, R4); - traits.acc(C5, alphav, R5); - traits.acc(C6, alphav, R6); - traits.acc(C7, alphav, R0); - - pstoreu(r1, R1); - pstoreu(r2, R2); - pstoreu(r3, R3); - pstoreu(r0 + ResPacketSize, R4); - pstoreu(r1 + ResPacketSize, R5); - pstoreu(r2 + ResPacketSize, R6); - pstoreu(r3 + ResPacketSize, R0); + traits.acc(C4, alphav, R1); + traits.acc(C8, alphav, R2); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + r0.storePacket(2 * Traits::ResPacketSize, R2); + + R0 = r1.loadPacket(0 * Traits::ResPacketSize); + R1 = r1.loadPacket(1 * Traits::ResPacketSize); + R2 = r1.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C1, alphav, R0); + traits.acc(C5, alphav, R1); + traits.acc(C9, alphav, R2); + r1.storePacket(0 * Traits::ResPacketSize, R0); + r1.storePacket(1 * Traits::ResPacketSize, R1); + r1.storePacket(2 * Traits::ResPacketSize, R2); + + R0 = r2.loadPacket(0 * Traits::ResPacketSize); + R1 = r2.loadPacket(1 * Traits::ResPacketSize); + R2 = r2.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C6, alphav, R1); + traits.acc(C10, alphav, R2); + r2.storePacket(0 * Traits::ResPacketSize, R0); + r2.storePacket(1 * Traits::ResPacketSize, R1); + r2.storePacket(2 * Traits::ResPacketSize, R2); + + R0 = r3.loadPacket(0 * Traits::ResPacketSize); + 
R1 = r3.loadPacket(1 * Traits::ResPacketSize); + R2 = r3.loadPacket(2 * Traits::ResPacketSize); + traits.acc(C3, alphav, R0); + traits.acc(C7, alphav, R1); + traits.acc(C11, alphav, R2); + r3.storePacket(0 * Traits::ResPacketSize, R0); + r3.storePacket(1 * Traits::ResPacketSize, R1); + r3.storePacket(2 * Traits::ResPacketSize, R2); + } } - else + + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2<cols; j2++) { - ResPacket R0, R1, R4; + for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress) + { + // One column at a time + const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)]; + prefetch(&blA[0]); + + // gets res block as register + AccPacket C0, C4, C8; + traits.initAcc(C0); + traits.initAcc(C4); + traits.initAcc(C8); + + LinearMapper r0 = res.getLinearMapper(i, j2); + r0.prefetch(0); + + // performs "inner" products + const RhsScalar* blB = &blockB[j2*strideB+offsetB]; + LhsPacket A0, A1, A2; + + for(Index k=0; k<peeled_kc; k+=pk) + { + EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1"); + RhsPacket B_0; +#define EIGEN_GEBGP_ONESTEP(K) \ + do { \ + EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \ + EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \ + traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \ + traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \ + traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \ + traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \ + traits.madd(A0, B_0, C0, B_0); \ + traits.madd(A1, B_0, C4, B_0); \ + traits.madd(A2, B_0, C8, B_0); \ + EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \ + } while(false) + + EIGEN_GEBGP_ONESTEP(0); + EIGEN_GEBGP_ONESTEP(1); + EIGEN_GEBGP_ONESTEP(2); + EIGEN_GEBGP_ONESTEP(3); + EIGEN_GEBGP_ONESTEP(4); + EIGEN_GEBGP_ONESTEP(5); + EIGEN_GEBGP_ONESTEP(6); + EIGEN_GEBGP_ONESTEP(7); + + blB += pk*RhsProgress; + blA += pk*3*Traits::LhsProgress; + + EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1"); + } + + // process remaining peeled loop + for(Index k=peeled_kc; k<depth; k++) + { + RhsPacket B_0; + EIGEN_GEBGP_ONESTEP(0); + blB += RhsProgress; + blA += 3*Traits::LhsProgress; + } +#undef EIGEN_GEBGP_ONESTEP + ResPacket R0, R1, R2; ResPacket alphav = pset1<ResPacket>(alpha); - R0 = ploadu<ResPacket>(r0); - R1 = ploadu<ResPacket>(r1); - R4 = ploadu<ResPacket>(r0 + ResPacketSize); + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + R2 = r0.loadPacket(2 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); - pstoreu(r0, R0); - R0 = ploadu<ResPacket>(r1 + ResPacketSize); - traits.acc(C1, alphav, R1); - traits.acc(C4, alphav, R4); - traits.acc(C5, alphav, R0); - pstoreu(r1, R1); - pstoreu(r0 + ResPacketSize, R4); - pstoreu(r1 + ResPacketSize, R0); + traits.acc(C4, alphav, R1); + traits.acc(C8, alphav, R2); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + r0.storePacket(2 * Traits::ResPacketSize, R2); + } } - } - - if(rows-peeled_mc>=LhsProgress) + } + + //---------- Process 2 * LhsProgress rows at once ---------- + if(mr>=2*Traits::LhsProgress) + { + const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function. + // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size + // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess), + // or because we are testing specific blocking sizes. 
+ Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) )); + + for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows) { - Index i = peeled_mc; - const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress]; - prefetch(&blA[0]); - - // gets res block as register - AccPacket C0, C1, C2, C3; - traits.initAcc(C0); - traits.initAcc(C1); - if(nr==4) traits.initAcc(C2); - if(nr==4) traits.initAcc(C3); - - // performs "inner" product - const RhsScalar* blB = unpackedB; - for(Index k=0; k<peeled_kc; k+=4) + Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2); + for(Index j2=0; j2<packet_cols4; j2+=nr) { - if(nr==2) + for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress) { - LhsPacket A0; - RhsPacket B_0, B1; + + // We selected a 2*Traits::LhsProgress x nr micro block of res which is entirely + // stored into 2 x nr registers. + + const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)]; + prefetch(&blA[0]); + + // gets res block as register + AccPacket C0, C1, C2, C3, + C4, C5, C6, C7; + traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3); + traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7); + + LinearMapper r0 = res.getLinearMapper(i, j2 + 0); + LinearMapper r1 = res.getLinearMapper(i, j2 + 1); + LinearMapper r2 = res.getLinearMapper(i, j2 + 2); + LinearMapper r3 = res.getLinearMapper(i, j2 + 3); + + r0.prefetch(prefetch_res_offset); + r1.prefetch(prefetch_res_offset); + r2.prefetch(prefetch_res_offset); + r3.prefetch(prefetch_res_offset); + + // performs "inner" products + const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr]; + prefetch(&blB[0]); + LhsPacket A0, A1; - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.loadRhs(&blB[1*RhsProgress], B1); - traits.madd(A0,B_0,C0,B_0); - traits.loadRhs(&blB[2*RhsProgress], B_0); - traits.madd(A0,B1,C1,B1); - traits.loadLhs(&blA[1*LhsProgress], A0); - traits.loadRhs(&blB[3*RhsProgress], B1); - traits.madd(A0,B_0,C0,B_0); - traits.loadRhs(&blB[4*RhsProgress], B_0); - traits.madd(A0,B1,C1,B1); - traits.loadLhs(&blA[2*LhsProgress], A0); - traits.loadRhs(&blB[5*RhsProgress], B1); - traits.madd(A0,B_0,C0,B_0); - traits.loadRhs(&blB[6*RhsProgress], B_0); - traits.madd(A0,B1,C1,B1); - traits.loadLhs(&blA[3*LhsProgress], A0); - traits.loadRhs(&blB[7*RhsProgress], B1); - traits.madd(A0,B_0,C0,B_0); - traits.madd(A0,B1,C1,B1); - } - else + for(Index k=0; k<peeled_kc; k+=pk) { - LhsPacket A0; - RhsPacket B_0, B1, B2, B3; - - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.loadRhs(&blB[1*RhsProgress], B1); - - traits.madd(A0,B_0,C0,B_0); - traits.loadRhs(&blB[2*RhsProgress], B2); - traits.loadRhs(&blB[3*RhsProgress], B3); - traits.loadRhs(&blB[4*RhsProgress], B_0); - traits.madd(A0,B1,C1,B1); - traits.loadRhs(&blB[5*RhsProgress], B1); - traits.madd(A0,B2,C2,B2); - traits.loadRhs(&blB[6*RhsProgress], B2); - traits.madd(A0,B3,C3,B3); - traits.loadLhs(&blA[1*LhsProgress], A0); - traits.loadRhs(&blB[7*RhsProgress], B3); - traits.madd(A0,B_0,C0,B_0); - traits.loadRhs(&blB[8*RhsProgress], B_0); - traits.madd(A0,B1,C1,B1); - traits.loadRhs(&blB[9*RhsProgress], B1); - traits.madd(A0,B2,C2,B2); - traits.loadRhs(&blB[10*RhsProgress], B2); - traits.madd(A0,B3,C3,B3); - traits.loadLhs(&blA[2*LhsProgress], A0); - traits.loadRhs(&blB[11*RhsProgress], B3); - - 
traits.madd(A0,B_0,C0,B_0); - traits.loadRhs(&blB[12*RhsProgress], B_0); - traits.madd(A0,B1,C1,B1); - traits.loadRhs(&blB[13*RhsProgress], B1); - traits.madd(A0,B2,C2,B2); - traits.loadRhs(&blB[14*RhsProgress], B2); - traits.madd(A0,B3,C3,B3); - - traits.loadLhs(&blA[3*LhsProgress], A0); - traits.loadRhs(&blB[15*RhsProgress], B3); - traits.madd(A0,B_0,C0,B_0); - traits.madd(A0,B1,C1,B1); - traits.madd(A0,B2,C2,B2); - traits.madd(A0,B3,C3,B3); + EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4"); + RhsPacket B_0, B1, B2, B3, T0; + + // NOTE: the begin/end asm comments below work around bug 935! + // but they are not enough for gcc>=6 without FMA (bug 1637) + #if EIGEN_GNUC_AT_LEAST(6,0) && defined(EIGEN_VECTORIZE_SSE) + #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND __asm__ ("" : [a0] "+x,m" (A0),[a1] "+x,m" (A1)); + #else + #define EIGEN_GEBP_2PX4_SPILLING_WORKAROUND + #endif + #define EIGEN_GEBGP_ONESTEP(K) \ + do { \ + EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4"); \ + traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \ + traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \ + traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \ + traits.madd(A0, B_0, C0, T0); \ + traits.madd(A1, B_0, C4, B_0); \ + traits.madd(A0, B1, C1, T0); \ + traits.madd(A1, B1, C5, B1); \ + traits.madd(A0, B2, C2, T0); \ + traits.madd(A1, B2, C6, B2); \ + traits.madd(A0, B3, C3, T0); \ + traits.madd(A1, B3, C7, B3); \ + EIGEN_GEBP_2PX4_SPILLING_WORKAROUND \ + EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4"); \ + } while(false) + + internal::prefetch(blB+(48+0)); + EIGEN_GEBGP_ONESTEP(0); + EIGEN_GEBGP_ONESTEP(1); + EIGEN_GEBGP_ONESTEP(2); + EIGEN_GEBGP_ONESTEP(3); + internal::prefetch(blB+(48+16)); + EIGEN_GEBGP_ONESTEP(4); + EIGEN_GEBGP_ONESTEP(5); + EIGEN_GEBGP_ONESTEP(6); + EIGEN_GEBGP_ONESTEP(7); + + blB += pk*4*RhsProgress; + blA += pk*(2*Traits::LhsProgress); + + EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4"); } - - blB += nr*4*RhsProgress; - blA += 4*LhsProgress; - } - // process remaining peeled loop - for(Index k=peeled_kc; k<depth; k++) - { - if(nr==2) + // process remaining peeled loop + for(Index k=peeled_kc; k<depth; k++) { - LhsPacket A0; - RhsPacket B_0, B1; - - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.loadRhs(&blB[1*RhsProgress], B1); - traits.madd(A0,B_0,C0,B_0); - traits.madd(A0,B1,C1,B1); + RhsPacket B_0, B1, B2, B3, T0; + EIGEN_GEBGP_ONESTEP(0); + blB += 4*RhsProgress; + blA += 2*Traits::LhsProgress; } - else - { - LhsPacket A0; - RhsPacket B_0, B1, B2, B3; +#undef EIGEN_GEBGP_ONESTEP - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.loadRhs(&blB[1*RhsProgress], B1); - traits.loadRhs(&blB[2*RhsProgress], B2); - traits.loadRhs(&blB[3*RhsProgress], B3); + ResPacket R0, R1, R2, R3; + ResPacket alphav = pset1<ResPacket>(alpha); - traits.madd(A0,B_0,C0,B_0); - traits.madd(A0,B1,C1,B1); - traits.madd(A0,B2,C2,B2); - traits.madd(A0,B3,C3,B3); + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + R2 = r1.loadPacket(0 * Traits::ResPacketSize); + R3 = r1.loadPacket(1 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + traits.acc(C1, alphav, R2); + traits.acc(C5, alphav, R3); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + r1.storePacket(0 * Traits::ResPacketSize, R2); + r1.storePacket(1 * Traits::ResPacketSize, R3); + + R0 = r2.loadPacket(0 * 
Traits::ResPacketSize); + R1 = r2.loadPacket(1 * Traits::ResPacketSize); + R2 = r3.loadPacket(0 * Traits::ResPacketSize); + R3 = r3.loadPacket(1 * Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C6, alphav, R1); + traits.acc(C3, alphav, R2); + traits.acc(C7, alphav, R3); + r2.storePacket(0 * Traits::ResPacketSize, R0); + r2.storePacket(1 * Traits::ResPacketSize, R1); + r3.storePacket(0 * Traits::ResPacketSize, R2); + r3.storePacket(1 * Traits::ResPacketSize, R3); } - - blB += nr*RhsProgress; - blA += LhsProgress; } + + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2<cols; j2++) + { + for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress) + { + // One column at a time + const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)]; + prefetch(&blA[0]); - ResPacket R0, R1, R2, R3; - ResPacket alphav = pset1<ResPacket>(alpha); - - ResScalar* r0 = &res[(j2+0)*resStride + i]; - ResScalar* r1 = r0 + resStride; - ResScalar* r2 = r1 + resStride; - ResScalar* r3 = r2 + resStride; + // gets res block as register + AccPacket C0, C4; + traits.initAcc(C0); + traits.initAcc(C4); - R0 = ploadu<ResPacket>(r0); - R1 = ploadu<ResPacket>(r1); - if(nr==4) R2 = ploadu<ResPacket>(r2); - if(nr==4) R3 = ploadu<ResPacket>(r3); + LinearMapper r0 = res.getLinearMapper(i, j2); + r0.prefetch(prefetch_res_offset); - traits.acc(C0, alphav, R0); - traits.acc(C1, alphav, R1); - if(nr==4) traits.acc(C2, alphav, R2); - if(nr==4) traits.acc(C3, alphav, R3); + // performs "inner" products + const RhsScalar* blB = &blockB[j2*strideB+offsetB]; + LhsPacket A0, A1; - pstoreu(r0, R0); - pstoreu(r1, R1); - if(nr==4) pstoreu(r2, R2); - if(nr==4) pstoreu(r3, R3); - } - for(Index i=peeled_mc2; i<rows; i++) - { - const LhsScalar* blA = &blockA[i*strideA+offsetA]; - prefetch(&blA[0]); - - // gets a 1 x nr res block as registers - ResScalar C0(0), C1(0), C2(0), C3(0); - // TODO directly use blockB ??? 
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr]; - for(Index k=0; k<depth; k++) - { - if(nr==2) + for(Index k=0; k<peeled_kc; k+=pk) { - LhsScalar A0; - RhsScalar B_0, B1; - - A0 = blA[k]; - B_0 = blB[0]; - B1 = blB[1]; - MADD(cj,A0,B_0,C0,B_0); - MADD(cj,A0,B1,C1,B1); + EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX1"); + RhsPacket B_0, B1; + +#define EIGEN_GEBGP_ONESTEP(K) \ + do { \ + EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX1"); \ + EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \ + traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \ + traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \ + traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \ + traits.madd(A0, B_0, C0, B1); \ + traits.madd(A1, B_0, C4, B_0); \ + EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1"); \ + } while(false) + + EIGEN_GEBGP_ONESTEP(0); + EIGEN_GEBGP_ONESTEP(1); + EIGEN_GEBGP_ONESTEP(2); + EIGEN_GEBGP_ONESTEP(3); + EIGEN_GEBGP_ONESTEP(4); + EIGEN_GEBGP_ONESTEP(5); + EIGEN_GEBGP_ONESTEP(6); + EIGEN_GEBGP_ONESTEP(7); + + blB += pk*RhsProgress; + blA += pk*2*Traits::LhsProgress; + + EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1"); } - else + + // process remaining peeled loop + for(Index k=peeled_kc; k<depth; k++) { - LhsScalar A0; - RhsScalar B_0, B1, B2, B3; - - A0 = blA[k]; - B_0 = blB[0]; - B1 = blB[1]; - B2 = blB[2]; - B3 = blB[3]; - - MADD(cj,A0,B_0,C0,B_0); - MADD(cj,A0,B1,C1,B1); - MADD(cj,A0,B2,C2,B2); - MADD(cj,A0,B3,C3,B3); + RhsPacket B_0, B1; + EIGEN_GEBGP_ONESTEP(0); + blB += RhsProgress; + blA += 2*Traits::LhsProgress; } +#undef EIGEN_GEBGP_ONESTEP + ResPacket R0, R1; + ResPacket alphav = pset1<ResPacket>(alpha); - blB += nr; + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r0.loadPacket(1 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r0.storePacket(1 * Traits::ResPacketSize, R1); + } } - res[(j2+0)*resStride + i] += alpha*C0; - res[(j2+1)*resStride + i] += alpha*C1; - if(nr==4) res[(j2+2)*resStride + i] += alpha*C2; - if(nr==4) res[(j2+3)*resStride + i] += alpha*C3; } } - // process remaining rhs/res columns one at a time - // => do the same but with nr==1 - for(Index j2=packet_cols; j2<cols; j2++) + //---------- Process 1 * LhsProgress rows at once ---------- + if(mr>=1*Traits::LhsProgress) { - // unpack B - traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB); - - for(Index i=0; i<peeled_mc; i+=mr) + // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth) + for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress) { - const LhsScalar* blA = &blockA[i*strideA+offsetA*mr]; - prefetch(&blA[0]); + // loops on each largest micro vertical panel of rhs (depth * nr) + for(Index j2=0; j2<packet_cols4; j2+=nr) + { + // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely + // stored into 1 x nr registers. 
+ + const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)]; + prefetch(&blA[0]); + + // gets res block as register + AccPacket C0, C1, C2, C3; + traits.initAcc(C0); + traits.initAcc(C1); + traits.initAcc(C2); + traits.initAcc(C3); + + LinearMapper r0 = res.getLinearMapper(i, j2 + 0); + LinearMapper r1 = res.getLinearMapper(i, j2 + 1); + LinearMapper r2 = res.getLinearMapper(i, j2 + 2); + LinearMapper r3 = res.getLinearMapper(i, j2 + 3); + + r0.prefetch(prefetch_res_offset); + r1.prefetch(prefetch_res_offset); + r2.prefetch(prefetch_res_offset); + r3.prefetch(prefetch_res_offset); + + // performs "inner" products + const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr]; + prefetch(&blB[0]); + LhsPacket A0; + + for(Index k=0; k<peeled_kc; k+=pk) + { + EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4"); + RhsPacket B_0, B1, B2, B3; + +#define EIGEN_GEBGP_ONESTEP(K) \ + do { \ + EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4"); \ + EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \ + traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \ + traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3); \ + traits.madd(A0, B_0, C0, B_0); \ + traits.madd(A0, B1, C1, B1); \ + traits.madd(A0, B2, C2, B2); \ + traits.madd(A0, B3, C3, B3); \ + EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4"); \ + } while(false) + + internal::prefetch(blB+(48+0)); + EIGEN_GEBGP_ONESTEP(0); + EIGEN_GEBGP_ONESTEP(1); + EIGEN_GEBGP_ONESTEP(2); + EIGEN_GEBGP_ONESTEP(3); + internal::prefetch(blB+(48+16)); + EIGEN_GEBGP_ONESTEP(4); + EIGEN_GEBGP_ONESTEP(5); + EIGEN_GEBGP_ONESTEP(6); + EIGEN_GEBGP_ONESTEP(7); + + blB += pk*4*RhsProgress; + blA += pk*1*LhsProgress; + + EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4"); + } + // process remaining peeled loop + for(Index k=peeled_kc; k<depth; k++) + { + RhsPacket B_0, B1, B2, B3; + EIGEN_GEBGP_ONESTEP(0); + blB += 4*RhsProgress; + blA += 1*LhsProgress; + } +#undef EIGEN_GEBGP_ONESTEP - // TODO move the res loads to the stores + ResPacket R0, R1; + ResPacket alphav = pset1<ResPacket>(alpha); - // get res block as registers - AccPacket C0, C4; - traits.initAcc(C0); - traits.initAcc(C4); + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + R1 = r1.loadPacket(0 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C1, alphav, R1); + r0.storePacket(0 * Traits::ResPacketSize, R0); + r1.storePacket(0 * Traits::ResPacketSize, R1); + + R0 = r2.loadPacket(0 * Traits::ResPacketSize); + R1 = r3.loadPacket(0 * Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C3, alphav, R1); + r2.storePacket(0 * Traits::ResPacketSize, R0); + r3.storePacket(0 * Traits::ResPacketSize, R1); + } - const RhsScalar* blB = unpackedB; - for(Index k=0; k<depth; k++) + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2<cols; j2++) { - LhsPacket A0, A1; - RhsPacket B_0; - RhsPacket T0; - - traits.loadLhs(&blA[0*LhsProgress], A0); - traits.loadLhs(&blA[1*LhsProgress], A1); - traits.loadRhs(&blB[0*RhsProgress], B_0); - traits.madd(A0,B_0,C0,T0); - traits.madd(A1,B_0,C4,B_0); + // One column at a time + const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)]; + prefetch(&blA[0]); - blB += RhsProgress; - blA += 2*LhsProgress; - } - ResPacket R0, R4; - ResPacket alphav = pset1<ResPacket>(alpha); + // gets res block as register + AccPacket C0; + traits.initAcc(C0); - ResScalar* r0 = &res[(j2+0)*resStride + i]; + LinearMapper r0 = res.getLinearMapper(i, j2); - R0 = ploadu<ResPacket>(r0); - R4 = 
ploadu<ResPacket>(r0+ResPacketSize); + // performs "inner" products + const RhsScalar* blB = &blockB[j2*strideB+offsetB]; + LhsPacket A0; - traits.acc(C0, alphav, R0); - traits.acc(C4, alphav, R4); + for(Index k=0; k<peeled_kc; k+=pk) + { + EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1"); + RhsPacket B_0; + +#define EIGEN_GEBGP_ONESTEP(K) \ + do { \ + EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1"); \ + EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \ + traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \ + traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \ + traits.madd(A0, B_0, C0, B_0); \ + EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1"); \ + } while(false); + + EIGEN_GEBGP_ONESTEP(0); + EIGEN_GEBGP_ONESTEP(1); + EIGEN_GEBGP_ONESTEP(2); + EIGEN_GEBGP_ONESTEP(3); + EIGEN_GEBGP_ONESTEP(4); + EIGEN_GEBGP_ONESTEP(5); + EIGEN_GEBGP_ONESTEP(6); + EIGEN_GEBGP_ONESTEP(7); + + blB += pk*RhsProgress; + blA += pk*1*Traits::LhsProgress; + + EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1"); + } - pstoreu(r0, R0); - pstoreu(r0+ResPacketSize, R4); + // process remaining peeled loop + for(Index k=peeled_kc; k<depth; k++) + { + RhsPacket B_0; + EIGEN_GEBGP_ONESTEP(0); + blB += RhsProgress; + blA += 1*Traits::LhsProgress; + } +#undef EIGEN_GEBGP_ONESTEP + ResPacket R0; + ResPacket alphav = pset1<ResPacket>(alpha); + R0 = r0.loadPacket(0 * Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + r0.storePacket(0 * Traits::ResPacketSize, R0); + } } - if(rows-peeled_mc>=LhsProgress) + } + //---------- Process remaining rows, 1 at once ---------- + if(peeled_mc1<rows) + { + // loop on each panel of the rhs + for(Index j2=0; j2<packet_cols4; j2+=nr) { - Index i = peeled_mc; - const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress]; - prefetch(&blA[0]); - - AccPacket C0; - traits.initAcc(C0); - - const RhsScalar* blB = unpackedB; - for(Index k=0; k<depth; k++) + // loop on each row of the lhs (1*LhsProgress x depth) + for(Index i=peeled_mc1; i<rows; i+=1) { - LhsPacket A0; - RhsPacket B_0; - traits.loadLhs(blA, A0); - traits.loadRhs(blB, B_0); - traits.madd(A0, B_0, C0, B_0); - blB += RhsProgress; - blA += LhsProgress; + const LhsScalar* blA = &blockA[i*strideA+offsetA]; + prefetch(&blA[0]); + const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr]; + + // The following piece of code wont work for 512 bit registers + // Moreover, if LhsProgress==8 it assumes that there is a half packet of the same size + // as nr (which is currently 4) for the return type. 
+ const int SResPacketHalfSize = unpacket_traits<typename unpacket_traits<SResPacket>::half>::size; + if ((SwappedTraits::LhsProgress % 4) == 0 && + (SwappedTraits::LhsProgress <= 8) && + (SwappedTraits::LhsProgress!=8 || SResPacketHalfSize==nr)) + { + SAccPacket C0, C1, C2, C3; + straits.initAcc(C0); + straits.initAcc(C1); + straits.initAcc(C2); + straits.initAcc(C3); + + const Index spk = (std::max)(1,SwappedTraits::LhsProgress/4); + const Index endk = (depth/spk)*spk; + const Index endk4 = (depth/(spk*4))*(spk*4); + + Index k=0; + for(; k<endk4; k+=4*spk) + { + SLhsPacket A0,A1; + SRhsPacket B_0,B_1; + + straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0); + straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1); + + straits.loadRhsQuad(blA+0*spk, B_0); + straits.loadRhsQuad(blA+1*spk, B_1); + straits.madd(A0,B_0,C0,B_0); + straits.madd(A1,B_1,C1,B_1); + + straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0); + straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1); + straits.loadRhsQuad(blA+2*spk, B_0); + straits.loadRhsQuad(blA+3*spk, B_1); + straits.madd(A0,B_0,C2,B_0); + straits.madd(A1,B_1,C3,B_1); + + blB += 4*SwappedTraits::LhsProgress; + blA += 4*spk; + } + C0 = padd(padd(C0,C1),padd(C2,C3)); + for(; k<endk; k+=spk) + { + SLhsPacket A0; + SRhsPacket B_0; + + straits.loadLhsUnaligned(blB, A0); + straits.loadRhsQuad(blA, B_0); + straits.madd(A0,B_0,C0,B_0); + + blB += SwappedTraits::LhsProgress; + blA += spk; + } + if(SwappedTraits::LhsProgress==8) + { + // Special case where we have to first reduce the accumulation register C0 + typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf; + typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf; + typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SRhsPacket>::type SRhsPacketHalf; + typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf; + + SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2); + SResPacketHalf alphav = pset1<SResPacketHalf>(alpha); + + if(depth-endk>0) + { + // We have to handle the last row of the rhs which corresponds to a half-packet + SLhsPacketHalf a0; + SRhsPacketHalf b0; + straits.loadLhsUnaligned(blB, a0); + straits.loadRhs(blA, b0); + SAccPacketHalf c0 = predux_downto4(C0); + straits.madd(a0,b0,c0,b0); + straits.acc(c0, alphav, R); + } + else + { + straits.acc(predux_downto4(C0), alphav, R); + } + res.scatterPacket(i, j2, R); + } + else + { + SResPacket R = res.template gatherPacket<SResPacket>(i, j2); + SResPacket alphav = pset1<SResPacket>(alpha); + straits.acc(C0, alphav, R); + res.scatterPacket(i, j2, R); + } + } + else // scalar path + { + // get a 1 x 4 res block as registers + ResScalar C0(0), C1(0), C2(0), C3(0); + + for(Index k=0; k<depth; k++) + { + LhsScalar A0; + RhsScalar B_0, B_1; + + A0 = blA[k]; + + B_0 = blB[0]; + B_1 = blB[1]; + CJMADD(cj,A0,B_0,C0, B_0); + CJMADD(cj,A0,B_1,C1, B_1); + + B_0 = blB[2]; + B_1 = blB[3]; + CJMADD(cj,A0,B_0,C2, B_0); + CJMADD(cj,A0,B_1,C3, B_1); + + blB += 4; + } + res(i, j2 + 0) += alpha * C0; + res(i, j2 + 1) += alpha * C1; + res(i, j2 + 2) += alpha * C2; + res(i, j2 + 3) += alpha * C3; + } } - - ResPacket alphav = pset1<ResPacket>(alpha); - ResPacket R0 = ploadu<ResPacket>(&res[(j2+0)*resStride + i]); - traits.acc(C0, 
alphav, R0); - pstoreu(&res[(j2+0)*resStride + i], R0); } - for(Index i=peeled_mc2; i<rows; i++) + // remaining columns + for(Index j2=packet_cols4; j2<cols; j2++) { - const LhsScalar* blA = &blockA[i*strideA+offsetA]; - prefetch(&blA[0]); - - // gets a 1 x 1 res block as registers - ResScalar C0(0); - // FIXME directly use blockB ?? - const RhsScalar* blB = &blockB[j2*strideB+offsetB]; - for(Index k=0; k<depth; k++) + // loop on each row of the lhs (1*LhsProgress x depth) + for(Index i=peeled_mc1; i<rows; i+=1) { - LhsScalar A0 = blA[k]; - RhsScalar B_0 = blB[k]; - MADD(cj, A0, B_0, C0, B_0); + const LhsScalar* blA = &blockA[i*strideA+offsetA]; + prefetch(&blA[0]); + // gets a 1 x 1 res block as registers + ResScalar C0(0); + const RhsScalar* blB = &blockB[j2*strideB+offsetB]; + for(Index k=0; k<depth; k++) + { + LhsScalar A0 = blA[k]; + RhsScalar B_0 = blB[k]; + CJMADD(cj, A0, B_0, C0, B_0); + } + res(i, j2) += alpha * C0; } - res[(j2+0)*resStride + i] += alpha*C0; } } } @@ -1114,81 +1692,193 @@ EIGEN_ASM_COMMENT("mybegin4"); // // 32 33 34 35 ... // 36 36 38 39 ... -template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode> -struct gemm_pack_lhs +template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode> +struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode> { - EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0); + typedef typename DataMapper::LinearMapper LinearMapper; + EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; -template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode> -EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder, Conjugate, PanelMode> - ::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset) +template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode> +EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode> + ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { typedef typename packet_traits<Scalar>::type Packet; enum { PacketSize = packet_traits<Scalar>::size }; EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); - EIGEN_UNUSED_VARIABLE(stride) - EIGEN_UNUSED_VARIABLE(offset) + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); - eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) ); + eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) ); conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj; - const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride); Index count = 0; - Index peeled_mc = (rows/Pack1)*Pack1; - for(Index i=0; i<peeled_mc; i+=Pack1) + + const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; + const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; + const Index peeled_mc1 = Pack1>=1*PacketSize ? 
(rows/(1*PacketSize))*(1*PacketSize) : 0; + const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1 + : Pack2>1 ? (rows/Pack2)*Pack2 : 0; + + Index i=0; + + // Pack 3 packets + if(Pack1>=3*PacketSize) { - if(PanelMode) count += Pack1 * offset; + for(; i<peeled_mc3; i+=3*PacketSize) + { + if(PanelMode) count += (3*PacketSize) * offset; - if(StorageOrder==ColMajor) + for(Index k=0; k<depth; k++) + { + Packet A, B, C; + A = lhs.loadPacket(i+0*PacketSize, k); + B = lhs.loadPacket(i+1*PacketSize, k); + C = lhs.loadPacket(i+2*PacketSize, k); + pstore(blockA+count, cj.pconj(A)); count+=PacketSize; + pstore(blockA+count, cj.pconj(B)); count+=PacketSize; + pstore(blockA+count, cj.pconj(C)); count+=PacketSize; + } + if(PanelMode) count += (3*PacketSize) * (stride-offset-depth); + } + } + // Pack 2 packets + if(Pack1>=2*PacketSize) + { + for(; i<peeled_mc2; i+=2*PacketSize) { + if(PanelMode) count += (2*PacketSize) * offset; + for(Index k=0; k<depth; k++) { - Packet A, B, C, D; - if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k)); - if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k)); - if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k)); - if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k)); - if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; } - if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; } - if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; } - if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; } + Packet A, B; + A = lhs.loadPacket(i+0*PacketSize, k); + B = lhs.loadPacket(i+1*PacketSize, k); + pstore(blockA+count, cj.pconj(A)); count+=PacketSize; + pstore(blockA+count, cj.pconj(B)); count+=PacketSize; } + if(PanelMode) count += (2*PacketSize) * (stride-offset-depth); } - else + } + // Pack 1 packets + if(Pack1>=1*PacketSize) + { + for(; i<peeled_mc1; i+=1*PacketSize) { + if(PanelMode) count += (1*PacketSize) * offset; + + for(Index k=0; k<depth; k++) + { + Packet A; + A = lhs.loadPacket(i+0*PacketSize, k); + pstore(blockA+count, cj.pconj(A)); + count+=PacketSize; + } + if(PanelMode) count += (1*PacketSize) * (stride-offset-depth); + } + } + // Pack scalars + if(Pack2<PacketSize && Pack2>1) + { + for(; i<peeled_mc0; i+=Pack2) + { + if(PanelMode) count += Pack2 * offset; + for(Index k=0; k<depth; k++) + for(Index w=0; w<Pack2; w++) + blockA[count++] = cj(lhs(i+w, k)); + + if(PanelMode) count += Pack2 * (stride-offset-depth); + } + } + for(; i<rows; i++) + { + if(PanelMode) count += offset; + for(Index k=0; k<depth; k++) + blockA[count++] = cj(lhs(i, k)); + if(PanelMode) count += (stride-offset-depth); + } +} + +template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode> +struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode> +{ + typedef typename DataMapper::LinearMapper LinearMapper; + EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode> +EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode> + ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + typedef typename packet_traits<Scalar>::type Packet; + enum { 
PacketSize = packet_traits<Scalar>::size }; + + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj; + Index count = 0; + +// const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; +// const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; +// const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; + + int pack = Pack1; + Index i = 0; + while(pack>0) + { + Index remaining_rows = rows-i; + Index peeled_mc = i+(remaining_rows/pack)*pack; + for(; i<peeled_mc; i+=pack) + { + if(PanelMode) count += pack * offset; + + const Index peeled_k = (depth/PacketSize)*PacketSize; + Index k=0; + if(pack>=PacketSize) + { + for(; k<peeled_k; k+=PacketSize) + { + for (Index m = 0; m < pack; m += PacketSize) + { + PacketBlock<Packet> kernel; + for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k); + ptranspose(kernel); + for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p])); + } + count += PacketSize*pack; + } + } + for(; k<depth; k++) { - // TODO add a vectorized transpose here Index w=0; - for(; w<Pack1-3; w+=4) + for(; w<pack-3; w+=4) { Scalar a(cj(lhs(i+w+0, k))), - b(cj(lhs(i+w+1, k))), - c(cj(lhs(i+w+2, k))), - d(cj(lhs(i+w+3, k))); + b(cj(lhs(i+w+1, k))), + c(cj(lhs(i+w+2, k))), + d(cj(lhs(i+w+3, k))); blockA[count++] = a; blockA[count++] = b; blockA[count++] = c; blockA[count++] = d; } - if(Pack1%4) - for(;w<Pack1;++w) + if(pack%4) + for(;w<pack;++w) blockA[count++] = cj(lhs(i+w, k)); } + + if(PanelMode) count += pack * (stride-offset-depth); } - if(PanelMode) count += Pack1 * (stride-offset-depth); - } - if(rows-peeled_mc>=Pack2) - { - if(PanelMode) count += Pack2*offset; - for(Index k=0; k<depth; k++) - for(Index w=0; w<Pack2; w++) - blockA[count++] = cj(lhs(peeled_mc+w, k)); - if(PanelMode) count += Pack2 * (stride-offset-depth); - peeled_mc += Pack2; + + pack -= PacketSize; + if(pack<Pack2 && (pack+PacketSize)!=Pack2) + pack = Pack2; } - for(Index i=peeled_mc; i<rows; i++) + + for(; i<rows; i++) { if(PanelMode) count += offset; for(Index k=0; k<depth; k++) @@ -1204,53 +1894,123 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder, // 4 5 6 7 16 17 18 19 25 28 // 8 9 10 11 20 21 22 23 26 29 // . . . . . . . . . . 
-template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode> -struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode> +template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode> +struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> { typedef typename packet_traits<Scalar>::type Packet; + typedef typename DataMapper::LinearMapper LinearMapper; enum { PacketSize = packet_traits<Scalar>::size }; - EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0); + EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; -template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode> -EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode> - ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset) +template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode> +EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> + ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR"); - EIGEN_UNUSED_VARIABLE(stride) - EIGEN_UNUSED_VARIABLE(offset) + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj; - Index packet_cols = (cols/nr) * nr; + Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; + Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0; Index count = 0; - for(Index j2=0; j2<packet_cols; j2+=nr) + const Index peeled_k = (depth/PacketSize)*PacketSize; +// if(nr>=8) +// { +// for(Index j2=0; j2<packet_cols8; j2+=8) +// { +// // skip what we have before +// if(PanelMode) count += 8 * offset; +// const Scalar* b0 = &rhs[(j2+0)*rhsStride]; +// const Scalar* b1 = &rhs[(j2+1)*rhsStride]; +// const Scalar* b2 = &rhs[(j2+2)*rhsStride]; +// const Scalar* b3 = &rhs[(j2+3)*rhsStride]; +// const Scalar* b4 = &rhs[(j2+4)*rhsStride]; +// const Scalar* b5 = &rhs[(j2+5)*rhsStride]; +// const Scalar* b6 = &rhs[(j2+6)*rhsStride]; +// const Scalar* b7 = &rhs[(j2+7)*rhsStride]; +// Index k=0; +// if(PacketSize==8) // TODO enbale vectorized transposition for PacketSize==4 +// { +// for(; k<peeled_k; k+=PacketSize) { +// PacketBlock<Packet> kernel; +// for (int p = 0; p < PacketSize; ++p) { +// kernel.packet[p] = ploadu<Packet>(&rhs[(j2+p)*rhsStride+k]); +// } +// ptranspose(kernel); +// for (int p = 0; p < PacketSize; ++p) { +// pstoreu(blockB+count, cj.pconj(kernel.packet[p])); +// count+=PacketSize; +// } +// } +// } +// for(; k<depth; k++) +// { +// blockB[count+0] = cj(b0[k]); +// blockB[count+1] = cj(b1[k]); +// blockB[count+2] = cj(b2[k]); +// blockB[count+3] = cj(b3[k]); +// blockB[count+4] = cj(b4[k]); +// blockB[count+5] = cj(b5[k]); +// blockB[count+6] = cj(b6[k]); +// blockB[count+7] = cj(b7[k]); +// count += 8; +// } +// // skip what we have after +// if(PanelMode) count += 8 * (stride-offset-depth); +// } +// } + + if(nr>=4) { - // skip what we have before - if(PanelMode) count += nr * offset; - const Scalar* b0 = &rhs[(j2+0)*rhsStride]; - const Scalar* b1 = &rhs[(j2+1)*rhsStride]; - const Scalar* b2 = &rhs[(j2+2)*rhsStride]; - const Scalar* b3 = &rhs[(j2+3)*rhsStride]; - for(Index k=0; k<depth; k++) + for(Index j2=packet_cols8; j2<packet_cols4; j2+=4) { - blockB[count+0] = cj(b0[k]); - blockB[count+1] = cj(b1[k]); - if(nr==4) blockB[count+2] = cj(b2[k]); - if(nr==4) blockB[count+3] = cj(b3[k]); - count += nr; + // skip what we have before + if(PanelMode) count += 4 * offset; + const LinearMapper dm0 = rhs.getLinearMapper(0, j2 + 0); + const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1); + const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2); + const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3); + + Index k=0; + if((PacketSize%4)==0) // TODO enable vectorized transposition for PacketSize==2 ?? 
+ { + for(; k<peeled_k; k+=PacketSize) { + PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel; + kernel.packet[0] = dm0.loadPacket(k); + kernel.packet[1%PacketSize] = dm1.loadPacket(k); + kernel.packet[2%PacketSize] = dm2.loadPacket(k); + kernel.packet[3%PacketSize] = dm3.loadPacket(k); + ptranspose(kernel); + pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0])); + pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize])); + pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize])); + pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize])); + count+=4*PacketSize; + } + } + for(; k<depth; k++) + { + blockB[count+0] = cj(dm0(k)); + blockB[count+1] = cj(dm1(k)); + blockB[count+2] = cj(dm2(k)); + blockB[count+3] = cj(dm3(k)); + count += 4; + } + // skip what we have after + if(PanelMode) count += 4 * (stride-offset-depth); } - // skip what we have after - if(PanelMode) count += nr * (stride-offset-depth); } // copy the remaining columns one at a time (nr==1) - for(Index j2=packet_cols; j2<cols; ++j2) + for(Index j2=packet_cols4; j2<cols; ++j2) { if(PanelMode) count += offset; - const Scalar* b0 = &rhs[(j2+0)*rhsStride]; + const LinearMapper dm0 = rhs.getLinearMapper(0, j2); for(Index k=0; k<depth; k++) { - blockB[count] = cj(b0[k]); + blockB[count] = cj(dm0(k)); count += 1; } if(PanelMode) count += (stride-offset-depth); @@ -1258,48 +2018,93 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, Pan } // this version is optimized for row major matrices -template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode> -struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode> +template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode> +struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> { + typedef typename packet_traits<Scalar>::type Packet; + typedef typename DataMapper::LinearMapper LinearMapper; enum { PacketSize = packet_traits<Scalar>::size }; - EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0); + EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; -template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode> -EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode> - ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset) +template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode> +EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> + ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR"); - EIGEN_UNUSED_VARIABLE(stride) - EIGEN_UNUSED_VARIABLE(offset) + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj; - Index packet_cols = (cols/nr) * nr; + Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; + Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0; Index count = 0; - for(Index j2=0; j2<packet_cols; j2+=nr) + +// if(nr>=8) +// { +// for(Index j2=0; j2<packet_cols8; j2+=8) +// { +// // skip what we have before +// if(PanelMode) count += 8 * offset; +// for(Index k=0; k<depth; k++) +// { +// if (PacketSize==8) { +// Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]); +// pstoreu(blockB+count, cj.pconj(A)); +// } else if (PacketSize==4) { +// Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]); +// Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]); +// pstoreu(blockB+count, cj.pconj(A)); +// pstoreu(blockB+count+PacketSize, cj.pconj(B)); +// } else { +// const Scalar* b0 = &rhs[k*rhsStride + j2]; +// blockB[count+0] = cj(b0[0]); +// blockB[count+1] = cj(b0[1]); +// blockB[count+2] = cj(b0[2]); +// blockB[count+3] = cj(b0[3]); +// blockB[count+4] = cj(b0[4]); +// blockB[count+5] = cj(b0[5]); +// blockB[count+6] = cj(b0[6]); +// blockB[count+7] = cj(b0[7]); +// } +// count += 8; +// } +// // skip what we have after +// if(PanelMode) count += 8 * (stride-offset-depth); +// } +// } + if(nr>=4) { - // skip what we have before - if(PanelMode) count += nr * offset; - for(Index k=0; k<depth; k++) + for(Index j2=packet_cols8; j2<packet_cols4; j2+=4) { - const Scalar* b0 = &rhs[k*rhsStride + j2]; - blockB[count+0] = cj(b0[0]); - blockB[count+1] = cj(b0[1]); - if(nr==4) blockB[count+2] = cj(b0[2]); - if(nr==4) blockB[count+3] = cj(b0[3]); - count += nr; + // skip what we have before + if(PanelMode) count += 4 * offset; + for(Index k=0; k<depth; k++) + { + if (PacketSize==4) { + Packet A = rhs.loadPacket(k, j2); + pstoreu(blockB+count, cj.pconj(A)); + count += PacketSize; + } else { + const LinearMapper dm0 = rhs.getLinearMapper(k, j2); + blockB[count+0] = cj(dm0(0)); + blockB[count+1] = cj(dm0(1)); + blockB[count+2] = cj(dm0(2)); + blockB[count+3] = cj(dm0(3)); + count += 4; + } + } + // skip what we have after + if(PanelMode) count += 4 * (stride-offset-depth); } - // skip what we have after - if(PanelMode) count += nr * (stride-offset-depth); } // copy the remaining columns one at a time (nr==1) - for(Index j2=packet_cols; j2<cols; ++j2) + for(Index j2=packet_cols4; j2<cols; ++j2) { if(PanelMode) count += offset; - const Scalar* b0 = &rhs[j2]; for(Index k=0; k<depth; k++) { - blockB[count] = cj(b0[k*rhsStride]); + blockB[count] = cj(rhs(k, j2)); count += 1; } if(PanelMode) count += stride-offset-depth; @@ -1312,8 +2117,8 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, Pan * \sa setCpuCacheSize */ inline std::ptrdiff_t l1CacheSize() { - std::ptrdiff_t l1, l2; - internal::manage_caching_sizes(GetAction, &l1, &l2); + std::ptrdiff_t l1, l2, l3; + internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); return l1; } @@ -1321,19 +2126,29 @@ inline std::ptrdiff_t l1CacheSize() * \sa setCpuCacheSize */ inline std::ptrdiff_t l2CacheSize() { - std::ptrdiff_t l1, l2; - internal::manage_caching_sizes(GetAction, &l1, &l2); + std::ptrdiff_t l1, l2, l3; + internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); return l2; } +/** \returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size paramete\ +rs. +* \sa setCpuCacheSize */ +inline std::ptrdiff_t l3CacheSize() +{ + std::ptrdiff_t l1, l2, l3; + internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); + return l3; +} + /** Set the cpu L1 and L2 cache sizes (in bytes). * These values are use to adjust the size of the blocks * for the algorithms working per blocks. 
 *
 * \sa computeProductBlockingSizes */
-inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2)
+inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
 {
-  internal::manage_caching_sizes(SetAction, &l1, &l2);
+  internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);
 }
 
 } // end namespace Eigen
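For context, a minimal usage sketch (not part of the patch above) of the three-level cache API this change introduces. It relies only on functions visible in the diff: l1CacheSize(), l2CacheSize(), the new l3CacheSize(), and the three-argument setCpuCacheSizes(); the concrete byte values passed to the setter are illustrative only.

    #include <Eigen/Core>
    #include <cstdio>

    int main()
    {
      // Query the cache sizes Eigen currently uses for its blocking heuristics.
      std::ptrdiff_t l1 = Eigen::l1CacheSize();
      std::ptrdiff_t l2 = Eigen::l2CacheSize();
      std::ptrdiff_t l3 = Eigen::l3CacheSize(); // accessor added by this patch
      std::printf("L1=%td L2=%td L3=%td\n", l1, l2, l3);

      // Override the values feeding the blocking heuristic, e.g. when tuning
      // for a specific machine; the setter now takes all three levels.
      Eigen::setCpuCacheSizes(32*1024, 256*1024, 8*1024*1024);
      return 0;
    }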