From d5a795f67366db20a132cc70e4f0217f42372357 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Wed, 16 Apr 2014 17:05:11 +0200 Subject: [PATCH] New gebp kernel handling up to 3 packets x 4 register-level blocks. Huge speeup on Haswell. This changeset also introduce new vector functions: ploadquad and predux4. --- Eigen/src/Core/GenericPacketMath.h | 35 +- Eigen/src/Core/arch/AVX/Complex.h | 4 +- Eigen/src/Core/arch/AVX/PacketMath.h | 29 +- Eigen/src/Core/arch/AltiVec/PacketMath.h | 4 +- Eigen/src/Core/arch/NEON/Complex.h | 2 +- Eigen/src/Core/arch/NEON/PacketMath.h | 4 +- Eigen/src/Core/arch/SSE/Complex.h | 4 +- Eigen/src/Core/arch/SSE/PacketMath.h | 6 +- .../Core/products/GeneralBlockPanelKernel.h | 1430 +++++++++++------ Eigen/src/Core/products/GeneralMatrixMatrix.h | 121 +- Eigen/src/Core/products/Parallelizer.h | 25 +- .../Core/products/SelfadjointMatrixMatrix.h | 33 +- 12 files changed, 1064 insertions(+), 633 deletions(-) diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h index 3e5db1a88..147298009 100755 --- a/Eigen/src/Core/GenericPacketMath.h +++ b/Eigen/src/Core/GenericPacketMath.h @@ -161,14 +161,6 @@ pload(const typename unpacket_traits::type* from) { return *from; } template EIGEN_DEVICE_FUNC inline Packet ploadu(const typename unpacket_traits::type* from) { return *from; } -/** \internal \returns a packet with elements of \a *from duplicated. - * For instance, for a packet of 8 elements, 4 scalar will be read from \a *from and - * duplicated to form: {from[0],from[0],from[1],from[1],,from[2],from[2],,from[3],from[3]} - * Currently, this function is only used for scalar * complex products. - */ -template EIGEN_DEVICE_FUNC inline Packet -ploaddup(const typename unpacket_traits::type* from) { return *from; } - /** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ template EIGEN_DEVICE_FUNC inline Packet pset1(const typename unpacket_traits::type& a) { return a; } @@ -177,6 +169,24 @@ pset1(const typename unpacket_traits::type& a) { return a; } template EIGEN_DEVICE_FUNC inline Packet pload1(const typename unpacket_traits::type *a) { return pset1(*a); } +/** \internal \returns a packet with elements of \a *from duplicated. + * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and + * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]} + * Currently, this function is only used for scalar * complex products. + */ +template EIGEN_DEVICE_FUNC inline Packet +ploaddup(const typename unpacket_traits::type* from) { return *from; } + +/** \internal \returns a packet with elements of \a *from quadrupled. + * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and + * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]} + * Currently, this function is only used in matrix products. + * For packet-size smaller or equal to 4, this function is equivalent to pload1 + */ +template EIGEN_DEVICE_FUNC inline Packet +ploadquad(const typename unpacket_traits::type* from) +{ return pload1(from); } + /** \internal equivalent to * \code * a0 = pload1(a+0); @@ -249,6 +259,15 @@ preduxp(const Packet* vecs) { return vecs[0]; } template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux(const Packet& a) { return a; } +/** \internal \returns the sum of the elements of \a a by block of 4 elements. 
+ * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7} + * For packet-size smaller or equal to 4, this boils down to a noop. + */ +template EIGEN_DEVICE_FUNC inline +typename conditional<(unpacket_traits::size%8)==0,typename unpacket_traits::half,Packet>::type +predux4(const Packet& a) +{ return a; } + /** \internal \returns the product of the elements of \a a*/ template EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_mul(const Packet& a) { return a; } diff --git a/Eigen/src/Core/arch/AVX/Complex.h b/Eigen/src/Core/arch/AVX/Complex.h index cb16180c5..8f95a7be7 100644 --- a/Eigen/src/Core/arch/AVX/Complex.h +++ b/Eigen/src/Core/arch/AVX/Complex.h @@ -45,7 +45,7 @@ template<> struct packet_traits > : default_packet_traits }; }; -template<> struct unpacket_traits { typedef std::complex type; enum {size=4}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=4}; typedef Packet2cf half; }; template<> EIGEN_STRONG_INLINE Packet4cf padd(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); } template<> EIGEN_STRONG_INLINE Packet4cf psub(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); } @@ -271,7 +271,7 @@ template<> struct packet_traits > : default_packet_traits }; }; -template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; typedef Packet1cd half; }; template<> EIGEN_STRONG_INLINE Packet2cd padd(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); } template<> EIGEN_STRONG_INLINE Packet2cd psub(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); } diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h index 38f52ecc8..47e10f6da 100644 --- a/Eigen/src/Core/arch/AVX/PacketMath.h +++ b/Eigen/src/Core/arch/AVX/PacketMath.h @@ -83,9 +83,9 @@ template<> struct packet_traits : default_packet_traits }; */ -template<> struct unpacket_traits { typedef float type; enum {size=8}; }; -template<> struct unpacket_traits { typedef double type; enum {size=4}; }; -template<> struct unpacket_traits { typedef int type; enum {size=8}; }; +template<> struct unpacket_traits { typedef float type; typedef Packet4f half; enum {size=8}; }; +template<> struct unpacket_traits { typedef double type; typedef Packet2d half; enum {size=4}; }; +template<> struct unpacket_traits { typedef int type; typedef Packet4i half; enum {size=8}; }; template<> EIGEN_STRONG_INLINE Packet8f pset1(const float& from) { return _mm256_set1_ps(from); } template<> EIGEN_STRONG_INLINE Packet4d pset1(const double& from) { return _mm256_set1_pd(from); } @@ -141,7 +141,16 @@ template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& return _mm256_fmadd_ps(a,b,c); #endif } -template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) { return _mm256_fmadd_pd(a,b,c); } +template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) { +#if defined(__clang__) || defined(__GNUC__) + // see above + Packet4d res = c; + asm("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b)); + return res; +#else + return _mm256_fmadd_pd(a,b,c); +#endif +} #endif template<> EIGEN_STRONG_INLINE Packet8f pmin(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); } @@ -189,6 +198,13 @@ 
template<> EIGEN_STRONG_INLINE Packet4d ploaddup(const double* from) return _mm256_blend_pd(tmp1,_mm256_permute2f128_pd(tmp2,tmp2,1),12); } +// Loads 2 floats from memory a returns the packet {a0, a0 a0, a0, a1, a1, a1, a1} +template<> EIGEN_STRONG_INLINE Packet8f ploadquad(const float* from) +{ + Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from)); + return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1); +} + template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); } template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); } template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); } @@ -345,6 +361,11 @@ template<> EIGEN_STRONG_INLINE double predux(const Packet4d& a) return pfirst(_mm256_hadd_pd(tmp0,tmp0)); } +template<> EIGEN_STRONG_INLINE Packet4f predux4(const Packet8f& a) +{ + return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1)); +} + template<> EIGEN_STRONG_INLINE float predux_mul(const Packet8f& a) { Packet8f tmp; diff --git a/Eigen/src/Core/arch/AltiVec/PacketMath.h b/Eigen/src/Core/arch/AltiVec/PacketMath.h index 5d7a16f5c..16948264f 100755 --- a/Eigen/src/Core/arch/AltiVec/PacketMath.h +++ b/Eigen/src/Core/arch/AltiVec/PacketMath.h @@ -99,8 +99,8 @@ template<> struct packet_traits : default_packet_traits }; }; -template<> struct unpacket_traits { typedef float type; enum {size=4}; }; -template<> struct unpacket_traits { typedef int type; enum {size=4}; }; +template<> struct unpacket_traits { typedef float type; enum {size=4}; typedef Packet4f half; }; +template<> struct unpacket_traits { typedef int type; enum {size=4}; typedef Packet4i half; }; /* inline std::ostream & operator <<(std::ostream & s, const Packet4f & v) { diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h index e49c1a873..7ca76714f 100644 --- a/Eigen/src/Core/arch/NEON/Complex.h +++ b/Eigen/src/Core/arch/NEON/Complex.h @@ -47,7 +47,7 @@ template<> struct packet_traits > : default_packet_traits }; }; -template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; typedef Packet2cf half; }; template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) { diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h index fae7b55fc..83150507a 100644 --- a/Eigen/src/Core/arch/NEON/PacketMath.h +++ b/Eigen/src/Core/arch/NEON/PacketMath.h @@ -101,8 +101,8 @@ EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); } #endif -template<> struct unpacket_traits { typedef float type; enum {size=4}; }; -template<> struct unpacket_traits { typedef int type; enum {size=4}; }; +template<> struct unpacket_traits { typedef float type; enum {size=4}; typedef Packet4f half; }; +template<> struct unpacket_traits { typedef int type; enum {size=4}; typedef Packet4i half; }; template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return vdupq_n_f32(from); } template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { return vdupq_n_s32(from); } diff --git a/Eigen/src/Core/arch/SSE/Complex.h b/Eigen/src/Core/arch/SSE/Complex.h index e54ebbf90..715e5a13c 
100644 --- a/Eigen/src/Core/arch/SSE/Complex.h +++ b/Eigen/src/Core/arch/SSE/Complex.h @@ -49,7 +49,7 @@ template<> struct packet_traits > : default_packet_traits }; #endif -template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=2}; typedef Packet2cf half; }; template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); } template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); } @@ -296,7 +296,7 @@ template<> struct packet_traits > : default_packet_traits }; #endif -template<> struct unpacket_traits { typedef std::complex type; enum {size=1}; }; +template<> struct unpacket_traits { typedef std::complex type; enum {size=1}; typedef Packet1cd half; }; template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); } template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); } diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h index bc17726b4..89dfa6975 100755 --- a/Eigen/src/Core/arch/SSE/PacketMath.h +++ b/Eigen/src/Core/arch/SSE/PacketMath.h @@ -107,9 +107,9 @@ template<> struct packet_traits : default_packet_traits }; }; -template<> struct unpacket_traits { typedef float type; enum {size=4}; }; -template<> struct unpacket_traits { typedef double type; enum {size=2}; }; -template<> struct unpacket_traits { typedef int type; enum {size=4}; }; +template<> struct unpacket_traits { typedef float type; enum {size=4}; typedef Packet4f half; }; +template<> struct unpacket_traits { typedef double type; enum {size=2}; typedef Packet2d half; }; +template<> struct unpacket_traits { typedef int type; enum {size=4}; typedef Packet4i half; }; #if defined(_MSC_VER) && (_MSC_VER==1500) // Workaround MSVC 9 internal compiler error. diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h index d9e659c9a..df30fdd3e 100644 --- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -10,6 +10,12 @@ #ifndef EIGEN_GENERAL_BLOCK_PANEL_H #define EIGEN_GENERAL_BLOCK_PANEL_H +#ifdef USE_IACA +#include "iacaMarks.h" +#else +#define IACA_START +#define IACA_END +#endif namespace Eigen { @@ -92,12 +98,22 @@ void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n) }; manage_caching_sizes(GetAction, &l1, &l2); - k = std::min(k, l1/kdiv); - SizeType _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0; - if(_m(k, l1/kdiv); +// SizeType _m = k>0 ? 
l2/(4 * sizeof(LhsScalar) * k) : 0; +// if(_m(k,240); + n = std::min(n,3840/sizeof(RhsScalar)); + m = std::min(m,3840/sizeof(RhsScalar)); +#else + k = std::min(k,24); + n = std::min(n,384/sizeof(RhsScalar)); + m = std::min(m,384/sizeof(RhsScalar)); +#endif } template @@ -164,11 +180,15 @@ public: NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, - // register block size along the N direction (must be either 4 or 8) - nr = NumberOfRegisters/2, + // register block size along the N direction (must be either 2 or 4) + nr = 4,//NumberOfRegisters/4, // register block size along the M direction (currently, this one cannot be modified) - mr = LhsPacketSize, +#ifdef __FMA__ + mr = 3*LhsPacketSize, +#else + mr = 2*LhsPacketSize, +#endif LhsProgress = LhsPacketSize, RhsProgress = 1 @@ -198,23 +218,32 @@ public: { pbroadcast2(b, b0, b1); } - - EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + + template + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const { - dest = pset1(*b); + dest = pset1(*b); + } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + dest = ploadquad(b); } - EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const + template + EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const { - dest = pload(a); + dest = pload(a); } - EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const + template + EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const { - dest = ploadu(a); + dest = ploadu(a); } - EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const + template + EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const { // It would be a lot cleaner to call pmadd all the time. 
Unfortunately if we // let gcc allocate the register in which to store the result of the pmul @@ -232,6 +261,12 @@ public: { r = pmadd(c,alpha,r); } + + template + EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const + { + r = pmadd(c,alpha,r); + } protected: // conj_helper cj; @@ -281,6 +316,11 @@ public: { dest = pset1(*b); } + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + dest = pset1(*b); + } EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { @@ -346,7 +386,23 @@ DoublePacket padd(const DoublePacket &a, const DoublePacket +const DoublePacket& predux4(const DoublePacket &a) +{ + return a; +} + +template struct unpacket_traits > { typedef DoublePacket half; }; +// template +// DoublePacket pmadd(const DoublePacket &a, const DoublePacket &b) +// { +// DoublePacket res; +// res.first = padd(a.first, b.first); +// res.second = padd(a.second,b.second); +// return res; +// } + template class gebp_traits, std::complex, _ConjLhs, _ConjRhs > { @@ -404,6 +460,16 @@ public: dest.second = pset1(imag(*b)); } + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const + { + loadRhs(b,dest); + } + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const + { + eigen_internal_assert(unpacket_traits::size<=4); + loadRhs(b,dest); + } + // linking error if instantiated without being optimized out: void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3); @@ -619,26 +685,246 @@ void gebp_kernel if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; conj_helper cj; - Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0; - // Here we assume that mr==LhsProgress - const Index peeled_mc = (rows/mr)*mr; + const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0; + const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0; + const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0; enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell) const Index peeled_kc = depth & ~(pk-1); - const Index depth2 = depth & ~1; - - // loops on each micro vertical panel of rhs (depth x nr) - // First pass using depth x 8 panels - if(nr>=8) +// const Index depth2 = depth & ~1; + +// std::cout << mr << " " << peeled_mc3 << " " << peeled_mc2 << " " << peeled_mc1 << "\n"; + + + //---------- Process 3 * LhsProgress rows at once ---------- + // This corresponds to 3*LhsProgress x nr register blocks. + // Usually, make sense only with FMA + if(mr>=3*Traits::LhsProgress) { - for(Index j2=0; j2 we select a mr x nr micro block of res which is entirely - // stored into mr/packet_size x nr registers. 
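// A minimal sketch of the register-blocking idea behind this new kernel, written on
// plain scalars (not part of the patch; `micro_kernel_3x4`, the packing layout and the
// parameter names are hypothetical stand-ins). Three LHS "packets" of width P times
// nr=4 RHS columns give 12 accumulators (the C0..C11 registers of the kernel that
// follows), which together with the LHS loads and one broadcast RHS value fit the
// 16 SIMD registers of an AVX/FMA target.
template<int P>
void micro_kernel_3x4(const float* A,   // packed LHS: depth blocks of 3*P values
                      const float* B,   // packed RHS: depth blocks of 4 values
                      float* C,         // column-major result block
                      int depth, int ldc, float alpha)
{
  float acc[12][P] = {};                              // accumulators kept "in registers"
  for (int k = 0; k < depth; ++k)
    for (int j = 0; j < 4; ++j) {
      const float b = B[k*4 + j];                     // loadRhs / broadcast one rhs value
      for (int i = 0; i < 3; ++i)                     // three lhs packets
        for (int p = 0; p < P; ++p)
          acc[4*i + j][p] += A[k*3*P + i*P + p] * b;  // traits.madd -> one FMA each
    }
  for (int j = 0; j < 4; ++j)                         // traits.acc + pstoreu
    for (int i = 0; i < 3; ++i)
      for (int p = 0; p < P; ++p)
        C[j*ldc + i*P + p] += alpha * acc[4*i + j][p];
}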
- for(Index i=0; i(alpha); + + R0 = ploadu(r0+0*Traits::ResPacketSize); + R1 = ploadu(r0+1*Traits::ResPacketSize); + R2 = ploadu(r0+2*Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + traits.acc(C8, alphav, R2); + pstoreu(r0+0*Traits::ResPacketSize, R0); + pstoreu(r0+1*Traits::ResPacketSize, R1); + pstoreu(r0+2*Traits::ResPacketSize, R2); + + R0 = ploadu(r1+0*Traits::ResPacketSize); + R1 = ploadu(r1+1*Traits::ResPacketSize); + R2 = ploadu(r1+2*Traits::ResPacketSize); + traits.acc(C1, alphav, R0); + traits.acc(C5, alphav, R1); + traits.acc(C9, alphav, R2); + pstoreu(r1+0*Traits::ResPacketSize, R0); + pstoreu(r1+1*Traits::ResPacketSize, R1); + pstoreu(r1+2*Traits::ResPacketSize, R2); + + R0 = ploadu(r2+0*Traits::ResPacketSize); + R1 = ploadu(r2+1*Traits::ResPacketSize); + R2 = ploadu(r2+2*Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C6, alphav, R1); + traits.acc(C10, alphav, R2); + pstoreu(r2+0*Traits::ResPacketSize, R0); + pstoreu(r2+1*Traits::ResPacketSize, R1); + pstoreu(r2+2*Traits::ResPacketSize, R2); + + R0 = ploadu(r3+0*Traits::ResPacketSize); + R1 = ploadu(r3+1*Traits::ResPacketSize); + R2 = ploadu(r3+2*Traits::ResPacketSize); + traits.acc(C3, alphav, R0); + traits.acc(C7, alphav, R1); + traits.acc(C11, alphav, R2); + pstoreu(r3+0*Traits::ResPacketSize, R0); + pstoreu(r3+1*Traits::ResPacketSize, R1); + pstoreu(r3+2*Traits::ResPacketSize, R2); + } + + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2(alpha); + + R0 = ploadu(r0+0*Traits::ResPacketSize); + R1 = ploadu(r0+1*Traits::ResPacketSize); + R2 = ploadu(r0+2*Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + traits.acc(C8 , alphav, R2); + pstoreu(r0+0*Traits::ResPacketSize, R0); + pstoreu(r0+1*Traits::ResPacketSize, R1); + pstoreu(r0+2*Traits::ResPacketSize, R2); + } + } + } + + //---------- Process 2 * LhsProgress rows at once ---------- + if(mr>=2*Traits::LhsProgress) + { + // loops on each largest micro horizontal panel of lhs (2*LhsProgress x depth) + for(Index i=peeled_mc3; i traits.initAcc(C7); ResScalar* r0 = &res[(j2+0)*resStride + i]; + ResScalar* r1 = &res[(j2+1)*resStride + i]; + ResScalar* r2 = &res[(j2+2)*resStride + i]; + ResScalar* r3 = &res[(j2+3)*resStride + i]; + + internal::prefetch(r0); + internal::prefetch(r1); + internal::prefetch(r2); + internal::prefetch(r3); // performs "inner" products const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr]; - LhsPacket A0; - // uncomment for register prefetching - // LhsPacket A1; - // traits.loadLhs(blA, A0); + prefetch(&blB[0]); + LhsPacket A0, A1; + for(Index k=0; k(alpha); - - R0 = ploadu(r0+0*resStride); - R1 = ploadu(r0+1*resStride); - R2 = ploadu(r0+2*resStride); - R3 = ploadu(r0+3*resStride); - R4 = ploadu(r0+4*resStride); - R5 = ploadu(r0+5*resStride); - R6 = ploadu(r0+6*resStride); - traits.acc(C0, alphav, R0); - pstoreu(r0+0*resStride, R0); - R0 = ploadu(r0+7*resStride); - - traits.acc(C1, alphav, R1); - traits.acc(C2, alphav, R2); - traits.acc(C3, alphav, R3); - traits.acc(C4, alphav, R4); - traits.acc(C5, alphav, R5); - traits.acc(C6, alphav, R6); - traits.acc(C7, alphav, R0); - pstoreu(r0+1*resStride, R1); - pstoreu(r0+2*resStride, R2); - pstoreu(r0+3*resStride, R3); - pstoreu(r0+4*resStride, R4); - pstoreu(r0+5*resStride, R5); - pstoreu(r0+6*resStride, R6); - pstoreu(r0+7*resStride, R0); + R0 = ploadu(r0+0*Traits::ResPacketSize); + R1 = ploadu(r0+1*Traits::ResPacketSize); + R2 = ploadu(r1+0*Traits::ResPacketSize); + R3 = 
ploadu(r1+1*Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + traits.acc(C1, alphav, R2); + traits.acc(C5, alphav, R3); + pstoreu(r0+0*Traits::ResPacketSize, R0); + pstoreu(r0+1*Traits::ResPacketSize, R1); + pstoreu(r1+0*Traits::ResPacketSize, R2); + pstoreu(r1+1*Traits::ResPacketSize, R3); + + R0 = ploadu(r2+0*Traits::ResPacketSize); + R1 = ploadu(r2+1*Traits::ResPacketSize); + R2 = ploadu(r3+0*Traits::ResPacketSize); + R3 = ploadu(r3+1*Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C6, alphav, R1); + traits.acc(C3, alphav, R2); + traits.acc(C7, alphav, R3); + pstoreu(r2+0*Traits::ResPacketSize, R0); + pstoreu(r2+1*Traits::ResPacketSize, R1); + pstoreu(r3+0*Traits::ResPacketSize, R2); + pstoreu(r3+1*Traits::ResPacketSize, R3); } - // Deal with remaining rows of the lhs - // TODO we should vectorize if <= 8, and not strictly == - if(SwappedTraits::LhsProgress == 8) + // Deal with remaining columns of the rhs + for(Index j2=packet_cols4; j2 SwappedTraits; - typedef typename SwappedTraits::ResScalar SResScalar; - typedef typename SwappedTraits::LhsPacket SLhsPacket; - typedef typename SwappedTraits::RhsPacket SRhsPacket; - typedef typename SwappedTraits::ResPacket SResPacket; - typedef typename SwappedTraits::AccPacket SAccPacket; - SwappedTraits straits; - - Index rows2 = (rows & ~1); - for(Index i=peeled_mc; i(&res[j2*resStride + i], resStride); - SResPacket alphav = pset1(alpha); - straits.acc(padd(C0,C1), alphav, R); - pscatter(&res[j2*resStride + i], R, resStride); - - R = pgather(&res[j2*resStride + i + 1], resStride); - straits.acc(padd(C2,C3), alphav, R); - pscatter(&res[j2*resStride + i + 1], R, resStride); - - EIGEN_ASM_COMMENT("end_vectorized_multiplication_of_last_rows 8"); - } - if(rows2!=rows) - { - Index i = rows-1; - const LhsScalar* blA = &blockA[i*strideA+offsetA]; - prefetch(&blA[0]); - const RhsScalar* blB = &blockB[j2*strideB+offsetB*8]; - - EIGEN_ASM_COMMENT("begin_vectorized_multiplication_of_last_rows 8"); - - SAccPacket C0,C1; - straits.initAcc(C0); // even - straits.initAcc(C1); // odd - - for(Index k=0; k(&res[j2*resStride + i], resStride); - SResPacket alphav = pset1(alpha); - straits.acc(padd(C0,C1), alphav, R); - pscatter(&res[j2*resStride + i], R, resStride); - } - } - else - { - // Pure scalar path - for(Index i=peeled_mc; i(alpha); + + R0 = ploadu(r0+0*Traits::ResPacketSize); + R1 = ploadu(r0+1*Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C4, alphav, R1); + pstoreu(r0+0*Traits::ResPacketSize, R0); + pstoreu(r0+1*Traits::ResPacketSize, R1); } } } - - // Second pass using depth x 4 panels - // If nr==8, then we have at most one such panel - // TODO: with 16 registers, we coud optimize this part to leverage more pipelinining, - // for instance, by using a 2 packet * 4 kernel. Useful when the rhs is thin - if(nr>=4) + //---------- Process 1 * LhsProgress rows at once ---------- + if(mr>=1*Traits::LhsProgress) { - for(Index j2=packet_cols8; j2 we select a mr x 4 micro block of res which is entirely - // stored into mr/packet_size x 4 registers. 
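// The remaining-rows handling of this kernel relies on the two new vector functions
// introduced by this changeset, ploadquad and predux4. A reference sketch of their
// semantics on a plain std::array standing in for an 8-wide packet (the AVX versions
// use _mm_broadcast_ss / _mm256_extractf128_ps instead; this sketch is illustrative
// only and not part of the patch):
#include <array>

// ploadquad: read size/4 scalars and replicate each one 4 times, e.g.
// {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}
std::array<float,8> ploadquad_ref(const float* from)
{
  std::array<float,8> r;
  for (int i = 0; i < 8; ++i) r[i] = from[i / 4];
  return r;
}

// predux4: sum by blocks of 4, returning the half packet
// {a0+a4, a1+a5, a2+a6, a3+a7}
std::array<float,4> predux4_ref(const std::array<float,8>& a)
{
  std::array<float,4> r;
  for (int i = 0; i < 4; ++i) r[i] = a[i] + a[i + 4];
  return r;
}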
- for(Index i=0; i traits.initAcc(C3); ResScalar* r0 = &res[(j2+0)*resStride + i]; + ResScalar* r1 = &res[(j2+1)*resStride + i]; + ResScalar* r2 = &res[(j2+2)*resStride + i]; + ResScalar* r3 = &res[(j2+3)*resStride + i]; + + internal::prefetch(r0); + internal::prefetch(r1); + internal::prefetch(r2); + internal::prefetch(r3); // performs "inner" products - const RhsScalar* blB = &blockB[j2*strideB+offsetB*4]; + const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr]; + prefetch(&blB[0]); LhsPacket A0; + for(Index k=0; k(alpha); - - R0 = ploadu(r0+0*resStride); - R1 = ploadu(r0+1*resStride); - R2 = ploadu(r0+2*resStride); - traits.acc(C0, alphav, R0); - pstoreu(r0+0*resStride, R0); - R0 = ploadu(r0+3*resStride); - - traits.acc(C1, alphav, R1); - traits.acc(C2, alphav, R2); - traits.acc(C3, alphav, R0); - pstoreu(r0+1*resStride, R1); - pstoreu(r0+2*resStride, R2); - pstoreu(r0+3*resStride, R0); + R0 = ploadu(r0+0*Traits::ResPacketSize); + R1 = ploadu(r1+0*Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + traits.acc(C1, alphav, R1); + pstoreu(r0+0*Traits::ResPacketSize, R0); + pstoreu(r1+0*Traits::ResPacketSize, R1); + + R0 = ploadu(r2+0*Traits::ResPacketSize); + R1 = ploadu(r3+0*Traits::ResPacketSize); + traits.acc(C2, alphav, R0); + traits.acc(C3, alphav, R1); + pstoreu(r2+0*Traits::ResPacketSize, R0); + pstoreu(r3+0*Traits::ResPacketSize, R1); } - for(Index i=peeled_mc; i(alpha); + R0 = ploadu(r0+0*Traits::ResPacketSize); + traits.acc(C0, alphav, R0); + pstoreu(r0+0*Traits::ResPacketSize, R0); + } + } + } + //---------- Process remaining rows, 1 at once ---------- + { + // loop on each row of the lhs (1*LhsProgress x depth) + for(Index i=peeled_mc1; i::half,SResPacket>::type SResPacketHalf; + typedef typename conditional::half,SLhsPacket>::type SLhsPacketHalf; + typedef typename conditional::half,SRhsPacket>::type SRhsPacketHalf; + typedef typename conditional::half,SAccPacket>::type SAccPacketHalf; + + SResPacketHalf R = pgather(&res[j2*resStride + i], resStride); + SResPacketHalf alphav = pset1(alpha); + + if(depth-endk>0) + { + // We have to handle the last row of the rhs which corresponds to a half-packet + SLhsPacketHalf a0; + SRhsPacketHalf b0; + straits.loadLhsUnaligned(blB, a0); + straits.loadRhs(blA, b0); + SAccPacketHalf c0 = predux4(C0); + straits.madd(a0,b0,c0,b0); + straits.acc(c0, alphav, R); + } + else + { + straits.acc(predux4(C0), alphav, R); + } + pscatter(&res[j2*resStride + i], R, resStride); + } + else + { + SResPacket R = pgather(&res[j2*resStride + i], resStride); + SResPacket alphav = pset1(alpha); + straits.acc(C0, alphav, R); + pscatter(&res[j2*resStride + i], R, resStride); } - SResPacket R = pgather(&res[j2*resStride + i], resStride); - SResPacket alphav = pset1(alpha); - straits.acc(C0, alphav, R); - pscatter(&res[j2*resStride + i], R, resStride); - - EIGEN_ASM_COMMENT("end_vectorized_multiplication_of_last_rows 1x4"); } - else + else // scalar path { - // Pure scalar path - // gets a 1 x 4 res block as registers + // get a 1 x 4 res block as registers ResScalar C0(0), C1(0), C2(0), C3(0); for(Index k=0; k B_1 = blB[3]; MADD(cj,A0,B_0,C2, B_0); MADD(cj,A0,B_1,C3, B_1); - + blB += 4; } res[(j2+0)*resStride + i] += alpha*C0; @@ -1058,56 +1377,22 @@ void gebp_kernel res[(j2+3)*resStride + i] += alpha*C3; } } - } - } - - // process remaining rhs/res columns one at a time - for(Index j2=packet_cols4; j2(alpha); - ResScalar* r0 = &res[(j2+0)*resStride + i]; - R0 = ploadu(r0); - traits.acc(C0, alphav, R0); - pstoreu(r0, R0); - } - // pure scalar path - for(Index 
i=peeled_mc; i // // 32 33 34 35 ... // 36 36 38 39 ... -template -struct gemm_pack_lhs +template +struct gemm_pack_lhs { EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0); }; -template -EIGEN_DONT_INLINE void gemm_pack_lhs +template +EIGEN_DONT_INLINE void gemm_pack_lhs ::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset) { typedef typename packet_traits::type Packet; @@ -1146,87 +1431,174 @@ EIGEN_DONT_INLINE void gemm_pack_lhs=depth && offset<=stride)); - eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) ); + eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) ); conj_if::IsComplex && Conjugate> cj; - const_blas_data_mapper lhs(_lhs,lhsStride); + const_blas_data_mapper lhs(_lhs,lhsStride); Index count = 0; - Index peeled_mc = (rows/Pack1)*Pack1; - for(Index i=0; i=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; + const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; + const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; + + // Pack 3 packets + if(Pack1>=3*PacketSize) { - if(PanelMode) count += Pack1 * offset; - - if(StorageOrder==ColMajor) + for(Index i=0; i=1*PacketSize) A = ploadu(&lhs(i+0*PacketSize, k)); - if(Pack1>=2*PacketSize) B = ploadu(&lhs(i+1*PacketSize, k)); - if(Pack1>=3*PacketSize) C = ploadu(&lhs(i+2*PacketSize, k)); - if(Pack1>=4*PacketSize) D = ploadu(&lhs(i+3*PacketSize, k)); - if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; } - if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; } - if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; } - if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; } - } - else - { - if(Pack1>=1) blockA[count++] = cj(lhs(i+0, k)); - if(Pack1>=2) blockA[count++] = cj(lhs(i+1, k)); - if(Pack1>=3) blockA[count++] = cj(lhs(i+2, k)); - if(Pack1>=4) blockA[count++] = cj(lhs(i+3, k)); - } + Packet A, B, C; + A = ploadu(&lhs(i+0*PacketSize, k)); + B = ploadu(&lhs(i+1*PacketSize, k)); + C = ploadu(&lhs(i+2*PacketSize, k)); + pstore(blockA+count, cj.pconj(A)); count+=PacketSize; + pstore(blockA+count, cj.pconj(B)); count+=PacketSize; + pstore(blockA+count, cj.pconj(C)); count+=PacketSize; } + if(PanelMode) count += (3*PacketSize) * (stride-offset-depth); } - else + } + // Pack 2 packets + if(Pack1>=2*PacketSize) + { + for(Index i=peeled_mc3; i(&lhs(i+0*PacketSize, k)); + B = ploadu(&lhs(i+1*PacketSize, k)); + pstore(blockA+count, cj.pconj(A)); count+=PacketSize; + pstore(blockA+count, cj.pconj(B)); count+=PacketSize; + } + if(PanelMode) count += (2*PacketSize) * (stride-offset-depth); + } + } + // Pack 1 packets + if(Pack1>=1*PacketSize) + { + for(Index i=peeled_mc2; i(&lhs(i+0*PacketSize, k)); + pstore(blockA+count, cj.pconj(A)); + count+=PacketSize; + } + if(PanelMode) count += (1*PacketSize) * (stride-offset-depth); + } + } + // Pack scalars +// if(rows-peeled_mc>=Pack2) +// { +// if(PanelMode) count += Pack2*offset; +// for(Index k=0; k +struct gemm_pack_lhs +{ + EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +EIGEN_DONT_INLINE void gemm_pack_lhs 
+ ::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset) +{ + typedef typename packet_traits::type Packet; + enum { PacketSize = packet_traits::size }; + + EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(offset); + eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); + conj_if::IsComplex && Conjugate> cj; + const_blas_data_mapper lhs(_lhs,lhsStride); + Index count = 0; + +// const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; +// const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; +// const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; + + int pack_packets = Pack1/PacketSize; + Index i = 0; + while(pack_packets>0) + { + + Index remaining_rows = rows-i; + Index peeled_mc = i+(remaining_rows/(pack_packets*PacketSize))*(pack_packets*PacketSize); +// std::cout << "pack_packets = " << pack_packets << " from " << i << " to " << peeled_mc << "\n"; + for(; i kernel; - for (int p = 0; p < PacketSize; ++p) { - kernel.packet[p] = ploadu(&lhs(i+p+m, k)); - } + for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = ploadu(&lhs(i+p+m, k)); ptranspose(kernel); - for (int p = 0; p < PacketSize; ++p) { - pstore(blockA+count+m+Pack1*p, cj.pconj(kernel.packet[p])); - } + for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack_packets*PacketSize)*p, cj.pconj(kernel.packet[p])); } - count += PacketSize*Pack1; + count += PacketSize*(pack_packets*PacketSize); } - for(; k=Pack2) - { - if(PanelMode) count += Pack2*offset; - for(Index k=0; k=Pack2) +// { +// if(PanelMode) count += Pack2*offset; +// for(Index k=0; k=4 ? (cols/4) * 4 : 0; Index count = 0; const Index peeled_k = (depth/PacketSize)*PacketSize; - if(nr>=8) - { - for(Index j2=0; j2 kernel; - for (int p = 0; p < PacketSize; ++p) { - kernel.packet[p] = ploadu(&rhs[(j2+p)*rhsStride+k]); - } - ptranspose(kernel); - for (int p = 0; p < PacketSize; ++p) { - pstoreu(blockB+count, cj.pconj(kernel.packet[p])); - count+=PacketSize; - } - } - } - for(; k=8) +// { +// for(Index j2=0; j2 kernel; +// for (int p = 0; p < PacketSize; ++p) { +// kernel.packet[p] = ploadu(&rhs[(j2+p)*rhsStride+k]); +// } +// ptranspose(kernel); +// for (int p = 0; p < PacketSize; ++p) { +// pstoreu(blockB+count, cj.pconj(kernel.packet[p])); +// count+=PacketSize; +// } +// } +// } +// for(; k=4) { @@ -1383,39 +1755,39 @@ EIGEN_DONT_INLINE void gemm_pack_rhs=4 ? 
(cols/4) * 4 : 0; Index count = 0; - if(nr>=8) - { - for(Index j2=0; j2(&rhs[k*rhsStride + j2]); - pstoreu(blockB+count, cj.pconj(A)); - } else if (PacketSize==4) { - Packet A = ploadu(&rhs[k*rhsStride + j2]); - Packet B = ploadu(&rhs[k*rhsStride + j2 + PacketSize]); - pstoreu(blockB+count, cj.pconj(A)); - pstoreu(blockB+count+PacketSize, cj.pconj(B)); - } else { - const Scalar* b0 = &rhs[k*rhsStride + j2]; - blockB[count+0] = cj(b0[0]); - blockB[count+1] = cj(b0[1]); - blockB[count+2] = cj(b0[2]); - blockB[count+3] = cj(b0[3]); - blockB[count+4] = cj(b0[4]); - blockB[count+5] = cj(b0[5]); - blockB[count+6] = cj(b0[6]); - blockB[count+7] = cj(b0[7]); - } - count += 8; - } - // skip what we have after - if(PanelMode) count += 8 * (stride-offset-depth); - } - } +// if(nr>=8) +// { +// for(Index j2=0; j2(&rhs[k*rhsStride + j2]); +// pstoreu(blockB+count, cj.pconj(A)); +// } else if (PacketSize==4) { +// Packet A = ploadu(&rhs[k*rhsStride + j2]); +// Packet B = ploadu(&rhs[k*rhsStride + j2 + PacketSize]); +// pstoreu(blockB+count, cj.pconj(A)); +// pstoreu(blockB+count+PacketSize, cj.pconj(B)); +// } else { +// const Scalar* b0 = &rhs[k*rhsStride + j2]; +// blockB[count+0] = cj(b0[0]); +// blockB[count+1] = cj(b0[1]); +// blockB[count+2] = cj(b0[2]); +// blockB[count+3] = cj(b0[3]); +// blockB[count+4] = cj(b0[4]); +// blockB[count+5] = cj(b0[5]); +// blockB[count+6] = cj(b0[6]); +// blockB[count+7] = cj(b0[7]); +// } +// count += 8; +// } +// // skip what we have after +// if(PanelMode) count += 8 * (stride-offset-depth); +// } +// } if(nr>=4) { for(Index j2=packet_cols8; j2 struct general_matrix_matrix_product { + typedef gebp_traits Traits; + typedef typename scalar_product_traits::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, @@ -51,6 +53,8 @@ template< struct general_matrix_matrix_product { +typedef gebp_traits Traits; + typedef typename scalar_product_traits::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, @@ -63,11 +67,9 @@ static void run(Index rows, Index cols, Index depth, const_blas_data_mapper lhs(_lhs,lhsStride); const_blas_data_mapper rhs(_rhs,rhsStride); - typedef gebp_traits Traits; - Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction - //Index nc = blocking.nc(); // cache block size along the N direction + Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction gemm_pack_lhs pack_lhs; gemm_pack_rhs pack_rhs; @@ -80,66 +82,68 @@ static void run(Index rows, Index cols, Index depth, Index tid = omp_get_thread_num(); Index threads = omp_get_num_threads(); - std::size_t sizeA = kc*mc; - ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0); + LhsScalar* blockA = blocking.blockA(); + eigen_internal_assert(blockA!=0); - RhsScalar* blockB = blocking.blockB(); - eigen_internal_assert(blockB!=0); - + std::size_t sizeB = kc*nc; + ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); + // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, - // let's start by packing A'. - pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc); + // let's start by packing B'. 
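// A condensed sketch of the reworked producer/consumer handshake in the parallel path
// (not part of the patch; `SliceInfo`, `process_depth_panel` and the use of std::atomic
// are expository stand-ins for GemmParallelInfo and its volatile counters). The roles of
// A and B are swapped with respect to the previous scheme: each thread first packs its
// private rhs panel B', then packs its own horizontal lhs slice A'_tid into the shared
// blockA, publishes it via sync, and finally multiplies its B' against every slice A'_i,
// waiting only when the producing thread has not caught up yet.
#include <atomic>

struct SliceInfo {
  std::atomic<int> sync{-1};    // depth-panel index of the last slice packed
  std::atomic<int> users{0};    // number of threads still reading this slice
  int lhs_start = 0;            // first lhs row owned by this thread
  int lhs_length = 0;           // number of lhs rows owned by this thread
};

void process_depth_panel(int tid, int threads, int k, SliceInfo* info)
{
  // 1. pack this thread's rhs panel B' into thread-local memory      (pack_rhs)
  // 2. repack our lhs slice A'_tid once nobody is reading the old one
  while (info[tid].users.load() != 0) {}
  info[tid].users += threads;
  //    pack_lhs(blockA + info[tid].lhs_start*kc, ...);
  info[tid].sync = k;                                   // publish: A'_tid is ready
  // 3. consume every slice, starting with our own so we never wait for it
  for (int shift = 0; shift < threads; ++shift) {
    int i = (tid + shift) % threads;
    if (shift > 0)
      while (info[i].sync.load() != k) {}               // wait for producer i
    //  gebp(res rows [lhs_start, lhs_start+lhs_length), A'_i, B', ...);
  }
  // 4. release the slices we have read
  for (int i = 0; i < threads; ++i) info[i].users -= 1;
}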
+ pack_rhs(blockB, &rhs(k,0), rhsStride, actual_kc, nc); - // Pack B_k to B' in a parallel fashion: - // each thread packs the sub block B_k,j to B'_j where j is the thread id. + // Pack A_k to A' in a parallel fashion: + // each thread packs the sub block A_k,i to A'_i where i is the thread id. - // However, before copying to B'_j, we have to make sure that no other thread is still using it, + // However, before copying to A'_i, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; + + pack_lhs(blockA+info[tid].lhs_start*actual_kc, &lhs(info[tid].lhs_start,k), lhsStride, actual_kc, info[tid].lhs_length); - pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length); - - // Notify the other threads that the part B'_j is ready to go. + // Notify the other threads that the part A'_i is ready to go. info[tid].sync = k; - - // Computes C_i += A' * B' per B'_j + + // Computes C_i += A' * B' per A'_i for(Index shift=0; shift0) - while(info[j].sync!=k) {} - - gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0); + while(info[i].sync!=k) {} + gebp(res+info[i].lhs_start, resStride, blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); } - // Then keep going as usual with the remaining A' - for(Index i=mc; i Pack rhs's panel into a sequential chunk of memory (L2 caching) - // Note that this panel will be read as many times as the number of blocks in the lhs's - // vertical panel which is, in practice, a very low number. - pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols); + // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) + // Note that this panel will be read as many times as the number of blocks in the rhs's + // horizontal panel which is, in practice, a very low number. + pack_lhs(blockA, &lhs(0,k2), lhsStride, actual_kc, rows); - // For each mc x kc block of the lhs's vertical panel... - // (==GEPP_VAR1) - for(Index i2=0; i2 > template struct gemm_functor { - gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, - BlockingType& blocking) + gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession() const { - m_blocking.allocateB(); + m_blocking.allocateA(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo* info=0) const @@ -220,6 +221,8 @@ struct gemm_functor (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } + + typedef typename Gemm::Traits Traits; protected: const Lhs& m_lhs; @@ -316,13 +319,23 @@ class gemm_blocking_spacem_mc = Transpose ? cols : rows; this->m_nc = Transpose ? 
rows : cols; this->m_kc = depth; - computeProductBlockingSizes(this->m_kc, this->m_mc, this->m_nc); + if(full_rows) + { + DenseIndex m = this->m_mc; + computeProductBlockingSizes(this->m_kc, m, this->m_nc); + } + else // full columns + { + DenseIndex n = this->m_nc; + computeProductBlockingSizes(this->m_kc, this->m_mc, n); + } + m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } @@ -396,7 +409,7 @@ class GeneralProduct (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor; - BlockingType blocking(dst.rows(), dst.cols(), lhs.cols()); + BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), true); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); } diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h index 5c3e9b7ac..4079063eb 100644 --- a/Eigen/src/Core/products/Parallelizer.h +++ b/Eigen/src/Core/products/Parallelizer.h @@ -73,13 +73,13 @@ namespace internal { template struct GemmParallelInfo { - GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {} + GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {} int volatile sync; int volatile users; - Index rhs_start; - Index rhs_length; + Index lhs_start; + Index lhs_length; }; template @@ -107,7 +107,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos if((!Condition) || (omp_get_num_threads()>1)) return func(0,rows, 0,cols); - Index size = transpose ? cols : rows; + Index size = transpose ? rows : cols; // 2- compute the maximal number of threads from the size of the product: // FIXME this has to be fine tuned @@ -126,26 +126,25 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos std::swap(rows,cols); Index blockCols = (cols / threads) & ~Index(0x3); - Index blockRows = (rows / threads) & ~Index(0x7); + Index blockRows = (rows / threads); + blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr; GemmParallelInfo* info = new GemmParallelInfo[threads]; - #pragma omp parallel for schedule(static,1) num_threads(threads) - for(Index i=0; i +template struct symm_pack_lhs { template inline @@ -45,22 +45,29 @@ struct symm_pack_lhs } void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows) { + enum { PacketSize = packet_traits::size }; const_blas_data_mapper lhs(_lhs,lhsStride); Index count = 0; - Index peeled_mc = (rows/Pack1)*Pack1; - for(Index i=0; i(blockA, lhs, cols, i, count); - } - - if(rows-peeled_mc>=Pack2) - { - pack(blockA, lhs, cols, peeled_mc, count); - peeled_mc += Pack2; - } + //Index peeled_mc3 = (rows/Pack1)*Pack1; + + const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; + const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; + const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; + + if(Pack1>=3*PacketSize) + for(Index i=0; i(blockA, lhs, cols, i, count); + + if(Pack1>=2*PacketSize) + for(Index i=peeled_mc3; i(blockA, lhs, cols, i, count); + + if(Pack1>=1*PacketSize) + for(Index i=peeled_mc2; i(blockA, lhs, cols, i, count); // do the same with mr==1 - for(Index i=peeled_mc; i