Mirror of https://gitlab.com/libeigen/eigen.git, synced 2025-09-23 23:03:15 +08:00

Merged latest updates from the parent branch
This commit is contained in: commit cc73164aa8

Eigen/src/Core/GenericPacketMath.h (38 changes, Normal file → Executable file)
@@ -169,6 +169,44 @@ ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
 template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
 pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
 
+/** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+pload1(const typename unpacket_traits<Packet>::type *a) { return pset1<Packet>(*a); }
+
+/** \internal equivalent to
+  * \code
+  * a0 = pload1(a+0);
+  * a1 = pload1(a+1);
+  * a2 = pload1(a+2);
+  * a3 = pload1(a+3);
+  * \endcode
+  * \sa pset1, pload1, ploaddup, pbroadcast2
+  */
+template<typename Packet> EIGEN_DEVICE_FUNC
+inline void pbroadcast4(const typename unpacket_traits<Packet>::type *a,
+                        Packet& a0, Packet& a1, Packet& a2, Packet& a3)
+{
+  a0 = pload1<Packet>(a+0);
+  a1 = pload1<Packet>(a+1);
+  a2 = pload1<Packet>(a+2);
+  a3 = pload1<Packet>(a+3);
+}
+
+/** \internal equivalent to
+  * \code
+  * a0 = pload1(a+0);
+  * a1 = pload1(a+1);
+  * \endcode
+  * \sa pset1, pload1, ploaddup, pbroadcast4
+  */
+template<typename Packet> EIGEN_DEVICE_FUNC
+inline void pbroadcast2(const typename unpacket_traits<Packet>::type *a,
+                        Packet& a0, Packet& a1)
+{
+  a0 = pload1<Packet>(a+0);
+  a1 = pload1<Packet>(a+1);
+}
+
 /** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */
 template<typename Scalar> inline typename packet_traits<Scalar>::type
 plset(const Scalar& a) { return a; }
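
Reviewer note: these helpers replace the old unpackRhs pre-expansion of the rhs; the kernel now broadcasts rhs coefficients on the fly. A minimal usage sketch (illustrative only; it assumes an SSE build where Packet4f holds four floats, and example_broadcast is a hypothetical function, not part of the patch):

    #include <Eigen/Core>
    // Broadcast four consecutive rhs coefficients into four packets,
    // which is exactly what the reworked gebp micro kernel does per step.
    void example_broadcast(const float* rhs)
    {
      using namespace Eigen::internal;
      Packet4f b0, b1, b2, b3;
      pbroadcast4<Packet4f>(rhs, b0, b1, b2, b3);
      // b0 == (rhs[0],rhs[0],rhs[0],rhs[0]), b1 == (rhs[1],...), and so on.
    }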

Eigen/src/Core/arch/SSE/PacketMath.h (45 changes, Normal file → Executable file)
@@ -114,11 +114,22 @@ template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { re
 template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
 template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set_epi32(from,from,from,from); }
 #else
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set1_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps1(from); }
 template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
 #endif
+
+// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
+// However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203).
+// Using inline assembly is also not an option because then gcc fails to reorder the instructions properly.
+// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
+// Also note that with AVX, we want it to generate a vbroadcastss.
+#if (defined __GNUC__) && (!defined __INTEL_COMPILER) && (!defined __clang__) && (!defined __AVX__)
+template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
+  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
+}
+#endif
+
 #ifndef EIGEN_VECTORIZE_AVX
 template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
 template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
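
Reviewer note: the pload1 specialization above exists purely for code generation. A sketch of what it boils down to at the intrinsic level (illustrative; vec4f_swizzle1 is Eigen's wrapper around the integer-domain shuffle):

    // Splat one float via movss + pshufd instead of movss + shufps:
    __m128 a = _mm_load_ss(from);                  // a = (from[0], 0, 0, 0)
    __m128 b = _mm_castsi128_ps(                   // pshufd imm 0x00 splats lane 0
                 _mm_shuffle_epi32(_mm_castps_si128(a), 0x00));
    // On AVX this path is disabled so the compiler can emit a single vbroadcastss.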
@@ -392,6 +403,38 @@ template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
 #endif
 }
 
+// with AVX, the default implementations based on pload1 are faster
+#ifndef __AVX__
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet4f>(const float *a,
+                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+{
+  a3 = ploadu<Packet4f>(a);
+  a0 = vec4f_swizzle1(a3, 0,0,0,0);
+  a1 = vec4f_swizzle1(a3, 1,1,1,1);
+  a2 = vec4f_swizzle1(a3, 2,2,2,2);
+  a3 = vec4f_swizzle1(a3, 3,3,3,3);
+}
+template<> EIGEN_STRONG_INLINE void
+pbroadcast4<Packet2d>(const double *a,
+                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
+{
+#ifdef EIGEN_VECTORIZE_SSE3
+  a0 = _mm_loaddup_pd(a+0);
+  a1 = _mm_loaddup_pd(a+1);
+  a2 = _mm_loaddup_pd(a+2);
+  a3 = _mm_loaddup_pd(a+3);
+#else
+  a1 = ploadu<Packet2d>(a);
+  a0 = vec2d_swizzle1(a1, 0,0);
+  a1 = vec2d_swizzle1(a1, 1,1);
+  a3 = ploadu<Packet2d>(a+2);
+  a2 = vec2d_swizzle1(a3, 0,0);
+  a3 = vec2d_swizzle1(a3, 1,1);
+#endif
+}
+#endif
+
 EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
 {
   vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
Eigen/src/Core/products/GeneralBlockPanelKernel.h

@@ -10,6 +10,7 @@
 #ifndef EIGEN_GENERAL_BLOCK_PANEL_H
 #define EIGEN_GENERAL_BLOCK_PANEL_H
 
+
 namespace Eigen {
 
 namespace internal {
@@ -160,16 +161,16 @@ public:
 
     NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
 
-    // register block size along the N direction (must be either 2 or 4)
-    nr = NumberOfRegisters/4,
+    // register block size along the N direction (must be either 4 or 8)
+    nr = NumberOfRegisters/2,
 
     // register block size along the M direction (currently, this one cannot be modified)
-    mr = 2 * LhsPacketSize,
+    mr = LhsPacketSize,
 
     WorkSpaceFactor = nr * RhsPacketSize,
 
     LhsProgress = LhsPacketSize,
-    RhsProgress = RhsPacketSize
+    RhsProgress = 1
   };
 
   typedef typename packet_traits<LhsScalar>::type _LhsPacket;
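
Reviewer note: a quick worked instance of the new blocking, assuming x86-64 (EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS == 16) and 4-float packets:

    old: nr = 16/4 = 4 columns, mr = 2*4 = 8 rows  ->  2 lhs packets x 4 rhs coefficients
    new: nr = 16/2 = 8 columns, mr = 4 rows        ->  1 lhs packet  x 8 rhs coefficients (C0..C7)

RhsProgress drops to 1 because the rhs is no longer pre-expanded packet by packet: the kernel now consumes one rhs scalar at a time and broadcasts it in registers.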
@@ -187,15 +188,19 @@ public:
     p = pset1<ResPacket>(ResScalar(0));
   }
 
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
   {
-    for(DenseIndex k=0; k<n; k++)
-      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+    pbroadcast4(b, b0, b1, b2, b3);
+  }
+
+  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
+  {
+    pbroadcast2(b, b0, b1);
   }
 
   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
   {
-    dest = pload<RhsPacket>(b);
+    dest = pset1<RhsPacket>(*b);
   }
 
   EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
@@ -243,12 +248,12 @@ public:
     ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
 
     NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
-    nr = NumberOfRegisters/4,
-    mr = 2 * LhsPacketSize,
+    nr = NumberOfRegisters/2,
+    mr = LhsPacketSize,
     WorkSpaceFactor = nr*RhsPacketSize,
 
     LhsProgress = LhsPacketSize,
-    RhsProgress = RhsPacketSize
+    RhsProgress = 1
   };
 
   typedef typename packet_traits<LhsScalar>::type _LhsPacket;
@@ -266,15 +271,9 @@ public:
     p = pset1<ResPacket>(ResScalar(0));
   }
 
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
-  {
-    for(DenseIndex k=0; k<n; k++)
-      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
-  }
-
   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
   {
-    dest = pload<RhsPacket>(b);
+    dest = pset1<RhsPacket>(*b);
   }
 
   EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
@@ -282,6 +281,16 @@ public:
     dest = pload<LhsPacket>(a);
   }
 
+  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
+  {
+    pbroadcast4(b, b0, b1, b2, b3);
+  }
+
+  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
+  {
+    pbroadcast2(b, b0, b1);
+  }
+
   EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
   {
     madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
@@ -327,12 +336,13 @@ public:
     RealPacketSize = Vectorizable ? packet_traits<RealScalar>::size : 1,
     ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
 
-    nr = 2,
-    mr = 2 * ResPacketSize,
+    // FIXME: should depend on NumberOfRegisters
+    nr = 4,
+    mr = ResPacketSize,
     WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr,
 
     LhsProgress = ResPacketSize,
-    RhsProgress = Vectorizable ? 2*ResPacketSize : 1
+    RhsProgress = 1
   };
 
   typedef typename packet_traits<RealScalar>::type RealPacket;
@@ -356,30 +366,36 @@ public:
     p.second = pset1<RealPacket>(RealScalar(0));
   }
 
-  /* Unpack the rhs coeff such that each complex coefficient is spread into
-   * two packets containing respectively the real and imaginary coefficient
-   * duplicated as many times as needed: (x+iy) => [x, ..., x] [y, ..., y]
-   */
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const Scalar* rhs, Scalar* b)
-  {
-    for(DenseIndex k=0; k<n; k++)
-    {
-      if(Vectorizable)
-      {
-        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+0],             real(rhs[k]));
-        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k]));
-      }
-      else
-        b[k] = rhs[k];
-    }
-  }
-
-  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { dest = *b; }
+  // Scalar path
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
+  {
+    dest = pset1<ResPacket>(*b);
+  }
 
+  // Vectorized path
   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const
   {
-    dest.first  = pload<RealPacket>((const RealScalar*)b);
-    dest.second = pload<RealPacket>((const RealScalar*)(b+ResPacketSize));
+    dest.first  = pset1<RealPacket>(real(*b));
+    dest.second = pset1<RealPacket>(imag(*b));
   }
+
+  // linking error if instantiated without being optimized out:
+  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3);
+
+  // Vectorized path
+  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacket& b0, DoublePacket& b1)
+  {
+    // FIXME not sure that's the best way to implement it!
+    loadRhs(b+0, b0);
+    loadRhs(b+1, b1);
+  }
+
+  // Scalar path
+  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
+  {
+    // FIXME not sure that's the best way to implement it!
+    loadRhs(b+0, b0);
+    loadRhs(b+1, b1);
+  }
 
   // nothing special here
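
Reviewer note: for complex scalars the vectorized rhs value is a DoublePacket, a pair of real packets. The new loadRhs splats straight from the scalar, so for b = x + iy it produces (sketch, assuming 4-wide real packets):

    dest.first  == (x, x, x, x)   // real part, duplicated
    dest.second == (y, y, y, y)   // imaginary part, duplicated

which is what the removed unpackRhs used to precompute into a side buffer for every coefficient.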
@@ -452,12 +468,13 @@ public:
     ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
 
     NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+    // FIXME: should depend on NumberOfRegisters
     nr = 4,
-    mr = 2*ResPacketSize,
+    mr = ResPacketSize,
     WorkSpaceFactor = nr*RhsPacketSize,
 
     LhsProgress = ResPacketSize,
-    RhsProgress = ResPacketSize
+    RhsProgress = 1
   };
 
   typedef typename packet_traits<LhsScalar>::type _LhsPacket;
@@ -475,15 +492,19 @@ public:
     p = pset1<ResPacket>(ResScalar(0));
   }
 
-  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
-  {
-    for(DenseIndex k=0; k<n; k++)
-      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
-  }
-
   EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
   {
-    dest = pload<RhsPacket>(b);
+    dest = pset1<RhsPacket>(*b);
   }
 
+  // linking error if instantiated without being optimized out:
+  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3);
+
+  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
+  {
+    // FIXME not sure that's the best way to implement it!
+    b0 = pload1<RhsPacket>(b+0);
+    b1 = pload1<RhsPacket>(b+1);
+  }
+
   EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
@@ -545,179 +566,116 @@ struct gebp_kernel
 
   EIGEN_DONT_INLINE
   void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
-                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB=0);
+                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
 };
 
 template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
 EIGEN_DONT_INLINE
 void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
   ::operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
-               Index strideA, Index strideB, Index offsetA, Index offsetB, RhsScalar* unpackedB)
+               Index strideA, Index strideB, Index offsetA, Index offsetB)
 {
   Traits traits;
 
   if(strideA==-1) strideA = depth;
   if(strideB==-1) strideB = depth;
   conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
-  // conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
   Index packet_cols = (cols/nr) * nr;
+  // Here we assume that mr==LhsProgress
   const Index peeled_mc = (rows/mr)*mr;
-  // FIXME:
-  const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= LhsProgress ? LhsProgress : 0);
   const Index peeled_kc = (depth/4)*4;
 
-  if(unpackedB==0)
-    unpackedB = const_cast<RhsScalar*>(blockB - strideB * nr * RhsProgress);
-
   // loops on each micro vertical panel of rhs (depth x nr)
   for(Index j2=0; j2<packet_cols; j2+=nr)
   {
-    traits.unpackRhs(depth*nr,&blockB[j2*strideB+offsetB*nr],unpackedB);
-
     // loops on each largest micro horizontal panel of lhs (mr x depth)
     // => we select a mr x nr micro block of res which is entirely
     //    stored into mr/packet_size x nr registers.
     for(Index i=0; i<peeled_mc; i+=mr)
     {
       const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
-      prefetch(&blA[0]);
+      // prefetch(&blA[0]);
 
       // gets res block as register
       AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
       traits.initAcc(C0);
       traits.initAcc(C1);
-      if(nr==4) traits.initAcc(C2);
-      if(nr==4) traits.initAcc(C3);
-      traits.initAcc(C4);
-      traits.initAcc(C5);
-      if(nr==4) traits.initAcc(C6);
-      if(nr==4) traits.initAcc(C7);
+      traits.initAcc(C2);
+      traits.initAcc(C3);
+      if(nr==8) traits.initAcc(C4);
+      if(nr==8) traits.initAcc(C5);
+      if(nr==8) traits.initAcc(C6);
+      if(nr==8) traits.initAcc(C7);
 
       ResScalar* r0 = &res[(j2+0)*resStride + i];
-      ResScalar* r1 = r0 + resStride;
-      ResScalar* r2 = r1 + resStride;
-      ResScalar* r3 = r2 + resStride;
-
-      prefetch(r0+16);
-      prefetch(r1+16);
-      prefetch(r2+16);
-      prefetch(r3+16);
 
-      // performs "inner" product
-      // TODO let's check whether the following peeled loop could not be
-      //      optimized via optimal prefetching from one loop to the other
-      const RhsScalar* blB = unpackedB;
+      // performs "inner" products
+      const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+      LhsPacket A0, A1;
+      // uncomment for register prefetching
+      // traits.loadLhs(blA, A0);
       for(Index k=0; k<peeled_kc; k+=4)
       {
-        if(nr==2)
+        if(nr==4)
         {
-          LhsPacket A0, A1;
-          RhsPacket B_0;
-          RhsPacket T0;
-
-          EIGEN_ASM_COMMENT("mybegin2");
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadLhs(&blA[1*LhsProgress], A1);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B_0);
-          traits.madd(A0,B_0,C1,T0);
-          traits.madd(A1,B_0,C5,B_0);
-
-          traits.loadLhs(&blA[2*LhsProgress], A0);
-          traits.loadLhs(&blA[3*LhsProgress], A1);
-          traits.loadRhs(&blB[2*RhsProgress], B_0);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[3*RhsProgress], B_0);
-          traits.madd(A0,B_0,C1,T0);
-          traits.madd(A1,B_0,C5,B_0);
-
-          traits.loadLhs(&blA[4*LhsProgress], A0);
-          traits.loadLhs(&blA[5*LhsProgress], A1);
-          traits.loadRhs(&blB[4*RhsProgress], B_0);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[5*RhsProgress], B_0);
-          traits.madd(A0,B_0,C1,T0);
-          traits.madd(A1,B_0,C5,B_0);
-
-          traits.loadLhs(&blA[6*LhsProgress], A0);
-          traits.loadLhs(&blA[7*LhsProgress], A1);
-          traits.loadRhs(&blB[6*RhsProgress], B_0);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[7*RhsProgress], B_0);
-          traits.madd(A0,B_0,C1,T0);
-          traits.madd(A1,B_0,C5,B_0);
-          EIGEN_ASM_COMMENT("myend");
+          EIGEN_ASM_COMMENT("begin gebp micro kernel 1p x 4");
+
+          RhsPacket B_0, B1;
+
+#define EIGEN_GEBGP_ONESTEP4(K) \
+          traits.loadLhs(&blA[K*LhsProgress], A0); \
+          traits.broadcastRhs(&blB[0+4*K*RhsProgress], B_0, B1); \
+          traits.madd(A0, B_0,C0, B_0); \
+          traits.madd(A0, B1, C1, B1); \
+          traits.broadcastRhs(&blB[2+4*K*RhsProgress], B_0, B1); \
+          traits.madd(A0, B_0,C2, B_0); \
+          traits.madd(A0, B1, C3, B1)
+
+          EIGEN_GEBGP_ONESTEP4(0);
+          EIGEN_GEBGP_ONESTEP4(1);
+          EIGEN_GEBGP_ONESTEP4(2);
+          EIGEN_GEBGP_ONESTEP4(3);
         }
-        else
+        else // nr==8
        {
-          EIGEN_ASM_COMMENT("mybegin4");
-          LhsPacket A0, A1;
+          EIGEN_ASM_COMMENT("begin gebp micro kernel 1p x 8");
+
           RhsPacket B_0, B1, B2, B3;
-          RhsPacket T0;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadLhs(&blA[1*LhsProgress], A1);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B1);
-
-          traits.madd(A0,B_0,C0,T0);
-          traits.loadRhs(&blB[2*RhsProgress], B2);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[3*RhsProgress], B3);
-          traits.loadRhs(&blB[4*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,T0);
-          traits.madd(A1,B1,C5,B1);
-          traits.loadRhs(&blB[5*RhsProgress], B1);
-          traits.madd(A0,B2,C2,T0);
-          traits.madd(A1,B2,C6,B2);
-          traits.loadRhs(&blB[6*RhsProgress], B2);
-          traits.madd(A0,B3,C3,T0);
-          traits.loadLhs(&blA[2*LhsProgress], A0);
-          traits.madd(A1,B3,C7,B3);
-          traits.loadLhs(&blA[3*LhsProgress], A1);
-          traits.loadRhs(&blB[7*RhsProgress], B3);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[8*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,T0);
-          traits.madd(A1,B1,C5,B1);
-          traits.loadRhs(&blB[9*RhsProgress], B1);
-          traits.madd(A0,B2,C2,T0);
-          traits.madd(A1,B2,C6,B2);
-          traits.loadRhs(&blB[10*RhsProgress], B2);
-          traits.madd(A0,B3,C3,T0);
-          traits.loadLhs(&blA[4*LhsProgress], A0);
-          traits.madd(A1,B3,C7,B3);
-          traits.loadLhs(&blA[5*LhsProgress], A1);
-          traits.loadRhs(&blB[11*RhsProgress], B3);
-
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[12*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,T0);
-          traits.madd(A1,B1,C5,B1);
-          traits.loadRhs(&blB[13*RhsProgress], B1);
-          traits.madd(A0,B2,C2,T0);
-          traits.madd(A1,B2,C6,B2);
-          traits.loadRhs(&blB[14*RhsProgress], B2);
-          traits.madd(A0,B3,C3,T0);
-          traits.loadLhs(&blA[6*LhsProgress], A0);
-          traits.madd(A1,B3,C7,B3);
-          traits.loadLhs(&blA[7*LhsProgress], A1);
-          traits.loadRhs(&blB[15*RhsProgress], B3);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.madd(A0,B1,C1,T0);
-          traits.madd(A1,B1,C5,B1);
-          traits.madd(A0,B2,C2,T0);
-          traits.madd(A1,B2,C6,B2);
-          traits.madd(A0,B3,C3,T0);
-          traits.madd(A1,B3,C7,B3);
+
+          // The following version is faster on some architectures
+          // but sometimes leads to segfaults because it might read one packet outside the bounds
+          // To test it, you also need to uncomment the initialization of A0 above and the copy of A1 to A0 below.
+#if 0
+#define EIGEN_GEBGP_ONESTEP8(K,L,M) \
+          traits.loadLhs(&blA[(K+1)*LhsProgress], L); \
+          traits.broadcastRhs(&blB[0+8*K*RhsProgress], B_0, B1, B2, B3); \
+          traits.madd(M, B_0,C0, B_0); \
+          traits.madd(M, B1, C1, B1); \
+          traits.madd(M, B2, C2, B2); \
+          traits.madd(M, B3, C3, B3); \
+          traits.broadcastRhs(&blB[4+8*K*RhsProgress], B_0, B1, B2, B3); \
+          traits.madd(M, B_0,C4, B_0); \
+          traits.madd(M, B1, C5, B1); \
+          traits.madd(M, B2, C6, B2); \
+          traits.madd(M, B3, C7, B3)
+#endif
+
+#define EIGEN_GEBGP_ONESTEP8(K,L,M) \
+          traits.loadLhs(&blA[K*LhsProgress], A0); \
+          traits.broadcastRhs(&blB[0+8*K*RhsProgress], B_0, B1, B2, B3); \
+          traits.madd(A0, B_0,C0, B_0); \
+          traits.madd(A0, B1, C1, B1); \
+          traits.madd(A0, B2, C2, B2); \
+          traits.madd(A0, B3, C3, B3); \
+          traits.broadcastRhs(&blB[4+8*K*RhsProgress], B_0, B1, B2, B3); \
+          traits.madd(A0, B_0,C4, B_0); \
+          traits.madd(A0, B1, C5, B1); \
+          traits.madd(A0, B2, C6, B2); \
+          traits.madd(A0, B3, C7, B3)
+
+          EIGEN_GEBGP_ONESTEP8(0,A1,A0);
+          EIGEN_GEBGP_ONESTEP8(1,A0,A1);
+          EIGEN_GEBGP_ONESTEP8(2,A1,A0);
+          EIGEN_GEBGP_ONESTEP8(3,A0,A1);
        }
 
         blB += 4*nr*RhsProgress;
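
Reviewer note: the hand-unrolled bodies above are now generated by the EIGEN_GEBGP_ONESTEP4/8 macros. For nr==4, one step expands to the following (shown for K=0, so with RhsProgress==1 the rhs offsets 0+4*K and 2+4*K become 0 and 2):

    traits.loadLhs(&blA[0*LhsProgress], A0);   // one lhs packet
    traits.broadcastRhs(&blB[0], B_0, B1);     // splat rhs coefficients 0 and 1
    traits.madd(A0, B_0, C0, B_0);             // C0 += A0 * b0
    traits.madd(A0, B1,  C1, B1);              // C1 += A0 * b1
    traits.broadcastRhs(&blB[2], B_0, B1);     // splat rhs coefficients 2 and 3
    traits.madd(A0, B_0, C2, B_0);             // C2 += A0 * b2
    traits.madd(A0, B1,  C3, B1);              // C3 += A0 * b3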
@@ -726,63 +684,40 @@ EIGEN_ASM_COMMENT("mybegin4");
       // process remaining peeled loop
       for(Index k=peeled_kc; k<depth; k++)
       {
-        if(nr==2)
+        if(nr==4)
         {
-          LhsPacket A0, A1;
-          RhsPacket B_0;
-          RhsPacket T0;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadLhs(&blA[1*LhsProgress], A1);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.madd(A0,B_0,C0,T0);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B_0);
-          traits.madd(A0,B_0,C1,T0);
-          traits.madd(A1,B_0,C5,B_0);
+          RhsPacket B_0, B1;
+          EIGEN_GEBGP_ONESTEP4(0);
         }
-        else
+        else // nr == 8
         {
-          LhsPacket A0, A1;
           RhsPacket B_0, B1, B2, B3;
-          RhsPacket T0;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadLhs(&blA[1*LhsProgress], A1);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B1);
-
-          traits.madd(A0,B_0,C0,T0);
-          traits.loadRhs(&blB[2*RhsProgress], B2);
-          traits.madd(A1,B_0,C4,B_0);
-          traits.loadRhs(&blB[3*RhsProgress], B3);
-          traits.madd(A0,B1,C1,T0);
-          traits.madd(A1,B1,C5,B1);
-          traits.madd(A0,B2,C2,T0);
-          traits.madd(A1,B2,C6,B2);
-          traits.madd(A0,B3,C3,T0);
-          traits.madd(A1,B3,C7,B3);
+          EIGEN_GEBGP_ONESTEP8(0,A1,A0);
+          // uncomment for register prefetching
+          // A0 = A1;
         }
 
         blB += nr*RhsProgress;
         blA += mr;
       }
+#undef EIGEN_GEBGP_ONESTEP4
+#undef EIGEN_GEBGP_ONESTEP8
 
-      if(nr==4)
+      if(nr==8)
       {
         ResPacket R0, R1, R2, R3, R4, R5, R6;
         ResPacket alphav = pset1<ResPacket>(alpha);
 
-        R0 = ploadu<ResPacket>(r0);
-        R1 = ploadu<ResPacket>(r1);
-        R2 = ploadu<ResPacket>(r2);
-        R3 = ploadu<ResPacket>(r3);
-        R4 = ploadu<ResPacket>(r0 + ResPacketSize);
-        R5 = ploadu<ResPacket>(r1 + ResPacketSize);
-        R6 = ploadu<ResPacket>(r2 + ResPacketSize);
+        R0 = ploadu<ResPacket>(r0+0*resStride);
+        R1 = ploadu<ResPacket>(r0+1*resStride);
+        R2 = ploadu<ResPacket>(r0+2*resStride);
+        R3 = ploadu<ResPacket>(r0+3*resStride);
+        R4 = ploadu<ResPacket>(r0+4*resStride);
+        R5 = ploadu<ResPacket>(r0+5*resStride);
+        R6 = ploadu<ResPacket>(r0+6*resStride);
         traits.acc(C0, alphav, R0);
-        pstoreu(r0, R0);
-        R0 = ploadu<ResPacket>(r3 + ResPacketSize);
+        pstoreu(r0+0*resStride, R0);
+        R0 = ploadu<ResPacket>(r0+7*resStride);
 
         traits.acc(C1, alphav, R1);
         traits.acc(C2, alphav, R2);
@@ -792,239 +727,111 @@ EIGEN_ASM_COMMENT("mybegin4");
         traits.acc(C6, alphav, R6);
         traits.acc(C7, alphav, R0);
 
-        pstoreu(r1, R1);
-        pstoreu(r2, R2);
-        pstoreu(r3, R3);
-        pstoreu(r0 + ResPacketSize, R4);
-        pstoreu(r1 + ResPacketSize, R5);
-        pstoreu(r2 + ResPacketSize, R6);
-        pstoreu(r3 + ResPacketSize, R0);
+        pstoreu(r0+1*resStride, R1);
+        pstoreu(r0+2*resStride, R2);
+        pstoreu(r0+3*resStride, R3);
+        pstoreu(r0+4*resStride, R4);
+        pstoreu(r0+5*resStride, R5);
+        pstoreu(r0+6*resStride, R6);
+        pstoreu(r0+7*resStride, R0);
       }
-      else
+      else // nr==4
       {
-        ResPacket R0, R1, R4;
+        ResPacket R0, R1, R2;
         ResPacket alphav = pset1<ResPacket>(alpha);
 
-        R0 = ploadu<ResPacket>(r0);
-        R1 = ploadu<ResPacket>(r1);
-        R4 = ploadu<ResPacket>(r0 + ResPacketSize);
+        R0 = ploadu<ResPacket>(r0+0*resStride);
+        R1 = ploadu<ResPacket>(r0+1*resStride);
+        R2 = ploadu<ResPacket>(r0+2*resStride);
        traits.acc(C0, alphav, R0);
-        pstoreu(r0, R0);
-        R0 = ploadu<ResPacket>(r1 + ResPacketSize);
+        pstoreu(r0+0*resStride, R0);
+        R0 = ploadu<ResPacket>(r0+3*resStride);
 
         traits.acc(C1, alphav, R1);
-        traits.acc(C4, alphav, R4);
-        traits.acc(C5, alphav, R0);
-        pstoreu(r1, R1);
-        pstoreu(r0 + ResPacketSize, R4);
-        pstoreu(r1 + ResPacketSize, R0);
+        traits.acc(C2, alphav, R2);
+        traits.acc(C3, alphav, R0);
+        pstoreu(r0+1*resStride, R1);
+        pstoreu(r0+2*resStride, R2);
+        pstoreu(r0+3*resStride, R0);
       }
 
     }
 
-    if(rows-peeled_mc>=LhsProgress)
-    {
-      Index i = peeled_mc;
-      const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
-      prefetch(&blA[0]);
-
-      // gets res block as register
-      AccPacket C0, C1, C2, C3;
-      traits.initAcc(C0);
-      traits.initAcc(C1);
-      if(nr==4) traits.initAcc(C2);
-      if(nr==4) traits.initAcc(C3);
-
-      // performs "inner" product
-      const RhsScalar* blB = unpackedB;
-      for(Index k=0; k<peeled_kc; k+=4)
-      {
-        if(nr==2)
-        {
-          LhsPacket A0;
-          RhsPacket B_0, B1;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B1);
-          traits.madd(A0,B_0,C0,B_0);
-          traits.loadRhs(&blB[2*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.loadLhs(&blA[1*LhsProgress], A0);
-          traits.loadRhs(&blB[3*RhsProgress], B1);
-          traits.madd(A0,B_0,C0,B_0);
-          traits.loadRhs(&blB[4*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.loadLhs(&blA[2*LhsProgress], A0);
-          traits.loadRhs(&blB[5*RhsProgress], B1);
-          traits.madd(A0,B_0,C0,B_0);
-          traits.loadRhs(&blB[6*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.loadLhs(&blA[3*LhsProgress], A0);
-          traits.loadRhs(&blB[7*RhsProgress], B1);
-          traits.madd(A0,B_0,C0,B_0);
-          traits.madd(A0,B1,C1,B1);
-        }
-        else
-        {
-          LhsPacket A0;
-          RhsPacket B_0, B1, B2, B3;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B1);
-
-          traits.madd(A0,B_0,C0,B_0);
-          traits.loadRhs(&blB[2*RhsProgress], B2);
-          traits.loadRhs(&blB[3*RhsProgress], B3);
-          traits.loadRhs(&blB[4*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.loadRhs(&blB[5*RhsProgress], B1);
-          traits.madd(A0,B2,C2,B2);
-          traits.loadRhs(&blB[6*RhsProgress], B2);
-          traits.madd(A0,B3,C3,B3);
-          traits.loadLhs(&blA[1*LhsProgress], A0);
-          traits.loadRhs(&blB[7*RhsProgress], B3);
-          traits.madd(A0,B_0,C0,B_0);
-          traits.loadRhs(&blB[8*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.loadRhs(&blB[9*RhsProgress], B1);
-          traits.madd(A0,B2,C2,B2);
-          traits.loadRhs(&blB[10*RhsProgress], B2);
-          traits.madd(A0,B3,C3,B3);
-          traits.loadLhs(&blA[2*LhsProgress], A0);
-          traits.loadRhs(&blB[11*RhsProgress], B3);
-
-          traits.madd(A0,B_0,C0,B_0);
-          traits.loadRhs(&blB[12*RhsProgress], B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.loadRhs(&blB[13*RhsProgress], B1);
-          traits.madd(A0,B2,C2,B2);
-          traits.loadRhs(&blB[14*RhsProgress], B2);
-          traits.madd(A0,B3,C3,B3);
-
-          traits.loadLhs(&blA[3*LhsProgress], A0);
-          traits.loadRhs(&blB[15*RhsProgress], B3);
-          traits.madd(A0,B_0,C0,B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.madd(A0,B2,C2,B2);
-          traits.madd(A0,B3,C3,B3);
-        }
-
-        blB += nr*4*RhsProgress;
-        blA += 4*LhsProgress;
-      }
-      // process remaining peeled loop
-      for(Index k=peeled_kc; k<depth; k++)
-      {
-        if(nr==2)
-        {
-          LhsPacket A0;
-          RhsPacket B_0, B1;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B1);
-          traits.madd(A0,B_0,C0,B_0);
-          traits.madd(A0,B1,C1,B1);
-        }
-        else
-        {
-          LhsPacket A0;
-          RhsPacket B_0, B1, B2, B3;
-
-          traits.loadLhs(&blA[0*LhsProgress], A0);
-          traits.loadRhs(&blB[0*RhsProgress], B_0);
-          traits.loadRhs(&blB[1*RhsProgress], B1);
-          traits.loadRhs(&blB[2*RhsProgress], B2);
-          traits.loadRhs(&blB[3*RhsProgress], B3);
-
-          traits.madd(A0,B_0,C0,B_0);
-          traits.madd(A0,B1,C1,B1);
-          traits.madd(A0,B2,C2,B2);
-          traits.madd(A0,B3,C3,B3);
-        }
-
-        blB += nr*RhsProgress;
-        blA += LhsProgress;
-      }
-
-      ResPacket R0, R1, R2, R3;
-      ResPacket alphav = pset1<ResPacket>(alpha);
-
-      ResScalar* r0 = &res[(j2+0)*resStride + i];
-      ResScalar* r1 = r0 + resStride;
-      ResScalar* r2 = r1 + resStride;
-      ResScalar* r3 = r2 + resStride;
-
-      R0 = ploadu<ResPacket>(r0);
-      R1 = ploadu<ResPacket>(r1);
-      if(nr==4) R2 = ploadu<ResPacket>(r2);
-      if(nr==4) R3 = ploadu<ResPacket>(r3);
-
-      traits.acc(C0, alphav, R0);
-      traits.acc(C1, alphav, R1);
-      if(nr==4) traits.acc(C2, alphav, R2);
-      if(nr==4) traits.acc(C3, alphav, R3);
-
-      pstoreu(r0, R0);
-      pstoreu(r1, R1);
-      if(nr==4) pstoreu(r2, R2);
-      if(nr==4) pstoreu(r3, R3);
-    }
-    for(Index i=peeled_mc2; i<rows; i++)
+    for(Index i=peeled_mc; i<rows; i++)
     {
       const LhsScalar* blA = &blockA[i*strideA+offsetA];
       prefetch(&blA[0]);
 
       // gets a 1 x nr res block as registers
-      ResScalar C0(0), C1(0), C2(0), C3(0);
-      // TODO directly use blockB ???
+      ResScalar C0(0), C1(0), C2(0), C3(0), C4(0), C5(0), C6(0), C7(0);
+      // FIXME directly use blockB ???
       const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+      // TODO peel this loop
       for(Index k=0; k<depth; k++)
       {
-        if(nr==2)
+        if(nr==4)
         {
           LhsScalar A0;
-          RhsScalar B_0, B1;
+          RhsScalar B_0, B_1;
 
           A0 = blA[k];
 
           B_0 = blB[0];
-          B1 = blB[1];
-          MADD(cj,A0,B_0,C0,B_0);
-          MADD(cj,A0,B1,C1,B1);
+          B_1 = blB[1];
+          MADD(cj,A0,B_0,C0, B_0);
+          MADD(cj,A0,B_1,C1, B_1);
+
+          B_0 = blB[2];
+          B_1 = blB[3];
+          MADD(cj,A0,B_0,C2, B_0);
+          MADD(cj,A0,B_1,C3, B_1);
         }
-        else
+        else // nr==8
         {
           LhsScalar A0;
-          RhsScalar B_0, B1, B2, B3;
+          RhsScalar B_0, B_1;
 
           A0 = blA[k];
-          B_0 = blB[0];
-          B1 = blB[1];
-          B2 = blB[2];
-          B3 = blB[3];
 
-          MADD(cj,A0,B_0,C0,B_0);
-          MADD(cj,A0,B1,C1,B1);
-          MADD(cj,A0,B2,C2,B2);
-          MADD(cj,A0,B3,C3,B3);
+          B_0 = blB[0];
+          B_1 = blB[1];
+          MADD(cj,A0,B_0,C0, B_0);
+          MADD(cj,A0,B_1,C1, B_1);
+
+          B_0 = blB[2];
+          B_1 = blB[3];
+          MADD(cj,A0,B_0,C2, B_0);
+          MADD(cj,A0,B_1,C3, B_1);
+
+          B_0 = blB[4];
+          B_1 = blB[5];
+          MADD(cj,A0,B_0,C4, B_0);
+          MADD(cj,A0,B_1,C5, B_1);
+
+          B_0 = blB[6];
+          B_1 = blB[7];
+          MADD(cj,A0,B_0,C6, B_0);
+          MADD(cj,A0,B_1,C7, B_1);
         }
 
         blB += nr;
       }
       res[(j2+0)*resStride + i] += alpha*C0;
       res[(j2+1)*resStride + i] += alpha*C1;
-      if(nr==4) res[(j2+2)*resStride + i] += alpha*C2;
-      if(nr==4) res[(j2+3)*resStride + i] += alpha*C3;
+      res[(j2+2)*resStride + i] += alpha*C2;
+      res[(j2+3)*resStride + i] += alpha*C3;
+      if(nr==8) res[(j2+4)*resStride + i] += alpha*C4;
+      if(nr==8) res[(j2+5)*resStride + i] += alpha*C5;
+      if(nr==8) res[(j2+6)*resStride + i] += alpha*C6;
+      if(nr==8) res[(j2+7)*resStride + i] += alpha*C7;
     }
   }
 
   // process remaining rhs/res columns one at a time
   // => do the same but with nr==1
   for(Index j2=packet_cols; j2<cols; j2++)
   {
-    // unpack B
-    traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB);
-
     for(Index i=0; i<peeled_mc; i+=mr)
     {
       const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
@@ -1033,67 +840,31 @@ EIGEN_ASM_COMMENT("mybegin4");
       // TODO move the res loads to the stores
 
       // get res block as registers
-      AccPacket C0, C4;
-      traits.initAcc(C0);
-      traits.initAcc(C4);
-
-      const RhsScalar* blB = unpackedB;
-      for(Index k=0; k<depth; k++)
-      {
-        LhsPacket A0, A1;
-        RhsPacket B_0;
-        RhsPacket T0;
-
-        traits.loadLhs(&blA[0*LhsProgress], A0);
-        traits.loadLhs(&blA[1*LhsProgress], A1);
-        traits.loadRhs(&blB[0*RhsProgress], B_0);
-        traits.madd(A0,B_0,C0,T0);
-        traits.madd(A1,B_0,C4,B_0);
-
-        blB += RhsProgress;
-        blA += 2*LhsProgress;
-      }
-      ResPacket R0, R4;
-      ResPacket alphav = pset1<ResPacket>(alpha);
-
-      ResScalar* r0 = &res[(j2+0)*resStride + i];
-
-      R0 = ploadu<ResPacket>(r0);
-      R4 = ploadu<ResPacket>(r0+ResPacketSize);
-
-      traits.acc(C0, alphav, R0);
-      traits.acc(C4, alphav, R4);
-
-      pstoreu(r0, R0);
-      pstoreu(r0+ResPacketSize, R4);
-    }
-    if(rows-peeled_mc>=LhsProgress)
-    {
-      Index i = peeled_mc;
-      const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
-      prefetch(&blA[0]);
-
       AccPacket C0;
       traits.initAcc(C0);
 
-      const RhsScalar* blB = unpackedB;
+      const RhsScalar* blB = &blockB[j2*strideB+offsetB];
       for(Index k=0; k<depth; k++)
       {
         LhsPacket A0;
         RhsPacket B_0;
-        traits.loadLhs(blA, A0);
-        traits.loadRhs(blB, B_0);
-        traits.madd(A0, B_0, C0, B_0);
+        RhsPacket T0;
+
+        traits.loadLhs(&blA[0*LhsProgress], A0);
+        traits.loadRhs(&blB[0*RhsProgress], B_0);
+        traits.madd(A0,B_0,C0,T0);
 
         blB += RhsProgress;
         blA += LhsProgress;
       }
+      ResPacket R0;
       ResPacket alphav = pset1<ResPacket>(alpha);
-      ResPacket R0 = ploadu<ResPacket>(&res[(j2+0)*resStride + i]);
+      ResScalar* r0 = &res[(j2+0)*resStride + i];
+      R0 = ploadu<ResPacket>(r0);
       traits.acc(C0, alphav, R0);
-      pstoreu(&res[(j2+0)*resStride + i], R0);
+      pstoreu(r0, R0);
     }
-    for(Index i=peeled_mc2; i<rows; i++)
+    for(Index i=peeled_mc; i<rows; i++)
     {
       const LhsScalar* blA = &blockA[i*strideA+offsetA];
       prefetch(&blA[0]);
@@ -1147,7 +918,7 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder,
   EIGEN_UNUSED_VARIABLE(stride);
   EIGEN_UNUSED_VARIABLE(offset);
   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
-  eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
+  eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
   const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
   Index count = 0;
@@ -1159,6 +930,8 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder,
     if(StorageOrder==ColMajor)
     {
       for(Index k=0; k<depth; k++)
+      {
+        if((Pack1%PacketSize)==0)
         {
           Packet A, B, C, D;
           if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
@@ -1170,6 +943,14 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder,
           if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
           if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
         }
+        else
+        {
+          if(Pack1>=1) blockA[count++] = cj(lhs(i+0, k));
+          if(Pack1>=2) blockA[count++] = cj(lhs(i+1, k));
+          if(Pack1>=3) blockA[count++] = cj(lhs(i+2, k));
+          if(Pack1>=4) blockA[count++] = cj(lhs(i+3, k));
+        }
+      }
     }
     else
     {
@@ -1247,12 +1028,20 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, Pan
     const Scalar* b1 = &rhs[(j2+1)*rhsStride];
     const Scalar* b2 = &rhs[(j2+2)*rhsStride];
     const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+    const Scalar* b4 = &rhs[(j2+4)*rhsStride];
+    const Scalar* b5 = &rhs[(j2+5)*rhsStride];
+    const Scalar* b6 = &rhs[(j2+6)*rhsStride];
+    const Scalar* b7 = &rhs[(j2+7)*rhsStride];
     for(Index k=0; k<depth; k++)
     {
       blockB[count+0] = cj(b0[k]);
       blockB[count+1] = cj(b1[k]);
-      if(nr==4) blockB[count+2] = cj(b2[k]);
-      if(nr==4) blockB[count+3] = cj(b3[k]);
+      if(nr>=4) blockB[count+2] = cj(b2[k]);
+      if(nr>=4) blockB[count+3] = cj(b3[k]);
+      if(nr>=8) blockB[count+4] = cj(b4[k]);
+      if(nr>=8) blockB[count+5] = cj(b5[k]);
+      if(nr>=8) blockB[count+6] = cj(b6[k]);
+      if(nr>=8) blockB[count+7] = cj(b7[k]);
       count += nr;
     }
     // skip what we have after
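
Reviewer note: with nr==8 the packed rhs now interleaves eight columns, so for each k the kernel reads eight contiguous scalars. Layout sketch of blockB (illustrative):

    b0[k], b1[k], ..., b7[k], b0[k+1], b1[k+1], ..., b7[k+1], ...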
@@ -1307,8 +1096,12 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, Pan
       const Scalar* b0 = &rhs[k*rhsStride + j2];
       blockB[count+0] = cj(b0[0]);
       blockB[count+1] = cj(b0[1]);
-      if(nr==4) blockB[count+2] = cj(b0[2]);
-      if(nr==4) blockB[count+3] = cj(b0[3]);
+      if(nr>=4) blockB[count+2] = cj(b0[2]);
+      if(nr>=4) blockB[count+3] = cj(b0[3]);
+      if(nr>=8) blockB[count+4] = cj(b0[4]);
+      if(nr>=8) blockB[count+5] = cj(b0[5]);
+      if(nr>=8) blockB[count+6] = cj(b0[6]);
+      if(nr>=8) blockB[count+7] = cj(b0[7]);
       count += nr;
     }
   }
Eigen/src/Core/products/GeneralMatrixMatrix.h

@@ -81,9 +81,7 @@ static void run(Index rows, Index cols, Index depth,
     Index threads = omp_get_num_threads();
 
     std::size_t sizeA = kc*mc;
-    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
     ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
-    ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
 
     RhsScalar* blockB = blocking.blockB();
     eigen_internal_assert(blockB!=0);
@@ -122,7 +120,7 @@ static void run(Index rows, Index cols, Index depth,
       if(shift>0)
         while(info[j].sync!=k) {}
 
-      gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
+      gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0);
     }
 
     // Then keep going as usual with the remaining A'
@@ -134,7 +132,7 @@ static void run(Index rows, Index cols, Index depth,
       pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
 
       // C_i += A' * B'
-      gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
+      gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0);
     }
 
     // Release all the sub blocks B'_j of B' for the current thread,
|
|||||||
// this is the sequential version!
|
// this is the sequential version!
|
||||||
std::size_t sizeA = kc*mc;
|
std::size_t sizeA = kc*mc;
|
||||||
std::size_t sizeB = kc*cols;
|
std::size_t sizeB = kc*cols;
|
||||||
std::size_t sizeW = kc*Traits::WorkSpaceFactor;
|
|
||||||
|
|
||||||
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
|
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
|
||||||
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
|
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
|
||||||
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
|
|
||||||
|
|
||||||
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
|
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
|
||||||
// (==GEMM_VAR1)
|
// (==GEMM_VAR1)
|
||||||
@@ -182,7 +178,7 @@ static void run(Index rows, Index cols, Index depth,
         pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
 
         // Everything is packed, we can now call the block * panel kernel:
-        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
+        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0);
       }
     }
   }
Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h

@@ -73,11 +73,8 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
     if(mc > Traits::nr)
       mc = (mc/Traits::nr)*Traits::nr;
 
-    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
-    std::size_t sizeB = sizeW + kc*size;
     ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, kc*mc, 0);
-    ei_declare_aligned_stack_constructed_variable(RhsScalar, allocatedBlockB, sizeB, 0);
-    RhsScalar* blockB = allocatedBlockB + sizeW;
+    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, kc*size, 0);
 
     gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
     gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
@@ -103,15 +100,15 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
       // 3 - after the diagonal => processed with gebp or skipped
       if (UpLo==Lower)
         gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, (std::min)(size,i2), alpha,
-             -1, -1, 0, 0, allocatedBlockB);
+             -1, -1, 0, 0);
 
-      sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB);
+      sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha);
 
       if (UpLo==Upper)
       {
         Index j2 = i2+actual_mc;
         gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, (std::max)(Index(0), size-j2), alpha,
-             -1, -1, 0, 0, allocatedBlockB);
+             -1, -1, 0, 0);
       }
     }
   }
@@ -136,7 +133,7 @@ struct tribb_kernel
   enum {
     BlockSize  = EIGEN_PLAIN_ENUM_MAX(mr,nr)
   };
-  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha, RhsScalar* workspace)
+  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha)
   {
     gebp_kernel<LhsScalar, RhsScalar, Index, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
     Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer;
@ -150,7 +147,7 @@ struct tribb_kernel
|
|||||||
|
|
||||||
if(UpLo==Upper)
|
if(UpLo==Upper)
|
||||||
gebp_kernel(res+j*resStride, resStride, blockA, actual_b, j, depth, actualBlockSize, alpha,
|
gebp_kernel(res+j*resStride, resStride, blockA, actual_b, j, depth, actualBlockSize, alpha,
|
||||||
-1, -1, 0, 0, workspace);
|
-1, -1, 0, 0);
|
||||||
|
|
||||||
// selfadjoint micro block
|
// selfadjoint micro block
|
||||||
{
|
{
|
||||||
@ -158,7 +155,7 @@ struct tribb_kernel
|
|||||||
buffer.setZero();
|
buffer.setZero();
|
||||||
// 1 - apply the kernel on the temporary buffer
|
// 1 - apply the kernel on the temporary buffer
|
||||||
gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,
|
gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,
|
||||||
-1, -1, 0, 0, workspace);
|
-1, -1, 0, 0);
|
||||||
// 2 - triangular accumulation
|
// 2 - triangular accumulation
|
||||||
for(Index j1=0; j1<actualBlockSize; ++j1)
|
for(Index j1=0; j1<actualBlockSize; ++j1)
|
||||||
{
|
{
|
||||||
@ -173,7 +170,7 @@ struct tribb_kernel
|
|||||||
{
|
{
|
||||||
Index i = j+actualBlockSize;
|
Index i = j+actualBlockSize;
|
||||||
gebp_kernel(res+j*resStride+i, resStride, blockA+depth*i, actual_b, size-i, depth, actualBlockSize, alpha,
|
gebp_kernel(res+j*resStride+i, resStride, blockA+depth*i, actual_b, size-i, depth, actualBlockSize, alpha,
|
||||||
-1, -1, 0, 0, workspace);
|
-1, -1, 0, 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -91,11 +91,18 @@ struct symm_pack_rhs
       {
         blockB[count+0] = rhs(k,j2+0);
         blockB[count+1] = rhs(k,j2+1);
-        if (nr==4)
+        if (nr>=4)
         {
           blockB[count+2] = rhs(k,j2+2);
           blockB[count+3] = rhs(k,j2+3);
         }
+        if (nr>=8)
+        {
+          blockB[count+4] = rhs(k,j2+4);
+          blockB[count+5] = rhs(k,j2+5);
+          blockB[count+6] = rhs(k,j2+6);
+          blockB[count+7] = rhs(k,j2+7);
+        }
         count += nr;
       }
     }
@@ -109,11 +116,18 @@ struct symm_pack_rhs
       {
         blockB[count+0] = numext::conj(rhs(j2+0,k));
         blockB[count+1] = numext::conj(rhs(j2+1,k));
-        if (nr==4)
+        if (nr>=4)
         {
           blockB[count+2] = numext::conj(rhs(j2+2,k));
           blockB[count+3] = numext::conj(rhs(j2+3,k));
         }
+        if (nr>=8)
+        {
+          blockB[count+4] = numext::conj(rhs(j2+4,k));
+          blockB[count+5] = numext::conj(rhs(j2+5,k));
+          blockB[count+6] = numext::conj(rhs(j2+6,k));
+          blockB[count+7] = numext::conj(rhs(j2+7,k));
+        }
         count += nr;
       }
       // symmetric
@@ -137,11 +151,18 @@ struct symm_pack_rhs
       {
         blockB[count+0] = rhs(k,j2+0);
         blockB[count+1] = rhs(k,j2+1);
-        if (nr==4)
+        if (nr>=4)
         {
           blockB[count+2] = rhs(k,j2+2);
           blockB[count+3] = rhs(k,j2+3);
         }
+        if (nr>=8)
+        {
+          blockB[count+4] = rhs(k,j2+4);
+          blockB[count+5] = rhs(k,j2+5);
+          blockB[count+6] = rhs(k,j2+6);
+          blockB[count+7] = rhs(k,j2+7);
+        }
         count += nr;
       }
     }
@@ -153,11 +174,18 @@ struct symm_pack_rhs
       {
         blockB[count+0] = numext::conj(rhs(j2+0,k));
         blockB[count+1] = numext::conj(rhs(j2+1,k));
-        if (nr==4)
+        if (nr>=4)
        {
           blockB[count+2] = numext::conj(rhs(j2+2,k));
           blockB[count+3] = numext::conj(rhs(j2+3,k));
         }
+        if (nr>=8)
+        {
+          blockB[count+4] = numext::conj(rhs(j2+4,k));
+          blockB[count+5] = numext::conj(rhs(j2+5,k));
+          blockB[count+6] = numext::conj(rhs(j2+6,k));
+          blockB[count+7] = numext::conj(rhs(j2+7,k));
+        }
         count += nr;
       }
     }
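The symm_pack_rhs hunks above all make the same generalization: the register-block width test `nr==4` becomes `nr>=4`, and an `nr>=8` path is added so that wider kernels (8-column register blocks) can be packed. A minimal sketch of the pattern with hypothetical names, independent of Eigen's internals:

#include <cstddef>

// Packs `nr` interleaved columns per step; the compile-time width decides
// which of the widening branches are active (dead branches fold away).
template<int nr>
void pack_columns(double* dst, const double* src, std::size_t rows, std::size_t colStride)
{
  std::size_t count = 0;
  for (std::size_t k = 0; k < rows; ++k)
  {
    dst[count+0] = src[k + 0*colStride];
    dst[count+1] = src[k + 1*colStride];
    if (nr >= 4)
    {
      dst[count+2] = src[k + 2*colStride];
      dst[count+3] = src[k + 3*colStride];
    }
    if (nr >= 8)
    {
      for (int j = 4; j < 8; ++j)
        dst[count+j] = src[k + j*colStride];
    }
    count += nr;
  }
}

// Explicit instantiations for the widths the packing code now supports.
template void pack_columns<4>(double*, const double*, std::size_t, std::size_t);
template void pack_columns<8>(double*, const double*, std::size_t, std::size_t);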
@@ -125,11 +125,9 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
 
     std::size_t sizeA = kc*mc;
     std::size_t sizeB = kc*cols;
-    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
 
     ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
     ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
-    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
 
     Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer;
     triangularBuffer.setZero();
@@ -187,7 +185,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
           pack_lhs(blockA, triangularBuffer.data(), triangularBuffer.outerStride(), actualPanelWidth, actualPanelWidth);
 
           gebp_kernel(res+startBlock, resStride, blockA, blockB, actualPanelWidth, actualPanelWidth, cols, alpha,
-                      actualPanelWidth, actual_kc, 0, blockBOffset, blockW);
+                      actualPanelWidth, actual_kc, 0, blockBOffset);
 
           // GEBP with remaining micro panel
           if (lengthTarget>0)
@@ -197,7 +195,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
             pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget);
 
             gebp_kernel(res+startTarget, resStride, blockA, blockB, lengthTarget, actualPanelWidth, cols, alpha,
-                        actualPanelWidth, actual_kc, 0, blockBOffset, blockW);
+                        actualPanelWidth, actual_kc, 0, blockBOffset);
           }
         }
       }
@@ -211,7 +209,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
         gemm_pack_lhs<Scalar, Index, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
           (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
 
-        gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
+        gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0);
       }
     }
   }
@@ -266,11 +264,9 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
 
     std::size_t sizeA = kc*mc;
     std::size_t sizeB = kc*cols;
-    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
 
     ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
     ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
-    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
 
     Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer;
     triangularBuffer.setZero();
@@ -357,14 +353,13 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
                       actual_mc, panelLength, actualPanelWidth,
                       alpha,
                       actual_kc, actual_kc,  // strides
-                      blockOffset, blockOffset,// offsets
-                      blockW); // workspace
+                      blockOffset, blockOffset);// offsets
         }
       }
       gebp_kernel(res+i2+(IsLower ? 0 : k2)*resStride, resStride,
                   blockA, geb, actual_mc, actual_kc, rs,
                   alpha,
-                  -1, -1, 0, 0, blockW);
+                  -1, -1, 0, 0);
     }
   }
 }
@@ -66,11 +66,9 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
 
     std::size_t sizeA = kc*mc;
     std::size_t sizeB = kc*cols;
-    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
 
     ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
     ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
-    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
 
     conj_if<Conjugate> conj;
     gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, Conjugate, false> gebp_kernel;
@@ -158,7 +156,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
             pack_lhs(blockA, &tri(startTarget,startBlock), triStride, actualPanelWidth, lengthTarget);
 
             gebp_kernel(&other(startTarget,j2), otherStride, blockA, blockB+actual_kc*j2, lengthTarget, actualPanelWidth, actual_cols, Scalar(-1),
-                        actualPanelWidth, actual_kc, 0, blockBOffset, blockW);
+                        actualPanelWidth, actual_kc, 0, blockBOffset);
           }
         }
       }
@@ -174,7 +172,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
       {
         pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);
 
-        gebp_kernel(_other+i2, otherStride, blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1), -1, -1, 0, 0, blockW);
+        gebp_kernel(_other+i2, otherStride, blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1), -1, -1, 0, 0);
       }
     }
   }
@@ -215,11 +213,9 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
 
     std::size_t sizeA = kc*mc;
     std::size_t sizeB = kc*size;
-    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
 
     ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
     ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
-    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
 
     conj_if<Conjugate> conj;
     gebp_kernel<Scalar,Scalar, Index, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;
@@ -285,8 +281,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
                       actual_mc, panelLength, actualPanelWidth,
                       Scalar(-1),
                       actual_kc, actual_kc, // strides
-                      panelOffset, panelOffset, // offsets
-                      blockW); // workspace
+                      panelOffset, panelOffset); // offsets
         }
 
         // unblocked triangular solve
@@ -317,7 +312,7 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
         if (rs>0)
           gebp_kernel(_other+i2+startPanel*otherStride, otherStride, blockA, geb,
                       actual_mc, actual_kc, rs, Scalar(-1),
-                      -1, -1, 0, 0, blockW);
+                      -1, -1, 0, 0);
       }
     }
   }
@@ -700,7 +700,7 @@ template<typename T> class aligned_stack_memory_handler
  * \sa \ref TopicStlContainers.
  */
 template<class T>
-class aligned_allocator
+class aligned_allocator : public std::allocator<T>
 {
 public:
   typedef size_t size_type;
@@ -717,81 +717,25 @@ public:
     typedef aligned_allocator<U> other;
   };
 
-  pointer address( reference value ) const
-  {
-    return &value;
-  }
-
-  const_pointer address( const_reference value ) const
-  {
-    return &value;
-  }
-
-  aligned_allocator()
-  {
-  }
-
-  aligned_allocator( const aligned_allocator& )
-  {
-  }
+  aligned_allocator() : std::allocator<T>() {}
+
+  aligned_allocator(const aligned_allocator& other) : std::allocator<T>(other) {}
 
   template<class U>
-  aligned_allocator( const aligned_allocator<U>& )
-  {
-  }
+  aligned_allocator(const aligned_allocator<U>& other) : std::allocator<T>(other) {}
 
-  ~aligned_allocator()
-  {
-  }
-
-  size_type max_size() const
-  {
-    return (std::numeric_limits<size_type>::max)();
-  }
+  ~aligned_allocator() {}
 
-  pointer allocate( size_type num, const void* hint = 0 )
+  pointer allocate(size_type num, const void* /*hint*/ = 0)
   {
-    EIGEN_UNUSED_VARIABLE(hint);
     internal::check_size_for_overflow<T>(num);
-    return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
+    return static_cast<pointer>( internal::aligned_malloc(num * sizeof(T)) );
   }
 
-  void construct( pointer p, const T& value )
+  void deallocate(pointer p, size_type /*num*/)
   {
-    ::new( p ) T( value );
+    internal::aligned_free(p);
   }
-
-#if (__cplusplus >= 201103L)
-  template <typename U, typename... Args>
-  void construct( U* u, Args&&... args)
-  {
-    ::new( static_cast<void*>(u) ) U( std::forward<Args>( args )... );
-  }
-#endif
-
-  void destroy( pointer p )
-  {
-    p->~T();
-  }
-
-#if (__cplusplus >= 201103L)
-  template <typename U>
-  void destroy( U* u )
-  {
-    u->~U();
-  }
-#endif
-
-  void deallocate( pointer p, size_type /*num*/ )
-  {
-    internal::aligned_free( p );
-  }
-
-  bool operator!=(const aligned_allocator<T>& ) const
-  { return false; }
-
-  bool operator==(const aligned_allocator<T>& ) const
-  { return true; }
 };
 
 //---------- Cache sizes ----------
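For context, aligned_allocator is the allocator that the \ref TopicStlContainers page recommends for STL containers of fixed-size vectorizable Eigen types; the rewrite above (deriving from std::allocator and keeping only the aligned allocate/deallocate overrides) leaves that public usage unchanged. A minimal usage example:

#include <vector>
#include <Eigen/Core>

int main()
{
  // Vector4f is fixed-size vectorizable, so its elements must be 16-byte
  // aligned; aligned_allocator guarantees this where std::allocator
  // (pre-C++17) does not.
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > vecs(8);
  vecs[0].setZero();
  return 0;
}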
@@ -165,8 +165,8 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
   Scalar n2 = q.vec().squaredNorm();
   if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
   {
-    m_angle = 0;
-    m_axis << 1, 0, 0;
+    m_angle = Scalar(0);
+    m_axis << Scalar(1), Scalar(0), Scalar(0);
   }
   else
   {
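The explicit `Scalar(...)` casts matter for custom scalar types (multiprecision or autodiff scalars, for instance) that do not implicitly convert from integer literals. A small example exercising the patched branch with the plain float scalar:

#include <Eigen/Geometry>

int main()
{
  // An identity quaternion has a zero vector part, so the conversion takes
  // the small-norm branch above: angle 0 about the (1,0,0) axis.
  Eigen::Quaternionf q = Eigen::Quaternionf::Identity();
  Eigen::AngleAxisf aa(q);
  return aa.angle() == 0.0f ? 0 : 1;
}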
@@ -48,7 +48,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec
   typedef typename MatrixType::Index Index;
   enum { TFactorSize = MatrixType::ColsAtCompileTime };
   Index nbVecs = vectors.cols();
-  Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize> T(nbVecs,nbVecs);
+  Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, ColMajor> T(nbVecs,nbVecs);
   make_block_householder_triangular_factor(T, vectors, hCoeffs);
 
   const TriangularView<const VectorsType, UnitLower>& V(vectors);
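The added ColMajor template argument pins down the triangular factor's storage order: the default for Matrix<> flips to row-major when EIGEN_DEFAULT_TO_ROW_MAJOR is defined, and this code assumes a column-major layout. A one-line illustration of requesting the layout explicitly:

#include <Eigen/Core>

// Storage order is fixed at ColMajor regardless of EIGEN_DEFAULT_TO_ROW_MAJOR.
typedef Eigen::Matrix<float, 4, 4, Eigen::ColMajor> TFactor;

int main()
{
  TFactor T;
  T.setIdentity();
  return 0;
}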