Generalized the matrix vector product code.

Benoit Steiner 2014-10-31 16:33:51 -07:00
parent 7f2c6ed2fa
commit 2dde63499c
5 changed files with 228 additions and 167 deletions
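At a glance: the GEMV kernels stop taking raw pointer/stride pairs and instead take mapper objects (const_blas_data_mapper and friends) that encapsulate the data pointer, its stride, and how to load scalars and packets from it. A standalone toy analogue of the idea, with made-up names (ConstColMajorMapper and gemv_colmajor are illustrative, not Eigen API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Toy analogue of the mapper abstraction: the kernel addresses its operands
    // only through an indexable view, so the same kernel body can later serve
    // other storage schemes.
    template <typename Scalar>
    struct ConstColMajorMapper {
      const Scalar* data;
      std::ptrdiff_t stride;  // distance between consecutive columns
      const Scalar& operator()(std::ptrdiff_t i, std::ptrdiff_t j) const {
        return data[i + j * stride];
      }
    };

    // y += alpha * A * x, written purely against the mapper interface.
    template <typename Scalar, typename Mapper>
    void gemv_colmajor(std::ptrdiff_t rows, std::ptrdiff_t cols, const Mapper& A,
                       const Scalar* x, Scalar* y, Scalar alpha) {
      for (std::ptrdiff_t j = 0; j < cols; ++j)
        for (std::ptrdiff_t i = 0; i < rows; ++i)
          y[i] += alpha * A(i, j) * x[j];
    }

    int main() {
      std::vector<double> A = {1, 2, 3, 4};  // 2x2, column-major: [[1,3],[2,4]]
      std::vector<double> x = {1, 1}, y = {0, 0};
      gemv_colmajor<double>(2, 2, ConstColMajorMapper<double>{A.data(), 2},
                            x.data(), y.data(), 1.0);
      std::cout << y[0] << " " << y[1] << "\n";  // prints: 4 6
    }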

Eigen/src/Core/GeneralProduct.h

@@ -11,7 +11,7 @@
 #ifndef EIGEN_GENERAL_PRODUCT_H
 #define EIGEN_GENERAL_PRODUCT_H
 
 namespace Eigen {
 
 /** \class GeneralProduct
   * \ingroup Core_Module
@@ -257,7 +257,7 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
   : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
 {
     template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
 
   public:
     EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
@@ -266,7 +266,7 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
       EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
     }
 
    struct set  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived()  = src; } };
    struct add  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
    struct sub  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
@@ -277,12 +277,12 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
         dst.const_cast_derived() += m_scale * src;
       }
     };
 
     template<typename Dest>
     inline void evalTo(Dest& dest) const {
       internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
     }
 
     template<typename Dest>
     inline void addTo(Dest& dest) const {
       internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
@@ -436,12 +436,12 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
     bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
     bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
 
     RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
 
     ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                   evalToDest ? dest.data() : static_dest.data());
 
     if(!evalToDest)
     {
       #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
@@ -457,11 +457,13 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
         MappedDest(actualDestPtr, dest.size()) = dest;
     }
 
+    typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
+    typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;
     general_matrix_vector_product
-      <Index,LhsScalar,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+      <Index,LhsScalar,LhsMapper,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(
         actualLhs.rows(), actualLhs.cols(),
-        actualLhs.data(), actualLhs.outerStride(),
-        actualRhs.data(), actualRhs.innerStride(),
+        LhsMapper(actualLhs.data(), actualLhs.outerStride()),
+        RhsMapper(actualRhs.data(), actualRhs.innerStride()),
         actualDestPtr, 1,
         compatibleAlpha);
@@ -516,11 +518,13 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
       Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
     }
 
+    typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;
+    typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;
     general_matrix_vector_product
-      <Index,LhsScalar,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+      <Index,LhsScalar,LhsMapper,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(
         actualLhs.rows(), actualLhs.cols(),
-        actualLhs.data(), actualLhs.outerStride(),
-        actualRhsPtr, 1,
+        LhsMapper(actualLhs.data(), actualLhs.outerStride()),
+        RhsMapper(actualRhsPtr, 1),
         dest.data(), dest.innerStride(),
         actualAlpha);
   }
@@ -594,7 +598,7 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
 #ifdef EIGEN_DEBUG_PRODUCT
   internal::product_type<Derived,OtherDerived>::debug();
 #endif
   return Product<Derived, OtherDerived>(derived(), other.derived());
 }
 #else
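A detail worth noting in the gemv_selector hunks above: the right-hand-side vector is wrapped in a RowMajor mapper even on the column-major path. With StorageOrder==RowMajor, blas_data_mapper::operator()(i, j) indexes data[j + i*stride] (see the BlasUtil.h hunk at the end of this diff), so rhs(i, 0) is data[i*stride], which reproduces the old strided access rhs[i*rhsIncr]. A minimal demonstration of that indexing rule (toy code, not the Eigen class):

    #include <cassert>
    #include <cstddef>

    int main() {
      const double data[6] = {10, 0, 20, 0, 30, 0};
      const std::ptrdiff_t stride = 2;  // plays the role of innerStride
      auto row_major_at = [&](std::ptrdiff_t i, std::ptrdiff_t j) {
        return data[j + i * stride];    // StorageOrder==RowMajor indexing
      };
      assert(row_major_at(0, 0) == 10);
      assert(row_major_at(1, 0) == 20);
      assert(row_major_at(2, 0) == 30);
      return 0;
    }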

Eigen/src/Core/products/GeneralMatrixVector.h

@@ -10,7 +10,7 @@
 #ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
 #define EIGEN_GENERAL_MATRIX_VECTOR_H
 
 namespace Eigen {
 
 namespace internal {
@@ -48,17 +48,17 @@ namespace internal {
  *  // we currently fall back to the NoneAligned case
  *
  * The same reasoning apply for the transposed case.
  *
  * The last case (PacketSize>4) could probably be improved by generalizing the FirstAligned case, but since we do not support AVX yet...
  * One might also wonder why in the EvenAligned case we perform unaligned loads instead of using the aligned-loads plus re-alignment
  * strategy as in the FirstAligned case. The reason is that we observed that unaligned loads on a 8 byte boundary are not too slow
  * compared to unaligned loads on a 4 byte boundary.
  *
  */
-template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
-struct general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
+template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
+struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
 {
 typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
 
 enum {
   Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
@@ -78,17 +78,17 @@ typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
 EIGEN_DONT_INLINE static void run(
   Index rows, Index cols,
-  const LhsScalar* lhs, Index lhsStride,
-  const RhsScalar* rhs, Index rhsIncr,
+  const LhsMapper& lhs,
+  const RhsMapper& rhs,
   ResScalar* res, Index resIncr,
   RhsScalar alpha);
 };
 
-template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
+template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
   Index rows, Index cols,
-  const LhsScalar* lhs, Index lhsStride,
-  const RhsScalar* rhs, Index rhsIncr,
+  const LhsMapper& lhs,
+  const RhsMapper& rhs,
   ResScalar* res, Index resIncr,
   RhsScalar alpha)
 {
@@ -97,14 +97,16 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
 #ifdef _EIGEN_ACCUMULATE_PACKETS
 #error _EIGEN_ACCUMULATE_PACKETS has already been defined
 #endif
 
-#define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \
+#define _EIGEN_ACCUMULATE_PACKETS(Alignment0,Alignment13,Alignment2) \
   pstore(&res[j], \
     padd(pload<ResPacket>(&res[j]), \
       padd( \
-        padd(pcj.pmul(EIGEN_CAT(ploa , A0)<LhsPacket>(&lhs0[j]),    ptmp0), \
-             pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs1[j]),   ptmp1)), \
-        padd(pcj.pmul(EIGEN_CAT(ploa , A2)<LhsPacket>(&lhs2[j]),    ptmp2), \
-             pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs3[j]),   ptmp3)) )))
+        padd(pcj.pmul(lhs0.template load<LhsPacket, Alignment0>(j),    ptmp0), \
+             pcj.pmul(lhs1.template load<LhsPacket, Alignment13>(j),   ptmp1)), \
+        padd(pcj.pmul(lhs2.template load<LhsPacket, Alignment2>(j),    ptmp2), \
+             pcj.pmul(lhs3.template load<LhsPacket, Alignment13>(j),   ptmp3)) )))
+
+  typedef typename LhsMapper::VectorMapper LhsScalars;
 
   conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
   conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
@@ -118,7 +120,9 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
   const Index ResPacketAlignedMask = ResPacketSize-1;
 //  const Index PeelAlignedMask = ResPacketSize*peels-1;
   const Index size = rows;
+  const Index lhsStride = lhs.stride();
 
   // How many coeffs of the result do we have to skip to be aligned.
   // Here we assume data are at least aligned on the base scalar type.
   Index alignedStart = internal::first_aligned(res,size);
@@ -131,12 +135,12 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
                          : FirstAligned;
 
   // we cannot assume the first element is aligned because of sub-matrices
-  const Index lhsAlignmentOffset = internal::first_aligned(lhs,size);
+  const Index lhsAlignmentOffset = lhs.firstAligned(size);
 
   // find how many columns do we have to skip to be aligned with the result (if possible)
   Index skipColumns = 0;
   // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
-  if( (size_t(lhs)%sizeof(LhsScalar)) || (size_t(res)%sizeof(ResScalar)) )
+  if( (lhsAlignmentOffset < 0) || (size_t(res)%sizeof(ResScalar)) )
   {
     alignedSize = 0;
     alignedStart = 0;
@@ -149,7 +153,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
   }
   else if (LhsPacketSize>1)
   {
-    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);
+  //  eigen_internal_assert(size_t(firstLhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);
 
     while (skipColumns<LhsPacketSize &&
            alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%LhsPacketSize))
@@ -166,10 +170,10 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
       // note that the skiped columns are processed later.
     }
 
-    eigen_internal_assert(  (alignmentPattern==NoneAligned)
+    /*    eigen_internal_assert(  (alignmentPattern==NoneAligned)
                       || (skipColumns + columnsAtOnce >= cols)
                       || LhsPacketSize > size
-                      || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);
+                      || (size_t(firstLhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);*/
   }
   else if(Vectorizable)
   {
@@ -178,20 +182,20 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
     alignmentPattern = AllAligned;
   }
 
-  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
-  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+  const Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+  const Index offset3 = (FirstAligned && alignmentStep==1?1:3);
 
   Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
   for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
   {
-    RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[i*rhsIncr]),
-              ptmp1 = pset1<RhsPacket>(alpha*rhs[(i+offset1)*rhsIncr]),
-              ptmp2 = pset1<RhsPacket>(alpha*rhs[(i+2)*rhsIncr]),
-              ptmp3 = pset1<RhsPacket>(alpha*rhs[(i+offset3)*rhsIncr]);
+    RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs(i, 0)),
+              ptmp1 = pset1<RhsPacket>(alpha*rhs(i+offset1, 0)),
+              ptmp2 = pset1<RhsPacket>(alpha*rhs(i+2, 0)),
+              ptmp3 = pset1<RhsPacket>(alpha*rhs(i+offset3, 0));
 
     // this helps a lot generating better binary code
-    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
-                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+    const LhsScalars lhs0 = lhs.getVectorMapper(0, i+0),   lhs1 = lhs.getVectorMapper(0, i+offset1),
+                     lhs2 = lhs.getVectorMapper(0, i+2),   lhs3 = lhs.getVectorMapper(0, i+offset3);
 
     if (Vectorizable)
     {
@@ -199,10 +203,10 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
       // process initial unaligned coeffs
       for (Index j=0; j<alignedStart; ++j)
       {
-        res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
-        res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
-        res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
-        res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+        res[j] = cj.pmadd(lhs0(j), pfirst(ptmp0), res[j]);
+        res[j] = cj.pmadd(lhs1(j), pfirst(ptmp1), res[j]);
+        res[j] = cj.pmadd(lhs2(j), pfirst(ptmp2), res[j]);
+        res[j] = cj.pmadd(lhs3(j), pfirst(ptmp3), res[j]);
       }
 
       if (alignedSize>alignedStart)
@@ -211,11 +215,11 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
       {
         case AllAligned:
           for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
-            _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+            _EIGEN_ACCUMULATE_PACKETS(Aligned,Aligned,Aligned);
           break;
         case EvenAligned:
          for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
-            _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+            _EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Aligned);
          break;
        case FirstAligned:
        {
@@ -225,28 +229,28 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
             LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
             ResPacket T0, T1;
 
-            A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
-            A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
-            A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+            A01 = lhs1.template load<LhsPacket, Aligned>(alignedStart-1);
+            A02 = lhs2.template load<LhsPacket, Aligned>(alignedStart-2);
+            A03 = lhs3.template load<LhsPacket, Aligned>(alignedStart-3);
 
             for (; j<peeledSize; j+=peels*ResPacketSize)
             {
-              A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
-              A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
-              A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
+              A11 = lhs1.template load<LhsPacket, Aligned>(j-1+LhsPacketSize);  palign<1>(A01,A11);
+              A12 = lhs2.template load<LhsPacket, Aligned>(j-2+LhsPacketSize);  palign<2>(A02,A12);
+              A13 = lhs3.template load<LhsPacket, Aligned>(j-3+LhsPacketSize);  palign<3>(A03,A13);
 
-              A00 = pload<LhsPacket>(&lhs0[j]);
-              A10 = pload<LhsPacket>(&lhs0[j+LhsPacketSize]);
+              A00 = lhs0.template load<LhsPacket, Aligned>(j);
+              A10 = lhs0.template load<LhsPacket, Aligned>(j+LhsPacketSize);
               T0  = pcj.pmadd(A00, ptmp0, pload<ResPacket>(&res[j]));
               T1  = pcj.pmadd(A10, ptmp0, pload<ResPacket>(&res[j+ResPacketSize]));
 
               T0  = pcj.pmadd(A01, ptmp1, T0);
-              A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
+              A01 = lhs1.template load<LhsPacket, Aligned>(j-1+2*LhsPacketSize);  palign<1>(A11,A01);
               T0  = pcj.pmadd(A02, ptmp2, T0);
-              A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
+              A02 = lhs2.template load<LhsPacket, Aligned>(j-2+2*LhsPacketSize);  palign<2>(A12,A02);
              T0  = pcj.pmadd(A03, ptmp3, T0);
              pstore(&res[j],T0);
-              A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
+              A03 = lhs3.template load<LhsPacket, Aligned>(j-3+2*LhsPacketSize);  palign<3>(A13,A03);
              T1  = pcj.pmadd(A11, ptmp1, T1);
              T1  = pcj.pmadd(A12, ptmp2, T1);
              T1  = pcj.pmadd(A13, ptmp3, T1);
@@ -254,12 +258,12 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
           }
         }
           for (; j<alignedSize; j+=ResPacketSize)
-            _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+            _EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Unaligned);
           break;
         }
         default:
           for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
-            _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+            _EIGEN_ACCUMULATE_PACKETS(Unaligned,Unaligned,Unaligned);
           break;
       }
     }
@@ -268,10 +272,10 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
     /* process remaining coeffs (or all if there is no explicit vectorization) */
     for (Index j=alignedSize; j<size; ++j)
     {
-      res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
-      res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
-      res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
-      res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+      res[j] = cj.pmadd(lhs0(j), pfirst(ptmp0), res[j]);
+      res[j] = cj.pmadd(lhs1(j), pfirst(ptmp1), res[j]);
+      res[j] = cj.pmadd(lhs2(j), pfirst(ptmp2), res[j]);
+      res[j] = cj.pmadd(lhs3(j), pfirst(ptmp3), res[j]);
     }
   }
@@ -282,27 +286,27 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
    {
     for (Index k=start; k<end; ++k)
     {
-      RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[k*rhsIncr]);
-      const LhsScalar* lhs0 = lhs + k*lhsStride;
+      RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs(k, 0));
+      const LhsScalars lhs0 = lhs.getVectorMapper(0, k);
 
       if (Vectorizable)
       {
        /* explicit vectorization */
        // process first unaligned result's coeffs
        for (Index j=0; j<alignedStart; ++j)
-         res[j] += cj.pmul(lhs0[j], pfirst(ptmp0));
+         res[j] += cj.pmul(lhs0(j), pfirst(ptmp0));
        // process aligned result's coeffs
-       if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+       if (lhs0.template aligned<LhsPacket>(alignedStart))
          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
-           pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+           pstore(&res[i], pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(i), ptmp0, pload<ResPacket>(&res[i])));
        else
          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
-           pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
       }
+           pstore(&res[i], pcj.pmadd(lhs0.template load<LhsPacket, Unaligned>(i), ptmp0, pload<ResPacket>(&res[i])));
+      }
 
       // process remaining scalars (or all if no explicit vectorization)
       for (Index i=alignedSize; i<size; ++i)
-        res[i] += cj.pmul(lhs0[i], pfirst(ptmp0));
+        res[i] += cj.pmul(lhs0(i), pfirst(ptmp0));
     }
     if (skipColumns)
     {
@@ -326,8 +330,8 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
   * - alpha is always a complex (or converted to a complex)
   * - no vectorization
   */
-template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
-struct general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
+template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
+struct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>
 {
 typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
@@ -346,67 +350,69 @@ typedef typename packet_traits<ResScalar>::type _ResPacket;
 typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
 typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
 typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
 
 EIGEN_DONT_INLINE static void run(
   Index rows, Index cols,
-  const LhsScalar* lhs, Index lhsStride,
-  const RhsScalar* rhs, Index rhsIncr,
+  const LhsMapper& lhs,
+  const RhsMapper& rhs,
   ResScalar* res, Index resIncr,
   ResScalar alpha);
 };
 
-template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
-EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
+template<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(
   Index rows, Index cols,
-  const LhsScalar* lhs, Index lhsStride,
-  const RhsScalar* rhs, Index rhsIncr,
+  const LhsMapper& lhs,
+  const RhsMapper& rhs,
   ResScalar* res, Index resIncr,
   ResScalar alpha)
 {
-  EIGEN_UNUSED_VARIABLE(rhsIncr);
-  eigen_internal_assert(rhsIncr==1);
+  eigen_internal_assert(rhs.stride()==1);
+
 #ifdef _EIGEN_ACCUMULATE_PACKETS
 #error _EIGEN_ACCUMULATE_PACKETS has already been defined
 #endif
 
-#define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\
-  RhsPacket b = pload<RhsPacket>(&rhs[j]); \
-  ptmp0 = pcj.pmadd(EIGEN_CAT(ploa,A0) <LhsPacket>(&lhs0[j]), b, ptmp0); \
-  ptmp1 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs1[j]), b, ptmp1); \
-  ptmp2 = pcj.pmadd(EIGEN_CAT(ploa,A2) <LhsPacket>(&lhs2[j]), b, ptmp2); \
-  ptmp3 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs3[j]), b, ptmp3); }
+#define _EIGEN_ACCUMULATE_PACKETS(Alignment0,Alignment13,Alignment2) {\
+  RhsPacket b = rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0); \
+  ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Alignment0>(j), b, ptmp0); \
+  ptmp1 = pcj.pmadd(lhs1.template load<LhsPacket, Alignment13>(j), b, ptmp1); \
+  ptmp2 = pcj.pmadd(lhs2.template load<LhsPacket, Alignment2>(j), b, ptmp2); \
+  ptmp3 = pcj.pmadd(lhs3.template load<LhsPacket, Alignment13>(j), b, ptmp3); }
 
   conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
   conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
 
+  typedef typename LhsMapper::VectorMapper LhsScalars;
+
   enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
   const Index rowsAtOnce = 4;
   const Index peels = 2;
   const Index RhsPacketAlignedMask = RhsPacketSize-1;
   const Index LhsPacketAlignedMask = LhsPacketSize-1;
-//  const Index PeelAlignedMask = RhsPacketSize*peels-1;
   const Index depth = cols;
+  const Index lhsStride = lhs.stride();
 
   // How many coeffs of the result do we have to skip to be aligned.
   // Here we assume data are at least aligned on the base scalar type
   // if that's not the case then vectorization is discarded, see below.
-  Index alignedStart = internal::first_aligned(rhs, depth);
+  Index alignedStart = rhs.firstAligned(depth);
   Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
   const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
 
   const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
   Index alignmentPattern = alignmentStep==0 ? AllAligned
                          : alignmentStep==(LhsPacketSize/2) ? EvenAligned
                          : FirstAligned;
 
   // we cannot assume the first element is aligned because of sub-matrices
-  const Index lhsAlignmentOffset = internal::first_aligned(lhs,depth);
+  const Index lhsAlignmentOffset = lhs.firstAligned(depth);
+  const Index rhsAlignmentOffset = rhs.firstAligned(rows);
 
   // find how many rows do we have to skip to be aligned with rhs (if possible)
   Index skipRows = 0;
   // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
-  if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || (size_t(lhs)%sizeof(LhsScalar)) || (size_t(rhs)%sizeof(RhsScalar)) )
+  if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || (lhsAlignmentOffset < 0) || (rhsAlignmentOffset < 0) )
  {
    alignedSize = 0;
    alignedStart = 0;
@@ -418,7 +424,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
   }
   else if (LhsPacketSize>1)
   {
-    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || depth<LhsPacketSize);
+  //  eigen_internal_assert(size_t(firstLhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || depth<LhsPacketSize);
 
     while (skipRows<LhsPacketSize &&
            alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%LhsPacketSize))
@@ -434,11 +440,11 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
       skipRows = (std::min)(skipRows,Index(rows));
       // note that the skiped columns are processed later.
     }
-    eigen_internal_assert(  alignmentPattern==NoneAligned
+    /*    eigen_internal_assert(  alignmentPattern==NoneAligned
                       || LhsPacketSize==1
                       || (skipRows + rowsAtOnce >= rows)
                       || LhsPacketSize > depth
-                      || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);
+                      || (size_t(firstLhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);*/
   }
   else if(Vectorizable)
   {
@@ -447,8 +453,8 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
     alignmentPattern = AllAligned;
   }
 
-  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
-  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+  const Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+  const Index offset3 = (FirstAligned && alignmentStep==1?1:3);
 
   Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
   for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
@@ -457,8 +463,8 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
     ResScalar tmp1 = ResScalar(0), tmp2 = ResScalar(0), tmp3 = ResScalar(0);
 
     // this helps the compiler generating good binary code
-    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
-                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+    const LhsScalars lhs0 = lhs.getVectorMapper(i+0, 0),    lhs1 = lhs.getVectorMapper(i+offset1, 0),
+                     lhs2 = lhs.getVectorMapper(i+2, 0),    lhs3 = lhs.getVectorMapper(i+offset3, 0);
 
     if (Vectorizable)
     {
@@ -470,9 +476,9 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
       // FIXME this loop get vectorized by the compiler !
       for (Index j=0; j<alignedStart; ++j)
       {
-        RhsScalar b = rhs[j];
-        tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
-        tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+        RhsScalar b = rhs(j, 0);
+        tmp0 += cj.pmul(lhs0(j),b); tmp1 += cj.pmul(lhs1(j),b);
+        tmp2 += cj.pmul(lhs2(j),b); tmp3 += cj.pmul(lhs3(j),b);
       }
 
       if (alignedSize>alignedStart)
@@ -481,11 +487,11 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
        {
         case AllAligned:
           for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
-            _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+            _EIGEN_ACCUMULATE_PACKETS(Aligned,Aligned,Aligned);
          break;
        case EvenAligned:
          for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
-            _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+            _EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Aligned);
          break;
        case FirstAligned:
        {
@@ -499,39 +505,39 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
            * than basic unaligned loads.
            */
           LhsPacket A01, A02, A03, A11, A12, A13;
-          A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
-          A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
-          A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+          A01 = lhs1.template load<LhsPacket, Aligned>(alignedStart-1);
+          A02 = lhs2.template load<LhsPacket, Aligned>(alignedStart-2);
+          A03 = lhs3.template load<LhsPacket, Aligned>(alignedStart-3);
 
           for (; j<peeledSize; j+=peels*RhsPacketSize)
           {
-            RhsPacket b = pload<RhsPacket>(&rhs[j]);
-            A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
-            A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
-            A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
+            RhsPacket b = rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0);
+            A11 = lhs1.template load<LhsPacket, Aligned>(j-1+LhsPacketSize);  palign<1>(A01,A11);
+            A12 = lhs2.template load<LhsPacket, Aligned>(j-2+LhsPacketSize);  palign<2>(A02,A12);
+            A13 = lhs3.template load<LhsPacket, Aligned>(j-3+LhsPacketSize);  palign<3>(A03,A13);
 
-            ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), b, ptmp0);
+            ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(j), b, ptmp0);
             ptmp1 = pcj.pmadd(A01, b, ptmp1);
-            A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
+            A01 = lhs1.template load<LhsPacket, Aligned>(j-1+2*LhsPacketSize);  palign<1>(A11,A01);
            ptmp2 = pcj.pmadd(A02, b, ptmp2);
-            A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
+            A02 = lhs2.template load<LhsPacket, Aligned>(j-2+2*LhsPacketSize);  palign<2>(A12,A02);
            ptmp3 = pcj.pmadd(A03, b, ptmp3);
-            A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
+            A03 = lhs3.template load<LhsPacket, Aligned>(j-3+2*LhsPacketSize);  palign<3>(A13,A03);
 
-            b = pload<RhsPacket>(&rhs[j+RhsPacketSize]);
-            ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j+LhsPacketSize]), b, ptmp0);
+            b = rhs.getVectorMapper(j+RhsPacketSize, 0).template load<RhsPacket, Aligned>(0);
+            ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(j+LhsPacketSize), b, ptmp0);
            ptmp1 = pcj.pmadd(A11, b, ptmp1);
            ptmp2 = pcj.pmadd(A12, b, ptmp2);
            ptmp3 = pcj.pmadd(A13, b, ptmp3);
          }
        }
          for (; j<alignedSize; j+=RhsPacketSize)
-           _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+           _EIGEN_ACCUMULATE_PACKETS(Aligned,Unaligned,Unaligned);
          break;
        }
        default:
          for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
-           _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+           _EIGEN_ACCUMULATE_PACKETS(Unaligned,Unaligned,Unaligned);
          break;
       }
       tmp0 += predux(ptmp0);
@@ -545,9 +551,9 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
     // FIXME this loop get vectorized by the compiler !
     for (Index j=alignedSize; j<depth; ++j)
     {
-      RhsScalar b = rhs[j];
-      tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
-      tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+      RhsScalar b = rhs(j, 0);
+      tmp0 += cj.pmul(lhs0(j),b); tmp1 += cj.pmul(lhs1(j),b);
+      tmp2 += cj.pmul(lhs2(j),b); tmp3 += cj.pmul(lhs3(j),b);
     }
     res[i*resIncr]           += alpha*tmp0;
     res[(i+offset1)*resIncr] += alpha*tmp1;
@@ -564,28 +570,28 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,Co
   {
     EIGEN_ALIGN_DEFAULT ResScalar tmp0 = ResScalar(0);
     ResPacket ptmp0 = pset1<ResPacket>(tmp0);
-    const LhsScalar* lhs0 = lhs + i*lhsStride;
+    const LhsScalars lhs0 = lhs.getVectorMapper(i, 0);
    // process first unaligned result's coeffs
    // FIXME this loop get vectorized by the compiler !
    for (Index j=0; j<alignedStart; ++j)
-      tmp0 += cj.pmul(lhs0[j], rhs[j]);
+      tmp0 += cj.pmul(lhs0(j), rhs(j, 0));
 
    if (alignedSize>alignedStart)
    {
      // process aligned rhs coeffs
-     if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+     if (lhs0.template aligned<LhsPacket>(alignedStart))
        for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
-         ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+         ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Aligned>(j), rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0), ptmp0);
      else
        for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
-         ptmp0 = pcj.pmadd(ploadu<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+         ptmp0 = pcj.pmadd(lhs0.template load<LhsPacket, Unaligned>(j), rhs.getVectorMapper(j, 0).template load<RhsPacket, Aligned>(0), ptmp0);
      tmp0 += predux(ptmp0);
    }
 
    // process remaining scalars
    // FIXME this loop get vectorized by the compiler !
    for (Index j=alignedSize; j<depth; ++j)
-     tmp0 += cj.pmul(lhs0[j], rhs[j]);
+     tmp0 += cj.pmul(lhs0(j), rhs(j, 0));
    res[i*resIncr] += alpha*tmp0;
  }
  if (skipRows)
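The macro arguments also change meaning in this file: the old d/du tokens were pasted onto "ploa" via EIGEN_CAT to pick pload vs ploadu, while the new Aligned/Unaligned arguments are forwarded as the AlignmentType template parameter of the mapper's load<Packet, Alignment>() (backed by ploadt in BlasUtil.h). A toy single entry point dispatching on an alignment tag, with made-up names, shown with SSE intrinsics:

    #include <xmmintrin.h>

    enum LoadKind { LoadUnaligned, LoadAligned };

    // One load function; the tag selects the aligned or unaligned instruction.
    template <LoadKind Kind>
    __m128 load4(const float* p) {
      return Kind == LoadAligned ? _mm_load_ps(p)    // requires 16-byte alignment
                                 : _mm_loadu_ps(p);  // no alignment requirement
    }
    // usage: __m128 v = load4<LoadAligned>(ptr);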

Eigen/src/Core/products/TriangularMatrixVector.h

@@ -10,7 +10,7 @@
 #ifndef EIGEN_TRIANGULARMATRIXVECTOR_H
 #define EIGEN_TRIANGULARMATRIXVECTOR_H
 
 namespace Eigen {
 
 namespace internal {
@@ -43,7 +43,7 @@ EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,Con
   typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
   const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));
   typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);
 
   typedef Map<const Matrix<RhsScalar,Dynamic,1>, 0, InnerStride<> > RhsMap;
   const RhsMap rhs(_rhs,cols,InnerStride<>(rhsIncr));
   typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);
@@ -51,6 +51,9 @@ EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,Con
   typedef Map<Matrix<ResScalar,Dynamic,1> > ResMap;
   ResMap res(_res,rows);
 
+  typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
+  typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;
+
   for (Index pi=0; pi<size; pi+=PanelWidth)
   {
     Index actualPanelWidth = (std::min)(PanelWidth, size-pi);
@@ -68,19 +71,19 @@ EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,Con
     if (r>0)
     {
       Index s = IsLower ? pi+actualPanelWidth : 0;
-      general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjLhs,RhsScalar,ConjRhs,BuiltIn>::run(
+      general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs,BuiltIn>::run(
           r, actualPanelWidth,
-          &lhs.coeffRef(s,pi), lhsStride,
-          &rhs.coeffRef(pi), rhsIncr,
+          LhsMapper(&lhs.coeffRef(s,pi), lhsStride),
+          RhsMapper(&rhs.coeffRef(pi), rhsIncr),
          &res.coeffRef(s), resIncr, alpha);
     }
   }
   if((!IsLower) && cols>size)
   {
-    general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjLhs,RhsScalar,ConjRhs>::run(
+    general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs>::run(
        rows, cols-size,
-        &lhs.coeffRef(0,size), lhsStride,
-        &rhs.coeffRef(size), rhsIncr,
+        LhsMapper(&lhs.coeffRef(0,size), lhsStride),
+        RhsMapper(&rhs.coeffRef(size), rhsIncr),
       _res, resIncr, alpha);
   }
 }
@@ -118,7 +121,10 @@ EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,Con
   typedef Map<Matrix<ResScalar,Dynamic,1>, 0, InnerStride<> > ResMap;
   ResMap res(_res,rows,InnerStride<>(resIncr));
 
+  typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;
+  typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;
+
   for (Index pi=0; pi<diagSize; pi+=PanelWidth)
   {
     Index actualPanelWidth = (std::min)(PanelWidth, diagSize-pi);
@@ -136,19 +142,19 @@ EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,Con
     if (r>0)
     {
       Index s = IsLower ? 0 : pi + actualPanelWidth;
-      general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjLhs,RhsScalar,ConjRhs,BuiltIn>::run(
+      general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs,BuiltIn>::run(
          actualPanelWidth, r,
-          &lhs.coeffRef(pi,s), lhsStride,
-          &rhs.coeffRef(s), rhsIncr,
+          LhsMapper(&lhs.coeffRef(pi,s), lhsStride),
+          RhsMapper(&rhs.coeffRef(s), rhsIncr),
         &res.coeffRef(pi), resIncr, alpha);
     }
   }
   if(IsLower && rows>diagSize)
   {
-    general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjLhs,RhsScalar,ConjRhs>::run(
+    general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs>::run(
        rows-diagSize, cols,
-        &lhs.coeffRef(diagSize,0), lhsStride,
-        &rhs.coeffRef(0), rhsIncr,
+        LhsMapper(&lhs.coeffRef(diagSize,0), lhsStride),
+        RhsMapper(&rhs.coeffRef(0), rhsIncr),
       &res.coeffRef(diagSize), resIncr, alpha);
   }
 }
@@ -184,7 +190,7 @@ struct TriangularProduct<Mode,true,Lhs,false,Rhs,true>
   template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
   {
     eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
 
     internal::trmv_selector<(int(internal::traits<Lhs>::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dst, alpha);
   }
 };
@@ -211,7 +217,7 @@ struct TriangularProduct<Mode,false,Lhs,true,Rhs,false>
 namespace internal {
 
 // TODO: find a way to factorize this piece of code with gemv_selector since the logic is exactly the same.
 
 template<> struct trmv_selector<ColMajor>
 {
   template<int Mode, typename Lhs, typename Rhs, typename Dest>
@@ -247,7 +253,7 @@ template<> struct trmv_selector<ColMajor>
     bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
     bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
 
     RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
 
     ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
@@ -267,7 +273,7 @@ template<> struct trmv_selector<ColMajor>
       else
         MappedDest(actualDestPtr, dest.size()) = dest;
     }
 
     internal::triangular_matrix_vector_product
       <Index,Mode,
       LhsScalar, LhsBlasTraits::NeedToConjugate,
@@ -327,7 +333,7 @@ template<> struct trmv_selector<RowMajor>
     #endif
       Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
     }
 
     internal::triangular_matrix_vector_product
       <Index,Mode,
       LhsScalar, LhsBlasTraits::NeedToConjugate,
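The triangular kernels above reuse the same mappers for panels of the matrix: a mapper over a sub-block is just the block's base pointer (&lhs.coeffRef(s,pi)) together with the parent stride. A toy illustration of why that works (ColMapper is hypothetical, not Eigen's class):

    #include <cstddef>

    template <typename Scalar>
    struct ColMapper {
      const Scalar* data;
      std::ptrdiff_t stride;
      const Scalar& operator()(std::ptrdiff_t i, std::ptrdiff_t j) const {
        return data[i + j * stride];
      }
      // View of the sub-panel starting at (i0, j0): same stride, offset base.
      ColMapper block(std::ptrdiff_t i0, std::ptrdiff_t j0) const {
        return ColMapper{&(*this)(i0, j0), stride};
      }
    };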

Eigen/src/Core/products/TriangularSolverVector.h

@@ -10,7 +10,7 @@
 #ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H
 #define EIGEN_TRIANGULAR_SOLVER_VECTOR_H
 
 namespace Eigen {
 
 namespace internal {
@@ -25,7 +25,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheRight, Mode, Co
       >::run(size, _lhs, lhsStride, rhs);
   }
 };
 
 // forward and backward substitution, row-major, rhs is a vector
 template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>
 struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor>
@@ -37,6 +37,10 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
   {
     typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;
     const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
 
+    typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;
+    typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;
+
     typename internal::conditional<
                           Conjugate,
                           const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
@@ -58,10 +62,10 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
         Index startRow = IsLower ? pi : pi-actualPanelWidth;
         Index startCol = IsLower ? 0 : pi;
 
-        general_matrix_vector_product<Index,LhsScalar,RowMajor,Conjugate,RhsScalar,false>::run(
+        general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,Conjugate,RhsScalar,RhsMapper,false>::run(
            actualPanelWidth, r,
-            &lhs.coeffRef(startRow,startCol), lhsStride,
-            rhs + startCol, 1,
+            LhsMapper(&lhs.coeffRef(startRow,startCol), lhsStride),
+            RhsMapper(rhs + startCol, 1),
            rhs + startRow, 1,
            RhsScalar(-1));
       }
@@ -72,7 +76,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
         Index s = IsLower ? pi : i+1;
         if (k>0)
           rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum();
 
        if(!(Mode & UnitDiag))
          rhs[i] /= cjLhs(i,i);
      }
@@ -91,6 +95,8 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
   {
     typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
     const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
+    typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;
+    typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;
     typename internal::conditional<Conjugate,
                                    const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
                                    const LhsMap&
@@ -122,10 +128,10 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
           // let's directly call the low level product function because:
           // 1 - it is faster to compile
          // 2 - it is slighlty faster at runtime
-          general_matrix_vector_product<Index,LhsScalar,ColMajor,Conjugate,RhsScalar,false>::run(
+          general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,Conjugate,RhsScalar,RhsMapper,false>::run(
             r, actualPanelWidth,
-             &lhs.coeffRef(endBlock,startBlock), lhsStride,
-             rhs+startBlock, 1,
+             LhsMapper(&lhs.coeffRef(endBlock,startBlock), lhsStride),
+             RhsMapper(rhs+startBlock, 1),
            rhs+endBlock, 1, RhsScalar(-1));
         }
       }
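Note that the solver invokes the kernel with alpha = RhsScalar(-1): each call folds a panel product into the right-hand side, rhs[remaining] -= L_panel * rhs[solved], which is the blocked substitution step. A scalar sketch of that pattern (panel width 1, lower triangular, assumed example values):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
      const std::ptrdiff_t n = 3;
      std::vector<double> L = {2, 1, 4,  0, 3, 5,  0, 0, 6};  // column-major
      std::vector<double> b = {2, 4, 21};                     // solves to x = (1, 1, 2)
      for (std::ptrdiff_t j = 0; j < n; ++j) {
        b[j] /= L[j + j * n];                        // divide by the diagonal entry
        for (std::ptrdiff_t i = j + 1; i < n; ++i)   // the alpha = -1 gemv update
          b[i] -= L[i + j * n] * b[j];
      }
      std::cout << b[0] << " " << b[1] << " " << b[2] << "\n";  // prints: 1 1 2
    }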

Eigen/src/Core/util/BlasUtil.h

@@ -34,7 +34,9 @@ template<
   int ResStorageOrder>
 struct general_matrix_matrix_product;
 
-template<typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version=Specialized>
+template<typename Index,
+         typename LhsScalar, typename LhsMapper, int LhsStorageOrder, bool ConjugateLhs,
+         typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version=Specialized>
 struct general_matrix_vector_product;
@@ -118,13 +120,35 @@ template<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::R
 };
 
+template<typename Scalar, typename Index>
+class BlasVectorMapper {
+  public:
+  EIGEN_ALWAYS_INLINE BlasVectorMapper(Scalar *data) : m_data(data) {}
+
+  EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
+    return m_data[i];
+  }
+
+  template <typename Packet, int AlignmentType>
+  EIGEN_ALWAYS_INLINE Packet load(Index i) const {
+    return ploadt<Packet, AlignmentType>(m_data + i);
+  }
+
+  template <typename Packet>
+  bool aligned(Index i) const {
+    return (size_t(m_data+i)%sizeof(Packet))==0;
+  }
+
+  protected:
+  Scalar* m_data;
+};
+
 template<typename Scalar, typename Index, int AlignmentType>
-class MatrixLinearMapper {
+class BlasLinearMapper {
   public:
   typedef typename packet_traits<Scalar>::type Packet;
   typedef typename packet_traits<Scalar>::half HalfPacket;
 
-  EIGEN_ALWAYS_INLINE MatrixLinearMapper(Scalar *data) : m_data(data) {}
+  EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {}
 
   EIGEN_ALWAYS_INLINE void prefetch(int i) const {
     internal::prefetch(&operator()(i));
@@ -157,7 +181,8 @@ class blas_data_mapper {
   typedef typename packet_traits<Scalar>::type Packet;
   typedef typename packet_traits<Scalar>::half HalfPacket;
 
-  typedef MatrixLinearMapper<Scalar, Index, AlignmentType> LinearMapper;
+  typedef BlasLinearMapper<Scalar, Index, AlignmentType> LinearMapper;
+  typedef BlasVectorMapper<Scalar, Index> VectorMapper;
 
   EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
@@ -170,6 +195,11 @@ class blas_data_mapper {
     return LinearMapper(&operator()(i, j));
   }
 
+  EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {
+    return VectorMapper(&operator()(i, j));
+  }
+
   EIGEN_DEVICE_FUNC
   EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const {
     return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride];
@@ -193,6 +223,15 @@ class blas_data_mapper {
     return pgather<Scalar, SubPacket>(&operator()(i, j), m_stride);
   }
 
+  const Index stride() const { return m_stride; }
+
+  Index firstAligned(Index size) const {
+    if (size_t(m_data)%sizeof(Scalar)) {
+      return -1;
+    }
+    return internal::first_aligned(m_data, size);
+  }
+
   protected:
   Scalar* EIGEN_RESTRICT m_data;
   const Index m_stride;
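The two new blas_data_mapper members close the loop for the kernels: stride() lets a kernel recover lhsStride from the mapper, and firstAligned(size) returns the index of the first packet-aligned element, or -1 when the base pointer is not even scalar-aligned, in which case the kernels above fall back to the unvectorized path. A toy reimplementation of that rule under those assumptions (first_aligned_toy is illustrative, not Eigen API):

    #include <cstddef>
    #include <cstdint>

    template <typename Scalar, int PacketSize>
    std::ptrdiff_t first_aligned_toy(const Scalar* p, std::ptrdiff_t size) {
      const std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(p);
      if (addr % sizeof(Scalar) != 0)
        return -1;  // mirrors: if (size_t(m_data)%sizeof(Scalar)) return -1;
      // First index i such that element i sits on a packet boundary.
      const std::ptrdiff_t k = (addr / sizeof(Scalar)) % PacketSize;
      const std::ptrdiff_t offset = (PacketSize - k) % PacketSize;
      return offset < size ? offset : size;  // clamp like internal::first_aligned
    }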