diff --git a/Eigen/Core b/Eigen/Core
index 333e80a6d..6c6de3ab5 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -59,10 +59,7 @@ namespace Eigen {
 #include "src/Core/CommaInitializer.h"
 #include "src/Core/Extract.h"
 #include "src/Core/Part.h"
-
-#ifndef EIGEN_EXTERN_INSTANTIATIONS
 #include "src/Core/CacheFriendlyProduct.h"
-#endif
 
 } // namespace Eigen
 
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h
index 828b49725..ba53e299d 100644
--- a/Eigen/src/Core/Assign.h
+++ b/Eigen/src/Core/Assign.h
@@ -249,7 +249,6 @@ struct ei_assign_impl<Derived1, Derived2, SliceVectorization, NoUnrolling>
 {
   static void run(Derived1 &dst, const Derived2 &src)
   {
-    const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
     const int innerSize = dst.innerSize();
     const int outerSize = dst.outerSize();
     const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
index 245357511..ae4e83c9e 100644
--- a/Eigen/src/Core/Block.h
+++ b/Eigen/src/Core/Block.h
@@ -155,7 +155,6 @@ template<typename MatrixType, int BlockRows, int BlockCols> class Block
     return m_matrix.const_cast_derived()
            .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                      m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
-
   }
 
   inline const Scalar coeff(int index) const
diff --git a/Eigen/src/Core/CacheFriendlyProduct.h b/Eigen/src/Core/CacheFriendlyProduct.h
index 0da84eeac..a710d44d4 100644
--- a/Eigen/src/Core/CacheFriendlyProduct.h
+++ b/Eigen/src/Core/CacheFriendlyProduct.h
@@ -25,6 +25,8 @@
 #ifndef EIGEN_CACHE_FRIENDLY_PRODUCT_H
 #define EIGEN_CACHE_FRIENDLY_PRODUCT_H
 
+#ifndef EIGEN_EXTERN_INSTANTIATIONS
+
 template<typename Scalar>
 static void ei_cache_friendly_product(
   int _rows, int _cols, int depth,
@@ -77,8 +79,6 @@ static void ei_cache_friendly_product(
     MaxL2BlockSize = EIGEN_TUNE_FOR_L2_CACHE_SIZE / sizeof(Scalar)
   };
 
-
-  //const bool rhsIsAligned = (PacketSize==1) || (((rhsStride%PacketSize) == 0) && (size_t(rhs)%16==0));
   const bool resIsAligned = (PacketSize==1) || (((resStride%PacketSize) == 0) && (size_t(res)%16==0));
 
   const int remainingSize = depth % PacketSize;
@@ -357,4 +357,165 @@ static void ei_cache_friendly_product(
   free(block);
 }
 
+#endif // EIGEN_EXTERN_INSTANTIATIONS
+
+/* Optimized col-major matrix * vector product:
+ * This algorithm processes 4 columns at once, which allows us to both reduce
+ * the number of loads/stores of the result by a factor of 4 and to reduce
+ * the instruction dependency. Moreover, we know that all bands have the
+ * same alignment pattern.
+ * TODO: since rhs gets evaluated only once, no need to evaluate it
+ */
+template<typename Scalar, typename RhsType>
+EIGEN_DONT_INLINE static void ei_cache_friendly_product(
+  int size,
+  const Scalar* lhs, int lhsStride,
+  const RhsType& rhs,
+  Scalar* res)
+{
+  #ifdef _EIGEN_ACCUMULATE_PACKETS
+  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
+  #endif
+
+  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2,OFFSET) \
+    ei_pstore(&res[j OFFSET], \
+      ei_padd(ei_pload(&res[j OFFSET]), \
+        ei_padd( \
+          ei_padd(ei_pmul(ptmp0,ei_pload ## A0(&lhs[j OFFSET +iN0])),ei_pmul(ptmp1,ei_pload ## A13(&lhs[j OFFSET +iN1]))), \
+          ei_padd(ei_pmul(ptmp2,ei_pload ## A2(&lhs[j OFFSET +iN2])),ei_pmul(ptmp3,ei_pload ## A13(&lhs[j OFFSET +iN3]))) )))
+
+  asm("#begin matrix_vector_product");
+  typedef typename ei_packet_traits<Scalar>::type Packet;
+  const int PacketSize = sizeof(Packet)/sizeof(Scalar);
+
+  enum { AllAligned, EvenAligned, FirstAligned, NoneAligned };
+  const int columnsAtOnce = 4;
+  const int peels = 2;
+  const int PacketAlignedMask = PacketSize-1;
+  const int PeelAlignedMask = PacketSize*peels-1;
+  const bool Vectorized = sizeof(Packet) != sizeof(Scalar);
+
+  // How many coeffs of the result do we have to skip to be aligned.
+  // Here we assume data are at least aligned on the base scalar type, which is mandatory anyway.
+  const int alignedStart = Vectorized
+                         ? std::min<int>( (PacketSize - ((size_t(res)/sizeof(Scalar)) & PacketAlignedMask)) & PacketAlignedMask, size)
+                         : 0;
+  const int alignedSize = alignedStart + ((size-alignedStart) & ~PacketAlignedMask);
+  const int peeledSize  = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : 0;
+
+  const int alignmentStep = lhsStride % PacketSize;
+  int alignmentPattern = alignmentStep==0 ? AllAligned
+                       : alignmentStep==2 ? EvenAligned
+                       : FirstAligned;
+
+  // find how many columns we have to skip to be aligned with the result (if possible)
+  int skipColumns = 0;
+  for (; skipColumns<PacketSize && alignedStart != alignmentStep*skipColumns; ++skipColumns)
+  {}
+  if (skipColumns==PacketSize)
+  {
+    // nothing can be aligned, no need to skip any column
+    alignmentPattern = NoneAligned;
+    skipColumns = 0;
+  }
+  else
+  {
+    skipColumns = std::min(skipColumns,rhs.size());
+    // note that the skipped columns are processed later
+  }
+
+  const int columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
+  for (int i=skipColumns; i<columnBound; i+=columnsAtOnce)
+  {
+    Scalar tmp0 = rhs.coeff(i),   tmp1 = rhs.coeff(i+1),
+           tmp2 = rhs.coeff(i+2), tmp3 = rhs.coeff(i+3);
+    Packet ptmp0 = ei_pset1(tmp0), ptmp1 = ei_pset1(tmp1),
+           ptmp2 = ei_pset1(tmp2), ptmp3 = ei_pset1(tmp3);
+    int iN0 = i*lhsStride, iN1 = (i+1)*lhsStride, iN2 = (i+2)*lhsStride, iN3 = (i+3)*lhsStride;
+
+    // process initial unaligned coeffs
+    for (int j=0; j<alignedStart; ++j)
+      res[j] += tmp0*lhs[j+iN0] + tmp1*lhs[j+iN1] + tmp2*lhs[j+iN2] + tmp3*lhs[j+iN3];
+
+    if (alignedSize>0)
+    {
+      switch(alignmentPattern)
+      {
+        case AllAligned:
+          for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+            _EIGEN_ACCUMULATE_PACKETS(,,,);
+          break;
+        case EvenAligned:
+          for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+            _EIGEN_ACCUMULATE_PACKETS(,u,,);
+          break;
+        case FirstAligned:
+          if (peels>1)
+            for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize)
+            {
+              _EIGEN_ACCUMULATE_PACKETS(,u,u,);
+              _EIGEN_ACCUMULATE_PACKETS(,u,u,+PacketSize);
+              if (peels>2) _EIGEN_ACCUMULATE_PACKETS(,u,u,+2*PacketSize);
+              if (peels>3) _EIGEN_ACCUMULATE_PACKETS(,u,u,+3*PacketSize);
+              if (peels>4) _EIGEN_ACCUMULATE_PACKETS(,u,u,+4*PacketSize);
+              if (peels>5) _EIGEN_ACCUMULATE_PACKETS(,u,u,+5*PacketSize);
+              if (peels>6) _EIGEN_ACCUMULATE_PACKETS(,u,u,+6*PacketSize);
+              if (peels>7) _EIGEN_ACCUMULATE_PACKETS(,u,u,+7*PacketSize);
+            }
+          for (int j = peeledSize; j<alignedSize; j+=PacketSize)
+            _EIGEN_ACCUMULATE_PACKETS(,u,u,);
+          break;
+        default:
+          for (int j = alignedStart; j<alignedSize; j+=PacketSize)
+            _EIGEN_ACCUMULATE_PACKETS(u,u,u,);
+          break;
+      }
+    }
+
+    // process remaining coeffs
+    for (int j=alignedSize; j<size; ++j)
+      res[j] += tmp0*lhs[j+iN0] + tmp1*lhs[j+iN1] + tmp2*lhs[j+iN2] + tmp3*lhs[j+iN3];
+  }
+
+  // process remaining first and last columns (at most columnsAtOnce-1)
+  int end = rhs.size();
+  int start = columnBound;
+  do
+  {
+    for (int i=start; i<end; ++i)
+    {
+      Scalar tmp0 = rhs.coeff(i);
+      Packet ptmp0 = ei_pset1(tmp0);
+      int iN0 = i*lhsStride;
+      if (Vectorized && alignedSize>0)
+      {
+        bool aligned0 = (iN0 % PacketSize) == 0;
+        if (aligned0)
+          for (int j = 0;j<alignedSize;j+=PacketSize)
+            ei_pstore(&res[j], ei_padd(ei_pmul(ptmp0,ei_pload(&lhs[j+iN0])),ei_pload(&res[j])));
+        else
+          for (int j = 0;j<alignedSize;j+=PacketSize)
+            ei_pstore(&res[j], ei_padd(ei_pmul(ptmp0,ei_ploadu(&lhs[j+iN0])),ei_pload(&res[j])));
+      }
+      // process remaining scalars (or the whole column if not vectorized)
+      for (int j=Vectorized ? alignedSize : 0; j<size; ++j)
+        res[j] += tmp0 * lhs[j+iN0];
+    }
+    if (skipColumns)
+    {
+      start = 0;
+      end = skipColumns;
+      skipColumns = 0;
+    }
+    else
+      break;
+  } while (true);
+
+  asm("#end matrix_vector_product");
+  #undef _EIGEN_ACCUMULATE_PACKETS
+}
+
 #endif // EIGEN_CACHE_FRIENDLY_PRODUCT_H
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -186,8 +186,8 @@ template<typename Lhs, typename Rhs, int EvalMode> class Product
     return res;
   }
 
-  const _LhsNested& lhs() const { return m_lhs; }
-  const _RhsNested& rhs() const { return m_rhs; }
+  inline const _LhsNested& lhs() const { return m_lhs; }
+  inline const _RhsNested& rhs() const { return m_rhs; }
 
 protected:
   const LhsNested m_lhs;
@@ -480,11 +480,22 @@ struct ei_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, PacketScalar>
   }
 };
 
+template<typename Scalar, typename RhsType>
+static void ei_cache_friendly_product(
+  int size, const Scalar* lhs, int lhsStride, const RhsType& rhs, Scalar* res);
+
+enum {
+  HasDirectAccess,
+  NoDirectAccess
+};
+
 template<typename ProductType,
   int LhsRows  = ei_traits<ProductType>::RowsAtCompileTime,
   int LhsOrder = int(ei_traits<ProductType>::LhsFlags)&RowMajorBit ? RowMajor : ColMajor,
+  int LhsHasDirectAccess = int(ei_traits<ProductType>::LhsFlags)&DirectAccessBit ? HasDirectAccess : NoDirectAccess,
   int RhsCols  = ei_traits<ProductType>::ColsAtCompileTime,
-  int RhsOrder = int(ei_traits<ProductType>::RhsFlags)&RowMajorBit ? RowMajor : ColMajor>
+  int RhsOrder = int(ei_traits<ProductType>::RhsFlags)&RowMajorBit ? RowMajor : ColMajor,
+  int RhsHasDirectAccess = int(ei_traits<ProductType>::RhsFlags)&DirectAccessBit ?
+                           HasDirectAccess : NoDirectAccess>
 struct ei_cache_friendly_product_selector
 {
   template<typename DestDerived>
@@ -495,21 +506,57 @@ struct ei_cache_friendly_product_selector
 };
 
 // optimized colmajor * vector path
-template<typename ProductType, int LhsRows, int RhsOrder>
-struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,1,RhsOrder>
+template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
+struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,NoDirectAccess,1,RhsOrder,RhsAccess>
 {
+  typedef typename ei_traits<ProductType>::_LhsNested Lhs;
   template<typename DestDerived>
   inline static void run(DestDerived& res, const ProductType& product)
   {
-    const int rows = product.rhs().rows();
-    for (int j=0; j<rows; ++j)
-      res += product.rhs().coeff(j) * product.lhs().col(j);
-  }
-};
+    typedef typename ProductType::Scalar Scalar;
+    Matrix<Scalar,DestDerived::RowsAtCompileTime,1> _sum;
+    const int size = product.rhs().rows();
+    for (int k=0; k<size; ++k)
+      if (Lhs::Flags & DirectAccessBit)
+        res += product.rhs().coeff(k) * Map<DestDerived>
+                (&product.lhs().const_cast_derived().coeffRef(0,k),product.lhs().rows());
+      else
+        res += product.rhs().coeff(k) * product.lhs().col(k);
+  }
+};
+
+// optimized cache friendly colmajor * vector path for matrix with direct access flag
+// NOTE this path could also be enabled for expressions if we add runtime align queries
+template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
+struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,HasDirectAccess,1,RhsOrder,RhsAccess>
+{
+  typedef typename ProductType::Scalar Scalar;
+
+  template<typename DestDerived>
+  inline static void run(DestDerived& res, const ProductType& product)
+  {
+    enum {
+      EvalToRes = (ei_packet_traits<Scalar>::size==1)
+                ||(DestDerived::Flags&ActualPacketAccessBit) && (!(DestDerived::Flags & RowMajorBit)) };
+    Scalar* __restrict__ _res;
+    if (EvalToRes)
+      _res = &res.coeffRef(0);
+    else
+    {
+      _res = (Scalar*)alloca(sizeof(Scalar)*res.size());
+      Map<Matrix<Scalar,DestDerived::RowsAtCompileTime,1> >(_res, res.size()) = res;
+    }
+    ei_cache_friendly_product(res.size(),
+      &product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(),
+      product.rhs(), _res);
+
+    if (!EvalToRes)
+      res = Map<Matrix<Scalar,DestDerived::RowsAtCompileTime,1> >(_res, res.size());
+  }
+};
 
 // optimized vector * rowmajor path
-template<typename ProductType, int LhsOrder, int RhsCols>
-struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,RhsCols,RowMajor>
+template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
+struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,RowMajor,NoDirectAccess>
 {
   template<typename DestDerived>
   inline static void run(DestDerived& res, const ProductType& product)
@@ -520,6 +567,36 @@ struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,RhsCols,RowMajor>
   }
 };
 
+// optimized cache friendly vector * rowmajor path for matrix with direct access flag
+template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
+struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,RowMajor,HasDirectAccess>
+{
+  typedef typename ProductType::Scalar Scalar;
+
+  template<typename DestDerived>
+  inline static void run(DestDerived& res, const ProductType& product)
+  {
+    enum {
+      EvalToRes = (ei_packet_traits<Scalar>::size==1)
+                ||(DestDerived::Flags & ActualPacketAccessBit) && (DestDerived::Flags & RowMajorBit) };
+    Scalar* __restrict__ _res;
+    if (EvalToRes)
+      _res = &res.coeffRef(0);
+    else
+    {
+      _res = (Scalar*)alloca(sizeof(Scalar)*res.size());
+      Map<Matrix<Scalar,1,DestDerived::ColsAtCompileTime> >(_res, res.size()) = res;
+    }
+    ei_cache_friendly_product(res.size(),
+      &product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(),
+      product.lhs().transpose(), _res);
+
+    if (!EvalToRes)
+      res = Map<Matrix<Scalar,1,DestDerived::ColsAtCompileTime> >(_res, res.size());
+  }
+};
+
 /** \internal */
 template<typename Derived>
 template<typename Lhs,typename Rhs>
diff --git a/test/product.cpp b/test/product.cpp
index ffc845ca4..50ec64d4d 100644
--- a/test/product.cpp
+++ b/test/product.cpp
@@ -39,9 +39,13 @@ template<typename MatrixType> void product(const MatrixType& m)
   typedef typename MatrixType::Scalar Scalar;
   typedef typename NumTraits<Scalar>::FloatingPoint FloatingPoint;
-  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+  typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> RowVectorType;
+  typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> ColVectorType;
   typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> RowSquareMatrixType;
   typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> ColSquareMatrixType;
+  typedef Matrix<Scalar,
+                 MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime,
+                 MatrixType::Flags&RowMajorBit ? ColMajor : RowMajor> OtherMajorMatrixType;
 
   int rows = m.rows();
   int cols = m.cols();
 
@@ -59,9 +63,11 @@ template<typename MatrixType> void product(const MatrixType& m)
 
   ColSquareMatrixType square2 = ColSquareMatrixType::random(cols, cols),
                       res2 = ColSquareMatrixType::random(cols, cols);
-  VectorType v1 = VectorType::random(rows),
-             v2 = VectorType::random(rows),
-             vzero = VectorType::zero(rows);
+  RowVectorType v1 = RowVectorType::random(rows),
+             v2 = RowVectorType::random(rows),
+             vzero = RowVectorType::zero(rows);
+  ColVectorType vc2 = ColVectorType::random(cols), vcres;
+  OtherMajorMatrixType tm1 = m1;
 
   Scalar s1 = ei_random<Scalar>();
 
@@ -89,6 +95,7 @@ template<typename MatrixType> void product(const MatrixType& m)
 
   // test Product.h together with Identity.h
   VERIFY_IS_APPROX(v1, identity*v1);
+  VERIFY_IS_APPROX(v1.transpose(), v1.transpose() * identity);
   // again, test operator() to check const-qualification
   VERIFY_IS_APPROX(MatrixType::identity(rows, cols)(r,c), static_cast<Scalar>(r==c));
 
@@ -110,6 +117,21 @@ template<typename MatrixType> void product(const MatrixType& m)
   {
     VERIFY(areNotApprox(res,square + m2 * m1.transpose()));
   }
+  vcres = vc2;
+  vcres += (m1.transpose() * v1).lazy();
+  VERIFY_IS_APPROX(vcres, vc2 + m1.transpose() * v1);
+  tm1 = m1;
+  VERIFY_IS_APPROX(tm1.transpose() * v1, m1.transpose() * v1);
+  VERIFY_IS_APPROX(v1.transpose() * tm1, v1.transpose() * m1);
+
+  // test submatrix and matrix/vector product
+  for (int i=0; i<rows; ++i)
+    res.row(i) = m1.row(i) * m2.transpose();
+  VERIFY_IS_APPROX(res, m1 * m2.transpose());
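
A note on the alignment bookkeeping in the new ei_cache_friendly_product kernel
above: it splits the result vector into a scalar prologue [0,alignedStart), a
packet-aligned middle [alignedStart,alignedSize) whose first part is covered by
the peeled (unrolled) loop up to peeledSize, and a scalar epilogue
[alignedSize,size). The standalone sketch below reproduces that index
arithmetic for float and 4-wide packets; it is illustrative only (the buffer,
sizes and printfs are invented, only the three formulas mirror the patch).

    #include <cstdio>
    #include <cstddef>
    #include <algorithm>

    int main()
    {
        const int PacketSize = 4;                     // e.g. 4 floats per SSE packet
        const int peels = 2;                          // unroll factor of the peeled loop
        const int PacketAlignedMask = PacketSize - 1;
        const int PeelAlignedMask = PacketSize * peels - 1;

        float buffer[64];
        float* res = buffer + 1;                      // deliberately misaligned start
        const int size = 35;

        // first index of res on a packet boundary (same formula as the patch)
        const int alignedStart = std::min<int>(
            (PacketSize - ((size_t(res) / sizeof(float)) & PacketAlignedMask)) & PacketAlignedMask,
            size);
        // end of the region coverable by whole packets
        const int alignedSize = alignedStart + ((size - alignedStart) & ~PacketAlignedMask);
        // end of the region coverable by whole peeled (unrolled) iterations
        const int peeledSize = alignedStart + ((alignedSize - alignedStart) & ~PeelAlignedMask);

        std::printf("scalar prologue : [0,%d)\n", alignedStart);
        std::printf("peeled loop     : [%d,%d) step %d\n", alignedStart, peeledSize, peels * PacketSize);
        std::printf("packet loop     : [%d,%d) step %d\n", peeledSize, alignedSize, PacketSize);
        std::printf("scalar epilogue : [%d,%d)\n", alignedSize, size);
        return 0;
    }

The three ranges are disjoint and add up to [0,size), which is exactly the
invariant the switch/peeling code in the kernel relies on.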
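A second note, on the new ei_cache_friendly_product_selector paths: the added
HasDirectAccess specializations feed the raw data pointer of a plain matrix
into the vectorized kernel, and when the destination itself cannot be written
through packets (EvalToRes false) they evaluate into a scratch buffer first and
copy back. The toy program below shows just that buffer-or-destination pattern;
kernel() is an invented stand-in for ei_cache_friendly_product, and std::vector
replaces the patch's alloca.

    #include <vector>
    #include <cstring>

    // invented stand-in for the patch's kernel: accumulates into a plain array
    static void kernel(int size, float* res)
    {
        for (int j = 0; j < size; ++j)
            res[j] += 1.0f;                            // dummy accumulation
    }

    static void run_into(float* dest, int size, bool destIsPacketWritable)
    {
        if (destIsPacketWritable)
            kernel(size, dest);                        // accumulate in place, no copy
        else
        {
            std::vector<float> tmp(dest, dest + size); // seed the temporary with dest
            kernel(size, &tmp[0]);                     // kernel only sees a flat buffer
            std::memcpy(dest, &tmp[0], size * sizeof(float));
        }
    }

    int main()
    {
        float dest[8] = {0, 1, 2, 3, 4, 5, 6, 7};
        run_into(dest, 8, false);                      // exercises the copy-back path
        return 0;
    }

The copy-in/copy-out keeps the kernel free of expression-template logic: it
always accumulates into a contiguous array, which is what makes the aligned
store paths of the kernel legal.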