Add support for a non-trivial scalar factor in sparse selfadjoint * dense products, and enable +=/-= assignment for such products.

This changeset also improves the performance by working on one column of the result at a time.
This commit is contained in:
Gael Guennebaud 2016-08-24 13:06:34 +02:00
parent 8132a12625
commit 441b7eaab2
2 changed files with 47 additions and 30 deletions

View File

@@ -250,11 +250,11 @@ template<int Mode, typename SparseLhsType, typename DenseRhsType, typename Dense
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{ {
EIGEN_ONLY_USED_FOR_DEBUG(alpha); EIGEN_ONLY_USED_FOR_DEBUG(alpha);
// TODO use alpha
eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");
typedef evaluator<SparseLhsType> LhsEval; typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator; typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
typedef typename LhsEval::InnerIterator LhsIterator;
typedef typename SparseLhsType::Scalar LhsScalar; typedef typename SparseLhsType::Scalar LhsScalar;
enum { enum {
@@ -266,39 +266,53 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
ProcessSecondHalf = !ProcessFirstHalf ProcessSecondHalf = !ProcessFirstHalf
}; };
LhsEval lhsEval(lhs); SparseLhsTypeNested lhs_nested(lhs);
LhsEval lhsEval(lhs_nested);
// work on one column at once
for (Index k=0; k<rhs.cols(); ++k)
{
for (Index j=0; j<lhs.outerSize(); ++j) for (Index j=0; j<lhs.outerSize(); ++j)
{ {
LhsIterator i(lhsEval,j); LhsIterator i(lhsEval,j);
// handle diagonal coeff
if (ProcessSecondHalf) if (ProcessSecondHalf)
{ {
while (i && i.index()<j) ++i; while (i && i.index()<j) ++i;
if(i && i.index()==j) if(i && i.index()==j)
{ {
res.row(j) += i.value() * rhs.row(j); res(j,k) += alpha * i.value() * rhs(j,k);
++i; ++i;
} }
} }
// premultiplied rhs for scatters
typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
// accumulator for partial scalar product
typename DenseResType::Scalar res_j(0);
for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i) for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
{ {
Index a = LhsIsRowMajor ? j : i.index(); LhsScalar lhs_ij = i.value();
Index b = LhsIsRowMajor ? i.index() : j; if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
LhsScalar v = i.value(); res_j += lhs_ij * rhs(i.index(),k);
res.row(a) += (v) * rhs.row(b); res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
res.row(b) += numext::conj(v) * rhs.row(a);
} }
res(j,k) += alpha * res_j;
// handle diagonal coeff
if (ProcessFirstHalf && i && (i.index()==j)) if (ProcessFirstHalf && i && (i.index()==j))
res.row(j) += i.value() * rhs.row(j); res(j,k) += alpha * i.value() * rhs(j,k);
}
} }
} }
template<typename LhsView, typename Rhs, int ProductType> template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{ {
template<typename Dest> template<typename Dest>
static void evalTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs) static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
{ {
typedef typename LhsView::_MatrixTypeNested Lhs; typedef typename LhsView::_MatrixTypeNested Lhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested; typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
@@ -306,16 +320,16 @@ struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, Pr
LhsNested lhsNested(lhsView.matrix()); LhsNested lhsNested(lhsView.matrix());
RhsNested rhsNested(rhs); RhsNested rhsNested(rhs);
dst.setZero(); internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
} }
}; };
template<typename Lhs, typename RhsView, int ProductType> template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{ {
template<typename Dest> template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView) static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
{ {
typedef typename RhsView::_MatrixTypeNested Rhs; typedef typename RhsView::_MatrixTypeNested Rhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested; typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
@@ -323,10 +337,9 @@ struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, Pr
LhsNested lhsNested(lhs); LhsNested lhsNested(lhs);
RhsNested rhsNested(rhsView.matrix()); RhsNested rhsNested(rhsView.matrix());
dst.setZero(); // transpose everything
// transpoe everything
Transpose<Dest> dstT(dst); Transpose<Dest> dstT(dst);
internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1)); internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
} }
}; };

View File

@@ -293,6 +293,10 @@ template<typename SparseMatrixType> void sparse_product()
VERIFY_IS_APPROX(x=mLo.template selfadjointView<Lower>()*b, refX=refS*b); VERIFY_IS_APPROX(x=mLo.template selfadjointView<Lower>()*b, refX=refS*b);
VERIFY_IS_APPROX(x=mS.template selfadjointView<Upper|Lower>()*b, refX=refS*b); VERIFY_IS_APPROX(x=mS.template selfadjointView<Upper|Lower>()*b, refX=refS*b);
VERIFY_IS_APPROX(x.noalias()+=mUp.template selfadjointView<Upper>()*b, refX+=refS*b);
VERIFY_IS_APPROX(x.noalias()-=mLo.template selfadjointView<Lower>()*b, refX-=refS*b);
VERIFY_IS_APPROX(x.noalias()+=mS.template selfadjointView<Upper|Lower>()*b, refX+=refS*b);
// sparse selfadjointView with sparse matrices // sparse selfadjointView with sparse matrices
SparseMatrixType mSres(rows,rows); SparseMatrixType mSres(rows,rows);
VERIFY_IS_APPROX(mSres = mLo.template selfadjointView<Lower>()*mS, VERIFY_IS_APPROX(mSres = mLo.template selfadjointView<Lower>()*mS,