diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
index a48520c0c..d31d9babf 100644
--- a/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -250,11 +250,11 @@ template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
 inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
 {
   EIGEN_ONLY_USED_FOR_DEBUG(alpha);
-  // TODO use alpha
-  eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");
-  typedef evaluator<SparseLhsType> LhsEval;
-  typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
+  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
+  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
+  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
+  typedef typename LhsEval::InnerIterator LhsIterator;
   typedef typename SparseLhsType::Scalar LhsScalar;
   
   enum {
     LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
@@ -266,39 +266,53 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
           || ( (Mode&Lower) && LhsIsRowMajor),
     ProcessSecondHalf = !ProcessFirstHalf
   };
   
-  LhsEval lhsEval(lhs);
-  
-  for (Index j=0; j<lhs.outerSize(); ++j)
-  {
-    LhsIterator i(lhsEval,j);
-    if (ProcessSecondHalf)
-    {
-      while (i && i.index()<j) ++i;
-      if(i && i.index()==j)
-      {
-        res.row(j) += i.value() * rhs.row(j);
-        ++i;
-      }
+  SparseLhsTypeNested lhs_nested(lhs);
+  LhsEval lhsEval(lhs_nested);
+  
+  // work on one column at once
+  for (Index k=0; k<rhs.cols(); ++k)
+  {
+    for (Index j=0; j<lhs.outerSize(); ++j)
+    {
+      LhsIterator i(lhsEval,j);
+      // handle diagonal coeff
+      if (ProcessSecondHalf)
+      {
+        while (i && i.index()<j) ++i;
+        if(i && i.index()==j)
+        {
+          res(j,k) += alpha * i.value() * rhs(j,k);
+          ++i;
+        }
+      }
+      
+      // premultiplied rhs for scatters
+      typename internal::scalar_product_traits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
+      // accumulator for partial scalar product
+      typename DenseResType::Scalar res_j(0);
+      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+      {
+        LhsScalar lhs_ij = i.value();
+        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
+        res_j += lhs_ij * rhs(i.index(),k);
+        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
+      }
+      res(j,k) += alpha * res_j;
+      
+      // handle diagonal coeff
+      if (ProcessFirstHalf && i && (i.index()==j))
+        res(j,k) += alpha * i.value() * rhs(j,k);
     }
-    for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
-    {
-      Index a = LhsIsRowMajor ? j : i.index();
-      Index b = LhsIsRowMajor ? i.index() : j;
-      LhsScalar v = i.value();
-      res.row(a) += (v) * rhs.row(b);
-      res.row(b) += numext::conj(v) * rhs.row(a);
-    }
-    if (ProcessFirstHalf && i && (i.index()==j))
-      res.row(j) += i.value() * rhs.row(j);
-  }
+  }
 }
 
 template<typename LhsView, typename Rhs, int ProductType>
 struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
+: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs)
+  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
   {
     typedef typename LhsView::_MatrixTypeNested Lhs;
     typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
@@ -306,16 +320,16 @@ struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
     LhsNested lhsNested(lhsView.matrix());
     RhsNested rhsNested(rhs);
-    
-    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
+    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
   }
 };
 
 template<typename Lhs, typename RhsView, int ProductType>
 struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
+: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView)
+  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
   {
     typedef typename RhsView::_MatrixTypeNested Rhs;
     typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
     typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
@@ -323,10 +337,9 @@ struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
     LhsNested lhsNested(lhs);
     RhsNested rhsNested(rhsView.matrix());
-    
     // transpose everything
     Transpose<Dest> dstT(dst);
-    internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1));
+    internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
   }
 };
 
diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp
index c518a6e55..c7c93373d 100644
--- a/test/sparse_product.cpp
+++ b/test/sparse_product.cpp
@@ -292,6 +292,10 @@ template<typename SparseMatrixType> void sparse_product()
     VERIFY_IS_APPROX(x=mUp.template selfadjointView<Upper>()*b, refX=refS*b);
     VERIFY_IS_APPROX(x=mLo.template selfadjointView<Lower>()*b, refX=refS*b);
     VERIFY_IS_APPROX(x=mS.template selfadjointView<Upper|Lower>()*b, refX=refS*b);
+
+    VERIFY_IS_APPROX(x.noalias()+=mUp.template selfadjointView<Upper>()*b, refX+=refS*b);
+    VERIFY_IS_APPROX(x.noalias()-=mLo.template selfadjointView<Lower>()*b, refX-=refS*b);
+    VERIFY_IS_APPROX(x.noalias()+=mS.template selfadjointView<Upper|Lower>()*b, refX+=refS*b);
 
     // sparse selfadjointView with sparse matrices
     SparseMatrixType mSres(rows,rows);