bug #482: pass scalar by const ref - pass on the sparse module

(also fix a compilation issue due to previous pass)
Author: Gael Guennebaud
Date:   2012-06-28 21:01:02 +02:00
parent 23184527fa
commit 9629ba361a
10 changed files with 31 additions and 32 deletions
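
The change itself is mechanical, but the motivation is worth spelling out: `Scalar` and `RealScalar` are template parameters, and while pass-by-value is free for built-in types such as `double`, it forces a copy at every call for user-defined scalar types that are expensive to copy (multiprecision numbers, for example). A minimal sketch of the difference, using a hypothetical `BigScalar` type that is not part of Eigen or of this commit:

    #include <vector>

    // Hypothetical scalar type that is expensive to copy (think of a
    // multiprecision float whose digits live on the heap).
    struct BigScalar {
      std::vector<unsigned long> limbs;  // heap-allocated digits
      double approx;                     // rough double value, used by the demo
    };

    // Pass by value: every call copies both arguments, heap storage included.
    bool isNegligibleByValue(BigScalar value, BigScalar reference, BigScalar epsilon) {
      return value.approx * value.approx <=
             reference.approx * reference.approx * epsilon.approx * epsilon.approx;
    }

    // Pass by const reference: same syntax for the caller, no copies made.
    bool isNegligibleByConstRef(const BigScalar& value, const BigScalar& reference,
                                const BigScalar& epsilon) {
      return value.approx * value.approx <=
             reference.approx * reference.approx * epsilon.approx * epsilon.approx;
    }

For built-in scalars both signatures compile to the same code, so the diff below is behaviour-neutral for the common case.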

================================ changed file ================================

@@ -154,7 +154,7 @@ class CompressedStorage
     /** \returns the stored value at index \a key
       * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
-    inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+    inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
     {
       if (m_size==0)
         return defaultValue;
@@ -167,7 +167,7 @@ class CompressedStorage
     }
     /** Like at(), but the search is performed in the range [start,end) */
-    inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+    inline Scalar atInRange(size_t start, size_t end, Index key, const Scalar& defaultValue = Scalar(0)) const
     {
       if (start>=end)
         return Scalar(0);
@@ -182,7 +182,7 @@ class CompressedStorage
     /** \returns a reference to the value at index \a key
       * If the value does not exist, then the value \a defaultValue is inserted
       * such that the keys are sorted. */
-    inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+    inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
     {
       size_t id = searchLowerIndex(0,m_size,key);
       if (id>=m_size || m_indices[id]!=key)
@@ -199,7 +199,7 @@ class CompressedStorage
       return m_values[id];
     }
-    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
       size_t k = 0;
       size_t n = size();

================================ changed file ================================

@@ -165,7 +165,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::Index Index;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index c=0; c<rhs.cols(); ++c)
     {
@@ -189,7 +189,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index c=0; c<rhs.cols(); ++c)
     {
@@ -211,7 +211,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index j=0; j<lhs.outerSize(); ++j)
     {
@@ -230,7 +230,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index j=0; j<lhs.outerSize(); ++j)
     {
@@ -259,7 +259,7 @@ class SparseTimeDenseProduct
     SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
     }
@@ -289,7 +289,7 @@ class DenseTimeSparseProduct
     DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       Transpose<const _LhsNested> lhs_t(m_lhs);
       Transpose<const _RhsNested> rhs_t(m_rhs);
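
All of the `scaleAndAddTo(dest, alpha)` overloads touched above implement the accumulation `dest += alpha * lhs * rhs`; `alpha` is the scale factor that now travels by const reference. A short usage sketch, assuming double scalars (the expression below is only an illustration of how such a product is typically triggered):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main() {
      Eigen::SparseMatrix<double> A(100, 100);
      A.insert(3, 7) = 2.0;
      A.makeCompressed();

      Eigen::VectorXd v   = Eigen::VectorXd::Ones(100);
      Eigen::VectorXd res = Eigen::VectorXd::Zero(100);

      double alpha = 0.5;
      // Routes through the sparse-dense product machinery above,
      // i.e. res += alpha * (A * v), without a dense temporary for A*v.
      res += alpha * (A * v);
      return 0;
    }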

================================ changed file ================================

@@ -478,7 +478,7 @@ class SparseMatrix
     }
     /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
-    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
       prune(default_prunning_func(reference,epsilon));
     }
@@ -909,7 +909,7 @@ protected:
   public:
     /** \internal
       * \sa insert(Index,Index) */
-    inline Scalar& insertBackUncompressed(Index row, Index col)
+    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
     {
       const Index outer = IsRowMajor ? row : col;
       const Index inner = IsRowMajor ? col : row;
@@ -917,8 +917,7 @@ public:
       eigen_assert(!isCompressed());
       eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
-      Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
-      m_innerNonZeros[outer]++;
+      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
       m_data.index(p) = inner;
       return (m_data.value(p) = 0);
     }
@@ -930,7 +929,7 @@ private:
     }
     struct default_prunning_func {
-      default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
       inline bool operator() (const Index&, const Index&, const Scalar& value) const
       {
         return !internal::isMuchSmallerThan(value, reference, epsilon);
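
`prune(reference, epsilon)` is the user-facing call whose scalars are affected here: it erases the stored coefficients that are much smaller than `reference` under the tolerance `epsilon`, using the `default_prunning_func` shown above. A brief usage sketch with double scalars:

    #include <Eigen/Sparse>
    #include <iostream>

    int main() {
      Eigen::SparseMatrix<double> A(4, 4);
      A.insert(0, 0) = 1.0;
      A.insert(1, 1) = 1e-12;   // negligible next to the reference below
      A.insert(2, 3) = 0.5;
      A.makeCompressed();

      // Drop entries much smaller than 1.0 under tolerance 1e-9:
      // the 1e-12 coefficient goes away, the other two stay.
      A.prune(1.0, 1e-9);
      std::cout << A.nonZeros() << "\n";   // prints 2
      return 0;
    }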

================================ changed file ================================

@@ -445,12 +445,12 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
     template<typename OtherDerived>
     bool isApprox(const SparseMatrixBase<OtherDerived>& other,
-                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
     { return toDense().isApprox(other.toDense(),prec); }
     template<typename OtherDerived>
     bool isApprox(const MatrixBase<OtherDerived>& other,
-                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
     { return toDense().isApprox(other,prec); }
     /** \returns the matrix or vector obtained by evaluating this expression.
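
For context, both sparse `isApprox` overloads densify their operands and defer to the dense fuzzy comparison, so `prec` keeps its usual meaning of a relative precision. A small sketch, assuming double scalars:

    #include <Eigen/Sparse>

    int main() {
      Eigen::SparseMatrix<double> A(2, 2), B(2, 2);
      A.insert(0, 0) = 1.0;
      B.insert(0, 0) = 1.0 + 1e-13;

      bool close = A.isApprox(B);          // default dummy_precision() for double
      bool tight = A.isApprox(B, 1e-15);   // user-chosen, stricter precision
      return (close && !tight) ? 0 : 1;    // expected to return 0
    }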

================================ changed file ================================

@@ -114,13 +114,13 @@ class SparseSparseProduct : internal::no_assignment_operator,
     }
     template<typename Lhs, typename Rhs>
-    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance)
+    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance)
       : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
     {
       init();
     }
-    SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) const
+    SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const
     {
       return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon);
     }
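
`pruned(reference, epsilon)` requests a sparse*sparse product that discards result coefficients much smaller than `|reference|*epsilon` while the product is formed; the tolerance is forwarded to the pruning selectors further below. A hedged usage sketch with double scalars:

    #include <Eigen/Sparse>

    int main() {
      Eigen::SparseMatrix<double> A(50, 50), B(50, 50);
      A.insert(0, 1) = 1.0;
      B.insert(1, 2) = 1e-14;
      A.makeCompressed();
      B.makeCompressed();

      // The plain product keeps every structurally nonzero result.
      Eigen::SparseMatrix<double> C = A * B;

      // The pruned product drops results much smaller than |1.0| * 1e-9,
      // so the 1e-14 coefficient never enters D.
      Eigen::SparseMatrix<double> D = (A * B).pruned(1.0, 1e-9);
      return (C.nonZeros() == 1 && D.nonZeros() == 0) ? 0 : 1;
    }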

================================ changed file ================================

@@ -109,7 +109,7 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
       * call this function with u.adjoint().
       */
     template<typename DerivedU>
-    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
     /** \internal triggered by sparse_matrix = SparseSelfadjointView; */
     template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
@@ -188,7 +188,7 @@ SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView(
 template<typename MatrixType, unsigned int UpLo>
 template<typename DerivedU>
 SparseSelfAdjointView<MatrixType,UpLo>&
-SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
 {
   SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
   if(alpha==Scalar(0))
@@ -222,7 +222,7 @@ class SparseSelfAdjointTimeDenseProduct
     SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       // TODO use alpha
       eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
@@ -283,7 +283,7 @@ class DenseTimeSparseSelfAdjointProduct
     DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}
-    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
     {
       // TODO
     }
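
As the implementation above shows, `rankUpdate(u, alpha)` evaluates `u * u.adjoint()` into a temporary and accumulates `alpha` times its triangular part into the view, i.e. `*this += alpha * u * u.adjoint()` restricted to the stored triangle. A short usage sketch with double scalars:

    #include <Eigen/Sparse>

    int main() {
      const int n = 10;
      Eigen::SparseMatrix<double> S(n, n);   // receives the self-adjoint update
      Eigen::SparseMatrix<double> u(n, 2);   // sparse "tall" factor
      u.insert(0, 0) = 1.0;
      u.insert(3, 1) = 2.0;

      // Accumulate S += 0.5 * u * u.adjoint(), touching only the lower triangle.
      S.selfadjointView<Eigen::Lower>().rankUpdate(u, 0.5);
      return 0;
    }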

================================ changed file ================================

@@ -32,7 +32,7 @@ namespace internal {
 // perform a pseudo in-place sparse * sparse product assuming all matrices are col major
 template<typename Lhs, typename Rhs, typename ResultType>
-static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance)
+static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
 {
   // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
@@ -100,7 +100,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
   typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
   typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     typename remove_all<ResultType>::type _res(res.rows(), res.cols());
     internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
@@ -112,7 +112,7 @@ template<typename Lhs, typename Rhs, typename ResultType>
 struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
 {
   typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     // we need a col-major matrix to hold the result
     typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
@@ -126,7 +126,7 @@ template<typename Lhs, typename Rhs, typename ResultType>
 struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
 {
   typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     // let's transpose the product to get a column x column product
     typename remove_all<ResultType>::type _res(res.rows(), res.cols());
@@ -139,7 +139,7 @@ template<typename Lhs, typename Rhs, typename ResultType>
 struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
 {
   typedef typename ResultType::RealScalar RealScalar;
-  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
   {
     typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
     ColMajorMatrix colLhs(lhs);

================================ changed file ================================

@@ -184,7 +184,7 @@ class SparseVector
     inline void finalize() {}
-    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
       m_data.prune(reference,epsilon);
     }

================================ changed file ================================

@@ -103,7 +103,7 @@ private:
 template<typename Derived>
 const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
-                                                          typename NumTraits<Scalar>::Real m_epsilon) const
+                                                          const typename NumTraits<Scalar>::Real& m_epsilon) const
 {
   return SparseView<Derived>(derived(), m_reference, m_epsilon);
 }
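
Finally, `sparseView(reference, epsilon)` is the dense-to-sparse entry point: it wraps a dense expression and exposes only the coefficients that are not much smaller than `reference` under `epsilon`. A short sketch with double scalars:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main() {
      Eigen::MatrixXd D(3, 3);
      D << 1.0, 0.0,   2.0,
           0.0, 1e-12, 0.0,
           0.0, 0.0,   3.0;

      // Keep only entries not much smaller than 1.0 under tolerance 1e-9;
      // the exact zeros and the 1e-12 coefficient are skipped.
      Eigen::SparseMatrix<double> S = D.sparseView(1.0, 1e-9);
      return (S.nonZeros() == 3) ? 0 : 1;
    }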