- move CompressedStorage and AmbiVector into internal namespace

- remove innerVectorNonZeros(j) => use innerVector(j).nonZeros()
This commit is contained in:
Gael Guennebaud 2011-12-02 10:00:24 +01:00
parent a0bcaa88af
commit f10bae74e8
8 changed files with 27 additions and 22 deletions

View File

@@ -13,7 +13,8 @@
namespace Eigen {
/** \defgroup Sparse_Module SparseCore module
/** \ingroup Sparse_modules
* \defgroup SparseCore_Module SparseCore module
*
 * This module provides a sparse matrix representation, and basic associated matrix manipulations
* and operations.

View File

@@ -25,6 +25,8 @@
#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H
namespace internal {
/** \internal
* Hybrid sparse/dense vector class designed for intensive read-write operations.
*
@@ -375,5 +377,6 @@ class AmbiVector<_Scalar,_Index>::Iterator
bool m_isDense; // mode of the vector
};
} // namespace internal
#endif // EIGEN_AMBIVECTOR_H

View File

@@ -25,7 +25,10 @@
#ifndef EIGEN_COMPRESSED_STORAGE_H
#define EIGEN_COMPRESSED_STORAGE_H
/** Stores a sparse set of values as a list of values and a list of indices.
namespace internal {
/** \internal
* Stores a sparse set of values as a list of values and a list of indices.
*
*/
template<typename _Scalar,typename _Index>
@@ -236,4 +239,6 @@ class CompressedStorage
};
} // namespace internal
#endif // EIGEN_COMPRESSED_STORAGE_H

View File

@@ -63,7 +63,6 @@ class MappedSparseMatrix
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
inline Index innerSize() const { return m_innerSize; }
inline Index outerSize() const { return m_outerSize; }
inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
//----------------------------------------
// direct access interface

View File

@@ -83,7 +83,7 @@ class SparseMatrix
typedef MappedSparseMatrix<Scalar,Flags> Map;
using Base::IsRowMajor;
typedef CompressedStorage<Scalar,Index> Storage;
typedef internal::CompressedStorage<Scalar,Index> Storage;
enum {
Options = _Options
};
@@ -96,7 +96,7 @@ class SparseMatrix
Index m_innerSize;
Index* m_outerIndex;
Index* m_innerNonZeros; // optional, if null then the data is compressed
CompressedStorage<Scalar,Index> m_data;
Storage m_data;
Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
@@ -105,17 +105,15 @@ class SparseMatrix
inline bool compressed() const { return m_innerNonZeros==0; }
/** \returns the number of rows of the matrix */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \returns the number of columns of the matrix */
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
/** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */
inline Index innerSize() const { return m_innerSize; }
/** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */
inline Index outerSize() const { return m_outerSize; }
/** \returns the number of non zeros in the inner vector \a j
*/
inline Index innerNonZeros(Index j) const
{
return m_innerNonZeros ? m_innerNonZeros[j] : m_outerIndex[j+1]-m_outerIndex[j];
}
/** \internal
* \returns a const pointer to the array of values */

View File

@@ -79,11 +79,11 @@ class SparseVector
Options = _Options
};
CompressedStorage<Scalar,Index> m_data;
internal::CompressedStorage<Scalar,Index> m_data;
Index m_size;
CompressedStorage<Scalar,Index>& _data() { return m_data; }
CompressedStorage<Scalar,Index>& _data() const { return m_data; }
internal::CompressedStorage<Scalar,Index>& _data() { return m_data; }
internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; }
public:
@@ -91,7 +91,6 @@ class SparseVector
EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { eigen_assert(j==0); return m_size; }
EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }
@@ -330,7 +329,7 @@ class SparseVector<Scalar,_Options,_Index>::InnerIterator
eigen_assert(outer==0);
}
InnerIterator(const CompressedStorage<Scalar,Index>& data)
InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
: m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
{}
@@ -351,7 +350,7 @@ class SparseVector<Scalar,_Options,_Index>::InnerIterator
inline operator bool() const { return (m_id < m_end); }
protected:
const CompressedStorage<Scalar,Index>& m_data;
const internal::CompressedStorage<Scalar,Index>& m_data;
Index m_id;
const Index m_end;
};

View File

@@ -86,7 +86,7 @@ template<typename _Scalar, int _Options, typename _Index>
typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
Index m_innerSize;
std::vector<CompressedStorage<Scalar,Index> > m_data;
std::vector<internal::CompressedStorage<Scalar,Index> > m_data;
public:
@@ -96,8 +96,8 @@ template<typename _Scalar, int _Options, typename _Index>
inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
std::vector<CompressedStorage<Scalar,Index> >& _data() { return m_data; }
const std::vector<CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
std::vector<internal::CompressedStorage<Scalar,Index> >& _data() { return m_data; }
const std::vector<internal::CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
/** \returns the coefficient value at given position \a row, \a col
 * This operation involves a log(rho*outer_size) binary search.

View File

@@ -177,7 +177,7 @@ void SparseLLT<_MatrixType,Backend>::compute(const _MatrixType& a)
m_matrix.resize(size, size);
// allocate a temporary vector for accumulations
AmbiVector<Scalar,Index> tempVector(size);
internal::AmbiVector<Scalar,Index> tempVector(size);
RealScalar density = a.nonZeros()/RealScalar(size*size);
// TODO estimate the number of non zeros
@@ -222,7 +222,7 @@ void SparseLLT<_MatrixType,Backend>::compute(const _MatrixType& a)
RealScalar rx = internal::sqrt(internal::real(x));
m_matrix.insert(j,j) = rx; // FIXME use insertBack
Scalar y = Scalar(1)/rx;
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector, m_precision*rx); it; ++it)
for (typename internal::AmbiVector<Scalar,Index>::Iterator it(tempVector, m_precision*rx); it; ++it)
{
// FIXME use insertBack
m_matrix.insertBack(it.index(), j) = it.value() * y;