add more support for uncompressed mode

commit d1b54ecfa3
parent cda397b117
Author: Gael Guennebaud
Date:   2011-11-30 19:24:43 +01:00

3 changed files with 66 additions and 23 deletions
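For orientation, here is a minimal sketch (not part of this commit) of the two storage modes this patch extends support for. It only assumes the public SparseMatrix API visible in the diff below (reserve() taking a per-inner-vector VectorXi, insert()); makeCompressed() is assumed here to be the user-facing way of returning to compressed storage.

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(1000, 1000);          // column-major by default

  // Uncompressed mode: reserve room per inner vector (per column here), then
  // insert entries in arbitrary order; m_innerNonZeros tracks the fill of each
  // column while m_outerIndex keeps the reserved starting positions.
  A.reserve(Eigen::VectorXi::Constant(A.cols(), 6));  // guess: ~6 entries per column
  A.insert(3, 7)   = 1.0;
  A.insert(541, 7) = 2.0;

  // Compressed (CCS) mode: the holes are squeezed out, m_innerNonZeros is no
  // longer needed, and per-vector counts follow from consecutive outer indices.
  A.makeCompressed();
  return 0;
}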

File 1 of 3

@@ -237,15 +237,23 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
    Index nonZeros() const
    {
+     if(m_matrix.compressed())
        return  std::size_t(m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()])
              - std::size_t(m_matrix._outerIndexPtr()[m_outerStart]);
+     else if(m_outerSize.value()==0)
+       return 0;
+     else
+       return Map<const Matrix<Index,Size,1> >(m_matrix._innerNonZeroPtr(), m_outerSize.value()).sum();
    }

    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
      eigen_assert(nonZeros()>0);
+     if(m_matrix.compressed())
        return m_matrix._valuePtr()[m_matrix._outerIndexPtr()[m_outerStart+1]-1];
+     else
+       return m_matrix._valuePtr()[m_matrix._outerIndexPtr()[m_outerStart]+m_matrix._innerNonZeroPtr()[m_outerStart]-1];
    }
// template<typename Sparse>
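As a concrete illustration of the two nonZeros() branches above (numbers invented for the example): take a block spanning inner vectors 1..2, i.e. m_outerStart = 1 and m_outerSize = 2, of a 3-column matrix whose columns hold 1, 3 and 2 entries. In compressed mode the outer index array is [0, 1, 4, 6] and the count is outerIndex[1+2] - outerIndex[1] = 6 - 1 = 5. In uncompressed mode the same count comes from summing the per-vector fill, _innerNonZeroPtr() = [1, 3, 2], over the block's vectors: 3 + 2 = 5. (Note that the Map in the uncompressed branch starts at _innerNonZeroPtr() rather than _innerNonZeroPtr()+m_outerStart, which appears to assume m_outerStart == 0.)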

File 2 of 3: SparseMatrix.h

@@ -138,6 +138,15 @@ class SparseMatrix
* \returns a non-const pointer to the array of the starting positions of the inner vectors */
inline Index* _outerIndexPtr() { return m_outerIndex; }
+ /** \internal
+   * \returns a const pointer to the array of the number of non zeros of the inner vectors
+   * \warning it returns 0 in compressed mode */
+ inline const Index* _innerNonZeroPtr() const { return m_innerNonZeros; }
+ /** \internal
+   * \returns a non-const pointer to the array of the number of non zeros of the inner vectors
+   * \warning it returns 0 in compressed mode */
+ inline Index* _innerNonZeroPtr() { return m_innerNonZeros; }
inline Storage& data() { return m_data; }
inline const Storage& data() const { return m_data; }
@@ -357,7 +366,10 @@ class SparseMatrix
return insertUncompressed(row,col);
}
EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
{
return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
}
/** Must be called after inserting a set of non zero entries.
@@ -527,12 +539,7 @@ class SparseMatrix
}
else
{
- resize(other.rows(), other.cols());
- if(m_innerNonZeros)
- {
-   delete[] m_innerNonZeros;
-   m_innerNonZeros = 0;
- }
+ initAssignment(other);
if(other.compressed())
{
memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
@@ -549,20 +556,30 @@ class SparseMatrix
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Lhs, typename Rhs>
inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
- { return Base::operator=(product); }
+ {
+   initAssignment(product);
+   return Base::operator=(product);
+ }
template<typename OtherDerived>
inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
- { return Base::operator=(other.derived()); }
+ {
+   initAssignment(other.derived());
+   return Base::operator=(other.derived());
+ }
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
- { return Base::operator=(other.derived()); }
+ {
+   initAssignment(other.derived());
+   return Base::operator=(other.derived());
+ }
#endif
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
{
+ initAssignment(other.derived());
const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
if (needToTranspose)
{
@@ -574,7 +591,6 @@ class SparseMatrix
typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
OtherCopy otherCopy(other.derived());
- resize(other.rows(), other.cols());
Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
@@ -610,7 +626,7 @@ class SparseMatrix
else
{
// there is no special optimization
- return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+ return Base::operator=(other.derived());
}
}
@@ -640,6 +656,7 @@ class SparseMatrix
inline ~SparseMatrix()
{
delete[] m_outerIndex;
+ delete[] m_innerNonZeros;
}
/** Overloaded for performance */
@@ -650,6 +667,18 @@ class SparseMatrix
# endif
protected:
+ template<typename Other>
+ void initAssignment(const Other& other)
+ {
+   resize(other.rows(), other.cols());
+   if(m_innerNonZeros)
+   {
+     delete[] m_innerNonZeros;
+     m_innerNonZeros = 0;
+   }
+ }
/** \internal
* \sa insert(Index,Index) */
EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
@@ -820,9 +849,13 @@ class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
{
public:
InnerIterator(const SparseMatrix& mat, Index outer)
-   : m_values(mat._valuePtr()), m_indices(mat._innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]),
-     m_end(mat.m_outerIndex[outer+1])
- {}
+   : m_values(mat._valuePtr()), m_indices(mat._innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
+ {
+   if(mat.compressed())
+     m_end = mat.m_outerIndex[outer+1];
+   else
+     m_end = m_id + mat.m_innerNonZeros[outer];
+ }
inline InnerIterator& operator++() { m_id++; return *this; }
@@ -841,7 +874,7 @@ class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
const Index* m_indices;
const Index m_outer;
Index m_id;
- const Index m_end;
+ Index m_end;
};
#endif // EIGEN_SPARSEMATRIX_H
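With the InnerIterator change above, per-vector iteration works the same way in both storage modes; dropping the const on m_end is what lets the constructor pick its value at run time. A minimal usage sketch (not part of this commit), assuming a matrix left in uncompressed mode after some insert() calls:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4, 4);
  A.reserve(Eigen::VectorXi::Constant(4, 2));  // stay in uncompressed mode
  A.insert(0, 1) = 3.0;
  A.insert(2, 1) = 7.0;

  // The iterator now takes m_end from m_innerNonZeros instead of
  // m_outerIndex[outer+1], so this loop is valid before any makeCompressed().
  double sum = 0;
  for (Eigen::SparseMatrix<double>::InnerIterator it(A, 1); it; ++it)
    sum += it.value();                         // visits 3.0 and 7.0
  std::cout << sum << "\n";                    // prints 10
  return 0;
}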

File 3 of 3

@@ -78,11 +78,12 @@ initSparse(double density,
{
enum { IsRowMajor = SparseMatrix<Scalar,Opt2,Index>::IsRowMajor };
sparseMat.setZero();
- sparseMat.reserve(int(refMat.rows()*refMat.cols()*density));
+ //sparseMat.reserve(int(refMat.rows()*refMat.cols()*density));
+ sparseMat.reserve(VectorXi::Constant(IsRowMajor ? refMat.rows() : refMat.cols(), (1.5*density)*(IsRowMajor?refMat.cols():refMat.rows())));
for(int j=0; j<sparseMat.outerSize(); j++)
{
- sparseMat.startVec(j);
+ //sparseMat.startVec(j);
for(int i=0; i<sparseMat.innerSize(); i++)
{
int ai(i), aj(j);
@@ -104,7 +105,8 @@ initSparse(double density,
if (v!=Scalar(0))
{
- sparseMat.insertBackByOuterInner(j,i) = v;
+ //sparseMat.insertBackByOuterInner(j,i) = v;
+ sparseMat.insertByOuterInner(j,i) = v;
if (nonzeroCoords)
nonzeroCoords->push_back(Vector2i(ai,aj));
}
@@ -115,7 +117,7 @@ initSparse(double density,
refMat(ai,aj) = v;
}
}
- sparseMat.finalize();
+ //sparseMat.finalize();
}
template<typename Scalar,int Opt1,int Opt2,typename Index> void
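The test change above switches initSparse() from the compressed-mode filling protocol (startVec / insertBackByOuterInner / finalize, which requires strictly ordered insertion) to the uncompressed path exercised by this commit: a per-inner-vector reserve followed by insertByOuterInner. A condensed sketch of the same pattern outside the test harness (the function name is invented for the example; the reservation must be generous enough per column, mirroring what the test does):

#include <Eigen/Sparse>

// Fill a column-major sparse matrix through the uncompressed-mode path:
// per-column reservation, then insertByOuterInner calls that do not need
// the strict ordering required by insertBackByOuterInner.
Eigen::SparseMatrix<double> fillRandomish(int rows, int cols, double density)
{
  Eigen::SparseMatrix<double> m(rows, cols);
  m.reserve(Eigen::VectorXi::Constant(cols, int(1.5 * density * rows) + 1));
  for (int j = 0; j < m.outerSize(); ++j)        // outer = column here
    for (int i = 0; i < m.innerSize(); ++i)      // inner = row here
      if (Eigen::internal::random<double>(0, 1) < density)
        m.insertByOuterInner(j, i) = Eigen::internal::random<double>();
  return m;
}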