Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-04-16 14:49:39 +08:00)
New simplified API to fill sparse matrices (the old functions are deprecated). Basically, there are now only two functions to set a coefficient: 1) mat.coeffRef(row,col) = value; 2) mat.insert(row,col) = value. coeffRef has no limitation; insert assumes the coefficient has not already been set, and raises an assert otherwise. In addition, I added a much lower-level but more efficient filling mechanism, for internal use only.
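For illustration only, here is a minimal sketch of the two user-facing entry points described above. The function name, matrix size and values are made up, and it assumes an Eigen tree that contains this commit (header <Eigen/Sparse>, column-major SparseMatrix):

  #include <Eigen/Sparse>

  // Two ways to set a coefficient with the simplified API:
  //  - coeffRef(row,col) has no limitation (per the commit message);
  //  - insert(row,col) assumes the coefficient does not exist yet and asserts otherwise.
  void fillExample()                          // hypothetical helper
  {
    Eigen::SparseMatrix<double> mat(100,100); // arbitrary size for the example

    mat.coeffRef(3,7) = 1.0;  // always valid
    mat.insert(5,7)   = 2.0;  // valid only because (5,7) has not been set before

    mat.finalize();           // per the diff below, finalize() ends an insertion session
  }

The trade-off suggested by the message and the diff: insert() avoids the lookup that coeffRef() performs, at the cost of asserting when the coefficient already exists.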
parent ddb6e96d48
commit 2829314284
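The "much lower level" filling mechanism mentioned in the message is the setZero()/reserve(), startVec()/insertBack(), finalize() sequence that the diff below introduces and uses internally (see the SparseMatrixBase and test/sparse.h hunks). A rough, hypothetical sketch of that pattern, with made-up sizes and values, keeping in mind the message presents it as internal-use only:

  #include <Eigen/Sparse>

  // Coherent (sorted) filling: each inner vector is started explicitly and
  // coefficients are appended with strictly increasing inner indices.
  void lowLevelFillSketch()                       // hypothetical helper
  {
    Eigen::SparseMatrix<double> mat(4,4);         // column-major by default

    mat.setZero();
    mat.reserve(8);                               // rough estimate of the non zeros
    for (int j=0; j<mat.outerSize(); ++j)
    {
      mat.startVec(j);                            // must be called for each inner vector
      mat.insertBack(j, j) = 1.0;                 // (outer, inner), sorted insertion assumed
    }
    mat.finalize();                               // required after the insertion session
  }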
@@ -155,7 +155,7 @@ class CompressedStorage
    /** Like at(), but the search is performed in the range [start,end) */
    inline Scalar atInRange(size_t start, size_t end, int key, Scalar defaultValue = Scalar(0)) const
    {
-     if (start==end)
+     if (start>=end)
        return Scalar(0);
      else if (end>start && key==m_indices[end-1])
        return m_values[end-1];
@@ -110,14 +110,14 @@ class DynamicSparseMatrix

    class InnerIterator;

-   inline void setZero()
+   void setZero()
    {
      for (int j=0; j<outerSize(); ++j)
        m_data[j].clear();
    }

    /** \returns the number of non zero coefficients */
-   inline int nonZeros() const
+   int nonZeros() const
    {
      int res = 0;
      for (int j=0; j<outerSize(); ++j)
@@ -125,21 +125,39 @@ class DynamicSparseMatrix
        return res;
    }

-   /** Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
-   inline void startFill(int reserveSize = 1000)
+   /** \deprecated
+     * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+   EIGEN_DEPRECATED void startFill(int reserveSize = 1000)
    {
+     setZero();
+     reserve(reserveSize);
+   }
+
+   void reserve(int reserveSize = 1000)
+   {
      if (outerSize()>0)
      {
        int reserveSizePerVector = std::max(reserveSize/outerSize(),4);
        for (int j=0; j<outerSize(); ++j)
        {
          m_data[j].clear();
          m_data[j].reserve(reserveSizePerVector);
        }
      }
    }

+   inline void startVec(int /*outer*/) {}
+
+   inline Scalar& insertBack(int outer, int inner)
+   {
+     ei_assert(outer<int(m_data.size()) && inner<m_innerSize && "out of range");
+     ei_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
+               && "wrong sorted insertion");
+     m_data[outer].append(0, inner);
+     return m_data[outer].value(m_data[outer].size()-1);
+   }
+
-   /** inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
+   /** \deprecated use insert()
+     * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
      * 1 - the coefficient does not exist yet
      * 2 - this the coefficient with greater inner coordinate for the given outer coordinate.
      * In other words, assuming \c *this is column-major, then there must not exists any nonzero coefficient of coordinates
@@ -147,21 +165,24 @@ class DynamicSparseMatrix
      *
      * \see fillrand(), coeffRef()
      */
-   inline Scalar& fill(int row, int col)
+   EIGEN_DEPRECATED Scalar& fill(int row, int col)
    {
      const int outer = IsRowMajor ? row : col;
      const int inner = IsRowMajor ? col : row;
-     ei_assert(outer<int(m_data.size()) && inner<m_innerSize);
-     ei_assert((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner));
-     m_data[outer].append(0, inner);
-     return m_data[outer].value(m_data[outer].size()-1);
+     return insertBack(outer,inner);
    }

-   /** Like fill() but with random inner coordinates.
+   /** \deprecated use insert()
+     * Like fill() but with random inner coordinates.
      * Compared to the generic coeffRef(), the unique limitation is that we assume
      * the coefficient does not exist yet.
      */
-   inline Scalar& fillrand(int row, int col)
+   EIGEN_DEPRECATED Scalar& fillrand(int row, int col)
    {
+     return insert(row,col);
+   }
+
+   inline Scalar& insert(int row, int col)
+   {
      const int outer = IsRowMajor ? row : col;
      const int inner = IsRowMajor ? col : row;
@@ -181,8 +202,11 @@ class DynamicSparseMatrix
      return m_data[outer].value(id+1);
    }

-   /** Does nothing. Provided for compatibility with SparseMatrix. */
-   inline void endFill() {}
+   /** \deprecated use finalize()
+     * Does nothing. Provided for compatibility with SparseMatrix. */
+   EIGEN_DEPRECATED void endFill() {}
+
+   inline void finalize() {}

    void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
    {
@@ -224,19 +224,28 @@ class RandomSetter
      KeyType keyBitsMask = (1<<m_keyBitsOffset)-1;
      if (!SwapStorage) // also means the map is sorted
      {
-       mp_target->startFill(nonZeros());
+       mp_target->setZero();
+       mp_target->reserve(nonZeros());
+       int prevOuter = -1;
        for (int k=0; k<m_outerPackets; ++k)
        {
+         mp_target->startVec(k);
          const int outerOffset = (1<<OuterPacketBits) * k;
          typename HashMapType::iterator end = m_hashmaps[k].end();
          for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
          {
            const int outer = (it->first >> m_keyBitsOffset) + outerOffset;
            const int inner = it->first & keyBitsMask;
-           mp_target->fill(TargetRowMajor ? outer : inner, TargetRowMajor ? inner : outer) = it->second.value;
+           if (prevOuter!=outer)
+           {
+             for (int j=prevOuter+1;j<=outer;++j)
+               mp_target->startVec(j);
+             prevOuter = outer;
+           }
+           mp_target->insertBack(outer, inner) = it->second.value;
          }
        }
-       mp_target->endFill();
+       mp_target->finalize();
      }
      else
      {
@@ -135,7 +135,8 @@ void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
  RealScalar density = a.nonZeros()/RealScalar(size*size);

  // TODO estimate the number of non zeros
- m_matrix.startFill(a.nonZeros()*2);
+ m_matrix.setZero();
+ m_matrix.reserve(a.nonZeros()*2);
  for (int j = 0; j < size; ++j)
  {
    Scalar x = ei_real(a.coeff(j,j));
@@ -173,14 +174,15 @@ void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
    // copy the temporary vector to the respective m_matrix.col()
    // while scaling the result by 1/real(x)
    RealScalar rx = ei_sqrt(ei_real(x));
-   m_matrix.fill(j,j) = rx;
+   m_matrix.insert(j,j) = rx; // FIXME use insertBack
    Scalar y = Scalar(1)/rx;
    for (typename AmbiVector<Scalar>::Iterator it(tempVector, m_precision*rx); it; ++it)
    {
-     m_matrix.fill(it.index(), j) = it.value() * y;
+     // FIXME use insertBack
+     m_matrix.insert(it.index(), j) = it.value() * y;
    }
  }
- m_matrix.endFill();
+ m_matrix.finalize();
}

/** Computes b = L^-T L^-1 b */
@@ -118,33 +118,36 @@ class SparseMatrix

    class InnerIterator;

    /** Removes all non zeros */
    inline void setZero()
    {
      m_data.clear();
-     //if (m_outerSize)
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
-     // for (int i=0; i<m_outerSize; ++i)
-     //   m_outerIndex[i] = 0;
-     // if (m_outerSize)
-     //   m_outerIndex[i] = 0;
    }

    /** \returns the number of non zero coefficients */
    inline int nonZeros() const { return m_data.size(); }

-   /** Initializes the filling process of \c *this.
+   /** \deprecated use setZero() and reserve()
+     * Initializes the filling process of \c *this.
      * \param reserveSize approximate number of nonzeros
      * Note that the matrix \c *this is zero-ed.
      */
-   inline void startFill(int reserveSize = 1000)
+   EIGEN_DEPRECATED void startFill(int reserveSize = 1000)
    {
      setZero();
      m_data.reserve(reserveSize);
    }

+   /** Preallocates \a reserveSize non zeros */
+   inline void reserve(int reserveSize)
+   {
+     m_data.reserve(reserveSize);
+   }
+
-   /**
+   /** \deprecated use insert()
      */
-   inline Scalar& fill(int row, int col)
+   EIGEN_DEPRECATED Scalar& fill(int row, int col)
    {
      const int outer = IsRowMajor ? row : col;
      const int inner = IsRowMajor ? col : row;
@@ -172,45 +175,128 @@ class SparseMatrix
      m_data.append(0, inner);
      return m_data.value(id);
    }

+   //--- low level purely coherent filling ---
+
+   inline Scalar& insertBack(int outer, int inner)
+   {
+     ei_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "wrong sorted insertion");
+     ei_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "wrong sorted insertion");
+     int id = m_outerIndex[outer+1];
+     ++m_outerIndex[outer+1];
+     m_data.append(0, inner);
+     return m_data.value(id);
+   }
+
+   inline void startVec(int outer)
+   {
+     ei_assert(m_outerIndex[outer]==int(m_data.size()) && "you must call startVec on each inner vec");
+     ei_assert(m_outerIndex[outer+1]==0 && "you must call startVec on each inner vec");
+     m_outerIndex[outer+1] = m_outerIndex[outer];
+   }
+
+   //---
+
-   /** Like fill() but with random inner coordinates.
+   /** \deprecated use insert()
+     * Like fill() but with random inner coordinates.
      */
-   inline Scalar& fillrand(int row, int col)
+   EIGEN_DEPRECATED Scalar& fillrand(int row, int col)
    {
+     return insert(row,col);
+   }
+
+   /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
+     * The non zero coefficient must \b not already exist.
+     *
+     * \warning This function can be extremely slow if the non zero coefficients
+     * are not inserted in a coherent order.
+     *
+     * After an insertion session, you should call the finalize() function.
+     */
+   EIGEN_DONT_INLINE Scalar& insert(int row, int col)
+   {
      const int outer = IsRowMajor ? row : col;
      const int inner = IsRowMajor ? col : row;

+     int previousOuter = outer;
      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
-       // nothing special to do here
-       int i = outer;
-       while (i>=0 && m_outerIndex[i]==0)
+       while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
        {
-         m_outerIndex[i] = m_data.size();
-         --i;
+         m_outerIndex[previousOuter] = m_data.size();
+         --previousOuter;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }
      assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "invalid outer index");

+     // here we have to handle the tricky case where the outerIndex array
+     // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+     // the 2nd inner vector...
+     bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+                   && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
      size_t startId = m_outerIndex[outer];
      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
      size_t id = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
-     if (m_data.allocatedSize()<id+1)
+     if (m_data.allocatedSize()<=m_data.size())
      {
        // we need to reallocate the data, to reduce multiple reallocations
        // we use a smart resize algorithm based on the current filling ratio
-       // we use float to avoid overflows
-       float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer);
+       // in addition, we use float to avoid integers overflows
+       float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
        reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
-       // let's bounds the realloc ratio to
+       // furthermore we bound the realloc ratio to:
        // 1) reduce multiple minor realloc when the matrix is almost filled
        // 2) avoid to allocate too much memory when the matrix is almost empty
        reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f);
      }
-     m_data.resize(id+1,reallocRatio);
+     m_data.resize(m_data.size()+1,reallocRatio);

+     if (!isLastVec)
+     {
+       if (previousOuter==-1)
+       {
+         // oops wrong guess.
+         // let's correct the outer offsets
+         for (int k=0; k<=(outer+1); ++k)
+           m_outerIndex[k] = 0;
+         int k=outer+1;
+         while(m_outerIndex[k]==0)
+           m_outerIndex[k++] = 1;
+         while (k<=m_outerSize && m_outerIndex[k]!=0)
+           m_outerIndex[k++]++;
+         id = 0;
+         --k;
+         k = m_outerIndex[k]-1;
+         while (k>0)
+         {
+           m_data.index(k) = m_data.index(k-1);
+           m_data.value(k) = m_data.value(k-1);
+           k--;
+         }
+       }
+       else
+       {
+         // we are not inserting into the last inner vec
+         // update outer indices:
+         int j = outer+2;
+         while (j<=m_outerSize && m_outerIndex[j]!=0)
+           m_outerIndex[j++]++;
+         --j;
+         // shift data of last vecs:
+         int k = m_outerIndex[j]-1;
+         while (k>=int(id))
+         {
+           m_data.index(k) = m_data.index(k-1);
+           m_data.value(k) = m_data.value(k-1);
+           k--;
+         }
+       }
+     }
+
      while ( (id > startId) && (m_data.index(id-1) > inner) )
      {
@@ -223,7 +309,11 @@ class SparseMatrix
      return (m_data.value(id) = 0);
    }

-   inline void endFill()
+   EIGEN_DEPRECATED void endFill() { finalize(); }
+
+   /** Must be called after inserting a set of non zero entries.
+     */
+   inline void finalize()
    {
      int size = m_data.size();
      int i = m_outerSize;
@@ -156,26 +156,26 @@ template<typename Derived> class SparseMatrixBase
      ei_assert(( ((ei_traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
                  (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
                 "the transpose operation is supposed to be handled in SparseMatrix::operator=");

+     enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };

      const int outerSize = other.outerSize();
      //typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
      // thanks to shallow copies, we always eval to a tempary
      Derived temp(other.rows(), other.cols());

-     temp.startFill(std::max(this->rows(),this->cols())*2);
+     temp.reserve(std::max(this->rows(),this->cols())*2);
      for (int j=0; j<outerSize; ++j)
      {
+       temp.startVec(j);
        for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
        {
          Scalar v = it.value();
-         if (v!=Scalar(0))
-         {
-           if (OtherDerived::Flags & RowMajorBit) temp.fill(j,it.index()) = v;
-           else temp.fill(it.index(),j) = v;
-         }
+         temp.insertBack(Flip?it.index():j,Flip?j:it.index()) = v;
        }
      }
-     temp.endFill();
+     temp.finalize();

      derived() = temp.markAsRValue();
    }
@@ -193,20 +193,19 @@ template<typename Derived> class SparseMatrixBase
    {
      // eval without temporary
      derived().resize(other.rows(), other.cols());
-     derived().startFill(std::max(this->rows(),this->cols())*2);
+     derived().setZero();
+     derived().reserve(std::max(this->rows(),this->cols())*2);
      for (int j=0; j<outerSize; ++j)
      {
+       derived().startVec(j);
        for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
        {
          Scalar v = it.value();
-         if (v!=Scalar(0))
-         {
-           if (IsRowMajor) derived().fill(j,it.index()) = v;
-           else derived().fill(it.index(),j) = v;
-         }
+         derived().insertBack(j,it.index()) = v;
        }
      }
-     derived().endFill();
+     derived().finalize();
    }
    else
    {
@@ -192,7 +192,8 @@ static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& r
  float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);

  res.resize(rows, cols);
- res.startFill(int(ratioRes*rows*cols));
+ res.setZero();
+ res.reserve(int(ratioRes*rows*cols));
  for (int j=0; j<cols; ++j)
  {
    // let's do a more accurate determination of the nnz ratio for the current column j of res
@@ -211,13 +212,11 @@ static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& r
          tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
        }
      }
+     res.startVec(j);
      for (typename AmbiVector<Scalar>::Iterator it(tempVector); it; ++it)
-       if (ResultType::Flags&RowMajorBit)
-         res.fill(j,it.index()) = it.value();
-       else
-         res.fill(it.index(), j) = it.value();
+       res.insertBack(j,it.index()) = it.value();
    }
-   res.endFill();
+   res.finalize();
  }

  template<typename Lhs, typename Rhs, typename ResultType,
@@ -1,7 +1,7 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra. Eigen itself is part of the KDE project.
 //
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2009w Gael Guennebaud <g.gael@free.fr>
 //
 // Eigen is free software; you can redistribute it and/or
 // modify it under the terms of the GNU Lesser General Public
@@ -120,42 +120,32 @@ class SparseVector
    /** \returns the number of non zero coefficients */
    inline int nonZeros() const { return m_data.size(); }

-   /**
-     */
-   inline void reserve(int reserveSize) { m_data.reserve(reserveSize); }
-
-   inline void startFill(int reserve)
+   inline void startVec(int outer)
    {
-     setZero();
-     m_data.reserve(reserve);
+     ei_assert(outer==0);
    }

-   /**
-     */
-   inline Scalar& fill(int r, int c)
+   inline Scalar& insertBack(int outer, int inner)
    {
-     ei_assert(r==0 || c==0);
-     return fill(IsColVector ? r : c);
+     ei_assert(outer==0);
+     return insertBack(inner);
    }

-   inline Scalar& fill(int i)
+   inline Scalar& insertBack(int i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

-   inline Scalar& fillrand(int r, int c)
+   inline Scalar& insert(int outer, int inner)
    {
-     ei_assert(r==0 || c==0);
-     return fillrand(IsColVector ? r : c);
+     ei_assert(outer==0);
+     return insert(inner);
    }

-   /** Like fill() but with random coordinates.
-     */
-   inline Scalar& fillrand(int i)
+   Scalar& insert(int i)
    {
      int startId = 0;
      int id = m_data.size() - 1;
      // TODO smart realloc
      m_data.resize(id+2,1);

      while ( (id >= startId) && (m_data.index(id) > i) )
@@ -169,8 +159,48 @@ class SparseVector
      return m_data.value(id+1);
    }

-   inline void endFill() {}
+   /**
+     */
+   inline void reserve(int reserveSize) { m_data.reserve(reserveSize); }
+
+   /** \deprecated use setZero() and reserve() */
+   EIGEN_DEPRECATED void startFill(int reserve)
+   {
+     setZero();
+     m_data.reserve(reserve);
+   }
+
+   /** \deprecated use insertBack(int,int) */
+   EIGEN_DEPRECATED Scalar& fill(int r, int c)
+   {
+     ei_assert(r==0 || c==0);
+     return fill(IsColVector ? r : c);
+   }
+
+   /** \deprecated use insertBack(int) */
+   EIGEN_DEPRECATED Scalar& fill(int i)
+   {
+     m_data.append(0, i);
+     return m_data.value(m_data.size()-1);
+   }
+
+   /** \deprecated use insert(int,int) */
+   EIGEN_DEPRECATED Scalar& fillrand(int r, int c)
+   {
+     ei_assert(r==0 || c==0);
+     return fillrand(IsColVector ? r : c);
+   }
+
+   /** \deprecated use insert(int) */
+   EIGEN_DEPRECATED Scalar& fillrand(int i)
+   {
+     return insert(i);
+   }
+
+   /** \deprecated use finalize() */
+   EIGEN_DEPRECATED void endFill() {}
+   inline void finalize() {}

    void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
    {
      m_data.prune(reference,epsilon);
@@ -212,7 +212,7 @@ struct ei_sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
    tempVector.setBounds(0,other.rows());

    Rhs res(other.rows(), other.cols());
-   res.startFill(other.nonZeros());
+   res.reserve(other.nonZeros());

    for(int col=0 ; col<other.cols() ; ++col)
    {
@@ -269,11 +269,12 @@ struct ei_sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
          ++ count;
          // std::cerr << "fill " << it.index() << ", " << col << "\n";
          // std::cout << it.value() << " ";
-         res.fill(it.index(), col) = it.value();
+         // FIXME use insertBack
+         res.insert(it.index(), col) = it.value();
        }
        // std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
      }
-     res.endFill();
+     res.finalize();
      other = res.markAsRValue();
    }
  };
@@ -64,9 +64,11 @@ initSparse(double density,
           std::vector<Vector2i>* zeroCoords = 0,
           std::vector<Vector2i>* nonzeroCoords = 0)
{
- sparseMat.startFill(int(refMat.rows()*refMat.cols()*density));
+ sparseMat.setZero();
+ sparseMat.reserve(int(refMat.rows()*refMat.cols()*density));
  for(int j=0; j<refMat.cols(); j++)
  {
+   sparseMat.startVec(j);
    for(int i=0; i<refMat.rows(); i++)
    {
      Scalar v = (ei_random<double>(0,1) < density) ? ei_random<Scalar>() : Scalar(0);
@@ -85,7 +87,7 @@ initSparse(double density,

      if (v!=Scalar(0))
      {
-       sparseMat.fill(i,j) = v;
+       sparseMat.insertBack(j,i) = v;
        if (nonzeroCoords)
          nonzeroCoords->push_back(Vector2i(i,j));
      }
@@ -96,7 +98,7 @@ initSparse(double density,
        refMat(i,j) = v;
      }
    }
- sparseMat.endFill();
+ sparseMat.finalize();
}

template<typename Scalar> void
@@ -107,9 +109,11 @@ initSparse(double density,
           std::vector<Vector2i>* zeroCoords = 0,
           std::vector<Vector2i>* nonzeroCoords = 0)
{
- sparseMat.startFill(int(refMat.rows()*refMat.cols()*density));
+ sparseMat.setZero();
+ sparseMat.reserve(int(refMat.rows()*refMat.cols()*density));
  for(int j=0; j<refMat.cols(); j++)
  {
+   sparseMat.startVec(j); // not needed for DynamicSparseMatrix
    for(int i=0; i<refMat.rows(); i++)
    {
      Scalar v = (ei_random<double>(0,1) < density) ? ei_random<Scalar>() : Scalar(0);
@@ -128,7 +132,7 @@ initSparse(double density,

      if (v!=Scalar(0))
      {
-       sparseMat.fill(i,j) = v;
+       sparseMat.insertBack(j,i) = v;
        if (nonzeroCoords)
          nonzeroCoords->push_back(Vector2i(i,j));
      }
@@ -139,7 +143,7 @@ initSparse(double density,
        refMat(i,j) = v;
      }
    }
- sparseMat.endFill();
+ sparseMat.finalize();
}

template<typename Scalar> void
@@ -156,7 +160,7 @@ initSparse(double density,
    Scalar v = (ei_random<double>(0,1) < density) ? ei_random<Scalar>() : Scalar(0);
    if (v!=Scalar(0))
    {
-     sparseVec.fill(i) = v;
+     sparseVec.insertBack(i) = v;
      if (nonzeroCoords)
        nonzeroCoords->push_back(i);
    }
@@ -177,22 +177,39 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
  VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, GoogleSparseHashMapTraits> >(m,refMat,nonzeroCoords) ));
  #endif

- // test fillrand
+ // test insert (inner random)
  {
    DenseMatrix m1(rows,cols);
    m1.setZero();
    SparseMatrixType m2(rows,cols);
-   m2.startFill();
+   m2.reserve(10);
    for (int j=0; j<cols; ++j)
    {
      for (int k=0; k<rows/2; ++k)
      {
        int i = ei_random<int>(0,rows-1);
        if (m1.coeff(i,j)==Scalar(0))
-         m2.fillrand(i,j) = m1(i,j) = ei_random<Scalar>();
+         m2.insert(i,j) = m1(i,j) = ei_random<Scalar>();
      }
    }
-   m2.endFill();
+   m2.finalize();
    VERIFY_IS_APPROX(m2,m1);
  }

+ // test insert (fully random)
+ {
+   DenseMatrix m1(rows,cols);
+   m1.setZero();
+   SparseMatrixType m2(rows,cols);
+   m2.reserve(10);
+   for (int k=0; k<rows*cols; ++k)
+   {
+     int i = ei_random<int>(0,rows-1);
+     int j = ei_random<int>(0,cols-1);
+     if (m1.coeff(i,j)==Scalar(0))
+       m2.insert(i,j) = m1(i,j) = ei_random<Scalar>();
+   }
+   m2.finalize();
+   VERIFY_IS_APPROX(m2,m1);
+ }

@@ -291,8 +308,9 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
    refM2.setZero();
    int countFalseNonZero = 0;
    int countTrueNonZero = 0;
-   m2.startFill();
    for (int j=0; j<m2.outerSize(); ++j)
    {
+     m2.startVec(j);
      for (int i=0; i<m2.innerSize(); ++i)
      {
        float x = ei_random<float>(0,1);
@@ -303,15 +321,16 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
        else if (x<0.5)
        {
          countFalseNonZero++;
-         m2.fill(i,j) = Scalar(0);
+         m2.insertBack(j,i) = Scalar(0);
        }
        else
        {
          countTrueNonZero++;
-         m2.fill(i,j) = refM2(i,j) = Scalar(1);
+         m2.insertBack(j,i) = refM2(i,j) = Scalar(1);
        }
      }
-     m2.endFill();
    }
+   m2.finalize();
    VERIFY(countFalseNonZero+countTrueNonZero == m2.nonZeros());
    VERIFY_IS_APPROX(m2, refM2);
    m2.prune(1);
@@ -37,12 +37,12 @@ initSPD(double density,
    initSparse(density,aux,sparseMat,ForceNonZeroDiag);
    refMat += aux * aux.adjoint();
  }
- sparseMat.startFill();
+ sparseMat.setZero();
  for (int j=0 ; j<sparseMat.cols(); ++j)
    for (int i=j ; i<sparseMat.rows(); ++i)
      if (refMat(i,j)!=Scalar(0))
-       sparseMat.fill(i,j) = refMat(i,j);
- sparseMat.endFill();
+       sparseMat.insert(i,j) = refMat(i,j);
+ sparseMat.finalize();
}

template<typename Scalar> void sparse_solvers(int rows, int cols)