Sparse module: add a more flexible SparseMatrix::fillrand() function

which allows filling a matrix with random inner coordinates (this only makes
sense when very few coefficients are inserted per column/row)
Gael Guennebaud 2008-12-11 18:26:24 +00:00
parent beabf008b0
commit 5015e48361
4 changed files with 68 additions and 5 deletions
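As context for the diff below: fillrand() is used inside the same startFill()/endFill() sequence as fill(), the only difference being that the inner (row, for a column-major matrix) coordinates of the current outer vector may arrive in any order. A minimal usage sketch, not part of the commit, mirroring the test added at the end of this diff; the include path and the concrete sizes and values are illustrative assumptions for the 2008-era API:

#include <Eigen/Sparse>    // sparse module header (assumed path for this 2008-era tree)

// Fill a small column-major sparse matrix; within each column the row
// indices are given out of order, which fill() would not accept but
// fillrand() handles by shifting the coefficients already stored.
Eigen::SparseMatrix<double> buildExample()
{
  Eigen::SparseMatrix<double> m(4,4);
  m.startFill(8);           // zeroes the matrix and reserves ~8 nonzeros
  m.fillrand(2,0) = 1.0;    // column 0, rows inserted out of order
  m.fillrand(0,0) = 2.0;
  m.fillrand(3,1) = 3.0;    // column 1
  m.fillrand(1,3) = 4.0;    // column 3 (columns themselves still go left to right)
  m.endFill();              // finalizes the outer index array
  return m;
}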

View File

@@ -131,6 +131,10 @@ class SparseMatrix
     /** \returns the number of non zero coefficients */
     inline int nonZeros() const { return m_data.size(); }
+    /** Initializes the filling process of \c *this.
+      * \param reserveSize approximate number of nonzeros
+      * Note that the matrix \c *this is zero-ed.
+      */
     inline void startFill(int reserveSize = 1000)
     {
       m_data.clear();
@@ -139,13 +143,16 @@ class SparseMatrix
         m_outerIndex[i] = 0;
     }
+    /**
+      */
     inline Scalar& fill(int row, int col)
     {
       const int outer = RowMajor ? row : col;
       const int inner = RowMajor ? col : row;
+//       std::cout << " fill " << outer << "," << inner << "\n";
       if (m_outerIndex[outer+1]==0)
       {
+        // we start a new inner vector
         int i = outer;
         while (i>=0 && m_outerIndex[i]==0)
         {
@@ -162,6 +169,42 @@ class SparseMatrix
       return m_data.value(id);
     }
+    /** Like fill() but with random inner coordinates.
+      */
+    inline Scalar& fillrand(int row, int col)
+    {
+      const int outer = RowMajor ? row : col;
+      const int inner = RowMajor ? col : row;
+      if (m_outerIndex[outer+1]==0)
+      {
+        // we start a new inner vector
+        // nothing special to do here
+        int i = outer;
+        while (i>=0 && m_outerIndex[i]==0)
+        {
+          m_outerIndex[i] = m_data.size();
+          --i;
+        }
+        m_outerIndex[outer+1] = m_outerIndex[outer];
+      }
+      //
+      assert(m_outerIndex[outer+1] == m_data.size() && "invalid outer index");
+      int startId = m_outerIndex[outer];
+      int id = m_outerIndex[outer+1]-1;
+      m_outerIndex[outer+1]++;
+      m_data.resize(id+2);
+      while ( (id >= startId) && (m_data.index(id) > inner) )
+      {
+        m_data.index(id+1) = m_data.index(id);
+        m_data.value(id+1) = m_data.value(id);
+        --id;
+      }
+      m_data.index(id+1) = inner;
+      return (m_data.value(id+1) = 0);
+    }
     inline void endFill()
     {
       int size = m_data.size();
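What fillrand() does, in words: it grows the compressed storage by one slot at the end of the current (last started) outer vector, shifts every stored coefficient of that vector whose inner index is larger than the new one a single position to the right, and writes the new coefficient at the freed sorted position; the assert guarantees that only the last started outer vector is being written, so outer vectors must still be created in increasing order. A standalone sketch of that shift-insert step, using plain std::vector pairs in place of the internal m_data array (the names and signature here are illustrative, not Eigen's):

#include <vector>

// Illustration only: insert (inner, value) into the tail segment
// [start, indices.size()) of two parallel arrays that mimic m_data,
// keeping that segment sorted by inner index, as fillrand() does.
void insertShifted(std::vector<int>& indices, std::vector<double>& values,
                   int start, int inner, double value)
{
  int id = int(indices.size()) - 1;     // last coefficient of the current outer vector
  indices.resize(indices.size() + 1);   // make room for one more coefficient
  values.resize(values.size() + 1);
  while (id >= start && indices[id] > inner)
  {
    indices[id+1] = indices[id];        // shift larger inner indices one slot right
    values[id+1]  = values[id];
    --id;
  }
  indices[id+1] = inner;                // write at the now-free sorted position
  values[id+1]  = value;
}

Each call costs O(number of coefficients already stored in that outer vector), which is why the commit message restricts fillrand() to the case where very few coefficients are inserted per column/row.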

View File

@@ -63,8 +63,8 @@ class SparseMatrixBase : public MatrixBase<Derived>
     inline Derived& operator=(const MatrixBase<OtherDerived>& other)
     {
       // std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
-      const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      ei_assert((!transpose) && "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+      //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+      ei_assert((!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit))) && "the transpose operation is supposed to be handled in SparseMatrix::operator=");
       const int outerSize = other.outerSize();
       //typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
       // thanks to shallow copies, we always eval to a tempary

View File

@@ -138,8 +138,8 @@ struct ei_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
   // make sure to call innerSize/outerSize since we fake the storage order.
   int rows = lhs.innerSize();
   int cols = rhs.outerSize();
-  int size = lhs.outerSize();
-  ei_assert(size == rhs.innerSize());
+  //int size = lhs.outerSize();
+  ei_assert(lhs.outerSize() == rhs.innerSize());
   // allocate a temporary buffer
   AmbiVector<Scalar> tempVector(rows);

View File

@@ -153,6 +153,26 @@ template<typename Scalar> void sparse_basic(int rows, int cols)
 #ifdef _SPARSE_HASH_MAP_H_
   VERIFY(( test_random_setter<RandomSetter<SparseMatrix<Scalar>, GoogleSparseHashMapTraits> >(m,refMat,nonzeroCoords) ));
 #endif
+  // test fillrand
+  {
+    DenseMatrix m1(rows,cols);
+    m1.setZero();
+    SparseMatrix<Scalar> m2(rows,cols);
+    m2.startFill();
+    for (int j=0; j<cols; ++j)
+    {
+      for (int k=0; k<rows/2; ++k)
+      {
+        int i = ei_random<int>(0,rows-1);
+        if (m1.coeff(i,j)==Scalar(0))
+          m2.fillrand(i,j) = m1(i,j) = ei_random<Scalar>();
+      }
+    }
+    m2.endFill();
+    std::cerr << m1 << "\n\n" << m2 << "\n";
+    VERIFY_IS_APPROX(m1,m2);
+  }
 //   {
 //     m.setZero();
 //     VERIFY_IS_NOT_APPROX(m, refMat);