Optimise assignment into a Block<SparseMatrix> by using Ref and avoiding useless updates in non-compressed mode. This makes row-by-row filling of a row-major sparse matrix very efficient.
parent 945b80c83e
commit f25bdc707f
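For context, here is a minimal usage sketch (not part of the commit; names and sizes are arbitrary) of the pattern this change targets: filling a row-major sparse matrix one row at a time through Block<SparseMatrix> assignment. Because the right-hand side is itself a row-major SparseMatrix with matching scalar and index types, the Ref introduced below can presumably bind to it directly instead of evaluating a temporary.

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  const int rows = 1000, cols = 1000;
  SparseMatrix<double, RowMajor> A(rows, cols);

  // Reusable 1 x cols row buffer; its type matches the destination block.
  SparseMatrix<double, RowMajor> row(1, cols);

  for (int i = 0; i < rows; ++i)
  {
    row.setZero();                  // drop the previous row's entries
    row.insert(0, i) = 2.0;
    if (i + 1 < cols)
      row.insert(0, i + 1) = -1.0;
    A.row(i) = row;                 // Block<SparseMatrix> assignment optimised by this commit
  }
  return 0;
}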
@@ -114,7 +114,8 @@ public:
       // and/or it is not at the end of the nonzeros of the underlying matrix.
 
       // 1 - eval to a temporary to avoid transposition and/or aliasing issues
-      SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> tmp(other);
+      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
+      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());
 
       // 2 - let's check whether there is enough allocated memory
       Index nnz = tmp.nonZeros();
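As a rough illustration of why the Ref-based temporary above is cheaper (my own sketch, not part of the patch, assuming the documented Ref<const SparseMatrix> behaviour): Ref binds in place when the right-hand side is already a compressed sparse matrix of the required type, and only falls back to evaluating into an internal temporary otherwise, whereas the old code always built a full SparseMatrix copy.

#include <Eigen/Sparse>
using namespace Eigen;

int main()
{
  typedef SparseMatrix<double, RowMajor, int> SpMat;

  SpMat A(10, 10);
  A.insert(3, 4) = 1.0;
  A.makeCompressed();

  Ref<const SpMat> r1(A);        // same type and storage: binds to A's arrays, no copy
  Ref<const SpMat> r2(2.0 * A);  // expression: evaluated into Ref's internal temporary

  return (r1.nonZeros() == r2.nonZeros()) ? 0 : 1;
}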
@@ -127,6 +128,7 @@ public:
                           ? Index(matrix.data().allocatedSize()) + block_size
                           : block_size;
 
+      bool update_trailing_pointers = false;
       if(nnz>free_size)
       {
         // realloc manually to reduce copies
@@ -135,8 +137,8 @@ public:
         internal::smart_copy(&m_matrix.data().value(0), &m_matrix.data().value(0) + start, &newdata.value(0));
         internal::smart_copy(&m_matrix.data().index(0), &m_matrix.data().index(0) + start, &newdata.index(0));
 
-        internal::smart_copy(&tmp.data().value(0), &tmp.data().value(0) + nnz, &newdata.value(start));
-        internal::smart_copy(&tmp.data().index(0), &tmp.data().index(0) + nnz, &newdata.index(start));
+        internal::smart_copy(tmp.valuePtr(), tmp.valuePtr() + nnz, &newdata.value(start));
+        internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz, &newdata.index(start));
 
         internal::smart_copy(&matrix.data().value(end), &matrix.data().value(end) + tail_size, &newdata.value(start+nnz));
         internal::smart_copy(&matrix.data().index(end), &matrix.data().index(end) + tail_size, &newdata.index(start+nnz));
@@ -144,35 +146,53 @@ public:
         newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
 
         matrix.data().swap(newdata);
+
+        update_trailing_pointers = true;
       }
       else
       {
-        // no need to realloc, simply copy the tail at its respective position and insert tmp
-        matrix.data().resize(start + nnz + tail_size);
+        if(m_matrix.isCompressed())
+        {
+          // no need to realloc, simply copy the tail at its respective position and insert tmp
+          matrix.data().resize(start + nnz + tail_size);
 
-        internal::smart_memmove(&matrix.data().value(end), &matrix.data().value(end) + tail_size, &matrix.data().value(start + nnz));
-        internal::smart_memmove(&matrix.data().index(end), &matrix.data().index(end) + tail_size, &matrix.data().index(start + nnz));
+          internal::smart_memmove(&matrix.data().value(end), &matrix.data().value(end) + tail_size, &matrix.data().value(start + nnz));
+          internal::smart_memmove(&matrix.data().index(end), &matrix.data().index(end) + tail_size, &matrix.data().index(start + nnz));
 
-        internal::smart_copy(&tmp.data().value(0), &tmp.data().value(0) + nnz, &matrix.data().value(start));
-        internal::smart_copy(&tmp.data().index(0), &tmp.data().index(0) + nnz, &matrix.data().index(start));
+          update_trailing_pointers = true;
+        }
+
+        internal::smart_copy(tmp.valuePtr(), tmp.valuePtr() + nnz, &matrix.data().value(start));
+        internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz, &matrix.data().index(start));
       }
 
-      // update innerNonZeros
-      if(!m_matrix.isCompressed())
-        for(Index j=0; j<m_outerSize.value(); ++j)
-          matrix.innerNonZeroPtr()[m_outerStart+j] = StorageIndex(tmp.innerVector(j).nonZeros());
-
-      // update outer index pointers
-      StorageIndex p = StorageIndex(start);
-      for(Index k=0; k<m_outerSize.value(); ++k)
+      // update outer index pointers and innerNonZeros
+      if(IsVectorAtCompileTime)
       {
-        matrix.outerIndexPtr()[m_outerStart+k] = p;
-        p += tmp.innerVector(k).nonZeros();
+        if(!m_matrix.isCompressed())
+          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
+        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
       }
-      StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
-      for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+      else
       {
-        matrix.outerIndexPtr()[k] += offset;
+        StorageIndex p = StorageIndex(start);
+        for(Index k=0; k<m_outerSize.value(); ++k)
+        {
+          Index nnz_k = tmp.innerVector(k).nonZeros();
+          if(!m_matrix.isCompressed())
+            matrix.innerNonZeroPtr()[m_outerStart+k] = StorageIndex(nnz_k);
+          matrix.outerIndexPtr()[m_outerStart+k] = p;
+          p += nnz_k;
+        }
+      }
+
+      if(update_trailing_pointers)
+      {
+        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
+        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+        {
+          matrix.outerIndexPtr()[k] += offset;
+        }
       }
 
       return derived();
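Finally, a small sketch of the "avoiding useless updates in non-compressed mode" part (again my own illustration, not from the patch, assuming the usual reserve()/innerNonZeroPtr() accessors): when the destination row already has enough reserved room, the new else branch writes the values in place, records the row's count in innerNonZeroPtr, and leaves the trailing outerIndexPtr entries untouched because update_trailing_pointers stays false.

#include <Eigen/Sparse>
#include <iostream>
using namespace Eigen;

int main()
{
  SparseMatrix<double, RowMajor> A(4, 4);
  A.reserve(VectorXi::Constant(4, 4));   // non-compressed mode: 4 free slots per row

  SparseMatrix<double, RowMajor> r(1, 4);
  r.insert(0, 2) = 3.0;

  A.row(1) = r;                          // fits in the reserved room: no realloc, no tail move

  // Expected with this patch: only row 1's bookkeeping changes.
  std::cout << "innerNonZero[1] = " << A.innerNonZeroPtr()[1] << "\n"
            << "outerIndex[4]   = " << A.outerIndexPtr()[4] << "\n";
  return 0;
}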