mirror of https://gitlab.com/libeigen/eigen.git
Sparse Core: Replace malloc/free with conditional_aligned
This commit is contained in:
parent 6aad0f821b
commit b2a13c9dd1
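For orientation, here is a minimal sketch (not part of the commit) of the `Eigen::internal` allocation helpers the diff below switches to. The `int` element type and the element counts are illustrative only; the call signatures match the ones visible in the changed lines.

```cpp
#include <Eigen/Core>

int main() {
  using namespace Eigen;

  // Allocate 16 elements; the boolean template parameter selects the
  // conditionally aligned allocation path used throughout this commit.
  int* buf = internal::conditional_aligned_new_auto<int, true>(16);

  // Grow to 32 elements while preserving the existing ones; the helper
  // takes the old pointer, the new size, then the old size.
  buf = internal::conditional_aligned_realloc_new_auto<int, true>(buf, 32, 16);

  // Release with the matching deleter, passing the current element count.
  internal::conditional_aligned_delete_auto<int, true>(buf, 32);
  return 0;
}
```

Unlike raw `std::malloc`/`std::realloc`/`std::free`, these helpers follow Eigen's alignment policy, handle construction of non-trivial element types, and signal allocation failure via `throw_std_bad_alloc()`, which is why the explicit null checks after each allocation are dropped in the diff below.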
@@ -71,8 +71,8 @@ class CompressedStorage
     ~CompressedStorage()
     {
-      delete[] m_values;
-      delete[] m_indices;
+      conditional_aligned_delete_auto<Scalar, true>(m_values, m_allocatedSize);
+      conditional_aligned_delete_auto<StorageIndex, true>(m_indices, m_allocatedSize);
     }

     void reserve(Index size)
@@ -180,24 +180,13 @@ class CompressedStorage
     {
       if (m_allocatedSize<m_size+1)
       {
-        m_allocatedSize = 2*(m_size+1);
-        internal::scoped_array<Scalar> newValues(m_allocatedSize);
-        internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
-
-        // copy first chunk
-        internal::smart_copy(m_values,  m_values +id, newValues.ptr());
-        internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
-
-        // copy the rest
-        if(m_size>id)
-        {
-          internal::smart_copy(m_values +id,  m_values +m_size, newValues.ptr() +id+1);
-          internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
-        }
-        std::swap(m_values,newValues.ptr());
-        std::swap(m_indices,newIndices.ptr());
+        Index newAllocatedSize = 2 * (m_size + 1);
+        m_values = conditional_aligned_realloc_new_auto<Scalar, true>(m_values, newAllocatedSize, m_allocatedSize);
+        m_indices =
+            conditional_aligned_realloc_new_auto<StorageIndex, true>(m_indices, newAllocatedSize, m_allocatedSize);
+        m_allocatedSize = newAllocatedSize;
       }
-      else if(m_size>id)
+      if(m_size>id)
       {
         internal::smart_memmove(m_values +id,  m_values +m_size, m_values +id+1);
         internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
@@ -233,15 +222,8 @@ class CompressedStorage
       EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
       #endif
       eigen_internal_assert(size!=m_allocatedSize);
-      internal::scoped_array<Scalar> newValues(size);
-      internal::scoped_array<StorageIndex> newIndices(size);
-      Index copySize = (std::min)(size, m_size);
-      if (copySize>0) {
-        internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
-        internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
-      }
-      std::swap(m_values,newValues.ptr());
-      std::swap(m_indices,newIndices.ptr());
+      m_values = conditional_aligned_realloc_new_auto<Scalar, true>(m_values, size, m_allocatedSize);
+      m_indices = conditional_aligned_realloc_new_auto<StorageIndex, true>(m_indices, size, m_allocatedSize);
       m_allocatedSize = size;
     }

@@ -80,12 +80,18 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)

   const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
   const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();

+  Index reserveSize = 0;
+  for (Index j = 0; j < outerEvaluationSize; ++j)
+    for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+      reserveSize++;
+
   if ((!transpose) && src.isRValue())
   {
     // eval without temporary
     dst.resize(src.rows(), src.cols());
     dst.setZero();
-    dst.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
+    dst.reserve(reserveSize);
     for (Index j=0; j<outerEvaluationSize; ++j)
     {
       dst.startVec(j);
@@ -109,7 +115,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)

     DstXprType temp(src.rows(), src.cols());

-    temp.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
+    temp.reserve(reserveSize);
     for (Index j=0; j<outerEvaluationSize; ++j)
     {
       temp.startVec(j);
@@ -302,8 +302,7 @@ class SparseMatrix
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
-        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
-        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+        m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);

        // temporarily use m_innerSizes to hold the new starting points.
        StorageIndex* newOuterIndex = m_innerNonZeros;
@@ -336,8 +335,7 @@ class SparseMatrix
      }
      else
      {
-        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
-        if (!newOuterIndex) internal::throw_std_bad_alloc();
+        StorageIndex* newOuterIndex = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + 1);

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
@@ -365,7 +363,7 @@ class SparseMatrix
        }

        std::swap(m_outerIndex, newOuterIndex);
-        std::free(newOuterIndex);
+        internal::conditional_aligned_delete_auto<StorageIndex, true>(newOuterIndex, m_outerSize + 1);
      }

    }
@@ -488,7 +486,7 @@ class SparseMatrix
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
-      std::free(m_innerNonZeros);
+      internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
@@ -499,7 +497,7 @@ class SparseMatrix
    {
      if(m_innerNonZeros != 0)
        return;
-      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+      m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
@@ -569,9 +567,8 @@ class SparseMatrix
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
-        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
-        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
-        m_innerNonZeros = newInnerNonZeros;
+        m_innerNonZeros = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
+            m_innerNonZeros, m_outerSize + outerChange, m_outerSize);

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
@@ -579,8 +576,7 @@ class SparseMatrix
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
-        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
-        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+        m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize + outerChange);
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
@@ -604,9 +600,8 @@ class SparseMatrix
      if (outerChange == 0)
        return;

-      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
-      if (!newOuterIndex) internal::throw_std_bad_alloc();
-      m_outerIndex = newOuterIndex;
+      m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(
+          m_outerIndex, m_outerSize + outerChange + 1, m_outerSize + 1);
      if (outerChange > 0)
      {
        StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
@@ -630,15 +625,13 @@ class SparseMatrix
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
-        std::free(m_outerIndex);
-        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
-        if (!m_outerIndex) internal::throw_std_bad_alloc();
-
+        m_outerIndex = internal::conditional_aligned_realloc_new_auto<StorageIndex, true>(m_outerIndex, outerSize + 1,
+                                                                                          m_outerSize + 1);
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
-        std::free(m_innerNonZeros);
+        internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
        m_innerNonZeros = 0;
      }
      std::fill_n(m_outerIndex, m_outerSize + 1, StorageIndex(0));
@@ -746,7 +739,7 @@ class SparseMatrix
      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
-      std::free(m_innerNonZeros);
+      internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
      m_innerNonZeros = 0;
    }
    inline SparseMatrix& operator=(const SparseMatrix& other)
@@ -836,8 +829,8 @@ class SparseMatrix
    /** Destructor */
    inline ~SparseMatrix()
    {
-      std::free(m_outerIndex);
-      std::free(m_innerNonZeros);
+      internal::conditional_aligned_delete_auto<StorageIndex, true>(m_outerIndex, m_outerSize + 1);
+      internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
    }

    /** Overloaded for performance */
@@ -855,7 +848,7 @@ protected:
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
-        std::free(m_innerNonZeros);
+        internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
        m_innerNonZeros = 0;
      }
    }
@@ -1154,7 +1147,7 @@ void SparseMatrix<Scalar,Options_,StorageIndex_>::collapseDuplicates(DupFunctor
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
-  std::free(m_innerNonZeros);
+  internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
@@ -1249,8 +1242,7 @@ typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scal
    m_data.reserve(2*m_innerSize);

    // turn the matrix into non-compressed mode
-    m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
-    if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+    m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);

    std::fill(m_innerNonZeros, m_innerNonZeros + m_outerSize, StorageIndex(0));

@@ -1263,8 +1255,7 @@ typename SparseMatrix<Scalar_,Options_,StorageIndex_>::Scalar& SparseMatrix<Scal
    else
    {
      // turn the matrix into non-compressed mode
-      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
-      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+      m_innerNonZeros = internal::conditional_aligned_new_auto<StorageIndex, true>(m_outerSize);
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }