Cleaning pass on the sparse modules:
- remove outdated/deprecated code
- improve the documentation a bit
This commit is contained in:
parent 2d621d235d
commit cda397b117
@@ -11,7 +11,7 @@ extern "C" {

namespace Eigen {

/** \ingroup Sparse_modules
/** \ingroup Support_modules
  * \defgroup CholmodSupport_Module CholmodSupport module
  *
  *
@@ -244,6 +244,10 @@ using std::ptrdiff_t;
  * \endcode
  */

/** \defgroup Support_modules Support modules [category]
  * Category of modules which add support for external libraries.
  */

#include "src/Core/util/Constants.h"
#include "src/Core/util/ForwardDeclarations.h"
#include "src/Core/util/Meta.h"
@@ -33,7 +33,8 @@

namespace Eigen {

/** \defgroup Eigen2Support_Module Eigen2 support module
/** \ingroup Support_modules
  * \defgroup Eigen2Support_Module Eigen2 support module
  * This module provides a couple of deprecated functions improving the compatibility with Eigen2.
  *
  * To use it, define EIGEN2_SUPPORT before including any Eigen header
@@ -30,7 +30,7 @@ namespace Eigen { struct SluMatrix; }

namespace Eigen {

/** \ingroup Sparse_modules
/** \ingroup Support_modules
  * \defgroup SuperLUSupport_Module SuperLUSupport module
  *
  * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting.
@@ -11,7 +11,7 @@ extern "C" {

namespace Eigen {

/** \ingroup Sparse_modules
/** \ingroup Support_modules
  * \defgroup UmfPackSupport_Module UmfPackSupport module
  *
  *
@@ -149,7 +149,9 @@ enum CholmodMode {
  CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt
};

/** \brief A Cholesky factorization and solver based on Cholmod
/** \ingroup CholmodSupport_Module
  * \class CholmodDecomposition
  * \brief A Cholesky factorization and solver based on Cholmod
  *
  * This class allows solving A.X = B sparse linear problems via an LL^T or LDL^T Cholesky factorization
  * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
@@ -159,6 +161,7 @@ enum CholmodMode {
  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
  * or Upper. Default is Lower.
  *
  * \sa TutorialSparseDirectSolvers
  */
template<typename _MatrixType, int _UpLo = Lower>
class CholmodDecomposition
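A minimal usage sketch of the solver documented above (hedged: names follow the 2011-era API shown in this diff; A and b are assumed inputs):

// Solve A.x = b with a Cholmod-backed Cholesky factorization.
// SparseMatrix<double> A;  // assumed: selfadjoint positive definite, Lower part stored
// VectorXd b;              // assumed: right-hand side of matching size
CholmodDecomposition<SparseMatrix<double>, Lower> solver;
solver.compute(A);              // analyze + factorize
VectorXd x = solver.solve(b);   // back/forward substitution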
@@ -28,7 +28,8 @@
/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core
 */

/** \class InnerIterator
/** \ingroup SparseCore_Module
  * \class InnerIterator
  * \brief An InnerIterator allows iterating over the elements of a sparse (or dense) matrix or expression
  *
  * todo
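The canonical iteration pattern for this class (a sketch; the matrix A and its scalar type are assumed):

// Sum all stored coefficients of a column-major sparse matrix.
double sum = 0;
for (int k = 0; k < A.outerSize(); ++k)                        // one outer (column) at a time
  for (SparseMatrix<double>::InnerIterator it(A, k); it; ++it) // walk that column's nonzeros
    sum += it.value();   // it.row(), it.col() and it.index() are also available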
@@ -315,42 +315,6 @@ template<typename Derived>
const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }

//----------

/** \deprecated see middleRows */
template<typename Derived>
SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size)
{
  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
  return innerVectors(start, size);
}

/** \deprecated see middleRows */
template<typename Derived>
const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size) const
{
  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
  return innerVectors(start, size);
}

/** \deprecated see middleCols */
template<typename Derived>
SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size)
{
  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
  return innerVectors(start, size);
}

/** \deprecated see middleCols */
template<typename Derived>
const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size) const
{
  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
  return innerVectors(start, size);
}


/** \returns the i-th row of the matrix \c *this. For row-major matrices only. */
template<typename Derived>
SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size)
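A migration sketch for the deprecated accessors removed above (replacement names taken from this diff):

// before (deprecated)           after
// A.subrows(start, size);       A.middleRows(start, size);   // row-major A
// A.subcols(start, size);       A.middleCols(start, size);   // column-major A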
@@ -298,16 +298,6 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
 * Implementation of SparseMatrixBase and SparseCwise functions/operators
 ***************************************************************************/

// template<typename Derived>
// template<typename OtherDerived>
// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>,
//                                         Derived, OtherDerived>
// SparseMatrixBase<Derived>::operator-(const SparseMatrixBase<OtherDerived> &other) const
// {
//   return CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
//                        Derived, OtherDerived>(derived(), other.derived());
// }

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
@@ -316,14 +306,6 @@ SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &othe
  return *this = derived() - other.derived();
}

// template<typename Derived>
// template<typename OtherDerived>
// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
// SparseMatrixBase<Derived>::operator+(const SparseMatrixBase<OtherDerived> &other) const
// {
//   return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
// }

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
@@ -332,14 +314,6 @@ SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& othe
  return *this = derived() + other.derived();
}

// template<typename ExpressionType>
// template<typename OtherDerived>
// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
// SparseCwise<ExpressionType>::operator*(const SparseMatrixBase<OtherDerived> &other) const
// {
//   return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
// }

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
@@ -348,28 +322,4 @@ SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) c
  return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
}

// template<typename ExpressionType>
// template<typename OtherDerived>
// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
// SparseCwise<ExpressionType>::operator/(const SparseMatrixBase<OtherDerived> &other) const
// {
//   return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
// }
//
// template<typename ExpressionType>
// template<typename OtherDerived>
// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
// SparseCwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
// {
//   return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
// }

// template<typename ExpressionType>
// template<typename OtherDerived>
// inline ExpressionType& SparseCwise<ExpressionType>::operator*=(const SparseMatrixBase<OtherDerived> &other)
// {
//   return m_matrix.const_cast_derived() = _expression() * other.derived();
// }


#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
@@ -25,19 +25,6 @@
#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
#define EIGEN_SPARSE_CWISE_UNARY_OP_H

// template<typename UnaryOp, typename MatrixType>
// struct internal::traits<SparseCwiseUnaryOp<UnaryOp, MatrixType> > : internal::traits<MatrixType>
// {
//   typedef typename internal::result_of<
//     UnaryOp(typename MatrixType::Scalar)
//   >::type Scalar;
//   typedef typename MatrixType::Nested MatrixTypeNested;
//   typedef typename internal::remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
//   enum {
//     CoeffReadCost = _MatrixTypeNested::CoeffReadCost + internal::functor_traits<UnaryOp>::Cost
//   };
// };

template<typename UnaryOp, typename MatrixType>
class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
  : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
@@ -25,22 +25,28 @@
#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

/** \ingroup Sparse_Module
/** \ingroup SparseCore_Module
  *
  * \class SparseMatrix
  *
  * \brief The main sparse matrix class
  * \brief A versatile sparse matrix representation
  *
  * This class implements a sparse matrix using the very common compressed row/column storage
  * scheme.
  * This class implements a more versatile variant of the common \em compressed row/column storage format.
  * Each column's (resp. row's) non-zeros are stored as pairs of values with their associated row (resp. column) indices.
  * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
  * space in between the non-zeros of two successive columns (resp. rows) such that insertion of a new non-zero
  * can be done with limited memory reallocation and copies.
  *
  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
  * compatible with many libraries.
  *
  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
  *                  is RowMajor. The default is 0 which means column-major.
  * \tparam _Index the type of the indices. Default is \c int.
  *
  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
  */
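A sketch of the workflow this documentation describes (sizes and values are assumed; the API names are the ones shown in this diff):

SparseMatrix<double> A(1000, 1000);
A.reserve(VectorXi::Constant(1000, 6)); // ~6 non-zeros per column: leaves slack space per column
A.insert(i, j) = v;                     // cheap as long as the target column still has free space
A.makeCompressed();                     // pack into the standard compressed (CCS) format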
@@ -72,12 +78,8 @@ class SparseMatrix
{
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    // using Base::operator=;
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
    // FIXME: why are these operators already available ???
    // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=)
    // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=)

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
@@ -93,7 +95,7 @@ class SparseMatrix
    Index m_outerSize;
    Index m_innerSize;
    Index* m_outerIndex;
    Index* m_innerNonZeros;     // optional, if null then the data are compressed
    Index* m_innerNonZeros;     // optional, if null then the data is compressed
    CompressedStorage<Scalar,Index> m_data;

    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
@@ -115,18 +117,32 @@ class SparseMatrix
      return m_innerNonZeros ? m_innerNonZeros[j] : m_outerIndex[j+1]-m_outerIndex[j];
    }

    /** \internal
      * \returns a const pointer to the array of values */
    inline const Scalar* _valuePtr() const { return &m_data.value(0); }
    /** \internal
      * \returns a non-const pointer to the array of values */
    inline Scalar* _valuePtr() { return &m_data.value(0); }

    /** \internal
      * \returns a const pointer to the array of inner indices */
    inline const Index* _innerIndexPtr() const { return &m_data.index(0); }
    /** \internal
      * \returns a non-const pointer to the array of inner indices */
    inline Index* _innerIndexPtr() { return &m_data.index(0); }

    /** \internal
      * \returns a const pointer to the array of the starting positions of the inner vectors */
    inline const Index* _outerIndexPtr() const { return m_outerIndex; }
    /** \internal
      * \returns a non-const pointer to the array of the starting positions of the inner vectors */
    inline Index* _outerIndexPtr() { return m_outerIndex; }

    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }

    /** \returns the value of the matrix at position \a row, \a col.
      * This function returns Scalar(0) if the element is an explicit \em zero */
    inline Scalar coeff(Index row, Index col) const
    {
      const Index outer = IsRowMajor ? row : col;
@@ -135,6 +151,8 @@ class SparseMatrix
      return m_data.atInRange(m_outerIndex[outer], end, inner);
    }

    /** \returns a non-const reference to the value of the matrix at position \a row, \a col.
      * The element \b must be an already stored non-zero element. */
    inline Scalar& coeffRef(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
@@ -180,9 +198,9 @@ class SparseMatrix
    }

#ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSize non zeros.
    /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
      *
      * Precondition: the matrix must be in compressed mode. */
      * This function turns the matrix into non-compressed mode. */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
#else
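A sketch of the per-inner-vector overload documented above (the sizes are assumed; any vector-like type with operator[] should work as SizesType):

VectorXi sizes = VectorXi::Constant(A.cols(), 4); // expect ~4 non-zeros per column...
sizes(0) = 32;                                    // ...except column 0, which expects many more
A.reserve(sizes);                                 // switches A to non-compressed mode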
@@ -207,7 +225,6 @@ class SparseMatrix
      if(compressed())
      {
        std::size_t totalReserveSize = 0;
        // std::cerr << "reserve from compressed format\n";
        // turn the matrix into non-compressed mode
        m_innerNonZeros = new Index[m_outerSize];

@@ -221,17 +238,13 @@ class SparseMatrix
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        // std::cerr << "data.r " << totalReserveSize << "\n";
        m_data.reserve(totalReserveSize);
        // std::cerr << "data.r OK\n";
        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
        {
          ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
          // std::cerr << j << " innerNNZ=" << innerNNZ << "\n";
          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
          {
            // std::cerr << "  " << i << " " << newOuterIndex[j]+i << "\n";
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
@@ -239,17 +252,12 @@ class SparseMatrix
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        // std::cerr << "OK" << "\n";
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);

        // std::cout << Matrix<Index,1,Dynamic>::Map(m_outerIndex, m_outerSize+1) << "\n";
        // std::cout << Matrix<Index,1,Dynamic>::Map(m_innerNonZeros, m_outerSize) << "\n";
      }
      else
      {
        // std::cerr << "reserve from uncompressed format\n";
        Index* newOuterIndex = new Index[m_outerSize+1];
        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
@@ -267,11 +275,9 @@ class SparseMatrix
          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            // std::cout << "offset=" << offset << " m_data.size()=" << m_data.size() << "\n";
            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
            {
              // std::cout << newOuterIndex[j]+i << " <- " << m_outerIndex[j]+i << "\n";
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
@@ -287,7 +293,8 @@ class SparseMatrix

    //--- low level purely coherent filling ---

    /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
    /** \internal
      * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
      * - the nonzero does not already exist
      * - the new coefficient is the last one according to the storage order
      *
@@ -301,7 +308,8 @@ class SparseMatrix
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \sa insertBack, startVec */
    /** \internal
      * \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
@@ -312,7 +320,8 @@ class SparseMatrix
      return m_data.value(p);
    }

    /** \warning use it only if you know what you are doing */
    /** \internal
      * \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
@@ -321,7 +330,8 @@ class SparseMatrix
      return m_data.value(p);
    }

    /** \sa insertBack, insertBackByOuterInner */
    /** \internal
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
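A sketch of the low-level coherent filling protocol these members implement (hedged: this is the internal API being marked \internal above, and nonzeros must arrive in storage order):

SparseMatrix<double> A(rows, cols);     // assumed dimensions
for (int j = 0; j < cols; ++j)          // column-major: outer index = column
{
  A.startVec(j);                        // open inner vector j, strictly in order
  A.insertBack(i, j) = v;               // i strictly increasing within the column
}
A.finalize();                           // seal the outer index array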
@@ -347,154 +357,7 @@ class SparseMatrix
        return insertUncompressed(row,col);
    }

    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
    {
      eigen_assert(compressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index previousOuter = outer;
      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
        {
          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
          --previousOuter;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }

      // here we have to handle the tricky case where the outerIndex array
      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
      // the 2nd inner vector...
      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                    && (size_t(m_outerIndex[outer+1]) == m_data.size());

      size_t startId = m_outerIndex[outer];
      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
      size_t p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
      if (m_data.allocatedSize()<=m_data.size())
      {
        // if there is no preallocated memory, let's reserve a minimum of 32 elements
        if (m_data.size()==0)
        {
          m_data.reserve(32);
        }
        else
        {
          // we need to reallocate the data, to reduce multiple reallocations
          // we use a smart resize algorithm based on the current filling ratio
          // in addition, we use float to avoid integers overflows
          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
          // furthermore we bound the realloc ratio to:
          //   1) reduce multiple minor realloc when the matrix is almost filled
          //   2) avoid to allocate too much memory when the matrix is almost empty
          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
        }
      }
      m_data.resize(m_data.size()+1,reallocRatio);

      if (!isLastVec)
      {
        if (previousOuter==-1)
        {
          // oops wrong guess.
          // let's correct the outer offsets
          for (Index k=0; k<=(outer+1); ++k)
            m_outerIndex[k] = 0;
          Index k=outer+1;
          while(m_outerIndex[k]==0)
            m_outerIndex[k++] = 1;
          while (k<=m_outerSize && m_outerIndex[k]!=0)
            m_outerIndex[k++]++;
          p = 0;
          --k;
          k = m_outerIndex[k]-1;
          while (k>0)
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
        else
        {
          // we are not inserting into the last inner vec
          // update outer indices:
          Index j = outer+2;
          while (j<=m_outerSize && m_outerIndex[j]!=0)
            m_outerIndex[j++]++;
          --j;
          // shift data of last vecs:
          Index k = m_outerIndex[j]-1;
          while (k>=Index(p))
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
      }

      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

    class SingletonVector
    {
        Index m_index;
        Index m_value;
      public:
        typedef Index value_type;
        SingletonVector(Index i, Index v)
          : m_index(i), m_value(v)
        {}

        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
    {
      eigen_assert(!compressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
      if(innerNNZ>=room)
      {
        // this inner vector is full, we need to reallocate the whole buffer :(
        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
      }

      Index startId = m_outerIndex[outer];
      Index p = startId + m_innerNonZeros[outer];
      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_innerNonZeros[outer]++;

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

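A worked instance of the reallocation heuristic in insertCompressed() above (all numbers assumed for illustration):

// Suppose a 10-column matrix stores 100 nonzeros so far, all in column 0,
// and the value buffer is full when inserting into column 1 (outer == 1):
//   nnzEstimate  = 100 * 10 / 2 = 500      // extrapolated final nonzero count
//   reallocRatio = (500 - 100) / 100 = 4   // grow the buffer 4x...
//   reallocRatio = clamp(4, 1.5, 8) = 4    // ...bounded to [1.5, 8]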
    /** Must be called after inserting a set of non zero entries.
@@ -517,16 +380,13 @@ class SparseMatrix
        }
      }

    /** Turns the matrix into the \em compressed format.
      */
    void makeCompressed()
    {
      if(compressed())
        return;

      // std::cout << Matrix<Index,1,Dynamic>::Map(m_outerIndex, m_outerSize+1) << "\n";
      // std::cout << Matrix<Index,1,Dynamic>::Map(m_innerNonZeros, m_outerSize) << "\n";
      // std::cout << Matrix<Index,1,Dynamic>::Map(&m_data.index(0), nonZeros()) << "\n";
      // std::cout << Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), nonZeros()) << "\n";

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
@@ -548,9 +408,6 @@ class SparseMatrix
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
      // std::cout << Matrix<Index,1,Dynamic>::Map(m_outerIndex, m_outerSize+1) << "\n";
      // std::cout << Matrix<Index,1,Dynamic>::Map(&m_data.index(0), nonZeros()) << "\n";
      // std::cout << Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), nonZeros()) << "\n";
    }

    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
@@ -611,9 +468,11 @@ class SparseMatrix
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    /** Low level API
    /** \deprecated
      * \internal
      * Low level API
      * Resize the nonzero vector to \a size
      * \deprecated */
      * */
    void resizeNonZeros(Index size)
    {
      // TODO remove this function
@@ -662,7 +521,6 @@ class SparseMatrix

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      // std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n";
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
@@ -786,66 +644,165 @@ class SparseMatrix

    /** Overloaded for performance */
    Scalar sum() const;

  public:

    /** \deprecated use setZero() and reserve()
      * Initializes the filling process of \c *this.
      * \param reserveSize approximate number of nonzeros
      * Note that the matrix \c *this is zero-ed.
      */
    EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
    {
      setZero();
      m_data.reserve(reserveSize);
    }

    /** \deprecated use insert()
      * Like fill() but with random inner coordinates.
      */
    EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
    {
      return insert(row,col);
    }

    /** \deprecated use insert()
      */
    EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
        Index i = outer;
        while (i>=0 && m_outerIndex[i]==0)
        {
          m_outerIndex[i] = m_data.size();
          --i;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }
      else
      {
        eigen_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
      }
      // std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
      assert(size_t(m_outerIndex[outer+1]) == m_data.size());
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \deprecated use finalize */
    EIGEN_DEPRECATED void endFill() { finalize(); }

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

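A migration sketch for the deprecated filling API above (old call on the left, the replacement named in each \deprecated tag on the right):

// m.startFill(nnz);      ->  m.setZero(); m.reserve(nnz);
// m.fill(i,j) = v;       ->  m.insert(i,j) = v;   // sorted insertion
// m.fillrand(i,j) = v;   ->  m.insert(i,j) = v;   // random inner coordinates
// m.endFill();           ->  m.finalize();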
  protected:
    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
    {
      eigen_assert(compressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index previousOuter = outer;
      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector
        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
        {
          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
          --previousOuter;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }

      // here we have to handle the tricky case where the outerIndex array
      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
      // the 2nd inner vector...
      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                    && (size_t(m_outerIndex[outer+1]) == m_data.size());

      size_t startId = m_outerIndex[outer];
      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
      size_t p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
      if (m_data.allocatedSize()<=m_data.size())
      {
        // if there is no preallocated memory, let's reserve a minimum of 32 elements
        if (m_data.size()==0)
        {
          m_data.reserve(32);
        }
        else
        {
          // we need to reallocate the data, to reduce multiple reallocations
          // we use a smart resize algorithm based on the current filling ratio
          // in addition, we use float to avoid integers overflows
          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
          // furthermore we bound the realloc ratio to:
          //   1) reduce multiple minor realloc when the matrix is almost filled
          //   2) avoid to allocate too much memory when the matrix is almost empty
          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
        }
      }
      m_data.resize(m_data.size()+1,reallocRatio);

      if (!isLastVec)
      {
        if (previousOuter==-1)
        {
          // oops wrong guess.
          // let's correct the outer offsets
          for (Index k=0; k<=(outer+1); ++k)
            m_outerIndex[k] = 0;
          Index k=outer+1;
          while(m_outerIndex[k]==0)
            m_outerIndex[k++] = 1;
          while (k<=m_outerSize && m_outerIndex[k]!=0)
            m_outerIndex[k++]++;
          p = 0;
          --k;
          k = m_outerIndex[k]-1;
          while (k>0)
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
        else
        {
          // we are not inserting into the last inner vec
          // update outer indices:
          Index j = outer+2;
          while (j<=m_outerSize && m_outerIndex[j]!=0)
            m_outerIndex[j++]++;
          --j;
          // shift data of last vecs:
          Index k = m_outerIndex[j]-1;
          while (k>=Index(p))
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
      }

      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

    class SingletonVector
    {
        Index m_index;
        Index m_value;
      public:
        typedef Index value_type;
        SingletonVector(Index i, Index v)
          : m_index(i), m_value(v)
        {}

        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
    {
      eigen_assert(!compressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
      if(innerNNZ>=room)
      {
        // this inner vector is full, we need to reallocate the whole buffer :(
        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
      }

      Index startId = m_outerIndex[outer];
      Index p = startId + m_innerNonZeros[outer];
      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_innerNonZeros[outer]++;

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

  private:
    struct default_prunning_func {
      default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -25,7 +25,7 @@
#ifndef EIGEN_SPARSEMATRIXBASE_H
#define EIGEN_SPARSEMATRIXBASE_H

/** \ingroup Sparse_Module
/** \ingroup SparseCore_Module
  *
  * \class SparseMatrixBase
  *
@@ -57,8 +57,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
      other.derived().evalTo(derived());
      return derived();
    }

    // using Base::operator=;

    enum {

@@ -110,15 +108,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
#endif
    };

    /* \internal the return type of MatrixBase::conjugate() */
    // typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
    //                   const SparseCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Derived>,
    //                   const Derived&
    //                 >::type ConjugateReturnType;
    /* \internal the return type of MatrixBase::real() */
    // typedef SparseCwiseUnaryOp<internal::scalar_real_op<Scalar>, Derived> RealReturnType;
    /* \internal the return type of MatrixBase::imag() */
    // typedef SparseCwiseUnaryOp<internal::scalar_imag_op<Scalar>, Derived> ImagReturnType;
    /** \internal the return type of MatrixBase::adjoint() */
    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
                      CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
@@ -213,7 +202,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    template<typename OtherDerived>
    inline void assignGeneric(const OtherDerived& other)
    {
      // std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
      //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
      eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
                    (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
@@ -246,10 +234,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    template<typename OtherDerived>
    inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      // std::cout << typeid(OtherDerived).name() << "\n";
      // std::cout << Flags << " " << OtherDerived::Flags << "\n";
      const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
      // std::cout << "eval transpose = " << transpose << "\n";
      const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
      if ((!transpose) && other.isRValue())
      {
@@ -324,24 +309,11 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
      return s;
    }

    // const SparseCwiseUnaryOp<internal::scalar_opposite_op<typename internal::traits<Derived>::Scalar>,Derived> operator-() const;

    // template<typename OtherDerived>
    // const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
    // operator+(const SparseMatrixBase<OtherDerived> &other) const;

    // template<typename OtherDerived>
    // const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
    // operator-(const SparseMatrixBase<OtherDerived> &other) const;

    template<typename OtherDerived>
    Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
    template<typename OtherDerived>
    Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);

    // template<typename Lhs,typename Rhs>
    // Derived& operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other);

    Derived& operator*=(const Scalar& other);
    Derived& operator/=(const Scalar& other);

@@ -361,16 +333,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
    cwiseProduct(const MatrixBase<OtherDerived> &other) const;

    // const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
    // operator*(const Scalar& scalar) const;
    // const SparseCwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>, Derived>
    // operator/(const Scalar& scalar) const;

    // inline friend const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
    // operator*(const Scalar& scalar, const SparseMatrixBase& matrix)
    // { return matrix*scalar; }


    // sparse * sparse
    template<typename OtherDerived>
    const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
@@ -410,8 +372,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    // deprecated
    template<typename OtherDerived>
    void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
    // template<typename OtherDerived>
    // void solveTriangularInPlace(SparseMatrixBase<OtherDerived>& other) const;
#endif // EIGEN2_SUPPORT

    template<int Mode>
@@ -424,12 +384,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
    RealScalar squaredNorm() const;
    RealScalar norm() const;
    // const PlainObject normalized() const;
    // void normalize();

    Transpose<Derived> transpose() { return derived(); }
    const Transpose<const Derived> transpose() const { return derived(); }
    // void transposeInPlace();
    const AdjointReturnType adjoint() const { return transpose(); }

    // sub-vector
@@ -446,7 +403,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
    const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;

    SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size);
    const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const;
    SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size);
@@ -454,74 +410,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
    const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;

    // typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
    // const typename BlockReturnType<Derived>::Type
    // block(int startRow, int startCol, int blockRows, int blockCols) const;
    //
    // typename BlockReturnType<Derived>::SubVectorType segment(int start, int size);
    // const typename BlockReturnType<Derived>::SubVectorType segment(int start, int size) const;
    //
    // typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size);
    // const typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size) const;
    //
    // typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size);
    // const typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size) const;
    //
    // template<int BlockRows, int BlockCols>
    // typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol);
    // template<int BlockRows, int BlockCols>
    // const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol) const;

    // template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType start(void);
    // template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType start() const;

    // template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType end();
    // template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType end() const;

    // template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType segment(int start);
    // template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType segment(int start) const;

    // Diagonal<Derived> diagonal();
    // const Diagonal<Derived> diagonal() const;

    // template<unsigned int Mode> Part<Derived, Mode> part();
    // template<unsigned int Mode> const Part<Derived, Mode> part() const;


    // static const ConstantReturnType Constant(int rows, int cols, const Scalar& value);
    // static const ConstantReturnType Constant(int size, const Scalar& value);
    // static const ConstantReturnType Constant(const Scalar& value);

    // template<typename CustomNullaryOp>
    // static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
    // template<typename CustomNullaryOp>
    // static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int size, const CustomNullaryOp& func);
    // template<typename CustomNullaryOp>
    // static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(const CustomNullaryOp& func);

    // static const ConstantReturnType Zero(int rows, int cols);
    // static const ConstantReturnType Zero(int size);
    // static const ConstantReturnType Zero();
    // static const ConstantReturnType Ones(int rows, int cols);
    // static const ConstantReturnType Ones(int size);
    // static const ConstantReturnType Ones();
    // static const IdentityReturnType Identity();
    // static const IdentityReturnType Identity(int rows, int cols);
    // static const BasisReturnType Unit(int size, int i);
    // static const BasisReturnType Unit(int i);
    // static const BasisReturnType UnitX();
    // static const BasisReturnType UnitY();
    // static const BasisReturnType UnitZ();
    // static const BasisReturnType UnitW();

    // const DiagonalMatrix<Derived> asDiagonal() const;

    // Derived& setConstant(const Scalar& value);
    // Derived& setZero();
    // Derived& setOnes();
    // Derived& setRandom();
    // Derived& setIdentity();

    /** \internal use operator= */
    template<typename DenseDerived>
    void evalTo(MatrixBase<DenseDerived>& dst) const
@@ -546,37 +434,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    bool isApprox(const MatrixBase<OtherDerived>& other,
                  RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
    { return toDense().isApprox(other,prec); }
    // bool isMuchSmallerThan(const RealScalar& other,
    //                        RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
    // template<typename OtherDerived>
    // bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
    //                        RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;

    // bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
    // bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
    // bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
    // bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
    // bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;

    // bool isUpper(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
    // bool isLower(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;

    // template<typename OtherDerived>
    // bool isOrthogonal(const MatrixBase<OtherDerived>& other,
    //                   RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
    // bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;

    // template<typename OtherDerived>
    // inline bool operator==(const MatrixBase<OtherDerived>& other) const
    // { return (cwise() == other).all(); }

    // template<typename OtherDerived>
    // inline bool operator!=(const MatrixBase<OtherDerived>& other) const
    // { return (cwise() != other).any(); }


    // template<typename NewType>
    // const SparseCwiseUnaryOp<internal::scalar_cast_op<typename internal::traits<Derived>::Scalar, NewType>, Derived> cast() const;

    /** \returns the matrix or vector obtained by evaluating this expression.
      *
@@ -586,126 +443,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    inline const typename internal::eval<Derived>::type eval() const
    { return typename internal::eval<Derived>::type(derived()); }

    // template<typename OtherDerived>
    // void swap(MatrixBase<OtherDerived> const & other);

    // template<unsigned int Added>
    // const SparseFlagged<Derived, Added, 0> marked() const;
    // const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit> lazy() const;

    /** \returns number of elements to skip to pass from one row (resp. column) to another
      * for a row-major (resp. column-major) matrix.
      * Combined with coeffRef() and the \ref flags flags, it allows a direct access to the data
      * of the underlying matrix.
      */
    // inline int stride(void) const { return derived().stride(); }

    // FIXME
    // ConjugateReturnType conjugate() const;
    // const RealReturnType real() const;
    // const ImagReturnType imag() const;

    // template<typename CustomUnaryOp>
    // const SparseCwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;

    // template<typename CustomBinaryOp, typename OtherDerived>
    // const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
    // binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const;


    Scalar sum() const;
    // Scalar trace() const;

    // typename internal::traits<Derived>::Scalar minCoeff() const;
    // typename internal::traits<Derived>::Scalar maxCoeff() const;

    // typename internal::traits<Derived>::Scalar minCoeff(int* row, int* col = 0) const;
    // typename internal::traits<Derived>::Scalar maxCoeff(int* row, int* col = 0) const;

    // template<typename BinaryOp>
    // typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
    // redux(const BinaryOp& func) const;

    // template<typename Visitor>
    // void visit(Visitor& func) const;


    // const SparseCwise<Derived> cwise() const;
    // SparseCwise<Derived> cwise();

    // inline const WithFormat<Derived> format(const IOFormat& fmt) const;

    /////////// Array module ///////////
    /*
    bool all(void) const;
    bool any(void) const;

    const VectorwiseOp<Derived,Horizontal> rowwise() const;
    const VectorwiseOp<Derived,Vertical> colwise() const;

    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int size);
    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();

    template<typename ThenDerived,typename ElseDerived>
    const Select<Derived,ThenDerived,ElseDerived>
    select(const MatrixBase<ThenDerived>& thenMatrix,
           const MatrixBase<ElseDerived>& elseMatrix) const;

    template<typename ThenDerived>
    inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
    select(const MatrixBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;

    template<typename ElseDerived>
    inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
    select(typename ElseDerived::Scalar thenScalar, const MatrixBase<ElseDerived>& elseMatrix) const;

    template<int p> RealScalar lpNorm() const;
    */


    // template<typename OtherDerived>
    // Scalar dot(const MatrixBase<OtherDerived>& other) const
    // {
    //   EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
    //   EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
    //   EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    //     YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
    //
    //   eigen_assert(derived().size() == other.size());
    //   // short version, but the assembly looks more complicated because
    //   // of the CwiseBinaryOp iterator complexity
    //   // return res = (derived().cwise() * other.derived().conjugate()).sum();
    //
    //   // optimized, generic version
    //   typename Derived::InnerIterator i(derived(),0);
    //   typename OtherDerived::InnerIterator j(other.derived(),0);
    //   Scalar res = 0;
    //   while (i && j)
    //   {
    //     if (i.index()==j.index())
    //     {
    //       // std::cerr << i.value() << " * " << j.value() << "\n";
    //       res += i.value() * internal::conj(j.value());
    //       ++i; ++j;
    //     }
    //     else if (i.index()<j.index())
    //       ++i;
    //     else
    //       ++j;
    //   }
    //   return res;
    // }
    //
    // Scalar sum() const
    // {
    //   Scalar res = 0;
    //   for (typename Derived::InnerIterator iter(*this,0); iter; ++iter)
    //   {
    //     res += iter.value();
    //   }
    //   return res;
    // }

  protected:

@@ -25,8 +25,8 @@
#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

/** \class SparseSelfAdjointView
  *
/** \ingroup SparseCore_Module
  * \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
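A usage sketch for this pseudo expression (hedged: assumes the selfadjointView() accessor of SparseMatrixBase and that only one triangle of A is stored):

// Only the Lower triangle of A is stored; treat it as a full selfadjoint matrix.
VectorXd y = A.selfadjointView<Lower>() * x;   // x, y assumed dense vectors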
@@ -25,7 +25,8 @@
#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H

/** \class SparseVector
/** \ingroup SparseCore_Module
  * \class SparseVector
  *
  * \brief a sparse vector class
  *
@@ -67,7 +68,6 @@ class SparseVector
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
    // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =)

  protected:
  public:
@@ -262,56 +262,6 @@ class SparseVector
    }
#endif

    // const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
    // if (needToTranspose)
    // {
    //   // two passes algorithm:
    //   //  1 - compute the number of coeffs per dest inner vector
    //   //  2 - do the actual copy/eval
    //   // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    //   typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
    //   OtherCopy otherCopy(other.derived());
    //   typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    //
    //   resize(other.rows(), other.cols());
    //   Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
    //   // pass 1
    //   // FIXME the above copy could be merged with that pass
    //   for (int j=0; j<otherCopy.outerSize(); ++j)
    //     for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
    //       ++m_outerIndex[it.index()];
    //
    //   // prefix sum
    //   int count = 0;
    //   VectorXi positions(outerSize());
    //   for (int j=0; j<outerSize(); ++j)
    //   {
    //     int tmp = m_outerIndex[j];
    //     m_outerIndex[j] = count;
    //     positions[j] = count;
    //     count += tmp;
    //   }
    //   m_outerIndex[outerSize()] = count;
    //   // alloc
    //   m_data.resize(count);
    //   // pass 2
    //   for (int j=0; j<otherCopy.outerSize(); ++j)
    //     for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
    //     {
    //       int pos = positions[it.index()]++;
    //       m_data.index(pos) = j;
    //       m_data.value(pos) = it.value();
    //     }
    //
    //   return *this;
    // }
    // else
    // {
    //   // there is no special optimization
    //   return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
    // }
    // }

    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
    {
      for (Index i=0; i<m.nonZeros(); ++i)
@@ -320,28 +270,6 @@ class SparseVector
      return s;
    }

    // this specialized version does not seem to be faster
    // Scalar dot(const SparseVector& other) const
    // {
    //   int i=0, j=0;
    //   Scalar res = 0;
    //   asm("#begindot");
    //   while (i<nonZeros() && j<other.nonZeros())
    //   {
    //     if (m_data.index(i)==other.m_data.index(j))
    //     {
    //       res += m_data.value(i) * internal::conj(other.m_data.value(j));
    //       ++i; ++j;
    //     }
    //     else if (m_data.index(i)<other.m_data.index(j))
    //       ++i;
    //     else
    //       ++j;
    //   }
    //   asm("#enddot");
    //   return res;
    // }

    /** Destructor */
    inline ~SparseVector() {}

@@ -296,7 +296,10 @@ MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)

} // end namespace internal


/** \ingroup SuperLUSupport_Module
  * \class SuperLUBase
  * \brief The base class for the direct and incomplete LU factorization of SuperLU
  */
template<typename _MatrixType, typename Derived>
class SuperLUBase
{
@@ -468,6 +471,18 @@ class SuperLUBase
};


/** \ingroup SuperLUSupport_Module
  * \class SuperLU
  * \brief A sparse direct LU factorization and solver based on the SuperLU library
  *
  * This class allows solving A.X = B sparse linear problems via a direct LU factorization
  * using the SuperLU library. The sparse matrix A must be square and invertible. The vectors or matrices
  * X and B can be either dense or sparse.
  *
  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
  *
  * \sa \ref TutorialSparseDirectSolvers
  */
template<typename _MatrixType>
class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
{
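A minimal solve sketch for this class (hedged: follows the generic compute()/solve() solver interface used elsewhere in this diff; A and b are assumed):

SuperLU<SparseMatrix<double> > solver;
solver.compute(A);              // factorize the square, invertible matrix A
VectorXd x = solver.solve(b);   // solve A.x = b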
@@ -785,6 +800,19 @@ typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const
}

#ifdef EIGEN_SUPERLU_HAS_ILU

/** \ingroup SuperLUSupport_Module
  * \class SuperILU
  * \brief A sparse \b incomplete LU factorization and solver based on the SuperLU library
  *
  * This class allows computing an approximate solution of A.X = B sparse linear problems via an incomplete LU factorization
  * using the SuperLU library. This class is intended to be used as a preconditioner for iterative linear solvers.
  *
  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
  *
  * \sa \ref TutorialSparseDirectSolvers, class ConjugateGradient, class BiCGSTAB
  */

template<typename _MatrixType>
class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
{
@@ -120,7 +120,8 @@ inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *N
  return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
}

/** \brief A sparse LU factorization and solver based on UmfPack
/** \ingroup UmfPackSupport_Module
  * \brief A sparse LU factorization and solver based on UmfPack
  *
  * This class allows solving A.X = B sparse linear problems via an LU factorization
  * using the UmfPack library. The sparse matrix A must be column-major, square and full rank.
@@ -128,6 +129,7 @@ inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *N
  *
  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
  *
  * \sa \ref TutorialSparseDirectSolvers
  */
template<typename _MatrixType>
class UmfPackLU
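A minimal solve sketch for this class (hedged: assumes the same generic compute()/solve() interface as the other direct solvers in this diff; A must be column-major, square and full rank):

UmfPackLU<SparseMatrix<double> > lu;
lu.compute(A);                  // symbolic + numeric UMFPACK factorization
VectorXd x = lu.solve(b);       // solve A.x = b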