Mirror of https://gitlab.com/libeigen/eigen.git

commit d72d538747
merge my Dynamic -> -1 change
Eigen/Cholesky
@ -5,15 +5,6 @@

#include "src/Core/util/DisableMSVCWarnings.h"

// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
#ifndef EIGEN_HIDE_HEAVY_CODE
#define EIGEN_HIDE_HEAVY_CODE
#endif
#elif defined EIGEN_HIDE_HEAVY_CODE
#undef EIGEN_HIDE_HEAVY_CODE
#endif

namespace Eigen {

/** \defgroup Cholesky_Module Cholesky module
@ -37,29 +28,6 @@ namespace Eigen {

} // namespace Eigen

#define EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
PREFIX template class LLT<MATRIXTYPE>; \
PREFIX template class LDLT<MATRIXTYPE>

#define EIGEN_CHOLESKY_MODULE_INSTANTIATE(PREFIX) \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \
EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX)

#ifdef EIGEN_EXTERN_INSTANTIATIONS

namespace Eigen {
EIGEN_CHOLESKY_MODULE_INSTANTIATE(extern);
} // namespace Eigen
#endif

#include "src/Core/util/EnableMSVCWarnings.h"

#endif // EIGEN_CHOLESKY_MODULE_H
Eigen/Core
@ -260,6 +260,7 @@ using std::size_t;
#include "src/Core/Diagonal.h"
#include "src/Core/DiagonalProduct.h"
#include "src/Core/PermutationMatrix.h"
#include "src/Core/Transpositions.h"
#include "src/Core/Redux.h"
#include "src/Core/Visitor.h"
#include "src/Core/Fuzzy.h"
Eigen/Eigenvalues
@ -10,15 +10,6 @@
#include "Householder"
#include "LU"

// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
#ifndef EIGEN_HIDE_HEAVY_CODE
#define EIGEN_HIDE_HEAVY_CODE
#endif
#elif defined EIGEN_HIDE_HEAVY_CODE
#undef EIGEN_HIDE_HEAVY_CODE
#endif

namespace Eigen {

/** \defgroup Eigenvalues_Module Eigenvalues module
@ -44,32 +35,6 @@ namespace Eigen {
#include "src/Eigenvalues/ComplexEigenSolver.h"
#include "src/Eigenvalues/MatrixBaseEigenvalues.h"

// declare all classes for a given matrix type
#define EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
PREFIX template class Tridiagonalization<MATRIXTYPE>; \
PREFIX template class HessenbergDecomposition<MATRIXTYPE>; \
PREFIX template class SelfAdjointEigenSolver<MATRIXTYPE>

// removed because it does not support complex yet
// PREFIX template class EigenSolver<MATRIXTYPE>

// declare all class for all types
#define EIGEN_EIGENVALUES_MODULE_INSTANTIATE(PREFIX) \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \
EIGEN_EIGENVALUES_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX)

#ifdef EIGEN_EXTERN_INSTANTIATIONS
EIGEN_EIGENVALUES_MODULE_INSTANTIATE(extern);
#endif // EIGEN_EXTERN_INSTANTIATIONS

} // namespace Eigen

#include "src/Core/util/EnableMSVCWarnings.h"
Eigen/QR
@ -9,15 +9,6 @@
#include "Jacobi"
#include "Householder"

// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
#ifndef EIGEN_HIDE_HEAVY_CODE
#define EIGEN_HIDE_HEAVY_CODE
#endif
#elif defined EIGEN_HIDE_HEAVY_CODE
#undef EIGEN_HIDE_HEAVY_CODE
#endif

namespace Eigen {

/** \defgroup QR_Module QR module
@ -38,26 +29,6 @@ namespace Eigen {
#include "src/QR/FullPivHouseholderQR.h"
#include "src/QR/ColPivHouseholderQR.h"

// declare all classes for a given matrix type
#define EIGEN_QR_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
PREFIX template class HouseholderQR<MATRIXTYPE>; \

// declare all class for all types
#define EIGEN_QR_MODULE_INSTANTIATE(PREFIX) \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \
EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX)

#ifdef EIGEN_EXTERN_INSTANTIATIONS
EIGEN_QR_MODULE_INSTANTIATE(extern);
#endif // EIGEN_EXTERN_INSTANTIATIONS

} // namespace Eigen

@ -61,7 +61,7 @@ template<typename Derived> class ArrayBase
|
||||
typename NumTraits<typename ei_traits<Derived>::Scalar>::Real>::operator*;
|
||||
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
@ -84,6 +84,10 @@ template<typename MatrixType, int Direction> class Reverse
|
||||
EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
|
||||
using Base::IsRowMajor;
|
||||
|
||||
// next line is necessary because otherwise const version of operator()
|
||||
// is hidden by non-const version defined in this file
|
||||
using Base::operator();
|
||||
|
||||
protected:
|
||||
enum {
|
||||
PacketSize = ei_packet_traits<Scalar>::size,
|
||||
@ -106,6 +110,12 @@ template<typename MatrixType, int Direction> class Reverse
|
||||
inline Index rows() const { return m_matrix.rows(); }
|
||||
inline Index cols() const { return m_matrix.cols(); }
|
||||
|
||||
inline Scalar& operator()(Index row, Index col)
|
||||
{
|
||||
ei_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
|
||||
return coeffRef(row, col);
|
||||
}
|
||||
|
||||
inline Scalar& coeffRef(Index row, Index col)
|
||||
{
|
||||
return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row,
|
||||
@ -128,6 +138,12 @@ template<typename MatrixType, int Direction> class Reverse
|
||||
return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1);
|
||||
}
|
||||
|
||||
inline Scalar& operator()(Index index)
|
||||
{
|
||||
ei_assert(index >= 0 && index < m_matrix.size());
|
||||
return coeffRef(index);
|
||||
}
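Usage sketch for the non-const operator() added to Reverse (illustrative only, not part of the patch; MatrixXd and the main() scaffolding are just for the example):

#include <Eigen/Core>
using namespace Eigen;

int main()
{
  MatrixXd m(2,2);
  m << 1, 2,
       3, 4;
  // The non-const operator() forwards to coeffRef, so assigning through the
  // reversed view writes into the mirrored coefficient of m.
  m.reverse()(0,0) = 10.0;   // modifies m(1,1)
  return 0;
}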
|
||||
|
||||
template<int LoadMode>
|
||||
inline const PacketScalar packet(Index row, Index col) const
|
||||
{
|
||||
|
@ -92,7 +92,7 @@ class PartialReduxExpr : ei_no_assignment_operator,
|
||||
Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
|
||||
Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
|
||||
|
||||
const Scalar coeff(Index i, Index j) const
|
||||
EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const
|
||||
{
|
||||
if (Direction==Vertical)
|
||||
return m_functor(m_matrix.col(j));
|
||||
@ -113,15 +113,15 @@ class PartialReduxExpr : ei_no_assignment_operator,
|
||||
const MemberOp m_functor;
|
||||
};
|
||||
|
||||
#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
|
||||
template <typename ResultType> \
|
||||
struct ei_member_##MEMBER { \
|
||||
EIGEN_EMPTY_STRUCT_CTOR(ei_member_##MEMBER) \
|
||||
typedef ResultType result_type; \
|
||||
template<typename Scalar, int Size> struct Cost \
|
||||
{ enum { value = COST }; }; \
|
||||
template<typename XprType> \
|
||||
inline ResultType operator()(const XprType& mat) const \
|
||||
#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
|
||||
template <typename ResultType> \
|
||||
struct ei_member_##MEMBER { \
|
||||
EIGEN_EMPTY_STRUCT_CTOR(ei_member_##MEMBER) \
|
||||
typedef ResultType result_type; \
|
||||
template<typename Scalar, int Size> struct Cost \
|
||||
{ enum { value = COST }; }; \
|
||||
template<typename XprType> \
|
||||
EIGEN_STRONG_INLINE ResultType operator()(const XprType& mat) const \
|
||||
{ return mat.MEMBER(); } \
|
||||
}
|
||||
|
||||
@ -178,14 +178,16 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
|
||||
public:
|
||||
|
||||
typedef typename ExpressionType::Scalar Scalar;
|
||||
typedef typename ExpressionType::RealScalar RealScalar;
|
||||
typedef typename ExpressionType::Index Index;
|
||||
typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
|
||||
ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
|
||||
|
||||
template<template<typename _Scalar> class Functor> struct ReturnType
|
||||
template<template<typename _Scalar> class Functor,
|
||||
typename Scalar=typename ei_traits<ExpressionType>::Scalar> struct ReturnType
|
||||
{
|
||||
typedef PartialReduxExpr<ExpressionType,
|
||||
Functor<typename ei_traits<ExpressionType>::Scalar>,
|
||||
Functor<Scalar>,
|
||||
Direction
|
||||
> Type;
|
||||
};
|
||||
@ -285,7 +287,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
|
||||
* Output: \verbinclude PartialRedux_squaredNorm.out
|
||||
*
|
||||
* \sa DenseBase::squaredNorm() */
|
||||
const typename ReturnType<ei_member_squaredNorm>::Type squaredNorm() const
|
||||
const typename ReturnType<ei_member_squaredNorm,RealScalar>::Type squaredNorm() const
|
||||
{ return _expression(); }
|
||||
|
||||
/** \returns a row (or column) vector expression of the norm
|
||||
@ -295,7 +297,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
|
||||
* Output: \verbinclude PartialRedux_norm.out
|
||||
*
|
||||
* \sa DenseBase::norm() */
|
||||
const typename ReturnType<ei_member_norm>::Type norm() const
|
||||
const typename ReturnType<ei_member_norm,RealScalar>::Type norm() const
|
||||
{ return _expression(); }
|
||||
|
||||
|
||||
@ -304,7 +306,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
|
||||
* blue's algorithm.
|
||||
*
|
||||
* \sa DenseBase::blueNorm() */
|
||||
const typename ReturnType<ei_member_blueNorm>::Type blueNorm() const
|
||||
const typename ReturnType<ei_member_blueNorm,RealScalar>::Type blueNorm() const
|
||||
{ return _expression(); }
|
||||
|
||||
|
||||
@ -313,7 +315,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
|
||||
* underflow and overflow.
|
||||
*
|
||||
* \sa DenseBase::stableNorm() */
|
||||
const typename ReturnType<ei_member_stableNorm>::Type stableNorm() const
|
||||
const typename ReturnType<ei_member_stableNorm,RealScalar>::Type stableNorm() const
|
||||
{ return _expression(); }
|
||||
|
||||
|
||||
@ -322,7 +324,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
|
||||
* underflow and overflow using a concatenation of hypot() calls.
|
||||
*
|
||||
* \sa DenseBase::hypotNorm() */
|
||||
const typename ReturnType<ei_member_hypotNorm>::Type hypotNorm() const
|
||||
const typename ReturnType<ei_member_hypotNorm,RealScalar>::Type hypotNorm() const
|
||||
{ return _expression(); }
|
||||
|
||||
/** \returns a row (or column) vector expression of the sum
|
||||
@ -380,8 +382,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
|
||||
/** \returns a matrix expression
|
||||
* where each column (or row) is reversed.
|
||||
*
|
||||
* Example: \include PartialRedux_reverse.cpp
|
||||
* Output: \verbinclude PartialRedux_reverse.out
|
||||
* Example: \include Vectorwise_reverse.cpp
|
||||
* Output: \verbinclude Vectorwise_reverse.out
|
||||
*
|
||||
* \sa DenseBase::reverse() */
|
||||
const Reverse<ExpressionType, Direction> reverse() const
|
||||
|
@ -1,7 +1,7 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
|
||||
// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
|
||||
// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>
|
||||
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
@ -27,11 +27,13 @@
|
||||
#ifndef EIGEN_LDLT_H
|
||||
#define EIGEN_LDLT_H
|
||||
|
||||
template<typename MatrixType, int UpLo> struct LDLT_Traits;
|
||||
|
||||
/** \ingroup cholesky_Module
|
||||
*
|
||||
* \class LDLT
|
||||
*
|
||||
* \brief Robust Cholesky decomposition of a matrix
|
||||
* \brief Robust Cholesky decomposition of a matrix with pivoting
|
||||
*
|
||||
* \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition
|
||||
*
|
||||
@ -43,7 +45,7 @@
|
||||
* zeros in the bottom right rank(A) - n submatrix. Avoiding the square root
|
||||
* on D also stabilizes the computation.
|
||||
*
|
||||
* Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
|
||||
* Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
|
||||
* decomposition to determine whether a system of equations has a solution.
|
||||
*
|
||||
* \sa MatrixBase::ldlt(), class LLT
|
||||
@ -52,7 +54,7 @@
|
||||
* Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
|
||||
* the strict lower part does not have to store correct values.
|
||||
*/
|
||||
template<typename _MatrixType> class LDLT
|
||||
template<typename _MatrixType, int _UpLo> class LDLT
|
||||
{
|
||||
public:
|
||||
typedef _MatrixType MatrixType;
|
||||
@ -61,20 +63,25 @@ template<typename _MatrixType> class LDLT
|
||||
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
||||
Options = MatrixType::Options,
|
||||
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
|
||||
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
|
||||
UpLo = _UpLo
|
||||
};
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
|
||||
typedef typename MatrixType::Index Index;
|
||||
typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
|
||||
typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
|
||||
|
||||
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
|
||||
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
|
||||
|
||||
typedef LDLT_Traits<MatrixType,UpLo> Traits;
|
||||
|
||||
/** \brief Default Constructor.
|
||||
*
|
||||
* The default constructor is useful in cases in which the user intends to
|
||||
* perform decompositions via LDLT::compute(const MatrixType&).
|
||||
*/
|
||||
LDLT() : m_matrix(), m_p(), m_transpositions(), m_isInitialized(false) {}
|
||||
LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
|
||||
|
||||
/** \brief Default Constructor with memory preallocation
|
||||
*
|
||||
@ -82,15 +89,15 @@ template<typename _MatrixType> class LDLT
|
||||
* according to the specified problem \a size.
|
||||
* \sa LDLT()
|
||||
*/
|
||||
LDLT(Index size) : m_matrix(size, size),
|
||||
m_p(size),
|
||||
m_transpositions(size),
|
||||
m_temporary(size),
|
||||
m_isInitialized(false) {}
|
||||
LDLT(Index size)
|
||||
: m_matrix(size, size),
|
||||
m_transpositions(size),
|
||||
m_temporary(size),
|
||||
m_isInitialized(false)
|
||||
{}
|
||||
|
||||
LDLT(const MatrixType& matrix)
|
||||
: m_matrix(matrix.rows(), matrix.cols()),
|
||||
m_p(matrix.rows()),
|
||||
m_transpositions(matrix.rows()),
|
||||
m_temporary(matrix.rows()),
|
||||
m_isInitialized(false)
|
||||
@ -98,21 +105,26 @@ template<typename _MatrixType> class LDLT
|
||||
compute(matrix);
|
||||
}
|
||||
|
||||
/** \returns the lower triangular matrix L */
|
||||
inline TriangularView<MatrixType, UnitLower> matrixL(void) const
|
||||
/** \returns a view of the upper triangular matrix U */
|
||||
inline typename Traits::MatrixU matrixU() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_matrix;
|
||||
return Traits::getU(m_matrix);
|
||||
}
|
||||
|
||||
/** \returns a vector of integers, whose size is the number of rows of the matrix being decomposed,
|
||||
* representing the P permutation i.e. the permutation of the rows. For its precise meaning,
|
||||
* see the examples given in the documentation of class FullPivLU.
|
||||
*/
|
||||
inline const IntColVectorType& permutationP() const
|
||||
/** \returns a view of the lower triangular matrix L */
|
||||
inline typename Traits::MatrixL matrixL() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_p;
|
||||
return Traits::getL(m_matrix);
|
||||
}
|
||||
|
||||
/** \returns the permutation matrix P as a transposition sequence.
|
||||
*/
|
||||
inline const TranspositionType& transpositionsP() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
return m_transpositions;
|
||||
}
|
||||
|
||||
/** \returns the coefficients of the diagonal matrix D */
|
||||
@ -157,7 +169,7 @@ template<typename _MatrixType> class LDLT
|
||||
|
||||
LDLT& compute(const MatrixType& matrix);
|
||||
|
||||
/** \returns the LDLT decomposition matrix
|
||||
/** \returns the internal LDLT decomposition matrix
|
||||
*
|
||||
* TODO: document the storage layout
|
||||
*/
|
||||
@ -173,6 +185,7 @@ template<typename _MatrixType> class LDLT
|
||||
inline Index cols() const { return m_matrix.cols(); }
|
||||
|
||||
protected:
|
||||
|
||||
/** \internal
|
||||
* Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
|
||||
* The strict upper part is used during the decomposition, the strict lower
|
||||
@ -180,118 +193,173 @@ template<typename _MatrixType> class LDLT
|
||||
* is not stored), and the diagonal entries correspond to D.
|
||||
*/
|
||||
MatrixType m_matrix;
|
||||
IntColVectorType m_p;
|
||||
IntColVectorType m_transpositions; // FIXME do we really need to store permanently the transpositions?
|
||||
TranspositionType m_transpositions;
|
||||
TmpMatrixType m_temporary;
|
||||
Index m_sign;
|
||||
int m_sign;
|
||||
bool m_isInitialized;
|
||||
};
|
||||
|
||||
template<int UpLo> struct ei_ldlt_inplace;
|
||||
|
||||
template<> struct ei_ldlt_inplace<Lower>
|
||||
{
|
||||
template<typename MatrixType, typename TranspositionType, typename Workspace>
|
||||
static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
|
||||
{
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
typedef typename MatrixType::Index Index;
|
||||
ei_assert(mat.rows()==mat.cols());
|
||||
const Index size = mat.rows();
|
||||
|
||||
if (size <= 1)
|
||||
{
|
||||
transpositions.setIdentity();
|
||||
if(sign)
|
||||
*sign = ei_real(mat.coeff(0,0))>0 ? 1:-1;
|
||||
return true;
|
||||
}
|
||||
|
||||
RealScalar cutoff = 0, biggest_in_corner;
|
||||
|
||||
for (Index k = 0; k < size; ++k)
|
||||
{
|
||||
// Find largest diagonal element
|
||||
Index index_of_biggest_in_corner;
|
||||
biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
|
||||
index_of_biggest_in_corner += k;
|
||||
|
||||
if(k == 0)
|
||||
{
|
||||
// The biggest overall is the point of reference to which further diagonals
|
||||
// are compared; if any diagonal is negligible compared
|
||||
// to the largest overall, the algorithm bails.
|
||||
cutoff = ei_abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
|
||||
|
||||
if(sign)
|
||||
*sign = ei_real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
|
||||
}
|
||||
|
||||
// Finish early if the matrix is not full rank.
|
||||
if(biggest_in_corner < cutoff)
|
||||
{
|
||||
for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
|
||||
break;
|
||||
}
|
||||
|
||||
transpositions.coeffRef(k) = index_of_biggest_in_corner;
|
||||
if(k != index_of_biggest_in_corner)
|
||||
{
|
||||
// apply the transposition while taking care to consider only
|
||||
// the lower triangular part
|
||||
Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element
|
||||
mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
|
||||
mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
|
||||
std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));
|
||||
for(int i=k+1;i<index_of_biggest_in_corner;++i)
|
||||
{
|
||||
Scalar tmp = mat.coeffRef(i,k);
|
||||
mat.coeffRef(i,k) = ei_conj(mat.coeffRef(index_of_biggest_in_corner,i));
|
||||
mat.coeffRef(index_of_biggest_in_corner,i) = ei_conj(tmp);
|
||||
}
|
||||
if(NumTraits<Scalar>::IsComplex)
|
||||
mat.coeffRef(index_of_biggest_in_corner,k) = ei_conj(mat.coeff(index_of_biggest_in_corner,k));
|
||||
}
|
||||
|
||||
// partition the matrix:
|
||||
// A00 | - | -
|
||||
// lu = A10 | A11 | -
|
||||
// A20 | A21 | A22
|
||||
Index rs = size - k - 1;
|
||||
Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
|
||||
Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
|
||||
Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
|
||||
|
||||
if(k>0)
|
||||
{
|
||||
temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
|
||||
mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
|
||||
if(rs>0)
|
||||
A21.noalias() -= A20 * temp.head(k);
|
||||
}
|
||||
if((rs>0) && (ei_abs(mat.coeffRef(k,k)) > cutoff))
|
||||
A21 /= mat.coeffRef(k,k);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct ei_ldlt_inplace<Upper>
|
||||
{
|
||||
template<typename MatrixType, typename TranspositionType, typename Workspace>
|
||||
static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
|
||||
{
|
||||
Transpose<MatrixType> matt(mat);
|
||||
return ei_ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower>
|
||||
{
|
||||
typedef TriangularView<MatrixType, UnitLower> MatrixL;
|
||||
typedef TriangularView<typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;
|
||||
inline static MatrixL getL(const MatrixType& m) { return m; }
|
||||
inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); }
|
||||
};
|
||||
|
||||
template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
|
||||
{
|
||||
typedef TriangularView<typename MatrixType::AdjointReturnType, UnitLower> MatrixL;
|
||||
typedef TriangularView<MatrixType, UnitUpper> MatrixU;
|
||||
inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); }
|
||||
inline static MatrixU getU(const MatrixType& m) { return m; }
|
||||
};
|
||||
|
||||
/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
|
||||
*/
|
||||
template<typename MatrixType>
|
||||
LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
|
||||
template<typename MatrixType, int _UpLo>
|
||||
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
|
||||
{
|
||||
ei_assert(a.rows()==a.cols());
|
||||
const Index size = a.rows();
|
||||
|
||||
m_matrix = a;
|
||||
|
||||
m_p.resize(size);
|
||||
m_transpositions.resize(size);
|
||||
m_isInitialized = false;
|
||||
|
||||
if (size <= 1) {
|
||||
m_p.setZero();
|
||||
m_transpositions.setZero();
|
||||
m_sign = ei_real(a.coeff(0,0))>0 ? 1:-1;
|
||||
m_isInitialized = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
RealScalar cutoff = 0, biggest_in_corner;
|
||||
|
||||
// By using a temporary, packet-aligned products are guaranteed. In the LLT
|
||||
// case this is unnecessary because the diagonal is included and will always
|
||||
// have optimal alignment.
|
||||
m_temporary.resize(size);
|
||||
|
||||
for (Index j = 0; j < size; ++j)
|
||||
{
|
||||
// Find largest diagonal element
|
||||
Index index_of_biggest_in_corner;
|
||||
biggest_in_corner = m_matrix.diagonal().tail(size-j).cwiseAbs()
|
||||
.maxCoeff(&index_of_biggest_in_corner);
|
||||
index_of_biggest_in_corner += j;
|
||||
|
||||
if(j == 0)
|
||||
{
|
||||
// The biggest overall is the point of reference to which further diagonals
|
||||
// are compared; if any diagonal is negligible compared
|
||||
// to the largest overall, the algorithm bails.
|
||||
cutoff = ei_abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
|
||||
|
||||
m_sign = ei_real(m_matrix.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
|
||||
}
|
||||
|
||||
// Finish early if the matrix is not full rank.
|
||||
if(biggest_in_corner < cutoff)
|
||||
{
|
||||
for(Index i = j; i < size; i++) m_transpositions.coeffRef(i) = i;
|
||||
break;
|
||||
}
|
||||
|
||||
m_transpositions.coeffRef(j) = index_of_biggest_in_corner;
|
||||
if(j != index_of_biggest_in_corner)
|
||||
{
|
||||
m_matrix.row(j).swap(m_matrix.row(index_of_biggest_in_corner));
|
||||
m_matrix.col(j).swap(m_matrix.col(index_of_biggest_in_corner));
|
||||
}
|
||||
|
||||
if (j == 0) {
|
||||
m_matrix.row(0) = m_matrix.row(0).conjugate();
|
||||
m_matrix.col(0).tail(size-1) = m_matrix.row(0).tail(size-1) / m_matrix.coeff(0,0);
|
||||
continue;
|
||||
}
|
||||
|
||||
RealScalar Djj = ei_real(m_matrix.coeff(j,j) - m_matrix.row(j).head(j).dot(m_matrix.col(j).head(j)));
|
||||
m_matrix.coeffRef(j,j) = Djj;
|
||||
|
||||
Index endSize = size - j - 1;
|
||||
if (endSize > 0) {
|
||||
m_temporary.tail(endSize).noalias() = m_matrix.block(j+1,0, endSize, j)
|
||||
* m_matrix.col(j).head(j).conjugate();
|
||||
|
||||
m_matrix.row(j).tail(endSize) = m_matrix.row(j).tail(endSize).conjugate()
|
||||
- m_temporary.tail(endSize).transpose();
|
||||
|
||||
if(ei_abs(Djj) > cutoff)
|
||||
{
|
||||
m_matrix.col(j).tail(endSize) = m_matrix.row(j).tail(endSize) / Djj;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reverse applied swaps to get P matrix.
|
||||
for(Index k = 0; k < size; ++k) m_p.coeffRef(k) = k;
|
||||
for(Index k = size-1; k >= 0; --k) {
|
||||
std::swap(m_p.coeffRef(k), m_p.coeffRef(m_transpositions.coeff(k)));
|
||||
}
|
||||
ei_ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
|
||||
|
||||
m_isInitialized = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename _MatrixType, typename Rhs>
|
||||
struct ei_solve_retval<LDLT<_MatrixType>, Rhs>
|
||||
: ei_solve_retval_base<LDLT<_MatrixType>, Rhs>
|
||||
template<typename _MatrixType, int _UpLo, typename Rhs>
|
||||
struct ei_solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
|
||||
: ei_solve_retval_base<LDLT<_MatrixType,_UpLo>, Rhs>
|
||||
{
|
||||
EIGEN_MAKE_SOLVE_HELPERS(LDLT<_MatrixType>,Rhs)
|
||||
typedef LDLT<_MatrixType,_UpLo> LDLTType;
|
||||
EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs)
|
||||
|
||||
template<typename Dest> void evalTo(Dest& dst) const
|
||||
{
|
||||
dst = rhs();
|
||||
dec().solveInPlace(dst);
|
||||
ei_assert(rhs().rows() == dec().matrixLDLT().rows());
|
||||
// dst = P b
|
||||
dst = dec().transpositionsP() * rhs();
|
||||
|
||||
// dst = L^-1 (P b)
|
||||
dec().matrixL().solveInPlace(dst);
|
||||
|
||||
// dst = D^-1 (L^-1 P b)
|
||||
dst = dec().vectorD().asDiagonal().inverse() * dst;
|
||||
|
||||
// dst = L^-T (D^-1 L^-1 P b)
|
||||
dec().matrixU().solveInPlace(dst);
|
||||
|
||||
// dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
|
||||
dst = dec().transpositionsP().transpose() * dst;
|
||||
}
|
||||
};
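Usage sketch of the solve path implemented by evalTo() above (illustrative only, not part of the patch; a symmetric positive semidefinite A is assumed):

#include <Eigen/Core>
#include <Eigen/Cholesky>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(5,5);
  A = A * A.transpose();              // make A symmetric positive semidefinite
  VectorXd b = VectorXd::Random(5);
  // ldlt() builds the pivoting decomposition; solve() runs the
  // P / L / D / L^* / P^T sequence shown in evalTo().
  VectorXd x = A.ldlt().solve(b);
  return 0;
}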
|
||||
|
||||
@ -306,29 +374,15 @@ struct ei_solve_retval<LDLT<_MatrixType>, Rhs>
|
||||
*
|
||||
* \sa LDLT::solve(), MatrixBase::ldlt()
|
||||
*/
|
||||
template<typename MatrixType>
|
||||
template<typename MatrixType,int _UpLo>
|
||||
template<typename Derived>
|
||||
bool LDLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
|
||||
bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
|
||||
{
|
||||
ei_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
const Index size = m_matrix.rows();
|
||||
ei_assert(size == bAndX.rows());
|
||||
|
||||
// z = P b
|
||||
for(Index i = 0; i < size; ++i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
|
||||
|
||||
// y = L^-1 z
|
||||
//matrixL().solveInPlace(bAndX);
|
||||
m_matrix.template triangularView<UnitLower>().solveInPlace(bAndX);
|
||||
|
||||
// w = D^-1 y
|
||||
bAndX = m_matrix.diagonal().asDiagonal().inverse() * bAndX;
|
||||
|
||||
// u = L^-T w
|
||||
m_matrix.adjoint().template triangularView<UnitUpper>().solveInPlace(bAndX);
|
||||
|
||||
// x = P^T u
|
||||
for (Index i = size-1; i >= 0; --i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
|
||||
bAndX = this->solve(bAndX);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -336,28 +390,38 @@ bool LDLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
|
||||
/** \returns the matrix represented by the decomposition,
|
||||
* i.e., it returns the product: P^T L D L^* P.
|
||||
* This function is provided for debug purpose. */
|
||||
template<typename MatrixType>
|
||||
MatrixType LDLT<MatrixType>::reconstructedMatrix() const
|
||||
template<typename MatrixType, int _UpLo>
|
||||
MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "LDLT is not initialized.");
|
||||
const Index size = m_matrix.rows();
|
||||
MatrixType res(size,size);
|
||||
res.setIdentity();
|
||||
|
||||
// PI
|
||||
for(Index i = 0; i < size; ++i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
|
||||
// P
|
||||
res.setIdentity();
|
||||
res = transpositionsP() * res;
|
||||
// L^* P
|
||||
res = matrixL().adjoint() * res;
|
||||
res = matrixU() * res;
|
||||
// D(L^*P)
|
||||
res = vectorD().asDiagonal() * res;
|
||||
// L(DL^*P)
|
||||
res = matrixL() * res;
|
||||
// P^T (LDL^*P)
|
||||
for (Index i = size-1; i >= 0; --i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
|
||||
res = transpositionsP().transpose() * res;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/** \cholesky_module
|
||||
* \returns the Cholesky decomposition with full pivoting without square root of \c *this
|
||||
*/
|
||||
template<typename MatrixType, unsigned int UpLo>
|
||||
inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
|
||||
SelfAdjointView<MatrixType, UpLo>::ldlt() const
|
||||
{
|
||||
return LDLT<PlainObject,UpLo>(m_matrix);
|
||||
}
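Usage sketch for the new UpLo parameter (illustrative only, not part of the patch; assumes MatrixBase::selfadjointView as present in this revision): ldlt() on a self-adjoint view forwards UpLo so that only the selected triangle of the matrix is read.

#include <Eigen/Core>
#include <Eigen/Cholesky>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(4,4);
  A = A * A.transpose();
  // Only the lower triangle of A is accessed; UpLo = Lower is forwarded to LDLT.
  LDLT<MatrixXd, Lower> ldlt = A.selfadjointView<Lower>().ldlt();
  MatrixXd Arec = ldlt.reconstructedMatrix();  // debug helper: should reproduce A
  return 0;
}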
|
||||
|
||||
/** \cholesky_module
|
||||
* \returns the Cholesky decomposition with full pivoting without square root of \c *this
|
||||
*/
|
||||
|
@ -162,7 +162,6 @@ template<typename _MatrixType, int _UpLo> class LLT
|
||||
bool m_isInitialized;
|
||||
};
|
||||
|
||||
// forward declaration (defined at the end of this file)
|
||||
template<int UpLo> struct ei_llt_inplace;
|
||||
|
||||
template<> struct ei_llt_inplace<Lower>
|
||||
@ -209,9 +208,12 @@ template<> struct ei_llt_inplace<Lower>
|
||||
|
||||
for (Index k=0; k<size; k+=blockSize)
|
||||
{
|
||||
// partition the matrix:
|
||||
// A00 | - | -
|
||||
// lu = A10 | A11 | -
|
||||
// A20 | A21 | A22
|
||||
Index bs = std::min(blockSize, size-k);
|
||||
Index rs = size - k - bs;
|
||||
|
||||
Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs);
|
||||
Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs);
|
||||
Block<MatrixType,Dynamic,Dynamic> A22(m,k+bs,k+bs,rs,rs);
|
||||
|
@ -47,6 +47,7 @@ struct ei_traits<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef Dense StorageKind;
|
||||
typedef DenseIndex Index;
|
||||
enum {
|
||||
CoeffReadCost = NumTraits<Scalar>::ReadCost,
|
||||
RowsAtCompileTime = Rows,
|
||||
|
@ -162,7 +162,7 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
|
||||
.coeffRef(row + m_startRow.value(), col + m_startCol.value());
|
||||
}
|
||||
|
||||
inline const CoeffReturnType coeff(Index row, Index col) const
|
||||
EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
|
||||
{
|
||||
return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value());
|
||||
}
|
||||
|
@ -45,8 +45,19 @@
|
||||
* \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
|
||||
*/
|
||||
template<typename BinaryOp, typename Lhs, typename Rhs>
|
||||
struct ei_traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > : ei_traits<Lhs>
|
||||
struct ei_traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
|
||||
{
|
||||
// we must not inherit from ei_traits<Lhs> since it has
|
||||
// the potential to cause problems with MSVC
|
||||
typedef typename ei_cleantype<Lhs>::type Ancestor;
|
||||
typedef typename ei_traits<Ancestor>::XprKind XprKind;
|
||||
enum {
|
||||
RowsAtCompileTime = ei_traits<Ancestor>::RowsAtCompileTime,
|
||||
ColsAtCompileTime = ei_traits<Ancestor>::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = ei_traits<Ancestor>::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = ei_traits<Ancestor>::MaxColsAtCompileTime
|
||||
};
|
||||
|
||||
// even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
|
||||
// we still want to handle the case when the result type is different.
|
||||
typedef typename ei_result_of<
|
||||
@ -57,6 +68,8 @@ struct ei_traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > : ei_traits<Lhs>
|
||||
>::type Scalar;
|
||||
typedef typename ei_promote_storage_type<typename ei_traits<Lhs>::StorageKind,
|
||||
typename ei_traits<Rhs>::StorageKind>::ret StorageKind;
|
||||
typedef typename ei_promote_index_type<typename ei_traits<Lhs>::Index,
|
||||
typename ei_traits<Rhs>::Index>::type Index;
|
||||
typedef typename Lhs::Nested LhsNested;
|
||||
typedef typename Rhs::Nested RhsNested;
|
||||
typedef typename ei_unref<LhsNested>::type _LhsNested;
|
||||
@ -97,7 +110,7 @@ class CwiseBinaryOp : ei_no_assignment_operator,
|
||||
BinaryOp, Lhs, Rhs,
|
||||
typename ei_promote_storage_type<typename ei_traits<Lhs>::StorageKind,
|
||||
typename ei_traits<Rhs>::StorageKind>::ret>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE_NEW(CwiseBinaryOp)
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
|
||||
|
||||
typedef typename ei_nested<Lhs>::type LhsNested;
|
||||
typedef typename ei_nested<Rhs>::type RhsNested;
|
||||
@ -125,14 +138,14 @@ class CwiseBinaryOp : ei_no_assignment_operator,
|
||||
|
||||
EIGEN_STRONG_INLINE Index rows() const {
|
||||
// return the fixed size type if available to enable compile time optimizations
|
||||
if (ei_traits<typename ei_cleantype<LhsNested>::type>::RowsAtCompileTime==Dynamic)
|
||||
if (ei_traits<typename ei_cleantype<LhsNested>::type>::RowsAtCompileTime==Dynamic)
|
||||
return m_rhs.rows();
|
||||
else
|
||||
return m_lhs.rows();
|
||||
}
|
||||
EIGEN_STRONG_INLINE Index cols() const {
|
||||
// return the fixed size type if available to enable compile time optimizations
|
||||
if (ei_traits<typename ei_cleantype<LhsNested>::type>::ColsAtCompileTime==Dynamic)
|
||||
if (ei_traits<typename ei_cleantype<LhsNested>::type>::ColsAtCompileTime==Dynamic)
|
||||
return m_rhs.cols();
|
||||
else
|
||||
return m_lhs.cols();
|
||||
|
@ -71,7 +71,7 @@ class CwiseUnaryOp : ei_no_assignment_operator,
|
||||
public:
|
||||
|
||||
typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename ei_traits<XprType>::StorageKind>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE_NEW(CwiseUnaryOp)
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
|
||||
|
||||
inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
|
||||
: m_xpr(xpr), m_functor(func) {}
|
||||
|
@ -70,7 +70,7 @@ class CwiseUnaryView : ei_no_assignment_operator,
|
||||
public:
|
||||
|
||||
typedef typename CwiseUnaryViewImpl<ViewOp, MatrixType,typename ei_traits<MatrixType>::StorageKind>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE_NEW(CwiseUnaryView)
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)
|
||||
|
||||
inline CwiseUnaryView(const MatrixType& mat, const ViewOp& func = ViewOp())
|
||||
: m_matrix(mat), m_functor(func) {}
|
||||
|
@ -51,7 +51,7 @@ template<typename Derived> class DenseBase
|
||||
class InnerIterator;
|
||||
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
@ -218,8 +218,8 @@ template<typename Derived> class DenseBase
|
||||
*/
|
||||
void resize(Index rows, Index cols)
|
||||
{
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(rows);
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(cols);
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(rows);
|
||||
EIGEN_ONLY_USED_FOR_DEBUG(cols);
|
||||
ei_assert(rows == this->rows() && cols == this->cols()
|
||||
&& "DenseBase::resize() does not actually allow to resize.");
|
||||
}
|
||||
@ -247,7 +247,7 @@ template<typename Derived> class DenseBase
|
||||
/** \internal expression type of a block of whole rows */
|
||||
template<int N> struct NRowsBlockXpr { typedef Block<Derived, N, ei_traits<Derived>::ColsAtCompileTime> Type; };
|
||||
|
||||
|
||||
|
||||
#endif // not EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
/** Copies \a other into *this. \returns a reference to *this. */
|
||||
@ -440,8 +440,8 @@ template<typename Derived> class DenseBase
|
||||
* a const reference, in order to avoid a useless copy.
|
||||
*/
|
||||
EIGEN_STRONG_INLINE const typename ei_eval<Derived>::type eval() const
|
||||
{
|
||||
// Even though MSVC does not honor strong inlining when the return type
|
||||
{
|
||||
// Even though MSVC does not honor strong inlining when the return type
|
||||
// is a dynamic matrix, we desperately need strong inlining for fixed
|
||||
// size types on MSVC.
|
||||
return typename ei_eval<Derived>::type(derived());
|
||||
|
@ -30,7 +30,7 @@ class DenseCoeffsBase : public EigenBase<Derived>
|
||||
{
|
||||
public:
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename ei_meta_if<ei_has_direct_access<Derived>::ret, const Scalar&, Scalar>::ret CoeffReturnType;
|
||||
@ -40,7 +40,7 @@ class DenseCoeffsBase : public EigenBase<Derived>
|
||||
using Base::cols;
|
||||
using Base::size;
|
||||
using Base::derived;
|
||||
|
||||
|
||||
EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
|
||||
{
|
||||
return int(Derived::RowsAtCompileTime) == 1 ? 0
|
||||
@ -245,7 +245,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
|
||||
typedef DenseCoeffsBase<Derived, false> Base;
|
||||
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
@ -46,7 +46,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
|
||||
typedef typename ei_dense_xpr_base<Derived>::type Base;
|
||||
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
@ -159,7 +159,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
|
||||
*
|
||||
* \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
|
||||
*/
|
||||
inline void resize(Index rows, Index cols)
|
||||
EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
|
||||
{
|
||||
#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
|
||||
Index size = rows*cols;
|
||||
@ -471,7 +471,9 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
|
||||
template<typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
|
||||
{
|
||||
_resize_to_match(other);
|
||||
// I don't think we need this resize call since the lazyAssign will anyways resize
|
||||
// and lazyAssign will be called by the assign selector.
|
||||
//_resize_to_match(other);
|
||||
// the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
|
||||
// it wouldn't allow to copy a row-vector into a column-vector.
|
||||
return ei_assign_selector<Derived,OtherDerived,false>::run(this->derived(), other.derived());
|
||||
|
@ -34,7 +34,7 @@ class DiagonalBase : public EigenBase<Derived>
|
||||
typedef typename ei_traits<Derived>::DiagonalVectorType DiagonalVectorType;
|
||||
typedef typename DiagonalVectorType::Scalar Scalar;
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
|
||||
enum {
|
||||
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
|
||||
@ -103,6 +103,7 @@ struct ei_traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime>
|
||||
{
|
||||
typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
|
||||
typedef Dense StorageKind;
|
||||
typedef DenseIndex Index;
|
||||
};
|
||||
|
||||
template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
@ -115,7 +116,7 @@ class DiagonalMatrix
|
||||
typedef const DiagonalMatrix& Nested;
|
||||
typedef _Scalar Scalar;
|
||||
typedef typename ei_traits<DiagonalMatrix>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<DiagonalMatrix>::Index Index;
|
||||
#endif
|
||||
|
||||
protected:
|
||||
@ -203,6 +204,7 @@ struct ei_traits<DiagonalWrapper<_DiagonalVectorType> >
|
||||
{
|
||||
typedef _DiagonalVectorType DiagonalVectorType;
|
||||
typedef typename DiagonalVectorType::Scalar Scalar;
|
||||
typedef typename DiagonalVectorType::Index Index;
|
||||
typedef typename DiagonalVectorType::StorageKind StorageKind;
|
||||
enum {
|
||||
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
|
||||
|
@ -85,7 +85,7 @@ MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
|
||||
* \sa dot(), norm()
|
||||
*/
|
||||
template<typename Derived>
|
||||
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
|
||||
EIGEN_STRONG_INLINE typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
|
||||
{
|
||||
return ei_real((*this).cwiseAbs2().sum());
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ template<typename Derived> struct EigenBase
|
||||
// typedef typename ei_plain_matrix_type<Derived>::type PlainObject;
|
||||
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
|
||||
/** \returns a reference to the derived object */
|
||||
Derived& derived() { return *static_cast<Derived*>(this); }
|
||||
|
@ -46,7 +46,7 @@ template<typename Derived> class MapBase
|
||||
|
||||
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
@ -113,6 +113,7 @@ struct ei_traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef Dense StorageKind;
|
||||
typedef DenseIndex Index;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
RowsAtCompileTime = _Rows,
|
||||
|
@ -58,11 +58,11 @@ template<typename Derived> class MatrixBase
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
typedef MatrixBase StorageBaseType;
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
|
||||
typedef DenseBase<Derived> Base;
|
||||
using Base::RowsAtCompileTime;
|
||||
using Base::ColsAtCompileTime;
|
||||
|
@ -243,7 +243,7 @@ template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic
|
||||
m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
|
||||
m_cols = cols;
|
||||
}
|
||||
void resize(DenseIndex size, DenseIndex, DenseIndex cols)
|
||||
EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols)
|
||||
{
|
||||
if(size != _Rows*m_cols)
|
||||
{
|
||||
@ -279,7 +279,7 @@ template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic
|
||||
m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
|
||||
m_rows = rows;
|
||||
}
|
||||
void resize(DenseIndex size, DenseIndex rows, DenseIndex)
|
||||
EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex)
|
||||
{
|
||||
if(size != m_rows*_Cols)
|
||||
{
|
||||
|
@ -45,18 +45,12 @@ class NoAlias
|
||||
public:
|
||||
NoAlias(ExpressionType& expression) : m_expression(expression) {}
|
||||
|
||||
/* \sa MatrixBase::lazyAssign() */
|
||||
/** Behaves like MatrixBase::lazyAssign(other)
|
||||
* \sa MatrixBase::lazyAssign() */
|
||||
template<typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)
|
||||
{ return m_expression.lazyAssign(other.derived()); }
|
||||
|
||||
template<typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE ExpressionType& operator=(const ReturnByValue<OtherDerived>& other)
|
||||
{
|
||||
other.evalTo(m_expression);
|
||||
return m_expression;
|
||||
}
|
||||
|
||||
/** \sa MatrixBase::operator+= */
|
||||
template<typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)
|
||||
|
@ -2,6 +2,7 @@
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
@ -46,7 +47,6 @@
|
||||
*
|
||||
* \sa class DiagonalMatrix
|
||||
*/
|
||||
template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime> class PermutationMatrix;
|
||||
template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false> struct ei_permut_matrix_product_retval;
|
||||
|
||||
template<int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
@ -77,8 +77,12 @@ class PermutationMatrix : public EigenBase<PermutationMatrix<SizeAtCompileTime,
|
||||
typedef Matrix<int, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
|
||||
|
||||
inline PermutationMatrix()
|
||||
{
|
||||
}
|
||||
{}
|
||||
|
||||
/** Constructs an uninitialized permutation matrix of given size.
|
||||
*/
|
||||
inline PermutationMatrix(int size) : m_indices(size)
|
||||
{}
|
||||
|
||||
/** Copy constructor. */
|
||||
template<int OtherSize, int OtherMaxSize>
|
||||
@ -102,6 +106,14 @@ class PermutationMatrix : public EigenBase<PermutationMatrix<SizeAtCompileTime,
|
||||
explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices)
|
||||
{}
|
||||
|
||||
/** Convert the Transpositions \a tr to a permutation matrix */
|
||||
template<int OtherSize, int OtherMaxSize>
|
||||
explicit PermutationMatrix(const Transpositions<OtherSize,OtherMaxSize>& tr)
|
||||
: m_indices(tr.size())
|
||||
{
|
||||
*this = tr;
|
||||
}
|
||||
|
||||
/** Copies the other permutation into *this */
|
||||
template<int OtherSize, int OtherMaxSize>
|
||||
PermutationMatrix& operator=(const PermutationMatrix<OtherSize, OtherMaxSize>& other)
|
||||
@ -110,6 +122,16 @@ class PermutationMatrix : public EigenBase<PermutationMatrix<SizeAtCompileTime,
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** Assignment from the Transpositions \a tr */
|
||||
template<int OtherSize, int OtherMaxSize>
|
||||
PermutationMatrix& operator=(const Transpositions<OtherSize,OtherMaxSize>& tr)
|
||||
{
|
||||
setIdentity(tr.size());
|
||||
for(int k=size()-1; k>=0; --k)
|
||||
applyTranspositionOnTheRight(k,tr.coeff(k));
|
||||
return *this;
|
||||
}
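Usage sketch for the new Transpositions-to-PermutationMatrix conversion (illustrative only, not part of the patch): the pivot sequence returned by LDLT::transpositionsP() can be expanded into an explicit permutation.

#include <Eigen/Core>
#include <Eigen/Cholesky>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(4,4);
  A = A * A.transpose();
  LDLT<MatrixXd, Lower> ldlt(A);
  // The constructor added above applies each transposition in turn, yielding
  // the same permutation that the decomposition used for pivoting.
  PermutationMatrix<Dynamic, Dynamic> P(ldlt.transpositionsP());
  MatrixXd PA = ldlt.transpositionsP() * A;   // transpositions applied directly, as in the class docs
  return 0;
}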
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** This is a special case of the templated operator=. Its purpose is to
|
||||
* prevent a default operator= from hiding the templated operator=.
|
||||
@ -121,11 +143,6 @@ class PermutationMatrix : public EigenBase<PermutationMatrix<SizeAtCompileTime,
|
||||
}
|
||||
#endif
|
||||
|
||||
/** Constructs an uninitialized permutation matrix of given size.
|
||||
*/
|
||||
inline PermutationMatrix(int size) : m_indices(size)
|
||||
{}
|
||||
|
||||
/** \returns the number of rows */
|
||||
inline int rows() const { return m_indices.size(); }
|
||||
|
||||
|
@ -194,6 +194,11 @@ class GeneralProduct<Lhs, Rhs, InnerProduct>
|
||||
}
|
||||
|
||||
typename Base::Scalar value() const { return Base::coeff(0,0); }
|
||||
|
||||
/** Conversion to scalar */
|
||||
operator const typename Base::Scalar() const {
|
||||
return Base::coeff(0,0);
|
||||
}
|
||||
};
|
||||
|
||||
/***********************************************************************
|
||||
|
@ -37,6 +37,8 @@ struct ei_traits<ProductBase<Derived,_Lhs,_Rhs> >
|
||||
typedef typename ei_scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
|
||||
typedef typename ei_promote_storage_type<typename ei_traits<Lhs>::StorageKind,
|
||||
typename ei_traits<Rhs>::StorageKind>::ret StorageKind;
|
||||
typedef typename ei_promote_index_type<typename ei_traits<Lhs>::Index,
|
||||
typename ei_traits<Rhs>::Index>::type Index;
|
||||
enum {
|
||||
RowsAtCompileTime = ei_traits<Lhs>::RowsAtCompileTime,
|
||||
ColsAtCompileTime = ei_traits<Rhs>::ColsAtCompileTime,
|
||||
|
@ -181,7 +181,7 @@ struct ei_redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
|
||||
{
|
||||
typedef typename Derived::Scalar Scalar;
|
||||
typedef typename Derived::Index Index;
|
||||
static Scalar run(const Derived& mat, const Func& func)
|
||||
static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func)
|
||||
{
|
||||
ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix");
|
||||
Scalar res;
|
||||
@ -311,7 +311,7 @@ struct ei_redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling
|
||||
*/
|
||||
template<typename Derived>
|
||||
template<typename Func>
|
||||
inline typename ei_result_of<Func(typename ei_traits<Derived>::Scalar)>::type
|
||||
EIGEN_STRONG_INLINE typename ei_result_of<Func(typename ei_traits<Derived>::Scalar)>::type
|
||||
DenseBase<Derived>::redux(const Func& func) const
|
||||
{
|
||||
typedef typename ei_cleantype<typename Derived::Nested>::type ThisNested;
|
||||
|
@ -36,9 +36,9 @@ struct ei_traits<ReturnByValue<Derived> >
|
||||
enum {
|
||||
// We're disabling the DirectAccess because e.g. the constructor of
|
||||
// the Block-with-DirectAccess expression requires to have a coeffRef method.
|
||||
// FIXME this should be fixed so we can have DirectAccessBit here.
|
||||
// Also, we don't want to have to implement the stride stuff.
|
||||
Flags = (ei_traits<typename ei_traits<Derived>::ReturnType>::Flags
|
||||
| EvalBeforeNestingBit | EvalBeforeAssigningBit) & ~DirectAccessBit
|
||||
| EvalBeforeNestingBit) & ~DirectAccessBit
|
||||
};
|
||||
};
|
||||
|
||||
@ -84,11 +84,8 @@ template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
|
||||
{
|
||||
// since we're by-passing the mechanisms in Assign.h, we implement here the EvalBeforeAssigningBit.
|
||||
// override by using .noalias(), see corresponding operator= in NoAlias.
|
||||
typename Derived::PlainObject result(rows(), cols());
|
||||
other.evalTo(result);
|
||||
return (derived() = result);
|
||||
other.evalTo(derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
#endif // EIGEN_RETURNBYVALUE_H
|
||||
|
@ -153,7 +153,7 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
|
||||
/////////// Cholesky module ///////////
|
||||
|
||||
const LLT<PlainObject, UpLo> llt() const;
|
||||
const LDLT<PlainObject> ldlt() const;
|
||||
const LDLT<PlainObject, UpLo> ldlt() const;
|
||||
|
||||
/////////// Eigenvalue module ///////////
|
||||
|
||||
|
@ -87,7 +87,7 @@ MatrixBase<Derived>::blueNorm() const
|
||||
static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
|
||||
if(nmax <= 0)
|
||||
{
|
||||
Index nbig, ibeta, it, iemin, iemax, iexp;
|
||||
int nbig, ibeta, it, iemin, iemax, iexp;
|
||||
RealScalar abig, eps;
|
||||
// This program calculates the machine-dependent constants
|
||||
// bl, b2, slm, s2m, relerr overfl, nmax
|
||||
|
@ -66,7 +66,7 @@ template<typename MatrixType> class Transpose
|
||||
public:
|
||||
|
||||
typedef typename TransposeImpl<MatrixType,typename ei_traits<MatrixType>::StorageKind>::Base Base;
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE_NEW(Transpose)
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
|
||||
|
||||
inline Transpose(const MatrixType& matrix) : m_matrix(matrix) {}
|
||||
|
||||
|
Eigen/src/Core/Transpositions.h (new file, 292 lines)
@ -0,0 +1,292 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2010 Gael Guennebaud <g.gael@free.fr>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
// License as published by the Free Software Foundation; either
|
||||
// version 3 of the License, or (at your option) any later version.
|
||||
//
|
||||
// Alternatively, you can redistribute it and/or
|
||||
// modify it under the terms of the GNU General Public License as
|
||||
// published by the Free Software Foundation; either version 2 of
|
||||
// the License, or (at your option) any later version.
|
||||
//
|
||||
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
|
||||
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public
|
||||
// License and a copy of the GNU General Public License along with
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#ifndef EIGEN_TRANSPOSITIONS_H
|
||||
#define EIGEN_TRANSPOSITIONS_H
|
||||
|
||||
/** \class Transpositions
|
||||
*
|
||||
* \brief Represents a sequence of transpositions (row/column interchange)
|
||||
*
|
||||
* \param SizeAtCompileTime the number of transpositions, or Dynamic
|
||||
* \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
|
||||
*
|
||||
* This class represents a permutation transformation as a sequence of \em n transpositions
|
||||
* \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices.
|
||||
* Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges
|
||||
* the rows \c i and \c indices[i] of the matrix \c M.
|
||||
* A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange.
|
||||
*
|
||||
* Compared to the class PermutationMatrix, such a sequence of transpositions is what is
|
||||
* computed during a decomposition with pivoting, and it is faster when applying the permutation in-place.
|
||||
*
|
||||
* To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:
|
||||
* \code
|
||||
* Transpositions tr;
|
||||
* MatrixXf mat;
|
||||
* mat = tr * mat;
|
||||
* \endcode
|
||||
* In this example, we detect that the matrix appears on both sides, and so the transpositions
|
||||
* are applied in-place without any temporary or extra copy.
|
||||
*
|
||||
* \sa class PermutationMatrix
|
||||
*/
|
||||
template<typename TranspositionType, typename MatrixType, int Side, bool Transposed=false> struct ei_transposition_matrix_product_retval;
|
||||
|
||||
template<int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
class Transpositions
|
||||
{
|
||||
public:
|
||||
|
||||
typedef Matrix<DenseIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
|
||||
typedef typename IndicesType::Index Index;
|
||||
|
||||
inline Transpositions() {}
|
||||
|
||||
/** Copy constructor. */
|
||||
template<int OtherSize, int OtherMaxSize>
|
||||
inline Transpositions(const Transpositions<OtherSize, OtherMaxSize>& other)
|
||||
: m_indices(other.indices()) {}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** Standard copy constructor. Defined only to prevent a default copy constructor
|
||||
* from hiding the other templated constructor */
|
||||
inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}
|
||||
#endif
|
||||
|
||||
/** Generic constructor from expression of the transposition indices. */
|
||||
template<typename Other>
|
||||
explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)
|
||||
{}
|
||||
|
||||
/** Copies the \a other transpositions into \c *this */
|
||||
template<int OtherSize, int OtherMaxSize>
|
||||
Transpositions& operator=(const Transpositions<OtherSize, OtherMaxSize>& other)
|
||||
{
|
||||
m_indices = other.indices();
|
||||
return *this;
|
||||
}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
/** This is a special case of the templated operator=. Its purpose is to
|
||||
* prevent a default operator= from hiding the templated operator=.
|
||||
*/
|
||||
Transpositions& operator=(const Transpositions& other)
|
||||
{
|
||||
m_indices = other.m_indices;
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
|
||||
/** Constructs an uninitialized permutation matrix of given size.
|
||||
*/
|
||||
inline Transpositions(Index size) : m_indices(size)
|
||||
{}
|
||||
|
||||
/** \returns the number of transpositions */
|
||||
inline Index size() const { return m_indices.size(); }
|
||||
|
||||
/** Direct access to the underlying index vector */
|
||||
inline const Index& coeff(Index i) const { return m_indices.coeff(i); }
|
||||
/** Direct access to the underlying index vector */
|
||||
inline Index& coeffRef(Index i) { return m_indices.coeffRef(i); }
|
||||
/** Direct access to the underlying index vector */
|
||||
inline const Index& operator()(Index i) const { return m_indices(i); }
|
||||
/** Direct access to the underlying index vector */
|
||||
inline Index& operator()(Index i) { return m_indices(i); }
|
||||
/** Direct access to the underlying index vector */
|
||||
inline const Index& operator[](Index i) const { return m_indices(i); }
|
||||
/** Direct access to the underlying index vector */
|
||||
inline Index& operator[](Index i) { return m_indices(i); }
|
||||
|
||||
/** const version of indices(). */
|
||||
const IndicesType& indices() const { return m_indices; }
|
||||
/** \returns a reference to the stored array representing the transpositions. */
|
||||
IndicesType& indices() { return m_indices; }
|
||||
|
||||
/** Resizes to given size. */
|
||||
inline void resize(int size)
|
||||
{
|
||||
m_indices.resize(size);
|
||||
}
|
||||
|
||||
/** Sets \c *this to represents an identity transformation */
|
||||
void setIdentity()
|
||||
{
|
||||
for(int i = 0; i < m_indices.size(); ++i)
|
||||
m_indices.coeffRef(i) = i;
|
||||
}
|
||||
|
||||
// FIXME: do we want such methods ?
|
||||
// might be useful when the target matrix expression is complex, e.g.:
|
||||
// object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
|
||||
/*
|
||||
template<typename MatrixType>
|
||||
void applyForwardToRows(MatrixType& mat) const
|
||||
{
|
||||
for(Index k=0 ; k<size() ; ++k)
|
||||
if(m_indices(k)!=k)
|
||||
mat.row(k).swap(mat.row(m_indices(k)));
|
||||
}
|
||||
|
||||
template<typename MatrixType>
|
||||
void applyBackwardToRows(MatrixType& mat) const
|
||||
{
|
||||
for(Index k=size()-1 ; k>=0 ; --k)
|
||||
if(m_indices(k)!=k)
|
||||
mat.row(k).swap(mat.row(m_indices(k)));
|
||||
}
|
||||
*/
|
||||
|
||||
/** \returns the inverse transformation */
|
||||
inline Transpose<Transpositions> inverse() const
|
||||
{ return *this; }
|
||||
|
||||
* \returns the transpose transformation
|
||||
inline Transpose<Transpositions> transpose() const
|
||||
{ return *this; }
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template<int OtherSize, int OtherMaxSize>
|
||||
Transpositions(const Transpose<Transpositions<OtherSize,OtherMaxSize> >& other)
|
||||
: m_indices(other.size())
|
||||
{
|
||||
Index n = size();
|
||||
Index j = size()-1;
|
||||
for(Index i=0; i<n;++i,--j)
|
||||
m_indices.coeffRef(j) = other.nestedTranspositions().indices().coeff(i);
|
||||
}
|
||||
#endif
|
||||
|
||||
protected:
|
||||
|
||||
IndicesType m_indices;
|
||||
};
|
||||
|
||||
/** \returns the \a matrix with the \a transpositions applied to the columns.
|
||||
*/
|
||||
template<typename Derived, int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
inline const ei_transposition_matrix_product_retval<Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime>, Derived, OnTheRight>
|
||||
operator*(const MatrixBase<Derived>& matrix,
|
||||
const Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime> &transpositions)
|
||||
{
|
||||
return ei_transposition_matrix_product_retval
|
||||
<Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime>, Derived, OnTheRight>
|
||||
(transpositions, matrix.derived());
|
||||
}
|
||||
|
||||
/** \returns the \a matrix with the \a transpositions applied to the rows.
|
||||
*/
|
||||
template<typename Derived, int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
inline const ei_transposition_matrix_product_retval
|
||||
<Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime>, Derived, OnTheLeft>
|
||||
operator*(const Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime> &transpositions,
|
||||
const MatrixBase<Derived>& matrix)
|
||||
{
|
||||
return ei_transposition_matrix_product_retval
|
||||
<Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime>, Derived, OnTheLeft>
|
||||
(transpositions, matrix.derived());
|
||||
}
|
||||
|
||||
template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
|
||||
struct ei_traits<ei_transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
|
||||
{
|
||||
typedef typename MatrixType::PlainObject ReturnType;
|
||||
};
|
||||
|
||||
template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
|
||||
struct ei_transposition_matrix_product_retval
|
||||
: public ReturnByValue<ei_transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
|
||||
{
|
||||
typedef typename ei_cleantype<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
|
||||
typedef typename TranspositionType::Index Index;
|
||||
|
||||
ei_transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
|
||||
: m_transpositions(tr), m_matrix(matrix)
|
||||
{}
|
||||
|
||||
inline int rows() const { return m_matrix.rows(); }
|
||||
inline int cols() const { return m_matrix.cols(); }
|
||||
|
||||
template<typename Dest> inline void evalTo(Dest& dst) const
|
||||
{
|
||||
const int size = m_transpositions.size();
|
||||
Index j = 0;
|
||||
|
||||
if(!(ei_is_same_type<MatrixTypeNestedCleaned,Dest>::ret && ei_extract_data(dst) == ei_extract_data(m_matrix)))
|
||||
dst = m_matrix;
|
||||
|
||||
for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
|
||||
if((j=m_transpositions.coeff(k))!=k)
|
||||
{
|
||||
if(Side==OnTheLeft)
|
||||
dst.row(k).swap(dst.row(j));
|
||||
else if(Side==OnTheRight)
|
||||
dst.col(k).swap(dst.col(j));
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
const TranspositionType& m_transpositions;
|
||||
const typename MatrixType::Nested m_matrix;
|
||||
};
|
||||
|
||||
/* Template partial specialization for transposed/inverse transpositions */
|
||||
|
||||
template<int SizeAtCompileTime, int MaxSizeAtCompileTime>
|
||||
class Transpose<Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime> >
|
||||
{
|
||||
typedef Transpositions<SizeAtCompileTime, MaxSizeAtCompileTime> TranspositionType;
|
||||
typedef typename TranspositionType::IndicesType IndicesType;
|
||||
public:
|
||||
|
||||
Transpose(const TranspositionType& t) : m_transpositions(t) {}
|
||||
|
||||
inline int size() const { return m_transpositions.size(); }
|
||||
|
||||
/** \returns the \a matrix with the inverse transpositions applied to the columns.
|
||||
*/
|
||||
template<typename Derived> friend
|
||||
inline const ei_transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>
|
||||
operator*(const MatrixBase<Derived>& matrix, const Transpose& trt)
|
||||
{
|
||||
return ei_transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>(trt.m_transpositions, matrix.derived());
|
||||
}
|
||||
|
||||
/** \returns the \a matrix with the inverse transpositions applied to the rows.
|
||||
*/
|
||||
template<typename Derived>
|
||||
inline const ei_transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>
|
||||
operator*(const MatrixBase<Derived>& matrix) const
|
||||
{
|
||||
return ei_transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>(m_transpositions, matrix.derived());
|
||||
}
|
||||
|
||||
const TranspositionType& nestedTranspositions() const { return m_transpositions; }
|
||||
|
||||
protected:
|
||||
const TranspositionType& m_transpositions;
|
||||
};
|
||||
|
||||
#endif // EIGEN_TRANSPOSITIONS_H
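A minimal usage sketch for the new Transpositions class (not part of the patch; it assumes this snapshot where Transpositions.h is pulled in by Eigen/Core, and the index values are illustrative):

#include <Eigen/Core>
using namespace Eigen;

int main()
{
  // three transpositions acting on the rows/columns of a 4x4 matrix
  Transpositions<Dynamic> tr(3);
  tr[0] = 1;   // swap rows 0 and 1
  tr[1] = 3;   // then swap rows 1 and 3
  tr[2] = 2;   // no-op

  Matrix4f m = Matrix4f::Random();
  m = tr * m;            // row interchanges, applied in place (no temporary)
  m = m * tr;            // the same interchanges applied to the columns
  m = tr.inverse() * m;  // undoes the row interchanges
  return 0;
}
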
|
@ -46,7 +46,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
|
||||
};
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
|
||||
inline TriangularBase() { ei_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
|
||||
|
||||
@ -156,10 +156,10 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
|
||||
typedef typename MatrixType::PlainObject DenseMatrixType;
|
||||
typedef typename MatrixType::Nested MatrixTypeNested;
|
||||
typedef typename ei_cleantype<MatrixTypeNested>::type _MatrixTypeNested;
|
||||
using TriangularBase<TriangularView<_MatrixType, _Mode> >::evalToLazy;
|
||||
using Base::evalToLazy;
|
||||
|
||||
typedef typename ei_traits<TriangularView>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<TriangularView>::Index Index;
|
||||
|
||||
enum {
|
||||
Mode = _Mode,
|
||||
|
@ -31,10 +31,10 @@
|
||||
|
||||
#ifndef EIGEN_HAS_FUSE_CJMADD
|
||||
#define EIGEN_HAS_FUSE_CJMADD 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE
|
||||
#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE 8*128*128
|
||||
#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE 8*256*256
|
||||
#endif
|
||||
|
||||
// NOTE Altivec has 32 registers, but Eigen only accepts a value of 8 or 16
|
||||
@ -153,7 +153,7 @@ template<> EIGEN_STRONG_INLINE Packet4f ei_pset1<float>(const float& from) {
|
||||
return vc;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4i ei_pset1<int>(const int& from) {
|
||||
template<> EIGEN_STRONG_INLINE Packet4i ei_pset1<int>(const int& from) {
|
||||
int EIGEN_ALIGN16 ai[4];
|
||||
ai[0] = from;
|
||||
Packet4i vc = vec_ld(0, ai);
|
||||
|
@@ -52,7 +52,7 @@
* Typically for a single-threaded application you would set that to 25% of the size of your CPU caches in bytes
*/
#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE
#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE (sizeof(float)*256*256)
#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE (sizeof(float)*512*512)
#endif
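The macro above only provides a default; following the 25% guideline from the comment, it can be overridden per translation unit. A hedged sketch, with an illustrative value rather than a recommendation:

// define the tuning macro before any Eigen header is included,
// e.g. roughly 25% of a 1 MB data cache:
#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE (sizeof(float)*256*256)   // 256 KB
#include <Eigen/Core>
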

/** Defines the maximal width of the blocks used in the triangular product and solver
@ -32,7 +32,7 @@
|
||||
#endif
|
||||
|
||||
#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE
|
||||
#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE 4*96*96
|
||||
#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE 4*192*192
|
||||
#endif
|
||||
|
||||
// FIXME NEON has 16 quad registers, but since the current register allocator
|
||||
|
@ -54,6 +54,8 @@ struct ei_traits<CoeffBasedProduct<LhsNested,RhsNested,NestingFlags> >
|
||||
typedef typename ei_scalar_product_traits<typename _LhsNested::Scalar, typename _RhsNested::Scalar>::ReturnType Scalar;
|
||||
typedef typename ei_promote_storage_type<typename ei_traits<_LhsNested>::StorageKind,
|
||||
typename ei_traits<_RhsNested>::StorageKind>::ret StorageKind;
|
||||
typedef typename ei_promote_index_type<typename ei_traits<_LhsNested>::Index,
|
||||
typename ei_traits<_RhsNested>::Index>::type Index;
|
||||
|
||||
enum {
|
||||
LhsCoeffReadCost = _LhsNested::CoeffReadCost,
|
||||
|
@ -25,13 +25,156 @@
|
||||
#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
|
||||
#define EIGEN_GENERAL_BLOCK_PANEL_H
|
||||
|
||||
#ifndef EIGEN_EXTERN_INSTANTIATIONS
|
||||
/** \internal */
|
||||
inline void ei_manage_caching_sizes(Action action, std::ptrdiff_t* a=0, std::ptrdiff_t* b=0, std::ptrdiff_t* c=0, int scalar_size = 0)
|
||||
{
|
||||
const int nbScalarSizes = 12;
|
||||
static std::ptrdiff_t m_maxK[nbScalarSizes];
|
||||
static std::ptrdiff_t m_maxM[nbScalarSizes];
|
||||
static std::ptrdiff_t m_maxN[nbScalarSizes];
|
||||
static std::ptrdiff_t m_l1CacheSize = 0;
|
||||
static std::ptrdiff_t m_l2CacheSize = 0;
|
||||
if(m_l1CacheSize==0)
|
||||
{
|
||||
// initialization
|
||||
m_l1CacheSize = EIGEN_TUNE_FOR_CPU_CACHE_SIZE;
|
||||
m_l2CacheSize = 32*EIGEN_TUNE_FOR_CPU_CACHE_SIZE;
|
||||
ei_manage_caching_sizes(SetAction,&m_l1CacheSize, &m_l2CacheSize);
|
||||
}
|
||||
|
||||
if(action==SetAction && scalar_size==0)
|
||||
{
|
||||
// set the cpu cache size and cache all block sizes from a global cache size in byte
|
||||
ei_internal_assert(a!=0 && b!=0 && c==0);
|
||||
m_l1CacheSize = *a;
|
||||
m_l2CacheSize = *b;
|
||||
int ss = 4;
|
||||
for(int i=0; i<nbScalarSizes;++i,ss+=4)
|
||||
{
|
||||
// Round the block size such that it is a multiple of 64/ss.
|
||||
// This is to make sure the block size are multiple of the register block sizes.
|
||||
// And in the worst case we ensure an even number.
|
||||
std::ptrdiff_t rb = 64/ss;
|
||||
if(rb==0) rb = 1;
|
||||
m_maxK[i] = 4 * std::ptrdiff_t(ei_sqrt<float>(m_l1CacheSize/(64*ss)));
|
||||
m_maxM[i] = 2 * m_maxK[i];
|
||||
m_maxN[i] = ((m_l2CacheSize / (2 * m_maxK[i] * ss))/4)*4;
|
||||
}
|
||||
}
|
||||
else if(action==SetAction && scalar_size!=0)
|
||||
{
|
||||
// set the block sizes for the given scalar type (represented as its size)
|
||||
ei_internal_assert(a!=0 && b!=0 && c!=0);
|
||||
int i = std::max((scalar_size>>2)-1,0);
|
||||
if(i<nbScalarSizes)
|
||||
{
|
||||
m_maxK[i] = *a;
|
||||
m_maxM[i] = *b;
|
||||
m_maxN[i] = *c;
|
||||
}
|
||||
}
|
||||
else if(action==GetAction && scalar_size==0)
|
||||
{
|
||||
ei_internal_assert(a!=0 && b!=0 && c==0);
|
||||
*a = m_l1CacheSize;
|
||||
*b = m_l2CacheSize;
|
||||
}
|
||||
else if(action==GetAction && scalar_size!=0)
|
||||
{
|
||||
ei_internal_assert(a!=0 && b!=0 && c!=0);
|
||||
int i = std::min(std::max((scalar_size>>2),1),nbScalarSizes)-1;
|
||||
*a = m_maxK[i];
|
||||
*b = m_maxM[i];
|
||||
*c = m_maxN[i];
|
||||
}
|
||||
else
|
||||
{
|
||||
ei_internal_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
|
||||
* \sa setCpuCacheSize */
|
||||
inline std::ptrdiff_t l1CacheSize()
|
||||
{
|
||||
std::ptrdiff_t l1, l2;
|
||||
ei_manage_caching_sizes(GetAction, &l1, &l2);
|
||||
return l1;
|
||||
}
|
||||
|
||||
/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
|
||||
* \sa setCpuCacheSize */
|
||||
inline std::ptrdiff_t l2CacheSize()
|
||||
{
|
||||
std::ptrdiff_t l1, l2;
|
||||
ei_manage_caching_sizes(GetAction, &l1, &l2);
|
||||
return l2;
|
||||
}
|
||||
|
||||
/** Set the cpu L1 and L2 cache sizes (in bytes).
* These values are used to adjust the size of the blocks
* for the algorithms working per blocks.
*
* This function also automatically sets the blocking size parameters
* for each scalar type using the following rules:
* \code
* max_k = 4 * sqrt(l1/(64*sizeof(Scalar)));
* max_m = 2 * max_k;
* max_n = l2/(2*max_k*sizeof(Scalar));
* \endcode
* overwriting custom values set using the setBlockingSizes function.
*
* See setBlockingSizes() for an explanation about the meaning of these parameters.
*
* \sa setBlockingSizes */
|
||||
inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2)
|
||||
{
|
||||
ei_manage_caching_sizes(SetAction, &l1, &l2);
|
||||
}
|
||||
|
||||
/** \brief Set the blocking size parameters \a maxK, \a maxM and \a maxN for the scalar type \a Scalar.
|
||||
*
|
||||
* \param[in] maxK the size of the L1 and L2 blocks along the k dimension
|
||||
* \param[in] maxM the size of the L1 blocks along the m dimension
|
||||
* \param[in] maxN the size of the L2 blocks along the n dimension
|
||||
*
|
||||
* This function sets the blocking size parameters for matrix products and related algorithms.
|
||||
* More precisely, let A * B be a m x k by k x n matrix product. Then Eigen's product like
|
||||
* algorithms perform L2 blocking on B with horizontal panels of size maxK x maxN,
|
||||
* and L1 blocking on A with blocks of size maxM x maxK.
|
||||
*
|
||||
* Theoretically, for best performance maxM should be close to maxK and maxM * maxK should
* not exceed half of the L1 cache. Likewise, maxK * maxN should be smaller than the L2 cache.
|
||||
*
|
||||
* Note that in practice there is no distinction between scalar types of same size.
|
||||
*
|
||||
* \sa setCpuCacheSizes */
|
||||
template<typename Scalar>
|
||||
void setBlockingSizes(std::ptrdiff_t maxK, std::ptrdiff_t maxM, std::ptrdiff_t maxN)
|
||||
{
|
||||
std::ptrdiff_t k, m, n;
|
||||
typedef ei_product_blocking_traits<Scalar> Traits;
|
||||
k = ((maxK)/4)*4;
|
||||
m = ((maxM)/Traits::mr)*Traits::mr;
|
||||
n = ((maxN)/Traits::nr)*Traits::nr;
|
||||
ei_manage_caching_sizes(SetAction,&k,&m,&n,sizeof(Scalar));
|
||||
}
|
||||
|
||||
/** \returns in \a maxK, \a maxM and \a maxN the blocking size parameters for the scalar type \a Scalar.
|
||||
*
|
||||
* See setBlockingSizes for an explanation about the meaning of these parameters.
|
||||
*
|
||||
* \sa setBlockingSizes */
|
||||
template<typename Scalar>
|
||||
void getBlockingSizes(std::ptrdiff_t& maxK, std::ptrdiff_t& maxM, std::ptrdiff_t& maxN)
|
||||
{
|
||||
ei_manage_caching_sizes(GetAction,&maxK,&maxM,&maxN,sizeof(Scalar));
|
||||
}
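A hedged sketch (not part of the patch) of how these new runtime tuning hooks fit together; the cache and block values are illustrative only:

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
  // cache sizes Eigen currently assumes for its blocking heuristics
  std::cout << "L1 = " << l1CacheSize() << "  L2 = " << l2CacheSize() << "\n";

  // retune for, say, 64 KB of L1 and 1 MB of L2 data cache; this recomputes
  // the per-scalar-type blocking sizes with the rules documented above
  setCpuCacheSizes(64*1024, 1024*1024);

  // or set the GEMM blocking parameters for double precision directly
  // (values are rounded down to multiples of the register block sizes)
  setBlockingSizes<double>(256, 512, 512);

  std::ptrdiff_t maxK, maxM, maxN;
  getBlockingSizes<double>(maxK, maxM, maxN);
  std::cout << "maxK=" << maxK << " maxM=" << maxM << " maxN=" << maxN << "\n";
  return 0;
}
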
|
||||
|
||||
#ifdef EIGEN_HAS_FUSE_CJMADD
|
||||
#define CJMADD(A,B,C,T) C = cj.pmadd(A,B,C);
|
||||
#define CJMADD(A,B,C,T) C = cj.pmadd(A,B,C);
|
||||
#else
|
||||
#define CJMADD(A,B,C,T) T = B; T = cj.pmul(A,T); C = ei_padd(C,T);
|
||||
// #define CJMADD(A,B,C,T) T = A; T = cj.pmul(T,B); C = ei_padd(C,T);
|
||||
#define CJMADD(A,B,C,T) T = B; T = cj.pmul(A,T); C = ei_padd(C,T);
|
||||
#endif
|
||||
|
||||
// optimized GEneral packed Block * packed Panel product kernel
|
||||
@ -762,6 +905,4 @@ struct ei_gemm_pack_rhs<Scalar, Index, nr, RowMajor, PanelMode>
|
||||
}
|
||||
};
|
||||
|
||||
#endif // EIGEN_EXTERN_INSTANTIATIONS
|
||||
|
||||
#endif // EIGEN_GENERAL_BLOCK_PANEL_H
|
||||
|
@ -25,8 +25,6 @@
|
||||
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
|
||||
#define EIGEN_GENERAL_MATRIX_MATRIX_H
|
||||
|
||||
#ifndef EIGEN_EXTERN_INSTANTIATIONS
|
||||
|
||||
/* Specialization for a row-major destination matrix => simple transposition of the product */
|
||||
template<
|
||||
typename Scalar, typename Index,
|
||||
@ -77,8 +75,13 @@ static void run(Index rows, Index cols, Index depth,
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketType;
|
||||
typedef ei_product_blocking_traits<Scalar> Blocking;
|
||||
|
||||
Index kc = std::min<Index>(Blocking::Max_kc,depth); // cache block size along the K direction
|
||||
Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
|
||||
Index kc; // cache block size along the K direction
|
||||
Index mc; // cache block size along the M direction
|
||||
Index nc; // cache block size along the N direction
|
||||
getBlockingSizes<Scalar>(kc, mc, nc);
|
||||
kc = std::min<Index>(kc,depth);
|
||||
mc = std::min<Index>(mc,rows);
|
||||
nc = std::min<Index>(nc,cols);
|
||||
|
||||
ei_gemm_pack_rhs<Scalar, Index, Blocking::nr, RhsStorageOrder> pack_rhs;
|
||||
ei_gemm_pack_lhs<Scalar, Index, Blocking::mr, LhsStorageOrder> pack_lhs;
|
||||
@ -159,7 +162,8 @@ static void run(Index rows, Index cols, Index depth,
|
||||
else
|
||||
#endif // EIGEN_HAS_OPENMP
|
||||
{
|
||||
(void)info; // info is not used
|
||||
EIGEN_UNUSED_VARIABLE(info);
|
||||
|
||||
// this is the sequential version!
|
||||
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
|
||||
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
|
||||
@ -203,8 +207,6 @@ static void run(Index rows, Index cols, Index depth,
|
||||
|
||||
};
|
||||
|
||||
#endif // EIGEN_EXTERN_INSTANTIATIONS
|
||||
|
||||
/*********************************************************************************
|
||||
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
|
||||
* implementation of the high level wrapper to ei_general_matrix_matrix_product
|
||||
@ -239,7 +241,9 @@ struct ei_gemm_functor
|
||||
|
||||
Index sharedBlockBSize() const
|
||||
{
|
||||
return std::min<Index>(ei_product_blocking_traits<Scalar>::Max_kc,m_rhs.rows()) * m_rhs.cols();
|
||||
Index maxKc, maxMc, maxNc;
|
||||
getBlockingSizes<Scalar>(maxKc, maxMc, maxNc);
|
||||
return std::min<Index>(maxKc,m_rhs.rows()) * m_rhs.cols();
|
||||
}
|
||||
|
||||
protected:
|
||||
|
@ -25,6 +25,50 @@
|
||||
#ifndef EIGEN_PARALLELIZER_H
|
||||
#define EIGEN_PARALLELIZER_H
|
||||
|
||||
/** \internal */
|
||||
inline void ei_manage_multi_threading(Action action, int* v)
|
||||
{
|
||||
static int m_maxThreads = -1;
|
||||
|
||||
if(action==SetAction)
|
||||
{
|
||||
ei_internal_assert(v!=0);
|
||||
m_maxThreads = *v;
|
||||
}
|
||||
else if(action==GetAction)
|
||||
{
|
||||
ei_internal_assert(v!=0);
|
||||
#ifdef EIGEN_HAS_OPENMP
|
||||
if(m_maxThreads>0)
|
||||
*v = m_maxThreads;
|
||||
else
|
||||
*v = omp_get_max_threads();
|
||||
#else
|
||||
*v = 1;
|
||||
#endif
|
||||
}
|
||||
else
|
||||
{
|
||||
ei_internal_assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
/** \returns the max number of threads reserved for Eigen
|
||||
* \sa setNbThreads */
|
||||
inline int nbThreads()
|
||||
{
|
||||
int ret;
|
||||
ei_manage_multi_threading(GetAction, &ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** Sets the max number of threads reserved for Eigen
|
||||
* \sa nbThreads */
|
||||
inline void setNbThreads(int v)
|
||||
{
|
||||
ei_manage_multi_threading(SetAction, &v);
|
||||
}
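A hedged sketch of the new thread-control API introduced above (it assumes an OpenMP-enabled build; without OpenMP, nbThreads() reports 1 and products stay sequential):

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
  setNbThreads(4);                   // reserve at most 4 threads for Eigen
  std::cout << nbThreads() << "\n";  // 4 with OpenMP, 1 otherwise

  MatrixXd A = MatrixXd::Random(1024,1024), B = MatrixXd::Random(1024,1024);
  MatrixXd C = A * B;                // a large GEMM that may now run in parallel
  std::cout << C.norm() << "\n";
  return 0;
}
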
|
||||
|
||||
template<typename BlockBScalar, typename Index> struct GemmParallelInfo
|
||||
{
|
||||
GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0), blockB(0) {}
|
||||
@ -57,10 +101,10 @@ void ei_parallelize_gemm(const Functor& func, Index rows, Index cols)
|
||||
|
||||
// 2- compute the maximal number of threads from the size of the product:
|
||||
// FIXME this has to be fine tuned
|
||||
Index max_threads = std::max(1,rows / 32);
|
||||
Index max_threads = std::max<Index>(1,rows / 32);
|
||||
|
||||
// 3 - compute the number of threads we are going to use
|
||||
Index threads = std::min<Index>(omp_get_max_threads(), max_threads);
|
||||
Index threads = std::min<Index>(nbThreads(), max_threads);
|
||||
|
||||
if(threads==1)
|
||||
return func(0,rows, 0,cols);
|
||||
@ -71,7 +115,8 @@ void ei_parallelize_gemm(const Functor& func, Index rows, Index cols)
|
||||
typedef typename Functor::BlockBScalar BlockBScalar;
|
||||
BlockBScalar* sharedBlockB = new BlockBScalar[func.sharedBlockBSize()];
|
||||
|
||||
GemmParallelInfo<BlockBScalar>* info = new GemmParallelInfo<BlockBScalar>[threads];
|
||||
GemmParallelInfo<BlockBScalar,Index>* info = new
|
||||
GemmParallelInfo<BlockBScalar,Index>[threads];
|
||||
|
||||
#pragma omp parallel for schedule(static,1) num_threads(threads)
|
||||
for(Index i=0; i<threads; ++i)
|
||||
|
@ -139,7 +139,7 @@ struct ei_product_blocking_traits
|
||||
mr = 2 * PacketSize,
|
||||
|
||||
// max cache block size along the K direction
|
||||
Max_kc = 8 * ei_meta_sqrt<EIGEN_TUNE_FOR_CPU_CACHE_SIZE/(64*sizeof(Scalar))>::ret,
|
||||
Max_kc = 4 * ei_meta_sqrt<EIGEN_TUNE_FOR_CPU_CACHE_SIZE/(64*sizeof(Scalar))>::ret,
|
||||
|
||||
// max cache block size along the M direction
|
||||
Max_mc = 2*Max_kc
|
||||
@ -162,7 +162,7 @@ template<typename XprType> struct ei_blas_traits
|
||||
&& ( /* Uncomment this when the low-level matrix-vector product functions support strided vectors
|
||||
bool(XprType::IsVectorAtCompileTime)
|
||||
|| */
|
||||
int(ei_inner_stride_at_compile_time<XprType>::ret) == 1)
|
||||
int(ei_inner_stride_at_compile_time<XprType>::ret) <= 1)
|
||||
) ? 1 : 0
|
||||
};
|
||||
typedef typename ei_meta_if<bool(HasUsableDirectAccess),
|
||||
|
@ -259,6 +259,8 @@ namespace Architecture
|
||||
|
||||
enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
|
||||
|
||||
enum Action {GetAction, SetAction};
|
||||
|
||||
/** The type used to identify a dense storage. */
|
||||
struct Dense {};
|
||||
|
||||
|
@ -3,7 +3,8 @@
|
||||
// 4273 - QtAlignedMalloc, inconsistent DLL linkage
|
||||
// 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p))
|
||||
// 4101 - unreferenced local variable
|
||||
// 4324 - structure was padded due to declspec(align())
|
||||
// 4512 - assignment operator could not be generated
|
||||
#pragma warning( push )
|
||||
#pragma warning( disable : 4100 4101 4181 4244 4127 4211 4273 4512 4522 4717 )
|
||||
#pragma warning( disable : 4100 4101 4181 4244 4127 4211 4273 4324 4512 4522 4717 )
|
||||
#endif
|
||||
|
@ -77,6 +77,8 @@ template<typename _DiagonalVectorType> class DiagonalWrapper;
|
||||
template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;
|
||||
template<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;
|
||||
template<typename MatrixType, int Index> class Diagonal;
|
||||
template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime> class PermutationMatrix;
|
||||
template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime> class Transpositions;
|
||||
|
||||
template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
|
||||
template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
|
||||
@ -158,7 +160,7 @@ template<typename MatrixType> class FullPivHouseholderQR;
|
||||
template<typename MatrixType> class SVD;
|
||||
template<typename MatrixType, unsigned int Options = 0> class JacobiSVD;
|
||||
template<typename MatrixType, int UpLo = Lower> class LLT;
|
||||
template<typename MatrixType> class LDLT;
|
||||
template<typename MatrixType, int UpLo = Lower> class LDLT;
|
||||
template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;
|
||||
template<typename Scalar> class PlanarRotation;
|
||||
|
||||
|
@ -98,10 +98,6 @@
|
||||
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
|
||||
#endif
|
||||
|
||||
#ifndef EIGEN_DEFAULT_SPARSE_INDEX_TYPE
|
||||
#define EIGEN_DEFAULT_SPARSE_INDEX_TYPE int
|
||||
#endif
|
||||
|
||||
/** Allows to disable some optimizations which might affect the accuracy of the result.
|
||||
* Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
|
||||
* They currently include:
|
||||
@ -180,6 +176,9 @@
|
||||
#define EIGEN_UNUSED
|
||||
#endif
|
||||
|
||||
// Suppresses 'unused variable' warnings.
|
||||
#define EIGEN_UNUSED_VARIABLE(var) (void)var;
|
||||
|
||||
#if (defined __GNUC__)
|
||||
#define EIGEN_ASM_COMMENT(X) asm("#"X)
|
||||
#else
|
||||
@ -269,13 +268,13 @@
|
||||
* documentation in a single line.
|
||||
**/
|
||||
|
||||
#define EIGEN_GENERIC_PUBLIC_INTERFACE_NEW(Derived) \
|
||||
#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
|
||||
typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
|
||||
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T were corresponding to RealScalar. */ \
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
|
||||
typedef typename Eigen::ei_nested<Derived>::type Nested; \
|
||||
typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
|
||||
typedef typename Eigen::ei_index<StorageKind>::type Index; \
|
||||
typedef typename Eigen::ei_traits<Derived>::Index Index; \
|
||||
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
|
||||
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
|
||||
Flags = Eigen::ei_traits<Derived>::Flags, \
|
||||
@ -292,7 +291,7 @@
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
|
||||
typedef typename Eigen::ei_nested<Derived>::type Nested; \
|
||||
typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
|
||||
typedef typename Eigen::ei_index<StorageKind>::type Index; \
|
||||
typedef typename Eigen::ei_traits<Derived>::Index Index; \
|
||||
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
|
||||
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
|
||||
MaxRowsAtCompileTime = Eigen::ei_traits<Derived>::MaxRowsAtCompileTime, \
|
||||
|
@ -218,7 +218,7 @@ inline void ei_aligned_free(void *ptr)
|
||||
**/
|
||||
inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size)
|
||||
{
|
||||
(void)old_size; // Suppress 'unused variable' warning. Seen in boost tee.
|
||||
EIGEN_UNUSED_VARIABLE(old_size);
|
||||
|
||||
void *result;
|
||||
#if !EIGEN_ALIGN
|
||||
|
@ -42,13 +42,14 @@ class ei_no_assignment_operator
|
||||
ei_no_assignment_operator& operator=(const ei_no_assignment_operator&);
|
||||
};
|
||||
|
||||
template<typename StorageKind> struct ei_index {};
|
||||
typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
|
||||
|
||||
template<>
|
||||
struct ei_index<Dense>
|
||||
{ typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE type; };
|
||||
|
||||
typedef ei_index<Dense>::type DenseIndex;
|
||||
/** \internal return the index type with the largest number of bits */
|
||||
template<typename I1, typename I2>
|
||||
struct ei_promote_index_type
|
||||
{
|
||||
typedef typename ei_meta_if<(sizeof(I1)<sizeof(I2)), I2, I1>::ret type;
|
||||
};
|
||||
|
||||
/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
|
||||
* can be accessed using value() and setValue().
|
||||
|
@ -27,6 +27,9 @@
|
||||
#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H
|
||||
#define EIGEN_COMPLEX_EIGEN_SOLVER_H
|
||||
|
||||
#include "./EigenvaluesCommon.h"
|
||||
#include "./ComplexSchur.h"
|
||||
|
||||
/** \eigenvalues_module \ingroup Eigenvalues_Module
|
||||
* \nonstableyet
|
||||
*
|
||||
@ -56,7 +59,10 @@
|
||||
template<typename _MatrixType> class ComplexEigenSolver
|
||||
{
|
||||
public:
|
||||
|
||||
/** \brief Synonym for the template parameter \p _MatrixType. */
|
||||
typedef _MatrixType MatrixType;
|
||||
|
||||
enum {
|
||||
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
||||
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
||||
@ -65,12 +71,12 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
|
||||
};
|
||||
|
||||
/** \brief Scalar type for matrices of type \p _MatrixType. */
|
||||
/** \brief Scalar type for matrices of type #MatrixType. */
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef typename MatrixType::Index Index;
|
||||
|
||||
/** \brief Complex scalar type for \p _MatrixType.
|
||||
/** \brief Complex scalar type for #MatrixType.
|
||||
*
|
||||
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
|
||||
* \c float or \c double) and just \c Scalar if #Scalar is
|
||||
@ -81,14 +87,14 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
|
||||
*
|
||||
* This is a column vector with entries of type #ComplexScalar.
|
||||
* The length of the vector is the size of \p _MatrixType.
|
||||
* The length of the vector is the size of #MatrixType.
|
||||
*/
|
||||
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options, MaxColsAtCompileTime, 1> EigenvalueType;
|
||||
|
||||
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
|
||||
*
|
||||
* This is a square matrix with entries of type #ComplexScalar.
|
||||
* The size is the same as the size of \p _MatrixType.
|
||||
* The size is the same as the size of #MatrixType.
|
||||
*/
|
||||
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, ColsAtCompileTime> EigenvectorType;
|
||||
|
||||
@ -102,6 +108,7 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
m_eivalues(),
|
||||
m_schur(),
|
||||
m_isInitialized(false),
|
||||
m_eigenvectorsOk(false),
|
||||
m_matX()
|
||||
{}
|
||||
|
||||
@ -116,40 +123,46 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
m_eivalues(size),
|
||||
m_schur(size),
|
||||
m_isInitialized(false),
|
||||
m_eigenvectorsOk(false),
|
||||
m_matX(size, size)
|
||||
{}
|
||||
|
||||
/** \brief Constructor; computes eigendecomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
*
|
||||
* This constructor calls compute() to compute the eigendecomposition.
|
||||
*/
|
||||
ComplexEigenSolver(const MatrixType& matrix)
|
||||
ComplexEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
|
||||
: m_eivec(matrix.rows(),matrix.cols()),
|
||||
m_eivalues(matrix.cols()),
|
||||
m_schur(matrix.rows()),
|
||||
m_isInitialized(false),
|
||||
m_eigenvectorsOk(false),
|
||||
m_matX(matrix.rows(),matrix.cols())
|
||||
{
|
||||
compute(matrix);
|
||||
compute(matrix, computeEigenvectors);
|
||||
}
|
||||
|
||||
/** \brief Returns the eigenvectors of given matrix.
|
||||
*
|
||||
* \returns A const reference to the matrix whose columns are the eigenvectors.
|
||||
*
|
||||
* It is assumed that either the constructor
|
||||
* ComplexEigenSolver(const MatrixType& matrix) or the member
|
||||
* function compute(const MatrixType& matrix) has been called
|
||||
* before to compute the eigendecomposition of a matrix. This
|
||||
* function returns a matrix whose columns are the
|
||||
* eigenvectors. Column \f$ k \f$ is an eigenvector
|
||||
* corresponding to eigenvalue number \f$ k \f$ as returned by
|
||||
* eigenvalues(). The eigenvectors are normalized to have
|
||||
* (Euclidean) norm equal to one. The matrix returned by this
|
||||
* function is the matrix \f$ V \f$ in the eigendecomposition \f$
|
||||
* A = V D V^{-1} \f$, if it exists.
|
||||
* \pre Either the constructor
|
||||
* ComplexEigenSolver(const MatrixType& matrix, bool) or the member
|
||||
* function compute(const MatrixType& matrix, bool) has been called before
|
||||
* to compute the eigendecomposition of a matrix, and
|
||||
* \p computeEigenvectors was set to true (the default).
|
||||
*
|
||||
* This function returns a matrix whose columns are the eigenvectors. Column
|
||||
* \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
|
||||
* \f$ as returned by eigenvalues(). The eigenvectors are normalized to
|
||||
* have (Euclidean) norm equal to one. The matrix returned by this
|
||||
* function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
|
||||
* V^{-1} \f$, if it exists.
|
||||
*
|
||||
* Example: \include ComplexEigenSolver_eigenvectors.cpp
|
||||
* Output: \verbinclude ComplexEigenSolver_eigenvectors.out
|
||||
@ -157,6 +170,7 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
const EigenvectorType& eigenvectors() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
|
||||
ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
|
||||
return m_eivec;
|
||||
}
|
||||
|
||||
@ -164,11 +178,12 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
*
|
||||
* \returns A const reference to the column vector containing the eigenvalues.
|
||||
*
|
||||
* It is assumed that either the constructor
|
||||
* ComplexEigenSolver(const MatrixType& matrix) or the member
|
||||
* function compute(const MatrixType& matrix) has been called
|
||||
* before to compute the eigendecomposition of a matrix. This
|
||||
* function returns a column vector containing the
|
||||
* \pre Either the constructor
|
||||
* ComplexEigenSolver(const MatrixType& matrix, bool) or the member
|
||||
* function compute(const MatrixType& matrix, bool) has been called before
|
||||
* to compute the eigendecomposition of a matrix.
|
||||
*
|
||||
* This function returns a column vector containing the
|
||||
* eigenvalues. Eigenvalues are repeated according to their
|
||||
* algebraic multiplicity, so there are as many eigenvalues as
|
||||
* rows in the matrix.
|
||||
@ -185,10 +200,15 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
/** \brief Computes eigendecomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* This function computes the eigenvalues and eigenvectors of \p
|
||||
* matrix. The eigenvalues() and eigenvectors() functions can be
|
||||
* used to retrieve the computed eigendecomposition.
|
||||
* This function computes the eigenvalues of the complex matrix \p matrix.
|
||||
* The eigenvalues() function can be used to retrieve them. If
|
||||
* \p computeEigenvectors is true, then the eigenvectors are also computed
|
||||
* and can be retrieved by calling eigenvectors().
|
||||
*
|
||||
* The matrix is first reduced to Schur form using the
|
||||
* ComplexSchur class. The Schur decomposition is then used to
|
||||
@ -201,31 +221,62 @@ template<typename _MatrixType> class ComplexEigenSolver
|
||||
* Example: \include ComplexEigenSolver_compute.cpp
|
||||
* Output: \verbinclude ComplexEigenSolver_compute.out
|
||||
*/
|
||||
void compute(const MatrixType& matrix);
|
||||
ComplexEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
|
||||
|
||||
/** \brief Reports whether previous computation was successful.
|
||||
*
|
||||
* \returns \c Success if computation was successful, \c NoConvergence otherwise.
|
||||
*/
|
||||
ComputationInfo info() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
|
||||
return m_schur.info();
|
||||
}
|
||||
|
||||
protected:
|
||||
EigenvectorType m_eivec;
|
||||
EigenvalueType m_eivalues;
|
||||
ComplexSchur<MatrixType> m_schur;
|
||||
bool m_isInitialized;
|
||||
bool m_eigenvectorsOk;
|
||||
EigenvectorType m_matX;
|
||||
|
||||
private:
|
||||
void doComputeEigenvectors(RealScalar matrixnorm);
|
||||
void sortEigenvalues(bool computeEigenvectors);
|
||||
};
|
||||
|
||||
|
||||
template<typename MatrixType>
|
||||
void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
|
||||
ComplexEigenSolver<MatrixType>& ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
|
||||
{
|
||||
// this code is inspired from Jampack
|
||||
assert(matrix.cols() == matrix.rows());
|
||||
const Index n = matrix.cols();
|
||||
const RealScalar matrixnorm = matrix.norm();
|
||||
|
||||
// Step 1: Do a complex Schur decomposition, A = U T U^*
|
||||
// Do a complex Schur decomposition, A = U T U^*
|
||||
// The eigenvalues are on the diagonal of T.
|
||||
m_schur.compute(matrix);
|
||||
m_eivalues = m_schur.matrixT().diagonal();
|
||||
m_schur.compute(matrix, computeEigenvectors);
|
||||
|
||||
// Step 2: Compute X such that T = X D X^(-1), where D is the diagonal of T.
|
||||
if(m_schur.info() == Success)
|
||||
{
|
||||
m_eivalues = m_schur.matrixT().diagonal();
|
||||
if(computeEigenvectors)
|
||||
doComputeEigenvectors(matrix.norm());
|
||||
sortEigenvalues(computeEigenvectors);
|
||||
}
|
||||
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = computeEigenvectors;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
||||
template<typename MatrixType>
|
||||
void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
|
||||
{
|
||||
const Index n = m_eivalues.size();
|
||||
|
||||
// Compute X such that T = X D X^(-1), where D is the diagonal of T.
|
||||
// The matrix X is unit triangular.
|
||||
m_matX = EigenvectorType::Zero(n, n);
|
||||
for(Index k=n-1 ; k>=0 ; k--)
|
||||
@ -248,16 +299,20 @@ void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 3: Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
|
||||
// Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
|
||||
m_eivec.noalias() = m_schur.matrixU() * m_matX;
|
||||
// .. and normalize the eigenvectors
|
||||
for(Index k=0 ; k<n ; k++)
|
||||
{
|
||||
m_eivec.col(k).normalize();
|
||||
}
|
||||
m_isInitialized = true;
|
||||
}
|
||||
|
||||
// Step 4: Sort the eigenvalues
|
||||
|
||||
template<typename MatrixType>
|
||||
void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
|
||||
{
|
||||
const Index n = m_eivalues.size();
|
||||
for (Index i=0; i<n; i++)
|
||||
{
|
||||
Index k;
|
||||
@ -266,11 +321,11 @@ void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
k += i;
|
||||
std::swap(m_eivalues[k],m_eivalues[i]);
|
||||
m_eivec.col(i).swap(m_eivec.col(k));
|
||||
if(computeEigenvectors)
|
||||
m_eivec.col(i).swap(m_eivec.col(k));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
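A hedged usage sketch of the reworked ComplexEigenSolver interface shown above (computeEigenvectors flag, compute() returning *this, info()); not part of the patch:

#include <Eigen/Eigenvalues>
#include <iostream>
using namespace Eigen;

int main()
{
  MatrixXcf A = MatrixXcf::Random(4,4);

  // eigenvalues only: skip the eigenvector back-substitution
  ComplexEigenSolver<MatrixXcf> ces(A, /*computeEigenvectors=*/false);
  if(ces.info() == Success)
    std::cout << "eigenvalues:\n" << ces.eigenvalues() << "\n";

  // recompute with eigenvectors; compute() returns *this, so calls can be chained
  std::cout << "V:\n" << ces.compute(A, true).eigenvectors() << "\n";
  return 0;
}
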
|
||||
|
@ -27,6 +27,9 @@
|
||||
#ifndef EIGEN_COMPLEX_SCHUR_H
|
||||
#define EIGEN_COMPLEX_SCHUR_H
|
||||
|
||||
#include "./EigenvaluesCommon.h"
|
||||
#include "./HessenbergDecomposition.h"
|
||||
|
||||
template<typename MatrixType, bool IsComplex> struct ei_complex_schur_reduce_to_hessenberg;
|
||||
|
||||
/** \eigenvalues_module \ingroup Eigenvalues_Module
|
||||
@ -110,21 +113,21 @@ template<typename _MatrixType> class ComplexSchur
|
||||
|
||||
/** \brief Constructor; computes Schur decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
|
||||
* \param[in] skipU If true, then the unitary matrix U in the decomposition is not computed.
|
||||
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
|
||||
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
|
||||
*
|
||||
* This constructor calls compute() to compute the Schur decomposition.
|
||||
*
|
||||
* \sa matrixT() and matrixU() for examples.
|
||||
*/
|
||||
ComplexSchur(const MatrixType& matrix, bool skipU = false)
|
||||
ComplexSchur(const MatrixType& matrix, bool computeU = true)
|
||||
: m_matT(matrix.rows(),matrix.cols()),
|
||||
m_matU(matrix.rows(),matrix.cols()),
|
||||
m_hess(matrix.rows()),
|
||||
m_isInitialized(false),
|
||||
m_matUisUptodate(false)
|
||||
{
|
||||
compute(matrix, skipU);
|
||||
compute(matrix, computeU);
|
||||
}
|
||||
|
||||
/** \brief Returns the unitary matrix in the Schur decomposition.
|
||||
@ -132,10 +135,10 @@ template<typename _MatrixType> class ComplexSchur
|
||||
* \returns A const reference to the matrix U.
|
||||
*
|
||||
* It is assumed that either the constructor
|
||||
* ComplexSchur(const MatrixType& matrix, bool skipU) or the
|
||||
* member function compute(const MatrixType& matrix, bool skipU)
|
||||
* ComplexSchur(const MatrixType& matrix, bool computeU) or the
|
||||
* member function compute(const MatrixType& matrix, bool computeU)
|
||||
* has been called before to compute the Schur decomposition of a
|
||||
* matrix, and that \p skipU was set to false (the default
|
||||
* matrix, and that \p computeU was set to true (the default
|
||||
* value).
|
||||
*
|
||||
* Example: \include ComplexSchur_matrixU.cpp
|
||||
@ -153,8 +156,8 @@ template<typename _MatrixType> class ComplexSchur
|
||||
* \returns A const reference to the matrix T.
|
||||
*
|
||||
* It is assumed that either the constructor
|
||||
* ComplexSchur(const MatrixType& matrix, bool skipU) or the
|
||||
* member function compute(const MatrixType& matrix, bool skipU)
|
||||
* ComplexSchur(const MatrixType& matrix, bool computeU) or the
|
||||
* member function compute(const MatrixType& matrix, bool computeU)
|
||||
* has been called before to compute the Schur decomposition of a
|
||||
* matrix.
|
||||
*
|
||||
@ -174,7 +177,8 @@ template<typename _MatrixType> class ComplexSchur
|
||||
/** \brief Computes Schur decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
|
||||
* \param[in] skipU If true, then the unitary matrix U in the decomposition is not computed.
|
||||
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* The Schur decomposition is computed by first reducing the
|
||||
* matrix to Hessenberg form using the class
|
||||
@ -182,24 +186,42 @@ template<typename _MatrixType> class ComplexSchur
|
||||
* to triangular form by performing QR iterations with a single
|
||||
* shift. The cost of computing the Schur decomposition depends
|
||||
* on the number of iterations; as a rough guide, it may be taken
|
||||
* on the number of iterations; as a rough guide, it may be taken
|
||||
* to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
|
||||
* if \a skipU is true.
|
||||
* if \a computeU is false.
|
||||
*
|
||||
* Example: \include ComplexSchur_compute.cpp
|
||||
* Output: \verbinclude ComplexSchur_compute.out
|
||||
*/
|
||||
void compute(const MatrixType& matrix, bool skipU = false);
|
||||
ComplexSchur& compute(const MatrixType& matrix, bool computeU = true);
|
||||
|
||||
/** \brief Reports whether previous computation was successful.
|
||||
*
|
||||
* \returns \c Success if computation was successful, \c NoConvergence otherwise.
|
||||
*/
|
||||
ComputationInfo info() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "ComplexSchur is not initialized.");
|
||||
return m_info;
|
||||
}
|
||||
|
||||
/** \brief Maximum number of iterations.
|
||||
*
|
||||
* Maximum number of iterations allowed for an eigenvalue to converge.
|
||||
*/
|
||||
static const int m_maxIterations = 30;
|
||||
|
||||
protected:
|
||||
ComplexMatrixType m_matT, m_matU;
|
||||
HessenbergDecomposition<MatrixType> m_hess;
|
||||
ComputationInfo m_info;
|
||||
bool m_isInitialized;
|
||||
bool m_matUisUptodate;
|
||||
|
||||
private:
|
||||
bool subdiagonalEntryIsNeglegible(Index i);
|
||||
ComplexScalar computeShift(Index iu, Index iter);
|
||||
void reduceToTriangularForm(bool skipU);
|
||||
void reduceToTriangularForm(bool computeU);
|
||||
friend struct ei_complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
|
||||
};
|
||||
|
||||
@ -295,22 +317,24 @@ typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::compu
|
||||
|
||||
|
||||
template<typename MatrixType>
|
||||
void ComplexSchur<MatrixType>::compute(const MatrixType& matrix, bool skipU)
|
||||
ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
|
||||
{
|
||||
m_matUisUptodate = false;
|
||||
ei_assert(matrix.cols() == matrix.rows());
|
||||
|
||||
if(matrix.cols() == 1)
|
||||
{
|
||||
m_matU = ComplexMatrixType::Identity(1,1);
|
||||
if(!skipU) m_matT = matrix.template cast<ComplexScalar>();
|
||||
m_matT = matrix.template cast<ComplexScalar>();
|
||||
if(computeU) m_matU = ComplexMatrixType::Identity(1,1);
|
||||
m_info = Success;
|
||||
m_isInitialized = true;
|
||||
m_matUisUptodate = !skipU;
|
||||
return;
|
||||
m_matUisUptodate = computeU;
|
||||
return *this;
|
||||
}
|
||||
|
||||
ei_complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix, skipU);
|
||||
reduceToTriangularForm(skipU);
|
||||
ei_complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix, computeU);
|
||||
reduceToTriangularForm(computeU);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/* Reduce given matrix to Hessenberg form */
|
||||
@ -318,28 +342,26 @@ template<typename MatrixType, bool IsComplex>
|
||||
struct ei_complex_schur_reduce_to_hessenberg
|
||||
{
|
||||
// this is the implementation for the case IsComplex = true
|
||||
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool skipU)
|
||||
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
|
||||
{
|
||||
// TODO skip Q if skipU = true
|
||||
_this.m_hess.compute(matrix);
|
||||
_this.m_matT = _this.m_hess.matrixH();
|
||||
if(!skipU) _this.m_matU = _this.m_hess.matrixQ();
|
||||
if(computeU) _this.m_matU = _this.m_hess.matrixQ();
|
||||
}
|
||||
};
|
||||
|
||||
template<typename MatrixType>
|
||||
struct ei_complex_schur_reduce_to_hessenberg<MatrixType, false>
|
||||
{
|
||||
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool skipU)
|
||||
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
|
||||
{
|
||||
typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
|
||||
typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
|
||||
|
||||
// Note: m_hess is over RealScalar; m_matT and m_matU is over ComplexScalar
|
||||
// TODO skip Q if skipU = true
|
||||
_this.m_hess.compute(matrix);
|
||||
_this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();
|
||||
if(!skipU)
|
||||
if(computeU)
|
||||
{
|
||||
// This may cause an allocation which seems to be avoidable
|
||||
MatrixType Q = _this.m_hess.matrixQ();
|
||||
@ -350,7 +372,7 @@ struct ei_complex_schur_reduce_to_hessenberg<MatrixType, false>
|
||||
|
||||
// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.
|
||||
template<typename MatrixType>
|
||||
void ComplexSchur<MatrixType>::reduceToTriangularForm(bool skipU)
|
||||
void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
|
||||
{
|
||||
// The matrix m_matT is divided in three parts.
|
||||
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
|
||||
@ -373,9 +395,9 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool skipU)
|
||||
// if iu is zero then we are done; the whole matrix is triangularized
|
||||
if(iu==0) break;
|
||||
|
||||
// if we spent 30 iterations on the current element, we give up
|
||||
// if we spent too many iterations on the current element, we give up
|
||||
iter++;
|
||||
if(iter >= 30) break;
|
||||
if(iter > m_maxIterations) break;
|
||||
|
||||
// find il, the top row of the active submatrix
|
||||
il = iu-1;
|
||||
@ -393,7 +415,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool skipU)
|
||||
rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
|
||||
m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
|
||||
m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
|
||||
if(!skipU) m_matU.applyOnTheRight(il, il+1, rot);
|
||||
if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
|
||||
|
||||
for(Index i=il+1 ; i<iu ; i++)
|
||||
{
|
||||
@ -401,19 +423,17 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool skipU)
|
||||
m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
|
||||
m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
|
||||
m_matT.topRows(std::min(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
|
||||
if(!skipU) m_matU.applyOnTheRight(i, i+1, rot);
|
||||
if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
|
||||
}
|
||||
}
|
||||
|
||||
if(iter >= 30)
|
||||
{
|
||||
// FIXME : what to do when iter==MAXITER ??
|
||||
// std::cerr << "MAXITER" << std::endl;
|
||||
return;
|
||||
}
|
||||
if(iter <= m_maxIterations)
|
||||
m_info = Success;
|
||||
else
|
||||
m_info = NoConvergence;
|
||||
|
||||
m_isInitialized = true;
|
||||
m_matUisUptodate = !skipU;
|
||||
m_matUisUptodate = computeU;
|
||||
}
|
||||
|
||||
#endif // EIGEN_COMPLEX_SCHUR_H
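Similarly, a hedged sketch of the renamed ComplexSchur interface (computeU replaces skipU, with info() reporting convergence); not part of the patch:

#include <Eigen/Eigenvalues>
#include <iostream>
using namespace Eigen;

int main()
{
  MatrixXcf A = MatrixXcf::Random(4,4);

  // T only: roughly 10n^3 instead of 25n^3 complex flops, as noted above
  ComplexSchur<MatrixXcf> schur(A, /*computeU=*/false);
  if(schur.info() == Success)
    std::cout << "T =\n" << schur.matrixT() << "\n";

  schur.compute(A, true);            // full decomposition A = U T U^*
  std::cout << "U =\n" << schur.matrixU() << "\n";
  return 0;
}
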
|
||||
|
@ -26,6 +26,7 @@
|
||||
#ifndef EIGEN_EIGENSOLVER_H
|
||||
#define EIGEN_EIGENSOLVER_H
|
||||
|
||||
#include "./EigenvaluesCommon.h"
|
||||
#include "./RealSchur.h"
|
||||
|
||||
/** \eigenvalues_module \ingroup Eigenvalues_Module
|
||||
@ -57,16 +58,16 @@
|
||||
* this variant of the eigendecomposition the pseudo-eigendecomposition.
|
||||
*
|
||||
* Call the function compute() to compute the eigenvalues and eigenvectors of
|
||||
* a given matrix. Alternatively, you can use the
|
||||
* EigenSolver(const MatrixType&) constructor which computes the eigenvalues
|
||||
* and eigenvectors at construction time. Once the eigenvalue and eigenvectors
|
||||
* are computed, they can be retrieved with the eigenvalues() and
|
||||
* a given matrix. Alternatively, you can use the
|
||||
* EigenSolver(const MatrixType&, bool) constructor which computes the
|
||||
* eigenvalues and eigenvectors at construction time. Once the eigenvalue and
|
||||
* eigenvectors are computed, they can be retrieved with the eigenvalues() and
|
||||
* eigenvectors() functions. The pseudoEigenvalueMatrix() and
|
||||
* pseudoEigenvectors() methods allow the construction of the
|
||||
* pseudo-eigendecomposition.
|
||||
*
|
||||
* The documentation for EigenSolver(const MatrixType&) contains an example of
|
||||
* the typical use of this class.
|
||||
* The documentation for EigenSolver(const MatrixType&, bool) contains an
|
||||
* example of the typical use of this class.
|
||||
*
|
||||
* \note The implementation is adapted from
|
||||
* <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
|
||||
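The usage pattern described in the class documentation above can be sketched as follows (not part of the patch); the new computeEigenvectors flag is left at its default of true.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  // computeEigenvectors defaults to true; pass false to get only the eigenvalues.
  Eigen::EigenSolver<Eigen::MatrixXd> es(A, true);
  std::cout << "eigenvalues:\n"  << es.eigenvalues()  << std::endl;
  std::cout << "eigenvectors:\n" << es.eigenvectors() << std::endl;
  return 0;
}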
@ -78,7 +79,9 @@ template<typename _MatrixType> class EigenSolver
|
||||
{
|
||||
public:
|
||||
|
||||
/** \brief Synonym for the template parameter \p _MatrixType. */
|
||||
typedef _MatrixType MatrixType;
|
||||
|
||||
enum {
|
||||
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
|
||||
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
|
||||
@ -87,12 +90,12 @@ template<typename _MatrixType> class EigenSolver
|
||||
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
|
||||
};
|
||||
|
||||
/** \brief Scalar type for matrices of type \p _MatrixType. */
|
||||
/** \brief Scalar type for matrices of type #MatrixType. */
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef typename MatrixType::Index Index;
|
||||
|
||||
/** \brief Complex scalar type for \p _MatrixType.
|
||||
/** \brief Complex scalar type for #MatrixType.
|
||||
*
|
||||
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
|
||||
* \c float or \c double) and just \c Scalar if #Scalar is
|
||||
@ -103,27 +106,27 @@ template<typename _MatrixType> class EigenSolver
|
||||
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
|
||||
*
|
||||
* This is a column vector with entries of type #ComplexScalar.
|
||||
* The length of the vector is the size of \p _MatrixType.
|
||||
* The length of the vector is the size of #MatrixType.
|
||||
*/
|
||||
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
|
||||
|
||||
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
|
||||
*
|
||||
* This is a square matrix with entries of type #ComplexScalar.
|
||||
* The size is the same as the size of \p _MatrixType.
|
||||
* The size is the same as the size of #MatrixType.
|
||||
*/
|
||||
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
|
||||
|
||||
/** \brief Default constructor.
|
||||
*
|
||||
* The default constructor is useful in cases in which the user intends to
|
||||
* perform decompositions via EigenSolver::compute(const MatrixType&).
|
||||
* perform decompositions via EigenSolver::compute(const MatrixType&, bool).
|
||||
*
|
||||
* \sa compute() for an example.
|
||||
*/
|
||||
EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {}
|
||||
|
||||
/** \brief Default Constructor with memory preallocation
|
||||
/** \brief Default constructor with memory preallocation
|
||||
*
|
||||
* Like the default constructor but with preallocation of the internal data
|
||||
* according to the specified problem \a size.
|
||||
@ -133,6 +136,7 @@ template<typename _MatrixType> class EigenSolver
|
||||
: m_eivec(size, size),
|
||||
m_eivalues(size),
|
||||
m_isInitialized(false),
|
||||
m_eigenvectorsOk(false),
|
||||
m_realSchur(size),
|
||||
m_matT(size, size),
|
||||
m_tmp(size)
|
||||
@ -141,6 +145,9 @@ template<typename _MatrixType> class EigenSolver
|
||||
/** \brief Constructor; computes eigendecomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
*
|
||||
* This constructor calls compute() to compute the eigenvalues
|
||||
* and eigenvectors.
|
||||
@ -150,23 +157,26 @@ template<typename _MatrixType> class EigenSolver
|
||||
*
|
||||
* \sa compute()
|
||||
*/
|
||||
EigenSolver(const MatrixType& matrix)
|
||||
EigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
|
||||
: m_eivec(matrix.rows(), matrix.cols()),
|
||||
m_eivalues(matrix.cols()),
|
||||
m_isInitialized(false),
|
||||
m_eigenvectorsOk(false),
|
||||
m_realSchur(matrix.cols()),
|
||||
m_matT(matrix.rows(), matrix.cols()),
|
||||
m_tmp(matrix.cols())
|
||||
{
|
||||
compute(matrix);
|
||||
compute(matrix, computeEigenvectors);
|
||||
}
|
||||
|
||||
/** \brief Returns the eigenvectors of given matrix.
|
||||
*
|
||||
* \returns %Matrix whose columns are the (possibly complex) eigenvectors.
|
||||
*
|
||||
* \pre Either the constructor EigenSolver(const MatrixType&) or the
|
||||
* member function compute(const MatrixType&) has been called before.
|
||||
* \pre Either the constructor
|
||||
* EigenSolver(const MatrixType&,bool) or the member function
|
||||
* compute(const MatrixType&, bool) has been called before, and
|
||||
* \p computeEigenvectors was set to true (the default).
|
||||
*
|
||||
* Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
|
||||
* to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
|
||||
@ -185,9 +195,10 @@ template<typename _MatrixType> class EigenSolver
|
||||
*
|
||||
* \returns Const reference to matrix whose columns are the pseudo-eigenvectors.
|
||||
*
|
||||
* \pre Either the constructor EigenSolver(const MatrixType&) or
|
||||
* the member function compute(const MatrixType&) has been called
|
||||
* before.
|
||||
* \pre Either the constructor
|
||||
* EigenSolver(const MatrixType&,bool) or the member function
|
||||
* compute(const MatrixType&, bool) has been called before, and
|
||||
* \p computeEigenvectors was set to true (the default).
|
||||
*
|
||||
* The real matrix \f$ V \f$ returned by this function and the
|
||||
* block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
|
||||
@ -201,6 +212,7 @@ template<typename _MatrixType> class EigenSolver
|
||||
const MatrixType& pseudoEigenvectors() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
|
||||
ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
|
||||
return m_eivec;
|
||||
}
|
||||
|
||||
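For the pseudo-eigendecomposition mentioned above, a short sketch (not part of the patch) that verifies A = V D V^{-1} with the real factors returned by pseudoEigenvectors() and pseudoEigenvalueMatrix():

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::EigenSolver<Eigen::MatrixXd> es(A);
  Eigen::MatrixXd V = es.pseudoEigenvectors();      // real matrix of pseudo-eigenvectors
  Eigen::MatrixXd D = es.pseudoEigenvalueMatrix();  // real block-diagonal matrix
  std::cout << "||A - V D V^{-1}|| = " << (A - V * D * V.inverse()).norm() << std::endl;
  return 0;
}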
@ -208,8 +220,9 @@ template<typename _MatrixType> class EigenSolver
|
||||
*
|
||||
* \returns A block-diagonal matrix.
|
||||
*
|
||||
* \pre Either the constructor EigenSolver(const MatrixType&) or the
|
||||
* member function compute(const MatrixType&) has been called before.
|
||||
* \pre Either the constructor
|
||||
* EigenSolver(const MatrixType&,bool) or the member function
|
||||
* compute(const MatrixType&, bool) has been called before.
|
||||
*
|
||||
* The matrix \f$ D \f$ returned by this function is real and
|
||||
* block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
|
||||
@ -226,8 +239,9 @@ template<typename _MatrixType> class EigenSolver
|
||||
*
|
||||
* \returns A const reference to the column vector containing the eigenvalues.
|
||||
*
|
||||
* \pre Either the constructor EigenSolver(const MatrixType&) or the
|
||||
* member function compute(const MatrixType&) has been called before.
|
||||
* \pre Either the constructor
|
||||
* EigenSolver(const MatrixType&,bool) or the member function
|
||||
* compute(const MatrixType&, bool) has been called before.
|
||||
*
|
||||
* The eigenvalues are repeated according to their algebraic multiplicity,
|
||||
* so there are as many eigenvalues as rows in the matrix.
|
||||
@ -247,34 +261,46 @@ template<typename _MatrixType> class EigenSolver
|
||||
/** \brief Computes eigendecomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* This function computes the eigenvalues and eigenvectors of \p matrix.
|
||||
* The eigenvalues() and eigenvectors() functions can be used to retrieve
|
||||
* the computed eigendecomposition.
|
||||
* This function computes the eigenvalues of the real matrix \p matrix.
|
||||
* The eigenvalues() function can be used to retrieve them. If
|
||||
* \p computeEigenvectors is true, then the eigenvectors are also computed
|
||||
* and can be retrieved by calling eigenvectors().
|
||||
*
|
||||
* The matrix is first reduced to real Schur form using the RealSchur
|
||||
* class. The Schur decomposition is then used to compute the eigenvalues
|
||||
* and eigenvectors.
|
||||
*
|
||||
* The cost of the computation is dominated by the cost of the Schur
|
||||
* decomposition, which is very approximately \f$ 25n^3 \f$ where
|
||||
* \f$ n \f$ is the size of the matrix.
|
||||
* The cost of the computation is dominated by the cost of the
|
||||
* Schur decomposition, which is very approximately \f$ 25n^3 \f$
|
||||
* (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors
|
||||
* is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
|
||||
*
|
||||
* This method reuses the allocated data in the EigenSolver object.
|
||||
*
|
||||
* Example: \include EigenSolver_compute.cpp
|
||||
* Output: \verbinclude EigenSolver_compute.out
|
||||
*/
|
||||
EigenSolver& compute(const MatrixType& matrix);
|
||||
EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
|
||||
|
||||
ComputationInfo info() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
|
||||
return m_realSchur.info();
|
||||
}
|
||||
|
||||
private:
|
||||
void computeEigenvectors();
|
||||
void doComputeEigenvectors();
|
||||
|
||||
protected:
|
||||
MatrixType m_eivec;
|
||||
EigenvalueType m_eivalues;
|
||||
bool m_isInitialized;
|
||||
bool m_eigenvectorsOk;
|
||||
RealSchur<MatrixType> m_realSchur;
|
||||
MatrixType m_matT;
|
||||
|
||||
@ -286,7 +312,7 @@ template<typename MatrixType>
|
||||
MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
|
||||
Index n = m_eivec.cols();
|
||||
Index n = m_eivalues.rows();
|
||||
MatrixType matD = MatrixType::Zero(n,n);
|
||||
for (Index i=0; i<n; ++i)
|
||||
{
|
||||
@ -306,6 +332,7 @@ template<typename MatrixType>
|
||||
typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
|
||||
ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
|
||||
Index n = m_eivec.cols();
|
||||
EigenvectorsType matV(n,n);
|
||||
for (Index j=0; j<n; ++j)
|
||||
@ -332,39 +359,46 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
|
||||
}
|
||||
|
||||
template<typename MatrixType>
|
||||
EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix)
|
||||
EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
|
||||
{
|
||||
assert(matrix.cols() == matrix.rows());
|
||||
|
||||
// Reduce to real Schur form.
|
||||
m_realSchur.compute(matrix);
|
||||
m_matT = m_realSchur.matrixT();
|
||||
m_eivec = m_realSchur.matrixU();
|
||||
|
||||
// Compute eigenvalues from matT
|
||||
m_eivalues.resize(matrix.cols());
|
||||
Index i = 0;
|
||||
while (i < matrix.cols())
|
||||
m_realSchur.compute(matrix, computeEigenvectors);
|
||||
if (m_realSchur.info() == Success)
|
||||
{
|
||||
if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
|
||||
{
|
||||
m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
|
||||
++i;
|
||||
}
|
||||
else
|
||||
{
|
||||
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
|
||||
Scalar z = ei_sqrt(ei_abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
|
||||
m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
|
||||
m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
|
||||
i += 2;
|
||||
}
|
||||
}
|
||||
m_matT = m_realSchur.matrixT();
|
||||
if (computeEigenvectors)
|
||||
m_eivec = m_realSchur.matrixU();
|
||||
|
||||
// Compute eigenvectors.
|
||||
computeEigenvectors();
|
||||
// Compute eigenvalues from matT
|
||||
m_eivalues.resize(matrix.cols());
|
||||
Index i = 0;
|
||||
while (i < matrix.cols())
|
||||
{
|
||||
if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
|
||||
{
|
||||
m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
|
||||
++i;
|
||||
}
|
||||
else
|
||||
{
|
||||
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
|
||||
Scalar z = ei_sqrt(ei_abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
|
||||
m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
|
||||
m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
|
||||
i += 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Compute eigenvectors.
|
||||
if (computeEigenvectors)
|
||||
doComputeEigenvectors();
|
||||
}
|
||||
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = computeEigenvectors;
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
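For reference, the values assigned in the loop above for a 2-by-2 diagonal block of the quasi-triangular matrix T are, restated in LaTeX (this only transcribes the code above, it is not part of the patch):

p = \tfrac{1}{2}\left(T_{i,i} - T_{i+1,i+1}\right), \qquad
z = \sqrt{\left|\, p^2 + T_{i+1,i}\, T_{i,i+1} \,\right|}, \qquad
\lambda_{i},\ \lambda_{i+1} = T_{i+1,i+1} + p \pm i\, z .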
@ -389,7 +423,7 @@ std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
|
||||
|
||||
|
||||
template<typename MatrixType>
|
||||
void EigenSolver<MatrixType>::computeEigenvectors()
|
||||
void EigenSolver<MatrixType>::doComputeEigenvectors()
|
||||
{
|
||||
const Index size = m_eivec.cols();
|
||||
const Scalar eps = NumTraits<Scalar>::epsilon();
|
||||
@ -404,7 +438,7 @@ void EigenSolver<MatrixType>::computeEigenvectors()
|
||||
// Backsubstitute to find vectors of upper triangular form
|
||||
if (norm == 0.0)
|
||||
{
|
||||
return;
|
||||
return;
|
||||
}
|
||||
|
||||
for (Index n = size-1; n >= 0; n--)
|
||||
Eigen/src/Eigenvalues/EigenvaluesCommon.h (new file, 39 lines)
@ -0,0 +1,39 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_EIGENVALUES_COMMON_H
#define EIGEN_EIGENVALUES_COMMON_H

/** \eigenvalues_module \ingroup Eigenvalues_Module
  * \nonstableyet
  *
  * \brief Enum for reporting the status of a computation.
  */
enum ComputationInfo {
  Success = 0,       /**< \brief Computation was successful. */
  NoConvergence = 1  /**< \brief Iterative procedure did not converge. */
};

#endif // EIGEN_EIGENVALUES_COMMON_H
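A short sketch, not part of the patch, of how the new ComputationInfo status is meant to be queried through the info() members added in this changeset; RealSchur is used for illustration, and the enumerator is assumed to live in namespace Eigen as in the rest of the library.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 6);
  Eigen::RealSchur<Eigen::MatrixXd> schur(A);
  if (schur.info() == Eigen::Success)
    std::cout << "converged, T =\n" << schur.matrixT() << std::endl;
  else
    std::cout << "QR iteration did not converge" << std::endl;
  return 0;
}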
@ -53,11 +53,11 @@ struct ei_traits<HessenbergDecompositionMatrixHReturnType<MatrixType> >
|
||||
* \f$ Q^{-1} = Q^* \f$).
|
||||
*
|
||||
* Call the function compute() to compute the Hessenberg decomposition of a
|
||||
* given matrix. Alternatively, you can use the
|
||||
* given matrix. Alternatively, you can use the
|
||||
* HessenbergDecomposition(const MatrixType&) constructor which computes the
|
||||
* Hessenberg decomposition at construction time. Once the decomposition is
|
||||
* computed, you can use the matrixH() and matrixQ() functions to construct
|
||||
* the matrices H and Q in the decomposition.
|
||||
* the matrices H and Q in the decomposition.
|
||||
*
|
||||
* The documentation for matrixH() contains an example of the typical use of
|
||||
* this class.
|
||||
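A minimal usage sketch of the workflow described above (not part of the patch); it assumes matrixH() and matrixQ() as documented in this file and checks A = Q H Q^T for a real matrix.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::HessenbergDecomposition<Eigen::MatrixXd> hd(A);
  Eigen::MatrixXd H = hd.matrixH();   // Hessenberg: zero below the first subdiagonal
  Eigen::MatrixXd Q = hd.matrixQ();   // orthogonal factor
  std::cout << "||Q H Q^T - A|| = " << (Q * H * Q.transpose() - A).norm() << std::endl;
  return 0;
}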
@ -107,14 +107,15 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
*/
|
||||
HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)
|
||||
: m_matrix(size,size),
|
||||
m_temp(size)
|
||||
m_temp(size),
|
||||
m_isInitialized(false)
|
||||
{
|
||||
if(size>1)
|
||||
m_hCoeffs.resize(size-1);
|
||||
}
|
||||
|
||||
/** \brief Constructor; computes Hessenberg decomposition of given matrix.
|
||||
*
|
||||
/** \brief Constructor; computes Hessenberg decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
|
||||
*
|
||||
* This constructor calls compute() to compute the Hessenberg
|
||||
@ -124,17 +125,23 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
*/
|
||||
HessenbergDecomposition(const MatrixType& matrix)
|
||||
: m_matrix(matrix),
|
||||
m_temp(matrix.rows())
|
||||
m_temp(matrix.rows()),
|
||||
m_isInitialized(false)
|
||||
{
|
||||
if(matrix.rows()<2)
|
||||
{
|
||||
m_isInitialized = true;
|
||||
return;
|
||||
}
|
||||
m_hCoeffs.resize(matrix.rows()-1,1);
|
||||
_compute(m_matrix, m_hCoeffs, m_temp);
|
||||
m_isInitialized = true;
|
||||
}
|
||||
|
||||
/** \brief Computes Hessenberg decomposition of given matrix.
|
||||
*
|
||||
/** \brief Computes Hessenberg decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* The Hessenberg decomposition is computed by bringing the columns of the
|
||||
* matrix successively in the required form using Householder reflections
|
||||
@ -148,13 +155,18 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
* Example: \include HessenbergDecomposition_compute.cpp
|
||||
* Output: \verbinclude HessenbergDecomposition_compute.out
|
||||
*/
|
||||
void compute(const MatrixType& matrix)
|
||||
HessenbergDecomposition& compute(const MatrixType& matrix)
|
||||
{
|
||||
m_matrix = matrix;
|
||||
if(matrix.rows()<2)
|
||||
return;
|
||||
{
|
||||
m_isInitialized = true;
|
||||
return *this;
|
||||
}
|
||||
m_hCoeffs.resize(matrix.rows()-1,1);
|
||||
_compute(m_matrix, m_hCoeffs, m_temp);
|
||||
m_isInitialized = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** \brief Returns the Householder coefficients.
|
||||
@ -165,14 +177,18 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
* or the member function compute(const MatrixType&) has been called
|
||||
* before to compute the Hessenberg decomposition of a matrix.
|
||||
*
|
||||
* The Householder coefficients allow the reconstruction of the matrix
|
||||
* The Householder coefficients allow the reconstruction of the matrix
|
||||
* \f$ Q \f$ in the Hessenberg decomposition from the packed data.
|
||||
*
|
||||
* \sa packedMatrix(), \ref Householder_Module "Householder module"
|
||||
*/
|
||||
const CoeffVectorType& householderCoefficients() const { return m_hCoeffs; }
|
||||
const CoeffVectorType& householderCoefficients() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
|
||||
return m_hCoeffs;
|
||||
}
|
||||
|
||||
/** \brief Returns the internal representation of the decomposition
|
||||
/** \brief Returns the internal representation of the decomposition
|
||||
*
|
||||
* \returns a const reference to a matrix with the internal representation
|
||||
* of the decomposition.
|
||||
@ -185,11 +201,11 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
* - the upper part and lower sub-diagonal represent the Hessenberg matrix H
|
||||
* - the rest of the lower part contains the Householder vectors that, combined with
|
||||
* Householder coefficients returned by householderCoefficients(),
|
||||
* allow the reconstruction of the matrix Q as
|
||||
* allow the reconstruction of the matrix Q as
|
||||
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
|
||||
* Here, the matrices \f$ H_i \f$ are the Householder transformations
|
||||
* Here, the matrices \f$ H_i \f$ are the Householder transformations
|
||||
* \f$ H_i = (I - h_i v_i v_i^T) \f$
|
||||
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
|
||||
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
|
||||
* \f$ v_i \f$ is the Householder vector defined by
|
||||
* \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
|
||||
* with M the matrix returned by this function.
|
||||
@ -201,9 +217,13 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
*
|
||||
* \sa householderCoefficients()
|
||||
*/
|
||||
const MatrixType& packedMatrix() const { return m_matrix; }
|
||||
const MatrixType& packedMatrix() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
|
||||
return m_matrix;
|
||||
}
|
||||
|
||||
/** \brief Reconstructs the orthogonal matrix Q in the decomposition
|
||||
/** \brief Reconstructs the orthogonal matrix Q in the decomposition
|
||||
*
|
||||
* \returns object representing the matrix Q
|
||||
*
|
||||
@ -219,6 +239,7 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
*/
|
||||
HouseholderSequenceType matrixQ() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
|
||||
return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate(), false, m_matrix.rows() - 1, 1);
|
||||
}
|
||||
|
||||
@ -244,6 +265,7 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
*/
|
||||
HessenbergDecompositionMatrixHReturnType<MatrixType> matrixH() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
|
||||
return HessenbergDecompositionMatrixHReturnType<MatrixType>(*this);
|
||||
}
|
||||
|
||||
@ -252,15 +274,14 @@ template<typename _MatrixType> class HessenbergDecomposition
|
||||
typedef Matrix<Scalar, 1, Size, Options | RowMajor, 1, MaxSize> VectorType;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp);
|
||||
|
||||
|
||||
protected:
|
||||
MatrixType m_matrix;
|
||||
CoeffVectorType m_hCoeffs;
|
||||
VectorType m_temp;
|
||||
bool m_isInitialized;
|
||||
};
|
||||
|
||||
#ifndef EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
/** \internal
|
||||
* Performs a tridiagonal decomposition of \a matA in place.
|
||||
*
|
||||
@ -302,8 +323,6 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
|
||||
}
|
||||
}
|
||||
|
||||
#endif // EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
/** \eigenvalues_module \ingroup Eigenvalues_Module
|
||||
* \nonstableyet
|
||||
*
|
||||
|
@ -37,7 +37,7 @@ struct ei_eigenvalues_selector
|
||||
{
|
||||
typedef typename Derived::PlainObject PlainObject;
|
||||
PlainObject m_eval(m);
|
||||
return ComplexEigenSolver<PlainObject>(m_eval).eigenvalues();
|
||||
return ComplexEigenSolver<PlainObject>(m_eval, false).eigenvalues();
|
||||
}
|
||||
};
|
||||
|
||||
@ -49,7 +49,7 @@ struct ei_eigenvalues_selector<Derived, false>
|
||||
{
|
||||
typedef typename Derived::PlainObject PlainObject;
|
||||
PlainObject m_eval(m);
|
||||
return EigenSolver<PlainObject>(m_eval).eigenvalues();
|
||||
return EigenSolver<PlainObject>(m_eval, false).eigenvalues();
|
||||
}
|
||||
};
|
||||
|
||||
@ -101,7 +101,7 @@ SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
|
||||
{
|
||||
typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject;
|
||||
PlainObject thisAsMatrix(*this);
|
||||
return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix).eigenvalues();
|
||||
return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();
|
||||
}
|
||||
|
||||
|
||||
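The convenience accessors patched above can be exercised as in the following sketch (not part of the patch); since the selectors now pass computeEigenvectors = false, only the eigenvalues are formed.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;
  std::cout << A.eigenvalues() << std::endl;   // complex column vector of eigenvalues
  return 0;
}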
|
@ -26,6 +26,7 @@
|
||||
#ifndef EIGEN_REAL_SCHUR_H
|
||||
#define EIGEN_REAL_SCHUR_H
|
||||
|
||||
#include "./EigenvaluesCommon.h"
|
||||
#include "./HessenbergDecomposition.h"
|
||||
|
||||
/** \eigenvalues_module \ingroup Eigenvalues_Module
|
||||
@ -50,13 +51,13 @@
|
||||
* the eigendecomposition of a matrix.
|
||||
*
|
||||
* Call the function compute() to compute the real Schur decomposition of a
|
||||
* given matrix. Alternatively, you can use the RealSchur(const MatrixType&)
|
||||
* given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)
|
||||
* constructor which computes the real Schur decomposition at construction
|
||||
* time. Once the decomposition is computed, you can use the matrixU() and
|
||||
* matrixT() functions to retrieve the matrices U and T in the decomposition.
|
||||
*
|
||||
* The documentation of RealSchur(const MatrixType&) contains an example of
|
||||
* the typical use of this class.
|
||||
* The documentation of RealSchur(const MatrixType&, bool) contains an example
|
||||
* of the typical use of this class.
|
||||
*
|
||||
* \note The implementation is adapted from
|
||||
* <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
|
||||
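A standalone sketch of the usage pattern described above (not part of the patch), using the constructor with the new computeU flag and checking the factorization A = U T U^T:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  Eigen::RealSchur<Eigen::MatrixXd> schur(A, true);   // computeU = true (the default)
  Eigen::MatrixXd U = schur.matrixU();                // orthogonal
  Eigen::MatrixXd T = schur.matrixT();                // quasi-triangular
  std::cout << "||U T U^T - A|| = " << (U * T * U.transpose() - A).norm() << std::endl;
  return 0;
}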
@ -98,41 +99,46 @@ template<typename _MatrixType> class RealSchur
|
||||
m_matU(size, size),
|
||||
m_workspaceVector(size),
|
||||
m_hess(size),
|
||||
m_isInitialized(false)
|
||||
m_isInitialized(false),
|
||||
m_matUisUptodate(false)
|
||||
{ }
|
||||
|
||||
/** \brief Constructor; computes real Schur decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
|
||||
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
|
||||
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
|
||||
*
|
||||
* This constructor calls compute() to compute the Schur decomposition.
|
||||
*
|
||||
* Example: \include RealSchur_RealSchur_MatrixType.cpp
|
||||
* Output: \verbinclude RealSchur_RealSchur_MatrixType.out
|
||||
*/
|
||||
RealSchur(const MatrixType& matrix)
|
||||
RealSchur(const MatrixType& matrix, bool computeU = true)
|
||||
: m_matT(matrix.rows(),matrix.cols()),
|
||||
m_matU(matrix.rows(),matrix.cols()),
|
||||
m_workspaceVector(matrix.rows()),
|
||||
m_hess(matrix.rows()),
|
||||
m_isInitialized(false)
|
||||
m_isInitialized(false),
|
||||
m_matUisUptodate(false)
|
||||
{
|
||||
compute(matrix);
|
||||
compute(matrix, computeU);
|
||||
}
|
||||
|
||||
/** \brief Returns the orthogonal matrix in the Schur decomposition.
|
||||
*
|
||||
* \returns A const reference to the matrix U.
|
||||
*
|
||||
* \pre Either the constructor RealSchur(const MatrixType&) or the member
|
||||
* function compute(const MatrixType&) has been called before to compute
|
||||
* the Schur decomposition of a matrix.
|
||||
* \pre Either the constructor RealSchur(const MatrixType&, bool) or the
|
||||
* member function compute(const MatrixType&, bool) has been called before
|
||||
* to compute the Schur decomposition of a matrix, and \p computeU was set
|
||||
* to true (the default value).
|
||||
*
|
||||
* \sa RealSchur(const MatrixType&) for an example
|
||||
* \sa RealSchur(const MatrixType&, bool) for an example
|
||||
*/
|
||||
const MatrixType& matrixU() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "RealSchur is not initialized.");
|
||||
ei_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
|
||||
return m_matU;
|
||||
}
|
||||
|
||||
@ -140,11 +146,11 @@ template<typename _MatrixType> class RealSchur
|
||||
*
|
||||
* \returns A const reference to the matrix T.
|
||||
*
|
||||
* \pre Either the constructor RealSchur(const MatrixType&) or the member
|
||||
* function compute(const MatrixType&) has been called before to compute
|
||||
* the Schur decomposition of a matrix.
|
||||
* \pre Either the constructor RealSchur(const MatrixType&, bool) or the
|
||||
* member function compute(const MatrixType&, bool) has been called before
|
||||
* to compute the Schur decomposition of a matrix.
|
||||
*
|
||||
* \sa RealSchur(const MatrixType&) for an example
|
||||
* \sa RealSchur(const MatrixType&, bool) for an example
|
||||
*/
|
||||
const MatrixType& matrixT() const
|
||||
{
|
||||
@ -154,19 +160,38 @@ template<typename _MatrixType> class RealSchur
|
||||
|
||||
/** \brief Computes Schur decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
|
||||
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
|
||||
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* The Schur decomposition is computed by first reducing the matrix to
|
||||
* Hessenberg form using the class HessenbergDecomposition. The Hessenberg
|
||||
* matrix is then reduced to triangular form by performing Francis QR
|
||||
* iterations with implicit double shift. The cost of computing the Schur
|
||||
* decomposition depends on the number of iterations; as a rough guide, it
|
||||
* may be taken to be \f$25n^3\f$ flops.
|
||||
* may be taken to be \f$25n^3\f$ flops if \a computeU is true and
|
||||
* \f$10n^3\f$ flops if \a computeU is false.
|
||||
*
|
||||
* Example: \include RealSchur_compute.cpp
|
||||
* Output: \verbinclude RealSchur_compute.out
|
||||
*/
|
||||
void compute(const MatrixType& matrix);
|
||||
RealSchur& compute(const MatrixType& matrix, bool computeU = true);
|
||||
|
||||
/** \brief Reports whether previous computation was successful.
|
||||
*
|
||||
* \returns \c Success if computation was successful, \c NoConvergence otherwise.
|
||||
*/
|
||||
ComputationInfo info() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "RealSchur is not initialized.");
|
||||
return m_info;
|
||||
}
|
||||
|
||||
/** \brief Maximum number of iterations.
|
||||
*
|
||||
* Maximum number of iterations allowed for an eigenvalue to converge.
|
||||
*/
|
||||
static const int m_maxIterations = 40;
|
||||
|
||||
private:
|
||||
|
||||
@ -174,39 +199,41 @@ template<typename _MatrixType> class RealSchur
|
||||
MatrixType m_matU;
|
||||
ColumnVectorType m_workspaceVector;
|
||||
HessenbergDecomposition<MatrixType> m_hess;
|
||||
ComputationInfo m_info;
|
||||
bool m_isInitialized;
|
||||
bool m_matUisUptodate;
|
||||
|
||||
typedef Matrix<Scalar,3,1> Vector3s;
|
||||
|
||||
Scalar computeNormOfT();
|
||||
Index findSmallSubdiagEntry(Index iu, Scalar norm);
|
||||
void splitOffTwoRows(Index iu, Scalar exshift);
|
||||
void splitOffTwoRows(Index iu, bool computeU, Scalar exshift);
|
||||
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
|
||||
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
|
||||
void performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace);
|
||||
void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
|
||||
};
|
||||
|
||||
|
||||
template<typename MatrixType>
|
||||
void RealSchur<MatrixType>::compute(const MatrixType& matrix)
|
||||
RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
|
||||
{
|
||||
assert(matrix.cols() == matrix.rows());
|
||||
|
||||
// Step 1. Reduce to Hessenberg form
|
||||
// TODO skip Q if skipU = true
|
||||
m_hess.compute(matrix);
|
||||
m_matT = m_hess.matrixH();
|
||||
m_matU = m_hess.matrixQ();
|
||||
if (computeU)
|
||||
m_matU = m_hess.matrixQ();
|
||||
|
||||
// Step 2. Reduce to real Schur form
|
||||
m_workspaceVector.resize(m_matU.cols());
|
||||
m_workspaceVector.resize(m_matT.cols());
|
||||
Scalar* workspace = &m_workspaceVector.coeffRef(0);
|
||||
|
||||
// The matrix m_matT is divided in three parts.
|
||||
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
|
||||
// Rows il,...,iu is the part we are working on (the active window).
|
||||
// Rows iu+1,...,end are already brought in triangular form.
|
||||
Index iu = m_matU.cols() - 1;
|
||||
Index iu = m_matT.cols() - 1;
|
||||
Index iter = 0; // iteration count
|
||||
Scalar exshift = 0.0; // sum of exceptional shifts
|
||||
Scalar norm = computeNormOfT();
|
||||
@ -226,7 +253,7 @@ void RealSchur<MatrixType>::compute(const MatrixType& matrix)
|
||||
}
|
||||
else if (il == iu-1) // Two roots found
|
||||
{
|
||||
splitOffTwoRows(iu, exshift);
|
||||
splitOffTwoRows(iu, computeU, exshift);
|
||||
iu -= 2;
|
||||
iter = 0;
|
||||
}
|
||||
@ -234,21 +261,29 @@ void RealSchur<MatrixType>::compute(const MatrixType& matrix)
|
||||
{
|
||||
Vector3s firstHouseholderVector, shiftInfo;
|
||||
computeShift(iu, iter, exshift, shiftInfo);
|
||||
iter = iter + 1; // (Could check iteration count here.)
|
||||
iter = iter + 1;
|
||||
if (iter > m_maxIterations) break;
|
||||
Index im;
|
||||
initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
|
||||
performFrancisQRStep(il, im, iu, firstHouseholderVector, workspace);
|
||||
performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
|
||||
}
|
||||
}
|
||||
|
||||
if(iter <= m_maxIterations)
|
||||
m_info = Success;
|
||||
else
|
||||
m_info = NoConvergence;
|
||||
|
||||
m_isInitialized = true;
|
||||
m_matUisUptodate = computeU;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** \internal Computes and returns vector L1 norm of T */
|
||||
template<typename MatrixType>
|
||||
inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
|
||||
{
|
||||
const Index size = m_matU.cols();
|
||||
const Index size = m_matT.cols();
|
||||
// FIXME to be efficient the following would require a triangular reduction code
|
||||
// Scalar norm = m_matT.upper().cwiseAbs().sum()
|
||||
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
|
||||
@ -277,9 +312,9 @@ inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(I
|
||||
|
||||
/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
|
||||
template<typename MatrixType>
|
||||
inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, Scalar exshift)
|
||||
inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift)
|
||||
{
|
||||
const Index size = m_matU.cols();
|
||||
const Index size = m_matT.cols();
|
||||
|
||||
// The eigenvalues of the 2x2 matrix [a b; c d] are
|
||||
// tr/2 +/- sqrt(discr)/2 where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
|
||||
@ -300,7 +335,8 @@ inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, Scalar exshift)
|
||||
m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());
|
||||
m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);
|
||||
m_matT.coeffRef(iu, iu-1) = Scalar(0);
|
||||
m_matU.applyOnTheRight(iu-1, iu, rot);
|
||||
if (computeU)
|
||||
m_matU.applyOnTheRight(iu-1, iu, rot);
|
||||
}
|
||||
|
||||
if (iu > 1)
|
||||
@ -375,12 +411,12 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
|
||||
|
||||
/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
|
||||
template<typename MatrixType>
|
||||
inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace)
|
||||
inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
|
||||
{
|
||||
assert(im >= il);
|
||||
assert(im <= iu-2);
|
||||
|
||||
const Index size = m_matU.cols();
|
||||
const Index size = m_matT.cols();
|
||||
|
||||
for (Index k = im; k <= iu-2; ++k)
|
||||
{
|
||||
@ -406,7 +442,8 @@ inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Inde
|
||||
// These Householder transformations form the O(n^3) part of the algorithm
|
||||
m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
|
||||
m_matT.block(0, k, std::min(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
if (computeU)
|
||||
m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
}
|
||||
}
|
||||
|
||||
@ -420,7 +457,8 @@ inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Inde
|
||||
m_matT.coeffRef(iu-1, iu-2) = beta;
|
||||
m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);
|
||||
m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
if (computeU)
|
||||
m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
|
||||
}
|
||||
|
||||
// clean up pollution due to round-off errors
|
||||
|
@ -26,6 +26,9 @@
|
||||
#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
|
||||
#define EIGEN_SELFADJOINTEIGENSOLVER_H
|
||||
|
||||
#include "./EigenvaluesCommon.h"
|
||||
#include "./Tridiagonalization.h"
|
||||
|
||||
/** \eigenvalues_module \ingroup Eigenvalues_Module
|
||||
* \nonstableyet
|
||||
*
|
||||
@ -35,12 +38,12 @@
|
||||
*
|
||||
* \tparam _MatrixType the type of the matrix of which we are computing the
|
||||
* eigendecomposition; this is expected to be an instantiation of the Matrix
|
||||
* class template. Currently, only real matrices are supported.
|
||||
* class template.
|
||||
*
|
||||
* A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
|
||||
* matrices, this means that the matrix is symmetric: it equals its
|
||||
* transpose. This class computes the eigenvalues and eigenvectors of a
|
||||
* selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
|
||||
* selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
|
||||
* \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a
|
||||
* selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
|
||||
* the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
|
||||
@ -52,6 +55,8 @@
|
||||
* faster and more accurate than the general purpose eigenvalue algorithms
|
||||
* implemented in EigenSolver and ComplexEigenSolver.
|
||||
*
|
||||
* Only the \b lower \b triangular \b part of the input matrix is referenced.
|
||||
*
|
||||
* This class can also be used to solve the generalized eigenvalue problem
|
||||
* \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be
|
||||
* selfadjoint and the matrix \f$ B \f$ should be positive definite.
|
||||
@ -65,7 +70,7 @@
|
||||
*
|
||||
* The documentation for SelfAdjointEigenSolver(const MatrixType&, bool)
|
||||
* contains an example of the typical use of this class.
|
||||
*
|
||||
*
|
||||
* \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
|
||||
*/
|
||||
template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
@ -84,15 +89,15 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::Index Index;
|
||||
|
||||
/** \brief Real scalar type for \p _MatrixType.
|
||||
/** \brief Real scalar type for \p _MatrixType.
|
||||
*
|
||||
* This is just \c Scalar if #Scalar is real (e.g., \c float or
|
||||
* This is just \c Scalar if #Scalar is real (e.g., \c float or
|
||||
* \c double), and the type of the real part of \c Scalar if #Scalar is
|
||||
* complex.
|
||||
*/
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
|
||||
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
|
||||
*
|
||||
* This is a column vector with entries of type #RealScalar.
|
||||
* The length of the vector is the size of \p _MatrixType.
|
||||
@ -114,11 +119,9 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
SelfAdjointEigenSolver()
|
||||
: m_eivec(),
|
||||
m_eivalues(),
|
||||
m_tridiag(),
|
||||
m_subdiag()
|
||||
{
|
||||
ei_assert(Size!=Dynamic);
|
||||
}
|
||||
m_subdiag(),
|
||||
m_isInitialized(false)
|
||||
{ }
|
||||
|
||||
/** \brief Constructor, pre-allocates memory for dynamic-size matrices.
|
||||
*
|
||||
@ -128,7 +131,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
* This constructor is useful for dynamic-size matrices, when the user
|
||||
* intends to perform decompositions via compute(const MatrixType&, bool)
|
||||
* or compute(const MatrixType&, const MatrixType&, bool). The \p size
|
||||
* parameter is only used as a hint. It is not an error to give a wrong
|
||||
* parameter is only used as a hint. It is not an error to give a wrong
|
||||
* \p size, but it may impair performance.
|
||||
*
|
||||
* \sa compute(const MatrixType&, bool) for an example
|
||||
@ -136,17 +139,17 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
SelfAdjointEigenSolver(Index size)
|
||||
: m_eivec(size, size),
|
||||
m_eivalues(size),
|
||||
m_tridiag(size),
|
||||
m_subdiag(size > 1 ? size - 1 : 1)
|
||||
m_subdiag(size > 1 ? size - 1 : 1),
|
||||
m_isInitialized(false)
|
||||
{}
|
||||
|
||||
/** \brief Constructor; computes eigendecomposition of given matrix.
|
||||
*
|
||||
/** \brief Constructor; computes eigendecomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
|
||||
* be computed.
|
||||
* be computed. Only the lower triangular part of the matrix is referenced.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
* computed.
|
||||
*
|
||||
* This constructor calls compute(const MatrixType&, bool) to compute the
|
||||
* eigenvalues of the matrix \p matrix. The eigenvectors are computed if
|
||||
@ -155,55 +158,58 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
|
||||
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
|
||||
*
|
||||
* \sa compute(const MatrixType&, bool),
|
||||
* \sa compute(const MatrixType&, bool),
|
||||
* SelfAdjointEigenSolver(const MatrixType&, const MatrixType&, bool)
|
||||
*/
|
||||
SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
|
||||
: m_eivec(matrix.rows(), matrix.cols()),
|
||||
m_eivalues(matrix.cols()),
|
||||
m_tridiag(matrix.rows()),
|
||||
m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1)
|
||||
m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
|
||||
m_isInitialized(false)
|
||||
{
|
||||
compute(matrix, computeEigenvectors);
|
||||
}
|
||||
|
||||
/** \brief Constructor; computes eigendecomposition of given matrix pencil.
|
||||
*
|
||||
/** \brief Constructor; computes generalized eigendecomposition of given matrix pencil.
|
||||
*
|
||||
* \param[in] matA Selfadjoint matrix in matrix pencil.
|
||||
* Only the lower triangular part of the matrix is referenced.
|
||||
* \param[in] matB Positive-definite matrix in matrix pencil.
|
||||
* Only the lower triangular part of the matrix is referenced.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
* computed.
|
||||
*
|
||||
* This constructor calls compute(const MatrixType&, const MatrixType&, bool)
|
||||
* This constructor calls compute(const MatrixType&, const MatrixType&, bool)
|
||||
* to compute the eigenvalues and (if requested) the eigenvectors of the
|
||||
* generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the
|
||||
* selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix
|
||||
* \f$ B \f$ . The eigenvectors are computed if \a computeEigenvectors is
|
||||
* true.
|
||||
* \f$ B \f$. Each eigenvector \f$ x \f$ satisfies the property
|
||||
* \f$ x^* B x = 1 \f$. The eigenvectors are computed if
|
||||
* \a computeEigenvectors is true.
|
||||
*
|
||||
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
|
||||
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out
|
||||
*
|
||||
* \sa compute(const MatrixType&, const MatrixType&, bool),
|
||||
* \sa compute(const MatrixType&, const MatrixType&, bool),
|
||||
* SelfAdjointEigenSolver(const MatrixType&, bool)
|
||||
*/
|
||||
SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
|
||||
: m_eivec(matA.rows(), matA.cols()),
|
||||
m_eivalues(matA.cols()),
|
||||
m_tridiag(matA.rows()),
|
||||
m_subdiag(matA.rows() > 1 ? matA.rows() - 1 : 1)
|
||||
m_subdiag(matA.rows() > 1 ? matA.rows() - 1 : 1),
|
||||
m_isInitialized(false)
|
||||
{
|
||||
compute(matA, matB, computeEigenvectors);
|
||||
}
|
||||
|
||||
/** \brief Computes eigendecomposition of given matrix.
|
||||
*
|
||||
/** \brief Computes eigendecomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
|
||||
* be computed.
|
||||
* be computed. Only the lower triangular part of the matrix is referenced.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
* computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* This function computes the eigenvalues of \p matrix. The eigenvalues()
|
||||
@ -231,28 +237,33 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
*/
|
||||
SelfAdjointEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
|
||||
|
||||
/** \brief Computes eigendecomposition of given matrix pencil.
|
||||
*
|
||||
/** \brief Computes generalized eigendecomposition of given matrix pencil.
|
||||
*
|
||||
* \param[in] matA Selfadjoint matrix in matrix pencil.
|
||||
* Only the lower triangular part of the matrix is referenced.
|
||||
* \param[in] matB Positive-definite matrix in matrix pencil.
|
||||
* Only the lower triangular part of the matrix is referenced.
|
||||
* \param[in] computeEigenvectors If true, both the eigenvectors and the
|
||||
* eigenvalues are computed; if false, only the eigenvalues are
|
||||
* computed.
|
||||
* computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* This function computes eigenvalues and (if requested) the eigenvectors
|
||||
* of the generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA
|
||||
* the selfadjoint matrix \f$ A \f$ and \a matB the positive definite
|
||||
* matrix \f$ B \f$. The eigenvalues() function can be used to retrieve
|
||||
* matrix \f$ B \f$. In addition, each eigenvector \f$ x \f$
|
||||
* satisfies the property \f$ x^* B x = 1 \f$.
|
||||
*
|
||||
* The eigenvalues() function can be used to retrieve
|
||||
* the eigenvalues. If \p computeEigenvectors is true, then the
|
||||
* eigenvectors are also computed and can be retrieved by calling
|
||||
* eigenvectors().
|
||||
*
|
||||
* The implementation uses LLT to compute the Cholesky decomposition
|
||||
* The implementation uses LLT to compute the Cholesky decomposition
|
||||
* \f$ B = LL^* \f$ and calls compute(const MatrixType&, bool) to compute
|
||||
* the eigendecomposition \f$ L^{-1} A (L^*)^{-1} \f$. This solves the
|
||||
* generalized eigenproblem, because any solution of the generalized
|
||||
* eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution
|
||||
* eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution
|
||||
* \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the
|
||||
* eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$.
|
||||
*
|
||||
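A sketch of the generalized eigenproblem A x = lambda B x described above, using the two-matrix constructor shown in this patch (not part of the patch itself; the test matrices are arbitrary):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  const int n = 4;
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd A = X + X.transpose();                                            // selfadjoint
  Eigen::MatrixXd B = X * X.transpose() + double(n) * Eigen::MatrixXd::Identity(n, n); // positive definite
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A, B);
  std::cout << "generalized eigenvalues:\n" << es.eigenvalues() << std::endl;
  // Each returned eigenvector x satisfies x^T B x = 1 up to rounding, as stated above.
  return 0;
}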
@ -263,7 +274,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
*/
|
||||
SelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true);
|
||||
|
||||
/** \brief Returns the eigenvectors of given matrix (pencil).
|
||||
/** \brief Returns the eigenvectors of given matrix (pencil).
|
||||
*
|
||||
* \returns A const reference to the matrix whose columns are the eigenvectors.
|
||||
*
|
||||
@ -283,13 +294,12 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
*/
|
||||
const MatrixType& eigenvectors() const
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
ei_assert(m_eigenvectorsOk);
|
||||
#endif
|
||||
ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
|
||||
ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
|
||||
return m_eivec;
|
||||
}
|
||||
|
||||
/** \brief Returns the eigenvalues of given matrix (pencil).
|
||||
/** \brief Returns the eigenvalues of given matrix (pencil).
|
||||
*
|
||||
* \returns A const reference to the column vector containing the eigenvalues.
|
||||
*
|
||||
@ -303,9 +313,13 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
*
|
||||
* \sa eigenvectors(), MatrixBase::eigenvalues()
|
||||
*/
|
||||
const RealVectorType& eigenvalues() const { return m_eivalues; }
|
||||
const RealVectorType& eigenvalues() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
|
||||
return m_eivalues;
|
||||
}
|
||||
|
||||
/** \brief Computes the positive-definite square root of the matrix.
|
||||
/** \brief Computes the positive-definite square root of the matrix.
|
||||
*
|
||||
* \returns the positive-definite square root of the matrix
|
||||
*
|
||||
@ -320,15 +334,17 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
* Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
|
||||
* Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
|
||||
*
|
||||
* \sa operatorInverseSqrt(),
|
||||
* \sa operatorInverseSqrt(),
|
||||
* \ref MatrixFunctions_Module "MatrixFunctions Module"
|
||||
*/
|
||||
MatrixType operatorSqrt() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
|
||||
ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
|
||||
return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
|
||||
}
|
||||
|
||||
/** \brief Computes the inverse square root of the matrix.
|
||||
/** \brief Computes the inverse square root of the matrix.
|
||||
*
|
||||
* \returns the inverse positive-definite square root of the matrix
|
||||
*
|
||||
@ -348,22 +364,36 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
|
||||
*/
|
||||
MatrixType operatorInverseSqrt() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
|
||||
ei_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
|
||||
return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
|
||||
}
|
||||
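In LaTeX, the two accessors above evaluate (restating their return expressions, with V the matrix of eigenvectors and D the diagonal matrix of eigenvalues):

A^{1/2}  = V \, D^{1/2} \, V^{*}, \qquad
A^{-1/2} = V \, D^{-1/2} \, V^{*}.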
|
||||
/** \brief Reports whether previous computation was successful.
|
||||
*
|
||||
* \returns \c Success if computation was successful, \c NoConvergence otherwise.
|
||||
*/
|
||||
ComputationInfo info() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
|
||||
return m_info;
|
||||
}
|
||||
|
||||
/** \brief Maximum number of iterations.
|
||||
*
|
||||
* Maximum number of iterations allowed for an eigenvalue to converge.
|
||||
*/
|
||||
static const int m_maxIterations = 30;
|
||||
|
||||
protected:
|
||||
MatrixType m_eivec;
|
||||
RealVectorType m_eivalues;
|
||||
TridiagonalizationType m_tridiag;
|
||||
typename TridiagonalizationType::SubDiagonalType m_subdiag;
|
||||
#ifndef NDEBUG
|
||||
ComputationInfo m_info;
|
||||
bool m_isInitialized;
|
||||
bool m_eigenvectorsOk;
|
||||
#endif
|
||||
};
|
||||
|
||||
#ifndef EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
/** \internal
|
||||
*
|
||||
* \eigenvalues_module \ingroup Eigenvalues_Module
|
||||
@ -386,30 +416,33 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index
|
||||
template<typename MatrixType>
|
||||
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
m_eigenvectorsOk = computeEigenvectors;
|
||||
#endif
|
||||
assert(matrix.cols() == matrix.rows());
|
||||
Index n = matrix.cols();
|
||||
m_eivalues.resize(n,1);
|
||||
m_eivec.resize(n,n);
|
||||
|
||||
if(n==1)
|
||||
{
|
||||
m_eivalues.coeffRef(0,0) = ei_real(matrix.coeff(0,0));
|
||||
m_eivec.setOnes();
|
||||
if(computeEigenvectors)
|
||||
m_eivec.setOnes();
|
||||
m_info = Success;
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = computeEigenvectors;
|
||||
return *this;
|
||||
}
|
||||
|
||||
m_tridiag.compute(matrix);
|
||||
// declare some aliases
|
||||
RealVectorType& diag = m_eivalues;
|
||||
diag = m_tridiag.diagonal();
|
||||
m_subdiag = m_tridiag.subDiagonal();
|
||||
if (computeEigenvectors)
|
||||
m_eivec = m_tridiag.matrixQ();
|
||||
MatrixType& mat = m_eivec;
|
||||
|
||||
mat = matrix;
|
||||
m_subdiag.resize(n-1);
|
||||
ei_tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
|
||||
|
||||
Index end = n-1;
|
||||
Index start = 0;
|
||||
Index iter = 0; // number of iterations we are working on one element
|
||||
|
||||
while (end>0)
|
||||
{
|
||||
for (Index i = start; i<end; ++i)
|
||||
@ -418,9 +451,17 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(
|
||||
|
||||
// find the largest unreduced block
|
||||
while (end>0 && m_subdiag[end-1]==0)
|
||||
{
|
||||
iter = 0;
|
||||
end--;
|
||||
}
|
||||
if (end<=0)
|
||||
break;
|
||||
|
||||
// if we spent too many iterations on the current element, we give up
|
||||
iter++;
|
||||
if(iter > m_maxIterations) break;
|
||||
|
||||
start = end - 1;
|
||||
while (start>0 && m_subdiag[start-1]!=0)
|
||||
start--;
|
||||
@ -428,19 +469,31 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(
|
||||
ei_tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
|
||||
}
|
||||
|
||||
if (iter <= m_maxIterations)
|
||||
m_info = Success;
|
||||
else
|
||||
m_info = NoConvergence;
|
||||
|
||||
// Sort eigenvalues and corresponding vectors.
|
||||
// TODO make the sort optional ?
|
||||
// TODO use a better sort algorithm !!
|
||||
for (Index i = 0; i < n-1; ++i)
|
||||
if (m_info == Success)
|
||||
{
|
||||
Index k;
|
||||
m_eivalues.segment(i,n-i).minCoeff(&k);
|
||||
if (k > 0)
|
||||
for (Index i = 0; i < n-1; ++i)
|
||||
{
|
||||
std::swap(m_eivalues[i], m_eivalues[k+i]);
|
||||
m_eivec.col(i).swap(m_eivec.col(k+i));
|
||||
Index k;
|
||||
m_eivalues.segment(i,n-i).minCoeff(&k);
|
||||
if (k > 0)
|
||||
{
|
||||
std::swap(m_eivalues[i], m_eivalues[k+i]);
|
||||
if(computeEigenvectors)
|
||||
m_eivec.col(i).swap(m_eivec.col(k+i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = computeEigenvectors;
|
||||
return *this;
|
||||
}
|
||||
|
||||
@ -450,39 +503,23 @@ compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors
|
||||
{
  ei_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());

  // Compute the cholesky decomposition of matB = L L'
  // Compute the cholesky decomposition of matB = L L' = U'U
  LLT<MatrixType> cholB(matB);

  // compute C = inv(L) A inv(L')
  MatrixType matC = matA;
  cholB.matrixL().solveInPlace(matC);
  // FIXME since we currently do not support A * inv(L'), let's do (inv(L) A')' :
  matC.adjointInPlace();
  cholB.matrixL().solveInPlace(matC);
  matC.adjointInPlace();
  // this version works too:
  // matC = matC.transpose();
  // cholB.matrixL().conjugate().template marked<Lower>().solveTriangularInPlace(matC);
  // matC = matC.transpose();
  // FIXME: this should work: (currently it only does for small matrices)
  // Transpose<MatrixType> trMatC(matC);
  // cholB.matrixL().conjugate().eval().template marked<Lower>().solveTriangularInPlace(trMatC);
  MatrixType matC = matA.template selfadjointView<Lower>();
  cholB.matrixL().template solveInPlace<OnTheLeft>(matC);
  cholB.matrixU().template solveInPlace<OnTheRight>(matC);

  compute(matC, computeEigenvectors);

  if (computeEigenvectors)
  {
    // transform back the eigenvectors: evecs = inv(U) * evecs
    if(computeEigenvectors)
      cholB.matrixU().solveInPlace(m_eivec);
    for (Index i=0; i<m_eivec.cols(); ++i)
      m_eivec.col(i) = m_eivec.col(i).normalized();
  }

  return *this;
}
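// Illustrative sketch of the reduction above: the Cholesky-based change of
// variables A x = lambda B x  ->  C y = lambda y with C = L^-1 A L^-*, written
// out with dense double matrices. It assumes B is positive definite; the
// function name is made up.
#include <Eigen/Dense>
void generalized_selfadjoint_sketch(const Eigen::MatrixXd& A, const Eigen::MatrixXd& B)
{
  using namespace Eigen;
  LLT<MatrixXd> cholB(B);                          // B = L L^* = U^* U
  MatrixXd C = A.selfadjointView<Lower>();         // symmetrize from the lower part, as above
  cholB.matrixL().solveInPlace(C);                 // C <- L^-1 A
  cholB.matrixU().solveInPlace<OnTheRight>(C);     // C <- C U^-1, i.e. C = L^-1 A L^-*
  SelfAdjointEigenSolver<MatrixXd> es(C);          // standard selfadjoint problem
  // Eigenvectors x of the original pencil are recovered as x = U^-1 y, which is
  // what the member function does via cholB.matrixU().solveInPlace(m_eivec).
}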
|
||||
|
||||
#endif // EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
#ifndef EIGEN_EXTERN_INSTANTIATIONS
|
||||
template<typename RealScalar, typename Scalar, typename Index>
|
||||
static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
|
||||
{
|
||||
@ -524,6 +561,5 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // EIGEN_SELFADJOINTEIGENSOLVER_H
|
||||
|
@ -80,7 +80,7 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
|
||||
typedef typename ei_plain_col_type<MatrixType, RealScalar>::type DiagonalType;
|
||||
typedef Matrix<RealScalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> SubDiagonalType;
|
||||
|
||||
|
||||
typedef typename ei_meta_if<NumTraits<Scalar>::IsComplex,
|
||||
typename Diagonal<MatrixType,0>::RealReturnType,
|
||||
Diagonal<MatrixType,0>
|
||||
@ -109,11 +109,13 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
* \sa compute() for an example.
|
||||
*/
|
||||
Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
|
||||
: m_matrix(size,size), m_hCoeffs(size > 1 ? size-1 : 1)
|
||||
: m_matrix(size,size),
|
||||
m_hCoeffs(size > 1 ? size-1 : 1),
|
||||
m_isInitialized(false)
|
||||
{}
|
||||
|
||||
/** \brief Constructor; computes tridiagonal decomposition of given matrix.
|
||||
*
|
||||
/** \brief Constructor; computes tridiagonal decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
|
||||
* is to be computed.
|
||||
*
|
||||
@ -123,15 +125,19 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
* Output: \verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out
|
||||
*/
|
||||
Tridiagonalization(const MatrixType& matrix)
|
||||
: m_matrix(matrix), m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1)
|
||||
: m_matrix(matrix),
|
||||
m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1),
|
||||
m_isInitialized(false)
|
||||
{
|
||||
_compute(m_matrix, m_hCoeffs);
|
||||
m_isInitialized = true;
|
||||
}
|
||||
|
||||
/** \brief Computes tridiagonal decomposition of given matrix.
|
||||
*
|
||||
/** \brief Computes tridiagonal decomposition of given matrix.
|
||||
*
|
||||
* \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
|
||||
* is to be computed.
|
||||
* \returns Reference to \c *this
|
||||
*
|
||||
* The tridiagonal decomposition is computed by bringing the columns of
|
||||
* the matrix successively in the required form using Householder
|
||||
@ -144,11 +150,13 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
* Example: \include Tridiagonalization_compute.cpp
|
||||
* Output: \verbinclude Tridiagonalization_compute.out
|
||||
*/
|
||||
void compute(const MatrixType& matrix)
|
||||
Tridiagonalization& compute(const MatrixType& matrix)
|
||||
{
|
||||
m_matrix = matrix;
|
||||
m_hCoeffs.resize(matrix.rows()-1, 1);
|
||||
_compute(m_matrix, m_hCoeffs);
|
||||
ei_tridiagonalization_inplace(m_matrix, m_hCoeffs);
|
||||
m_isInitialized = true;
|
||||
return *this;
|
||||
}
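// Illustrative usage sketch of the Tridiagonalization interface documented
// above; the input matrix is made up and the function name is only illustrative.
#include <Eigen/Eigenvalues>
void tridiagonalization_usage_sketch()
{
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(5,5);
  MatrixXd S = A + A.transpose();          // Tridiagonalization expects a selfadjoint matrix
  Tridiagonalization<MatrixXd> tri(S);     // or tri.compute(S), which now returns *this
  MatrixXd T = tri.matrixT();              // real symmetric tridiagonal factor
  MatrixXd Q = tri.matrixQ();              // unitary factor; S ~= Q * T * Q.transpose() here
}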
|
||||
|
||||
/** \brief Returns the Householder coefficients.
|
||||
@ -159,7 +167,7 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
* the member function compute(const MatrixType&) has been called before
|
||||
* to compute the tridiagonal decomposition of a matrix.
|
||||
*
|
||||
* The Householder coefficients allow the reconstruction of the matrix
|
||||
* The Householder coefficients allow the reconstruction of the matrix
|
||||
* \f$ Q \f$ in the tridiagonal decomposition from the packed data.
|
||||
*
|
||||
* Example: \include Tridiagonalization_householderCoefficients.cpp
|
||||
@ -167,9 +175,13 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
*
|
||||
* \sa packedMatrix(), \ref Householder_Module "Householder module"
|
||||
*/
|
||||
inline CoeffVectorType householderCoefficients() const { return m_hCoeffs; }
|
||||
inline CoeffVectorType householderCoefficients() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "Tridiagonalization is not initialized.");
|
||||
return m_hCoeffs;
|
||||
}
|
||||
|
||||
/** \brief Returns the internal representation of the decomposition
|
||||
/** \brief Returns the internal representation of the decomposition
|
||||
*
|
||||
* \returns a const reference to a matrix with the internal representation
|
||||
* of the decomposition.
|
||||
@ -181,14 +193,14 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
* The returned matrix contains the following information:
|
||||
* - the strict upper triangular part is equal to the input matrix A.
|
||||
* - the diagonal and lower sub-diagonal represent the real tridiagonal
|
||||
* symmetric matrix T.
|
||||
* symmetric matrix T.
|
||||
* - the rest of the lower part contains the Householder vectors that,
|
||||
* combined with Householder coefficients returned by
|
||||
* householderCoefficients(), allow the matrix Q to be reconstructed as
|
||||
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
|
||||
* Here, the matrices \f$ H_i \f$ are the Householder transformations
|
||||
* Here, the matrices \f$ H_i \f$ are the Householder transformations
|
||||
* \f$ H_i = (I - h_i v_i v_i^T) \f$
|
||||
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
|
||||
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
|
||||
* \f$ v_i \f$ is the Householder vector defined by
|
||||
* \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
|
||||
* with M the matrix returned by this function.
|
||||
@ -200,9 +212,13 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
*
|
||||
* \sa householderCoefficients()
|
||||
*/
|
||||
inline const MatrixType& packedMatrix() const { return m_matrix; }
|
||||
inline const MatrixType& packedMatrix() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "Tridiagonalization is not initialized.");
|
||||
return m_matrix;
|
||||
}
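// Illustrative sketch: the packed representation described above can be
// expanded by hand. In the real case Q = H_{N-1} ... H_1 H_0 with
// H_i = I - h_i v_i v_i^T and v_i = [0,...,0, 1, M(i+2,i), ..., M(N-1,i)]^T.
// A naive real-valued sketch, with made-up function and parameter names:
#include <Eigen/Dense>
Eigen::MatrixXd reconstruct_q_sketch(const Eigen::MatrixXd& packed, const Eigen::VectorXd& hCoeffs)
{
  using namespace Eigen;
  const int n = static_cast<int>(packed.rows());
  MatrixXd Q = MatrixXd::Identity(n, n);
  for (int i = 0; i < n - 1; ++i)          // accumulates H_{n-2} * ... * H_1 * H_0
  {
    VectorXd v = VectorXd::Zero(n);
    v(i + 1) = 1.0;                        // implicit leading 1 of the Householder vector
    if (i + 2 < n)
      v.tail(n - i - 2) = packed.col(i).tail(n - i - 2);   // essential part stored below the subdiagonal
    Q = (MatrixXd::Identity(n, n) - hCoeffs(i) * v * v.transpose()) * Q;
  }
  return Q;                                // should match matrixQ() up to rounding errors
}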
|
||||
|
||||
/** \brief Returns the unitary matrix Q in the decomposition
|
||||
/** \brief Returns the unitary matrix Q in the decomposition
|
||||
*
|
||||
* \returns object representing the matrix Q
|
||||
*
|
||||
@ -219,6 +235,7 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
*/
|
||||
HouseholderSequenceType matrixQ() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "Tridiagonalization is not initialized.");
|
||||
return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate(), false, m_matrix.rows() - 1, 1);
|
||||
}
|
||||
|
||||
@ -268,43 +285,6 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
*/
|
||||
const SubDiagonalReturnType subDiagonal() const;
|
||||
|
||||
/** \brief Performs a full decomposition in place
|
||||
*
|
||||
* \param[in,out] mat On input, the selfadjoint matrix whose tridiagonal
|
||||
* decomposition is to be computed. On output, the orthogonal matrix Q
|
||||
* in the decomposition if \p extractQ is true.
|
||||
* \param[out] diag The diagonal of the tridiagonal matrix T in the
|
||||
* decomposition.
|
||||
* \param[out] subdiag The subdiagonal of the tridiagonal matrix T in
|
||||
* the decomposition.
|
||||
* \param[in] extractQ If true, the orthogonal matrix Q in the
|
||||
* decomposition is computed and stored in \p mat.
|
||||
*
|
||||
* Compute the tridiagonal matrix of \p mat in place. The tridiagonal
|
||||
* matrix T is passed to the output parameters \p diag and \p subdiag. If
|
||||
* \p extractQ is true, then the orthogonal matrix Q is passed to \p mat.
|
||||
*
|
||||
* The vectors \p diag and \p subdiag are not resized. The function
|
||||
* assumes that they are already of the correct size. The length of the
|
||||
* vector \p diag should equal the number of rows in \p mat, and the
|
||||
* length of the vector \p subdiag should be one less.
|
||||
*
|
||||
* This implementation contains an optimized path for real 3-by-3 matrices
|
||||
* which is especially useful for plane fitting.
|
||||
*
|
||||
* \note Notwithstanding the name, the current implementation copies
|
||||
* \p mat to a temporary matrix and uses that matrix to compute the
|
||||
* decomposition.
|
||||
*
|
||||
* Example (this uses the same matrix as the example in
|
||||
* Tridiagonalization(const MatrixType&)):
|
||||
* \include Tridiagonalization_decomposeInPlace.cpp
|
||||
* Output: \verbinclude Tridiagonalization_decomposeInPlace.out
|
||||
*
|
||||
* \sa Tridiagonalization(const MatrixType&), compute()
|
||||
*/
|
||||
static void decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ = true);
|
||||
|
||||
protected:
|
||||
|
||||
static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs);
|
||||
@ -312,12 +292,14 @@ template<typename _MatrixType> class Tridiagonalization
|
||||
|
||||
MatrixType m_matrix;
|
||||
CoeffVectorType m_hCoeffs;
|
||||
bool m_isInitialized;
|
||||
};
|
||||
|
||||
template<typename MatrixType>
|
||||
const typename Tridiagonalization<MatrixType>::DiagonalReturnType
|
||||
Tridiagonalization<MatrixType>::diagonal() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "Tridiagonalization is not initialized.");
|
||||
return m_matrix.diagonal();
|
||||
}
|
||||
|
||||
@ -325,6 +307,7 @@ template<typename MatrixType>
|
||||
const typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
|
||||
Tridiagonalization<MatrixType>::subDiagonal() const
|
||||
{
|
||||
ei_assert(m_isInitialized && "Tridiagonalization is not initialized.");
|
||||
Index n = m_matrix.rows();
|
||||
return Block<MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1).diagonal();
|
||||
}
|
||||
@ -335,6 +318,7 @@ Tridiagonalization<MatrixType>::matrixT() const
|
||||
{
|
||||
// FIXME should this function (and other similar ones) rather take a matrix as argument
|
||||
// and fill it ? (to avoid temporaries)
|
||||
ei_assert(m_isInitialized && "Tridiagonalization is not initialized.");
|
||||
Index n = m_matrix.rows();
|
||||
MatrixType matT = m_matrix;
|
||||
matT.topRightCorner(n-1, n-1).diagonal() = subDiagonal().template cast<Scalar>().conjugate();
|
||||
@ -346,24 +330,37 @@ Tridiagonalization<MatrixType>::matrixT() const
|
||||
return matT;
|
||||
}
|
||||
|
||||
#ifndef EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
/** \internal
|
||||
* Performs a tridiagonal decomposition of \a matA in place.
|
||||
* Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place.
|
||||
*
|
||||
* \param matA the input selfadjoint matrix
|
||||
* \param hCoeffs returned Householder coefficients
|
||||
* \param[in,out] matA On input the selfadjoint matrix. Only the \b lower triangular part is referenced.
|
||||
* On output, the strict upper part is left unchanged, and the lower triangular part
|
||||
* represents the T and Q matrices in packed format as detailed below.
|
||||
* \param[out] hCoeffs returned Householder coefficients (see below)
|
||||
*
|
||||
* The result is written in the lower triangular part of \a matA.
|
||||
* On output, the tridiagonal selfadjoint matrix T is stored in the diagonal
|
||||
* and lower sub-diagonal of the matrix \a matA.
|
||||
* The unitary matrix Q is represented in a compact way as a product of
|
||||
* Householder reflectors \f$ H_i \f$ such that:
|
||||
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
|
||||
* The Householder reflectors are defined as
|
||||
* \f$ H_i = (I - h_i v_i v_i^T) \f$
|
||||
* where \f$ h_i = hCoeffs[i]\f$ is the \f$ i \f$th Householder coefficient and
|
||||
* \f$ v_i \f$ is the Householder vector defined by
|
||||
* \f$ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T \f$.
|
||||
*
|
||||
* Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
|
||||
*
|
||||
* \sa packedMatrix()
|
||||
* \sa Tridiagonalization::packedMatrix()
|
||||
*/
|
||||
template<typename MatrixType>
|
||||
void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs)
|
||||
template<typename MatrixType, typename CoeffVectorType>
|
||||
void ei_tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
|
||||
{
|
||||
assert(matA.rows()==matA.cols());
|
||||
ei_assert(matA.rows()==matA.cols());
|
||||
ei_assert(matA.rows()==hCoeffs.size()+1);
|
||||
typedef typename MatrixType::Index Index;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
Index n = matA.rows();
|
||||
for (Index i = 0; i<n-1; ++i)
|
||||
{
|
||||
@ -389,69 +386,139 @@ void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType&
|
||||
}
|
||||
}
|
||||
|
||||
template<typename MatrixType>
|
||||
void Tridiagonalization<MatrixType>::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
|
||||
// forward declaration, implementation at the end of this file
|
||||
template<typename MatrixType, int Size=MatrixType::ColsAtCompileTime>
|
||||
struct ei_tridiagonalization_inplace_selector;
|
||||
|
||||
/** \brief Performs a full tridiagonalization in place
|
||||
*
|
||||
* \param[in,out] mat On input, the selfadjoint matrix whose tridiagonal
|
||||
* decomposition is to be computed. Only the lower triangular part is referenced.
|
||||
* The rest is left unchanged. On output, the orthogonal matrix Q
|
||||
* in the decomposition if \p extractQ is true.
|
||||
* \param[out] diag The diagonal of the tridiagonal matrix T in the
|
||||
* decomposition.
|
||||
* \param[out] subdiag The subdiagonal of the tridiagonal matrix T in
|
||||
* the decomposition.
|
||||
* \param[in] extractQ If true, the orthogonal matrix Q in the
|
||||
* decomposition is computed and stored in \p mat.
|
||||
*
|
||||
* Computes the tridiagonal decomposition of the selfadjoint matrix \p mat in place
|
||||
* such that \f$ mat = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real
|
||||
* symmetric tridiagonal matrix.
|
||||
*
|
||||
* The tridiagonal matrix T is passed to the output parameters \p diag and \p subdiag. If
|
||||
* \p extractQ is true, then the orthogonal matrix Q is passed to \p mat. Otherwise the lower
|
||||
* part of the matrix \p mat is destroyed.
|
||||
*
|
||||
* The vectors \p diag and \p subdiag are not resized. The function
|
||||
* assumes that they are already of the correct size. The length of the
|
||||
* vector \p diag should equal the number of rows in \p mat, and the
|
||||
* length of the vector \p subdiag should be one less.
|
||||
*
|
||||
* This implementation contains an optimized path for 3-by-3 matrices
|
||||
* which is especially useful for plane fitting.
|
||||
*
|
||||
* \note Currently, it requires two temporary vectors to hold the intermediate
|
||||
* Householder coefficients, and to reconstruct the matrix Q from the Householder
|
||||
* reflectors.
|
||||
*
|
||||
* Example (this uses the same matrix as the example in
|
||||
* Tridiagonalization::Tridiagonalization(const MatrixType&)):
|
||||
* \include Tridiagonalization_decomposeInPlace.cpp
|
||||
* Output: \verbinclude Tridiagonalization_decomposeInPlace.out
|
||||
*
|
||||
* \sa class Tridiagonalization
|
||||
*/
|
||||
template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
|
||||
void ei_tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
|
||||
{
|
||||
typedef typename MatrixType::Index Index;
|
||||
Index n = mat.rows();
|
||||
ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1);
|
||||
if (n==3 && (!NumTraits<Scalar>::IsComplex) )
|
||||
{
|
||||
_decomposeInPlace3x3(mat, diag, subdiag, extractQ);
|
||||
}
|
||||
else
|
||||
{
|
||||
Tridiagonalization tridiag(mat);
|
||||
diag = tridiag.diagonal();
|
||||
subdiag = tridiag.subDiagonal();
|
||||
if (extractQ)
|
||||
mat = tridiag.matrixQ();
|
||||
}
|
||||
ei_tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
|
||||
}
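// Illustrative usage sketch: how the free function above is meant to be called
// in the plane-fitting case. ei_tridiagonalization_inplace is an internal
// helper, so treat this only as a sketch; the function name and the covariance
// interpretation are assumptions.
#include <Eigen/Eigenvalues>
void tridiag_inplace_sketch(Eigen::Matrix3d& cov)   // cov: a selfadjoint 3x3 matrix, e.g. a covariance
{
  using namespace Eigen;
  Vector3d diag;                                    // receives the diagonal of T (size n)
  Vector2d subdiag;                                 // receives the subdiagonal of T (size n-1)
  ei_tridiagonalization_inplace(cov, diag, subdiag, /*extractQ=*/true);
  // cov now holds Q; diag and subdiag hold the tridiagonal factor T.
}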
|
||||
|
||||
/** \internal
|
||||
* Optimized path for 3x3 matrices.
|
||||
* General full tridiagonalization
|
||||
*/
|
||||
template<typename MatrixType, int Size>
|
||||
struct ei_tridiagonalization_inplace_selector
|
||||
{
|
||||
typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
|
||||
typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
|
||||
typedef typename MatrixType::Index Index;
|
||||
template<typename DiagonalType, typename SubDiagonalType>
|
||||
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
|
||||
{
|
||||
CoeffVectorType hCoeffs(mat.cols()-1);
|
||||
ei_tridiagonalization_inplace(mat,hCoeffs);
|
||||
diag = mat.diagonal().real();
|
||||
subdiag = mat.template diagonal<-1>().real();
|
||||
if(extractQ)
|
||||
mat = HouseholderSequenceType(mat, hCoeffs.conjugate(), false, mat.rows() - 1, 1);
|
||||
}
|
||||
};
|
||||
|
||||
/** \internal
|
||||
* Specialization for 3x3 matrices.
|
||||
* Especially useful for plane fitting.
|
||||
*/
|
||||
template<typename MatrixType>
|
||||
void Tridiagonalization<MatrixType>::_decomposeInPlace3x3(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
|
||||
struct ei_tridiagonalization_inplace_selector<MatrixType,3>
|
||||
{
|
||||
diag[0] = ei_real(mat(0,0));
|
||||
RealScalar v1norm2 = ei_abs2(mat(0,2));
|
||||
if (ei_isMuchSmallerThan(v1norm2, RealScalar(1)))
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename MatrixType::RealScalar RealScalar;
|
||||
|
||||
template<typename DiagonalType, typename SubDiagonalType>
|
||||
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
|
||||
{
|
||||
diag[1] = ei_real(mat(1,1));
|
||||
diag[2] = ei_real(mat(2,2));
|
||||
subdiag[0] = ei_real(mat(0,1));
|
||||
subdiag[1] = ei_real(mat(1,2));
|
||||
if (extractQ)
|
||||
mat.setIdentity();
|
||||
}
|
||||
else
|
||||
{
|
||||
RealScalar beta = ei_sqrt(ei_abs2(mat(0,1))+v1norm2);
|
||||
RealScalar invBeta = RealScalar(1)/beta;
|
||||
Scalar m01 = mat(0,1) * invBeta;
|
||||
Scalar m02 = mat(0,2) * invBeta;
|
||||
Scalar q = RealScalar(2)*m01*mat(1,2) + m02*(mat(2,2) - mat(1,1));
|
||||
diag[1] = ei_real(mat(1,1) + m02*q);
|
||||
diag[2] = ei_real(mat(2,2) - m02*q);
|
||||
subdiag[0] = beta;
|
||||
subdiag[1] = ei_real(mat(1,2) - m01 * q);
|
||||
if (extractQ)
|
||||
diag[0] = ei_real(mat(0,0));
|
||||
RealScalar v1norm2 = ei_abs2(mat(2,0));
|
||||
if (ei_isMuchSmallerThan(v1norm2, RealScalar(1)))
|
||||
{
|
||||
mat(0,0) = 1;
|
||||
mat(0,1) = 0;
|
||||
mat(0,2) = 0;
|
||||
mat(1,0) = 0;
|
||||
mat(1,1) = m01;
|
||||
mat(1,2) = m02;
|
||||
mat(2,0) = 0;
|
||||
mat(2,1) = m02;
|
||||
mat(2,2) = -m01;
|
||||
diag[1] = ei_real(mat(1,1));
|
||||
diag[2] = ei_real(mat(2,2));
|
||||
subdiag[0] = ei_real(mat(1,0));
|
||||
subdiag[1] = ei_real(mat(2,1));
|
||||
if (extractQ)
|
||||
mat.setIdentity();
|
||||
}
|
||||
else
|
||||
{
|
||||
RealScalar beta = ei_sqrt(ei_abs2(mat(1,0)) + v1norm2);
|
||||
RealScalar invBeta = RealScalar(1)/beta;
|
||||
Scalar m01 = ei_conj(mat(1,0)) * invBeta;
|
||||
Scalar m02 = ei_conj(mat(2,0)) * invBeta;
|
||||
Scalar q = RealScalar(2)*m01*ei_conj(mat(2,1)) + m02*(mat(2,2) - mat(1,1));
|
||||
diag[1] = ei_real(mat(1,1) + m02*q);
|
||||
diag[2] = ei_real(mat(2,2) - m02*q);
|
||||
subdiag[0] = beta;
|
||||
subdiag[1] = ei_real(ei_conj(mat(2,1)) - m01 * q);
|
||||
if (extractQ)
|
||||
{
|
||||
mat << 1, 0, 0,
|
||||
0, m01, m02,
|
||||
0, m02, -m01;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#endif // EIGEN_HIDE_HEAVY_CODE
|
||||
/** \internal
|
||||
* Trivial specialization for 1x1 matrices
|
||||
*/
|
||||
template<typename MatrixType>
|
||||
struct ei_tridiagonalization_inplace_selector<MatrixType,1>
|
||||
{
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
|
||||
template<typename DiagonalType, typename SubDiagonalType>
|
||||
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
|
||||
{
|
||||
diag(0,0) = ei_real(mat(0,0));
|
||||
if(extractQ)
|
||||
mat(0,0) = Scalar(1);
|
||||
}
|
||||
};
|
||||
#endif // EIGEN_TRIDIAGONALIZATION_H
|
||||
|
@ -174,7 +174,7 @@ template<int Dim,int Mode>
|
||||
inline Transform<Scalar,Dim,Mode>
|
||||
UniformScaling<Scalar>::operator* (const Transform<Scalar,Dim, Mode>& t) const
|
||||
{
|
||||
Transform<Scalar,Dim> res = t;
|
||||
Transform<Scalar,Dim,Mode> res = t;
|
||||
res.prescale(factor());
|
||||
return res;
|
||||
}
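// Illustrative sketch: with the Mode preserved, composing a uniform scaling with
// an affine transform yields another affine transform. This assumes the Affine3d
// typedef (Transform<double,3,Affine>) and the Scaling() helper.
#include <Eigen/Geometry>
void uniform_scaling_sketch()
{
  using namespace Eigen;
  Affine3d t = Affine3d::Identity();
  Affine3d st = Scaling(2.0) * t;    // the result keeps the Affine mode instead of decaying to a generic Transform
}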
|
||||
|
@ -53,6 +53,7 @@ template<typename VectorsType, typename CoeffsType, int Side>
|
||||
struct ei_traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
|
||||
{
|
||||
typedef typename VectorsType::Scalar Scalar;
|
||||
typedef typename VectorsType::Index Index;
|
||||
typedef typename VectorsType::StorageKind StorageKind;
|
||||
enum {
|
||||
RowsAtCompileTime = Side==OnTheLeft ? ei_traits<VectorsType>::RowsAtCompileTime
|
||||
@ -159,18 +160,45 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
|
||||
template<typename DestType> void evalTo(DestType& dst) const
|
||||
{
|
||||
Index vecs = m_actualVectors;
|
||||
dst.setIdentity(rows(), rows());
|
||||
// FIXME find a way to pass this temporary if the user want to
|
||||
Matrix<Scalar, DestType::RowsAtCompileTime, 1,
|
||||
AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> temp(rows());
|
||||
for(Index k = vecs-1; k >= 0; --k)
|
||||
AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> temp(rows());
|
||||
if( ei_is_same_type<typename ei_cleantype<VectorsType>::type,DestType>::ret
|
||||
&& ei_extract_data(dst) == ei_extract_data(m_vectors))
|
||||
{
|
||||
Index cornerSize = rows() - k - m_shift;
|
||||
if(m_trans)
|
||||
dst.bottomRightCorner(cornerSize, cornerSize)
|
||||
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
|
||||
else
|
||||
dst.bottomRightCorner(cornerSize, cornerSize)
|
||||
.applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
|
||||
// in-place
|
||||
dst.diagonal().setOnes();
|
||||
dst.template triangularView<StrictlyUpper>().setZero();
|
||||
for(Index k = vecs-1; k >= 0; --k)
|
||||
{
|
||||
Index cornerSize = rows() - k - m_shift;
|
||||
if(m_trans)
|
||||
dst.bottomRightCorner(cornerSize, cornerSize)
|
||||
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
|
||||
else
|
||||
dst.bottomRightCorner(cornerSize, cornerSize)
|
||||
.applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
|
||||
|
||||
// clear the off diagonal vector
|
||||
dst.col(k).tail(rows()-k-1).setZero();
|
||||
}
|
||||
// clear the remaining columns if needed
|
||||
for(Index k = 0; k<cols()-vecs ; ++k)
|
||||
dst.col(k).tail(rows()-k-1).setZero();
|
||||
}
|
||||
else
|
||||
{
|
||||
dst.setIdentity(rows(), rows());
|
||||
for(Index k = vecs-1; k >= 0; --k)
|
||||
{
|
||||
Index cornerSize = rows() - k - m_shift;
|
||||
if(m_trans)
|
||||
dst.bottomRightCorner(cornerSize, cornerSize)
|
||||
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
|
||||
else
|
||||
dst.bottomRightCorner(cornerSize, cornerSize)
|
||||
.applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -69,7 +69,7 @@ template<typename _MatrixType> class FullPivLU
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
|
||||
typedef typename ei_traits<MatrixType>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename MatrixType::Index Index;
|
||||
typedef typename ei_plain_row_type<MatrixType, Index>::type IntRowVectorType;
|
||||
typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
|
||||
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
|
||||
|
@ -284,7 +284,6 @@ struct ei_inverse_impl : public ReturnByValue<ei_inverse_impl<MatrixType> >
|
||||
typedef typename MatrixType::Index Index;
|
||||
typedef typename ei_eval<MatrixType>::type MatrixTypeNested;
|
||||
typedef typename ei_cleantype<MatrixTypeNested>::type MatrixTypeNestedCleaned;
|
||||
|
||||
const MatrixTypeNested m_matrix;
|
||||
|
||||
ei_inverse_impl(const MatrixType& matrix)
|
||||
@ -296,6 +295,10 @@ struct ei_inverse_impl : public ReturnByValue<ei_inverse_impl<MatrixType> >
|
||||
|
||||
template<typename Dest> inline void evalTo(Dest& dst) const
|
||||
{
|
||||
const int Size = EIGEN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime);
|
||||
ei_assert(( (Size<=1) || (Size>4) || (ei_extract_data(m_matrix)!=ei_extract_data(dst)))
|
||||
&& "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
|
||||
|
||||
ei_compute_inverse<MatrixTypeNestedCleaned, Dest>::run(m_matrix, dst);
|
||||
}
|
||||
};
|
||||
@ -354,7 +357,14 @@ inline void MatrixBase<Derived>::computeInverseAndDetWithCheck(
|
||||
{
|
||||
// I'd love to put some static assertions there, but SFINAE means that they have no effect...
|
||||
ei_assert(rows() == cols());
|
||||
ei_compute_inverse_and_det_with_check<PlainObject, ResultType>::run
|
||||
// for 2x2, it's worth giving a chance to avoid evaluating.
|
||||
// for larger sizes, evaluating has negligible cost and limits code size.
|
||||
typedef typename ei_meta_if<
|
||||
RowsAtCompileTime == 2,
|
||||
typename ei_cleantype<typename ei_nested<Derived, 2>::type>::type,
|
||||
PlainObject
|
||||
>::ret MatrixType;
|
||||
ei_compute_inverse_and_det_with_check<MatrixType, ResultType>::run
|
||||
(derived(), absDeterminantThreshold, inverse, determinant, invertible);
|
||||
}
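// Illustrative usage sketch: the 2x2 fast path above is transparent to callers,
// so usage is unchanged; the values below are made up.
#include <Eigen/Dense>
void inverse_with_check_sketch()
{
  using namespace Eigen;
  Matrix2d m;
  m << 2, 1,
       1, 2;
  Matrix2d inv;
  double det;
  bool invertible;
  m.computeInverseAndDetWithCheck(inv, det, invertible);   // default absDeterminantThreshold
  if (invertible)
  {
    // inv now holds the inverse of m and det its determinant
  }
}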
|
||||
|
||||
|
@ -72,9 +72,9 @@ template<typename _MatrixType> class PartialPivLU
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
|
||||
typedef typename ei_traits<MatrixType>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_plain_col_type<MatrixType, Index>::type PermutationVectorType;
|
||||
typedef typename MatrixType::Index Index;
|
||||
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
|
||||
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
|
||||
|
||||
|
||||
/**
|
||||
@ -186,7 +186,7 @@ template<typename _MatrixType> class PartialPivLU
|
||||
protected:
|
||||
MatrixType m_lu;
|
||||
PermutationType m_p;
|
||||
PermutationVectorType m_rowsTranspositions;
|
||||
TranspositionType m_rowsTranspositions;
|
||||
Index m_det_p;
|
||||
bool m_isInitialized;
|
||||
};
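// Illustrative usage sketch: the switch from a plain index vector to a
// Transpositions sequence is purely internal; typical use of PartialPivLU is
// unchanged. Values are made up.
#include <Eigen/Dense>
void partial_piv_lu_sketch()
{
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(4,4);   // must be invertible for partial pivoting
  VectorXd b = VectorXd::Random(4);
  PartialPivLU<MatrixXd> lu(A);
  VectorXd x = lu.solve(b);             // solves A x = b
  double d = lu.determinant();          // sign comes from m_det_p, the parity of the transpositions
}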
|
||||
@ -339,9 +339,9 @@ struct ei_partial_lu_impl
|
||||
Index tsize = size - k - bs; // trailing size
|
||||
|
||||
// partition the matrix:
|
||||
// A00 | A01 | A02
|
||||
// lu = A10 | A11 | A12
|
||||
// A20 | A21 | A22
|
||||
// A00 | A01 | A02
|
||||
// lu = A_0 | A_1 | A_2 = A10 | A11 | A12
|
||||
// A20 | A21 | A22
|
||||
BlockType A_0(lu,0,0,rows,k);
|
||||
BlockType A_2(lu,0,k+bs,rows,tsize);
|
||||
BlockType A11(lu,k,k,bs,bs);
|
||||
@ -350,8 +350,8 @@ struct ei_partial_lu_impl
|
||||
BlockType A22(lu,k+bs,k+bs,trows,tsize);
|
||||
|
||||
Index nb_transpositions_in_panel;
|
||||
// recursively calls the blocked LU algorithm with a very small
|
||||
// blocking size:
|
||||
// recursively call the blocked LU algorithm on [A11^T A21^T]^T
|
||||
// with a very small blocking size:
|
||||
if(!blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,
|
||||
row_transpositions+k, nb_transpositions_in_panel, 16))
|
||||
{
|
||||
@ -364,7 +364,7 @@ struct ei_partial_lu_impl
|
||||
}
|
||||
nb_transpositions += nb_transpositions_in_panel;
|
||||
|
||||
// update permutations and apply them to A10
|
||||
// update permutations and apply them to A_0
|
||||
for(Index i=k; i<k+bs; ++i)
|
||||
{
|
||||
Index piv = (row_transpositions[i] += k);
|
||||
@ -389,8 +389,8 @@ struct ei_partial_lu_impl
|
||||
|
||||
/** \internal performs the LU decomposition with partial pivoting in-place.
|
||||
*/
|
||||
template<typename MatrixType, typename IntVector>
|
||||
void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, typename MatrixType::Index& nb_transpositions)
|
||||
template<typename MatrixType, typename TranspositionType>
|
||||
void ei_partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename MatrixType::Index& nb_transpositions)
|
||||
{
|
||||
ei_assert(lu.cols() == row_transpositions.size());
|
||||
ei_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
|
||||
@ -414,9 +414,7 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& ma
|
||||
ei_partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
|
||||
m_det_p = (nb_transpositions%2) ? -1 : 1;
|
||||
|
||||
m_p.setIdentity(size);
|
||||
for(Index k = size-1; k >= 0; --k)
|
||||
m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
|
||||
m_p = m_rowsTranspositions;
|
||||
|
||||
m_isInitialized = true;
|
||||
return *this;
|
||||
|
@ -286,6 +286,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
|
||||
{
|
||||
m_usePrescribedThreshold = true;
|
||||
m_prescribedThreshold = threshold;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/** Allows to come back to the default behavior, letting Eigen use its default formula for
|
||||
@ -299,6 +300,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
|
||||
ColPivHouseholderQR& setThreshold(Default_t)
|
||||
{
|
||||
m_usePrescribedThreshold = false;
|
||||
return *this;
|
||||
}
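// Illustrative usage sketch: since setThreshold now returns *this, rank-revealing
// queries can be chained. The threshold value is made up, and Default is assumed
// to be the usual Default_t tag constant.
#include <Eigen/Dense>
void colpiv_threshold_sketch(const Eigen::MatrixXd& A)
{
  using namespace Eigen;
  ColPivHouseholderQR<MatrixXd> qr(A);
  bool fullRank = (qr.setThreshold(1e-10).rank() == A.cols());  // chained call
  qr.setThreshold(Default);     // back to Eigen's default threshold formula
  (void)fullRank;
}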
|
||||
|
||||
/** Returns the threshold that will be used by certain methods such as rank().
|
||||
@ -345,8 +347,6 @@ template<typename _MatrixType> class ColPivHouseholderQR
|
||||
Index m_det_pq;
|
||||
};
|
||||
|
||||
#ifndef EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
template<typename MatrixType>
|
||||
typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const
|
||||
{
|
||||
@ -511,8 +511,6 @@ typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHousehol
|
||||
return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate(), false, m_nonzero_pivots, 0);
|
||||
}
|
||||
|
||||
#endif // EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
/** \return the column-pivoting Householder QR decomposition of \c *this.
|
||||
*
|
||||
* \sa class ColPivHouseholderQR
|
||||
|
@ -271,8 +271,6 @@ template<typename _MatrixType> class FullPivHouseholderQR
|
||||
Index m_det_pq;
|
||||
};
|
||||
|
||||
#ifndef EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
template<typename MatrixType>
|
||||
typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const
|
||||
{
|
||||
@ -437,8 +435,6 @@ typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<Matr
|
||||
return res;
|
||||
}
|
||||
|
||||
#endif // EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
/** \return the full-pivoting Householder QR decomposition of \c *this.
|
||||
*
|
||||
* \sa class FullPivHouseholderQR
|
||||
|
@ -177,8 +177,6 @@ template<typename _MatrixType> class HouseholderQR
|
||||
bool m_isInitialized;
|
||||
};
|
||||
|
||||
#ifndef EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
template<typename MatrixType>
|
||||
typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
|
||||
{
|
||||
@ -254,8 +252,6 @@ struct ei_solve_retval<HouseholderQR<_MatrixType>, Rhs>
|
||||
}
|
||||
};
|
||||
|
||||
#endif // EIGEN_HIDE_HEAVY_CODE
|
||||
|
||||
/** \return the Householder QR decomposition of \c *this.
|
||||
*
|
||||
* \sa class HouseholderQR
|
||||
|
@ -30,12 +30,14 @@
|
||||
*
|
||||
* See BasicSparseLLT and SparseProduct for usage examples.
|
||||
*/
|
||||
template<typename _Scalar> class AmbiVector
|
||||
template<typename _Scalar, typename _Index>
|
||||
class AmbiVector
|
||||
{
|
||||
public:
|
||||
typedef _Scalar Scalar;
|
||||
typedef _Index Index;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef SparseIndex Index;
|
||||
|
||||
AmbiVector(Index size)
|
||||
: m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
|
||||
{
|
||||
@ -130,8 +132,8 @@ template<typename _Scalar> class AmbiVector
|
||||
};
|
||||
|
||||
/** \returns the number of non zeros in the current sub vector */
|
||||
template<typename Scalar>
|
||||
SparseIndex AmbiVector<Scalar>::nonZeros() const
|
||||
template<typename _Scalar,typename _Index>
|
||||
_Index AmbiVector<_Scalar,_Index>::nonZeros() const
|
||||
{
|
||||
if (m_mode==IsSparse)
|
||||
return m_llSize;
|
||||
@ -139,8 +141,8 @@ SparseIndex AmbiVector<Scalar>::nonZeros() const
|
||||
return m_end - m_start;
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
void AmbiVector<Scalar>::init(double estimatedDensity)
|
||||
template<typename _Scalar,typename _Index>
|
||||
void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
|
||||
{
|
||||
if (estimatedDensity>0.1)
|
||||
init(IsDense);
|
||||
@ -148,8 +150,8 @@ void AmbiVector<Scalar>::init(double estimatedDensity)
|
||||
init(IsSparse);
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
void AmbiVector<Scalar>::init(int mode)
|
||||
template<typename _Scalar,typename _Index>
|
||||
void AmbiVector<_Scalar,_Index>::init(int mode)
|
||||
{
|
||||
m_mode = mode;
|
||||
if (m_mode==IsSparse)
|
||||
@ -164,15 +166,15 @@ void AmbiVector<Scalar>::init(int mode)
|
||||
*
|
||||
* Don't worry, this function is extremely cheap.
|
||||
*/
|
||||
template<typename Scalar>
|
||||
void AmbiVector<Scalar>::restart()
|
||||
template<typename _Scalar,typename _Index>
|
||||
void AmbiVector<_Scalar,_Index>::restart()
|
||||
{
|
||||
m_llCurrent = m_llStart;
|
||||
}
|
||||
|
||||
/** Set all coefficients of current subvector to zero */
|
||||
template<typename Scalar>
|
||||
void AmbiVector<Scalar>::setZero()
|
||||
template<typename _Scalar,typename _Index>
|
||||
void AmbiVector<_Scalar,_Index>::setZero()
|
||||
{
|
||||
if (m_mode==IsDense)
|
||||
{
|
||||
@ -187,8 +189,8 @@ void AmbiVector<Scalar>::setZero()
|
||||
}
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
Scalar& AmbiVector<Scalar>::coeffRef(Index i)
|
||||
template<typename _Scalar,typename _Index>
|
||||
_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
|
||||
{
|
||||
if (m_mode==IsDense)
|
||||
return m_buffer[i];
|
||||
@ -256,8 +258,8 @@ Scalar& AmbiVector<Scalar>::coeffRef(Index i)
|
||||
}
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
Scalar& AmbiVector<Scalar>::coeff(Index i)
|
||||
template<typename _Scalar,typename _Index>
|
||||
_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
|
||||
{
|
||||
if (m_mode==IsDense)
|
||||
return m_buffer[i];
|
||||
@ -284,8 +286,8 @@ Scalar& AmbiVector<Scalar>::coeff(Index i)
|
||||
}
|
||||
|
||||
/** Iterator over the nonzero coefficients */
|
||||
template<typename _Scalar>
|
||||
class AmbiVector<_Scalar>::Iterator
|
||||
template<typename _Scalar,typename _Index>
|
||||
class AmbiVector<_Scalar,_Index>::Iterator
|
||||
{
|
||||
public:
|
||||
typedef _Scalar Scalar;
|
||||
|
@ -109,8 +109,8 @@ cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
|
||||
return res;
|
||||
}
|
||||
|
||||
template<typename Scalar, int Flags>
|
||||
MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(cholmod_sparse& cm)
|
||||
template<typename Scalar, int Flags, typename _Index>
|
||||
MappedSparseMatrix<Scalar,Flags,_Index>::MappedSparseMatrix(cholmod_sparse& cm)
|
||||
{
|
||||
m_innerSize = cm.nrow;
|
||||
m_outerSize = cm.ncol;
|
||||
@ -128,6 +128,7 @@ class SparseLLT<MatrixType,Cholmod> : public SparseLLT<MatrixType>
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
typedef typename Base::CholMatrixType CholMatrixType;
|
||||
typedef typename MatrixType::Index Index;
|
||||
using Base::MatrixLIsDirty;
|
||||
using Base::SupernodalFactorIsDirty;
|
||||
using Base::m_flags;
|
||||
|
@ -28,12 +28,20 @@
|
||||
/** Stores a sparse set of values as a list of values and a list of indices.
|
||||
*
|
||||
*/
|
||||
template<typename Scalar>
|
||||
template<typename _Scalar,typename _Index>
|
||||
class CompressedStorage
|
||||
{
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef SparseIndex Index;
|
||||
public:
|
||||
|
||||
typedef _Scalar Scalar;
|
||||
typedef _Index Index;
|
||||
|
||||
protected:
|
||||
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
|
||||
public:
|
||||
|
||||
CompressedStorage()
|
||||
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
|
||||
{}
|
||||
@ -118,13 +126,13 @@ class CompressedStorage
|
||||
res.m_allocatedSize = res.m_size = size;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
|
||||
inline Index searchLowerIndex(Index key) const
|
||||
{
|
||||
return searchLowerIndex(0, m_size, key);
|
||||
}
|
||||
|
||||
|
||||
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
|
||||
inline Index searchLowerIndex(size_t start, size_t end, Index key) const
|
||||
{
|
||||
@ -138,7 +146,7 @@ class CompressedStorage
|
||||
}
|
||||
return static_cast<Index>(start);
|
||||
}
|
||||
|
||||
|
||||
/** \returns the stored value at index \a key
|
||||
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
|
||||
inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
|
||||
@ -152,7 +160,7 @@ class CompressedStorage
|
||||
const size_t id = searchLowerIndex(0,m_size-1,key);
|
||||
return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
|
||||
}
|
||||
|
||||
|
||||
/** Like at(), but the search is performed in the range [start,end) */
|
||||
inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
|
||||
{
|
||||
@ -165,7 +173,7 @@ class CompressedStorage
|
||||
const size_t id = searchLowerIndex(start,end-1,key);
|
||||
return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
|
||||
}
|
||||
|
||||
|
||||
/** \returns a reference to the value at index \a key
|
||||
* If the value does not exist, then the value \a defaultValue is inserted
|
||||
* such that the keys are sorted. */
|
||||
@ -185,7 +193,7 @@ class CompressedStorage
|
||||
}
|
||||
return m_values[id];
|
||||
}
|
||||
|
||||
|
||||
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
|
||||
{
|
||||
size_t k = 0;
|
||||
|
@ -42,10 +42,11 @@
|
||||
*
|
||||
* \see SparseMatrix
|
||||
*/
|
||||
template<typename _Scalar, int _Flags>
|
||||
struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags> >
|
||||
template<typename _Scalar, int _Flags, typename _Index>
|
||||
struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags, _Index> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef _Index Index;
|
||||
typedef Sparse StorageKind;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
@ -59,12 +60,12 @@ struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags> >
|
||||
};
|
||||
};
|
||||
|
||||
template<typename _Scalar, int _Flags>
|
||||
template<typename _Scalar, int _Flags, typename _Index>
|
||||
class DynamicSparseMatrix
|
||||
: public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Flags> >
|
||||
: public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Flags, _Index> >
|
||||
{
|
||||
public:
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(DynamicSparseMatrix)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
|
||||
// FIXME: why are these operator already alvailable ???
|
||||
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
|
||||
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
|
||||
@ -76,7 +77,7 @@ class DynamicSparseMatrix
|
||||
typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
|
||||
|
||||
Index m_innerSize;
|
||||
std::vector<CompressedStorage<Scalar> > m_data;
|
||||
std::vector<CompressedStorage<Scalar,Index> > m_data;
|
||||
|
||||
public:
|
||||
|
||||
@ -86,8 +87,8 @@ class DynamicSparseMatrix
|
||||
inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
|
||||
inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
|
||||
|
||||
std::vector<CompressedStorage<Scalar> >& _data() { return m_data; }
|
||||
const std::vector<CompressedStorage<Scalar> >& _data() const { return m_data; }
|
||||
std::vector<CompressedStorage<Scalar,Index> >& _data() { return m_data; }
|
||||
const std::vector<CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
|
||||
|
||||
/** \returns the coefficient value at given position \a row, \a col
|
||||
* This operation involves a log(rho*outer_size) binary search.
|
||||
@ -127,13 +128,7 @@ class DynamicSparseMatrix
|
||||
return res;
|
||||
}
|
||||
|
||||
/** \deprecated
|
||||
* Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
|
||||
EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
|
||||
{
|
||||
setZero();
|
||||
reserve(reserveSize);
|
||||
}
|
||||
|
||||
|
||||
void reserve(Index reserveSize = 1000)
|
||||
{
|
||||
@ -147,9 +142,21 @@ class DynamicSparseMatrix
|
||||
}
|
||||
}
|
||||
|
||||
/** Does nothing: provided for compatibility with SparseMatrix */
|
||||
inline void startVec(Index /*outer*/) {}
|
||||
|
||||
inline Scalar& insertBack(Index outer, Index inner)
|
||||
/** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
|
||||
* - the nonzero does not already exist
|
||||
* - the new coefficient is the last one of the given inner vector.
|
||||
*
|
||||
* \sa insert, insertBackByOuterInner */
|
||||
inline Scalar& insertBack(Index row, Index col)
|
||||
{
|
||||
return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
|
||||
}
|
||||
|
||||
/** \sa insertBack */
|
||||
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
|
||||
{
|
||||
ei_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
|
||||
ei_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
|
||||
@ -158,32 +165,6 @@ class DynamicSparseMatrix
|
||||
return m_data[outer].value(m_data[outer].size()-1);
|
||||
}
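// Illustrative usage sketch: the insertBack(row, col) / insert(row, col) pair
// documented above, on a small column-major DynamicSparseMatrix; all values are
// made up.
#include <Eigen/Sparse>
void dynamic_sparse_fill_sketch()
{
  using namespace Eigen;
  DynamicSparseMatrix<double> m(4, 3);
  m.reserve(4);
  m.insertBack(0, 0) = 1.0;     // within a column, inner (row) indices must be increasing
  m.insertBack(2, 0) = 2.0;
  m.insertBack(1, 1) = 3.0;
  m.insert(3, 2) = 4.0;         // insert() also handles out-of-order coefficients
  m.finalize();                 // no-op here, kept for compatibility with SparseMatrix
}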
|
||||
|
||||
/** \deprecated use insert()
|
||||
* inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
|
||||
* 1 - the coefficient does not exist yet
|
||||
* 2 - this is the coefficient with the greatest inner coordinate for the given outer coordinate.
* In other words, assuming \c *this is column-major, then there must not exist any nonzero coefficient of coordinates
|
||||
* \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
|
||||
*
|
||||
* \see fillrand(), coeffRef()
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
|
||||
{
|
||||
const Index outer = IsRowMajor ? row : col;
|
||||
const Index inner = IsRowMajor ? col : row;
|
||||
return insertBack(outer,inner);
|
||||
}
|
||||
|
||||
/** \deprecated use insert()
|
||||
* Like fill() but with random inner coordinates.
|
||||
* Compared to the generic coeffRef(), the unique limitation is that we assume
|
||||
* the coefficient does not exist yet.
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
|
||||
{
|
||||
return insert(row,col);
|
||||
}
|
||||
|
||||
inline Scalar& insert(Index row, Index col)
|
||||
{
|
||||
const Index outer = IsRowMajor ? row : col;
|
||||
@ -204,12 +185,10 @@ class DynamicSparseMatrix
|
||||
return m_data[outer].value(id+1);
|
||||
}
|
||||
|
||||
/** \deprecated use finalize()
|
||||
* Does nothing. Provided for compatibility with SparseMatrix. */
|
||||
EIGEN_DEPRECATED void endFill() {}
|
||||
|
||||
/** Does nothing: provided for compatibility with SparseMatrix */
|
||||
inline void finalize() {}
|
||||
|
||||
/** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
|
||||
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
|
||||
{
|
||||
for (Index j=0; j<outerSize(); ++j)
|
||||
@ -301,10 +280,50 @@ class DynamicSparseMatrix
|
||||
|
||||
/** Destructor */
|
||||
inline ~DynamicSparseMatrix() {}
|
||||
|
||||
public:
|
||||
|
||||
/** \deprecated
|
||||
* Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
|
||||
EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
|
||||
{
|
||||
setZero();
|
||||
reserve(reserveSize);
|
||||
}
|
||||
|
||||
/** \deprecated use insert()
|
||||
* inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
|
||||
* 1 - the coefficient does not exist yet
|
||||
* 2 - this is the coefficient with the greatest inner coordinate for the given outer coordinate.
* In other words, assuming \c *this is column-major, then there must not exist any nonzero coefficient of coordinates
|
||||
* \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
|
||||
*
|
||||
* \see fillrand(), coeffRef()
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
|
||||
{
|
||||
const Index outer = IsRowMajor ? row : col;
|
||||
const Index inner = IsRowMajor ? col : row;
|
||||
return insertBack(outer,inner);
|
||||
}
|
||||
|
||||
/** \deprecated use insert()
|
||||
* Like fill() but with random inner coordinates.
|
||||
* Compared to the generic coeffRef(), the unique limitation is that we assume
|
||||
* the coefficient does not exist yet.
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
|
||||
{
|
||||
return insert(row,col);
|
||||
}
|
||||
|
||||
/** \deprecated use finalize()
|
||||
* Does nothing. Provided for compatibility with SparseMatrix. */
|
||||
EIGEN_DEPRECATED void endFill() {}
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Flags>
|
||||
class DynamicSparseMatrix<Scalar,_Flags>::InnerIterator : public SparseVector<Scalar,_Flags>::InnerIterator
|
||||
template<typename Scalar, int _Flags, typename _Index>
|
||||
class DynamicSparseMatrix<Scalar,_Flags,_Index>::InnerIterator : public SparseVector<Scalar,_Flags>::InnerIterator
|
||||
{
|
||||
typedef typename SparseVector<Scalar,_Flags>::InnerIterator Base;
|
||||
public:
|
||||
|
@ -34,25 +34,25 @@
|
||||
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
|
||||
*
|
||||
*/
|
||||
template<typename _Scalar, int _Flags>
|
||||
struct ei_traits<MappedSparseMatrix<_Scalar, _Flags> > : ei_traits<SparseMatrix<_Scalar, _Flags> >
|
||||
template<typename _Scalar, int _Flags, typename _Index>
|
||||
struct ei_traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : ei_traits<SparseMatrix<_Scalar, _Flags, _Index> >
|
||||
{};
|
||||
|
||||
template<typename _Scalar, int _Flags>
|
||||
template<typename _Scalar, int _Flags, typename _Index>
|
||||
class MappedSparseMatrix
|
||||
: public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags> >
|
||||
: public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
|
||||
{
|
||||
public:
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(MappedSparseMatrix)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
|
||||
|
||||
protected:
|
||||
enum { IsRowMajor = Base::IsRowMajor };
|
||||
|
||||
Index m_outerSize;
|
||||
Index m_innerSize;
|
||||
Index m_nnz;
|
||||
Index* m_outerIndex;
|
||||
Index* m_innerIndices;
|
||||
Index m_outerSize;
|
||||
Index m_innerSize;
|
||||
Index m_nnz;
|
||||
Index* m_outerIndex;
|
||||
Index* m_innerIndices;
|
||||
Scalar* m_values;
|
||||
|
||||
public:
|
||||
@ -135,8 +135,8 @@ class MappedSparseMatrix
|
||||
inline ~MappedSparseMatrix() {}
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Flags>
|
||||
class MappedSparseMatrix<Scalar,_Flags>::InnerIterator
|
||||
template<typename Scalar, int _Flags, typename _Index>
|
||||
class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
|
||||
{
|
||||
public:
|
||||
InnerIterator(const MappedSparseMatrix& mat, Index outer)
|
||||
|
@ -243,7 +243,7 @@ class RandomSetter
|
||||
mp_target->startVec(j);
|
||||
prevOuter = outer;
|
||||
}
|
||||
mp_target->insertBack(outer, inner) = it->second.value;
|
||||
mp_target->insertBackByOuterInner(outer, inner) = it->second.value;
|
||||
}
|
||||
}
|
||||
mp_target->finalize();
|
||||
|
@ -29,6 +29,7 @@ template<typename MatrixType, int Size>
|
||||
struct ei_traits<SparseInnerVectorSet<MatrixType, Size> >
|
||||
{
|
||||
typedef typename ei_traits<MatrixType>::Scalar Scalar;
|
||||
typedef typename ei_traits<MatrixType>::Index Index;
|
||||
typedef typename ei_traits<MatrixType>::StorageKind StorageKind;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
@ -50,7 +51,7 @@ class SparseInnerVectorSet : ei_no_assignment_operator,
|
||||
|
||||
enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
|
||||
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
|
||||
class InnerIterator: public MatrixType::InnerIterator
|
||||
{
|
||||
public:
|
||||
@ -111,7 +112,7 @@ class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
|
||||
|
||||
enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
|
||||
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
|
||||
class InnerIterator: public MatrixType::InnerIterator
|
||||
{
|
||||
public:
|
||||
@ -209,7 +210,7 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options>, Size>
|
||||
|
||||
enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
|
||||
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
|
||||
class InnerIterator: public MatrixType::InnerIterator
|
||||
{
|
||||
public:
|
||||
|
@ -43,6 +43,8 @@ struct ei_traits<SparseDiagonalProduct<Lhs, Rhs> >
|
||||
typedef typename ei_cleantype<Lhs>::type _Lhs;
|
||||
typedef typename ei_cleantype<Rhs>::type _Rhs;
|
||||
typedef typename _Lhs::Scalar Scalar;
|
||||
typedef typename ei_promote_index_type<typename ei_traits<Lhs>::Index,
|
||||
typename ei_traits<Rhs>::Index>::type Index;
|
||||
typedef Sparse StorageKind;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
@ -82,7 +84,7 @@ class SparseDiagonalProduct
|
||||
|
||||
public:
|
||||
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseDiagonalProduct)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
|
||||
|
||||
typedef ei_sparse_diagonal_product_inner_iterator_selector
|
||||
<_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
|
||||
|
@ -71,6 +71,8 @@ LDL License:
|
||||
*
|
||||
* \param MatrixType the type of the matrix of which we are computing the LDLT Cholesky decomposition
|
||||
*
|
||||
* \warning the upper triangular part has to be specified. The rest of the matrix is not used. The input matrix must be column major.
|
||||
*
|
||||
* \sa class LDLT
|
||||
*/
|
||||
template<typename MatrixType, int Backend = DefaultBackend>
|
||||
@ -213,7 +215,7 @@ void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
|
||||
m_parent[k] = -1; /* parent of k is not yet known */
|
||||
tags[k] = k; /* mark node k as visited */
|
||||
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
|
||||
Index kk = P ? P[k] : k; /* kth original, or permuted, column */
|
||||
Index kk = P ? P[k] : k; /* kth original, or permuted, column */
|
||||
Index p2 = Ap[kk+1];
|
||||
for (Index p = Ap[kk]; p < p2; ++p)
|
||||
{
|
||||
@ -269,10 +271,10 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
|
||||
for (Index k = 0; k < size; ++k)
|
||||
{
|
||||
/* compute nonzero pattern of kth row of L, in topological order */
|
||||
y[k] = 0.0; /* Y(0:k) is now all zero */
|
||||
y[k] = 0.0; /* Y(0:k) is now all zero */
|
||||
Index top = size; /* stack for pattern is empty */
|
||||
tags[k] = k; /* mark node k as visited */
|
||||
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
|
||||
tags[k] = k; /* mark node k as visited */
|
||||
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
|
||||
Index kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */
|
||||
Index p2 = Ap[kk+1];
|
||||
for (Index p = Ap[kk]; p < p2; ++p)
|
||||
@ -280,7 +282,7 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
|
||||
Index i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */
|
||||
if (i <= k)
|
||||
{
|
||||
y[i] += Ax[p]; /* scatter A(i,k) into Y (sum duplicates) */
|
||||
y[i] += ei_conj(Ax[p]); /* scatter A(i,k) into Y (sum duplicates) */
|
||||
Index len;
|
||||
for (len = 0; tags[i] != k; i = m_parent[i])
|
||||
{
|
||||
@ -291,22 +293,23 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
|
||||
pattern[--top] = pattern[--len];
|
||||
}
|
||||
}
|
||||
|
||||
/* compute numerical values kth row of L (a sparse triangular solve) */
|
||||
m_diag[k] = y[k]; /* get D(k,k) and clear Y(k) */
|
||||
y[k] = 0.0;
|
||||
for (; top < size; ++top)
|
||||
{
|
||||
Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
|
||||
Scalar yi = y[i]; /* get and clear Y(i) */
|
||||
Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
|
||||
Scalar yi = (y[i]); /* get and clear Y(i) */
|
||||
y[i] = 0.0;
|
||||
Index p2 = Lp[i] + m_nonZerosPerCol[i];
|
||||
Index p;
|
||||
for (p = Lp[i]; p < p2; ++p)
|
||||
y[Li[p]] -= Lx[p] * yi;
|
||||
y[Li[p]] -= ei_conj(Lx[p]) * (yi);
|
||||
Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */
|
||||
m_diag[k] -= l_ki * yi;
|
||||
m_diag[k] -= l_ki * ei_conj(yi);
|
||||
Li[p] = k; /* store L(k,i) in column form of L */
|
||||
Lx[p] = l_ki;
|
||||
Lx[p] = (l_ki);
|
||||
++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
|
||||
}
|
||||
if (m_diag[k] == 0.0)
|
||||
@ -323,7 +326,7 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
|
||||
return ok; /* success, diagonal of D is all nonzero */
|
||||
}
|
||||
|
||||
/** Computes b = L^-T L^-1 b */
|
||||
/** Computes b = L^-T D^-1 L^-1 b */
|
||||
template<typename MatrixType, int Backend>
|
||||
template<typename Derived>
|
||||
bool SparseLDLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
|
||||
@ -336,10 +339,8 @@ bool SparseLDLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
|
||||
if (m_matrix.nonZeros()>0) // otherwise L==I
|
||||
m_matrix.template triangularView<UnitLower>().solveInPlace(b);
|
||||
b = b.cwiseQuotient(m_diag);
|
||||
// FIXME should be .adjoint() but it fails to compile...
|
||||
|
||||
if (m_matrix.nonZeros()>0) // otherwise L==I
|
||||
m_matrix.transpose().template triangularView<UnitUpper>().solveInPlace(b);
|
||||
m_matrix.adjoint().template triangularView<UnitUpper>().solveInPlace(b);
|
||||
|
||||
return true;
|
||||
}
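// Illustrative sketch of the solve above: the three-step L D L^* solve spelled
// out with dense objects for clarity. L is assumed unit lower triangular and D
// is the diagonal stored as a vector; all names are illustrative.
#include <Eigen/Dense>
Eigen::VectorXd ldlt_solve_sketch(const Eigen::MatrixXd& L, const Eigen::VectorXd& D,
                                  const Eigen::VectorXd& b)
{
  using namespace Eigen;
  VectorXd x = b;
  L.triangularView<UnitLower>().solveInPlace(x);            // x <- L^-1 b
  x = x.cwiseQuotient(D);                                   // x <- D^-1 x
  L.adjoint().triangularView<UnitUpper>().solveInPlace(x);  // x <- L^-* x
  return x;
}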
|
||||
|
@ -132,7 +132,7 @@ void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
|
||||
m_matrix.resize(size, size);
|
||||
|
||||
// allocate a temporary vector for accumulations
|
||||
AmbiVector<Scalar> tempVector(size);
|
||||
AmbiVector<Scalar,Index> tempVector(size);
|
||||
RealScalar density = a.nonZeros()/RealScalar(size*size);
|
||||
|
||||
// TODO estimate the number of non zeros
|
||||
@ -177,7 +177,7 @@ void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
|
||||
RealScalar rx = ei_sqrt(ei_real(x));
|
||||
m_matrix.insert(j,j) = rx; // FIXME use insertBack
|
||||
Scalar y = Scalar(1)/rx;
|
||||
for (typename AmbiVector<Scalar>::Iterator it(tempVector, m_precision*rx); it; ++it)
|
||||
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector, m_precision*rx); it; ++it)
|
||||
{
|
||||
// FIXME use insertBack
|
||||
m_matrix.insert(it.index(), j) = it.value() * y;
|
||||
@ -195,14 +195,7 @@ bool SparseLLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
|
||||
ei_assert(size==b.rows());
|
||||
|
||||
m_matrix.template triangularView<Lower>().solveInPlace(b);
|
||||
// FIXME should be simply .adjoint() but it fails to compile...
|
||||
if (NumTraits<Scalar>::IsComplex)
|
||||
{
|
||||
CholMatrixType aux = m_matrix.conjugate();
|
||||
aux.transpose().template triangularView<Upper>().solveInPlace(b);
|
||||
}
|
||||
else
|
||||
m_matrix.transpose().template triangularView<Upper>().solveInPlace(b);
|
||||
m_matrix.adjoint().template triangularView<Upper>().solveInPlace(b);
|
||||
|
||||
return true;
|
||||
}
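// Note (added for clarity, not part of this patch): with A = L L^*, solving A x = b
// is a forward solve with L followed by a backward solve with L^*. For real scalars
// adjoint() and transpose() coincide, which is why the old code only needed the
// conjugate workaround in the complex case.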
|
||||
|
@ -1,7 +1,7 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
|
||||
// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
@ -37,14 +37,16 @@
|
||||
* \param _Scalar the scalar type, i.e. the type of the coefficients
|
||||
* \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility
|
||||
* is RowMajor. The default is 0 which means column-major.
|
||||
* \param _Index the type of the indices. Default is \c int.
|
||||
*
|
||||
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
|
||||
*
|
||||
*/
|
||||
template<typename _Scalar, int _Options>
|
||||
struct ei_traits<SparseMatrix<_Scalar, _Options> >
|
||||
template<typename _Scalar, int _Options, typename _Index>
|
||||
struct ei_traits<SparseMatrix<_Scalar, _Options, _Index> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef _Index Index;
|
||||
typedef Sparse StorageKind;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
@ -58,12 +60,12 @@ struct ei_traits<SparseMatrix<_Scalar, _Options> >
|
||||
};
|
||||
};
|
||||
|
||||
template<typename _Scalar, int _Options>
|
||||
template<typename _Scalar, int _Options, typename _Index>
|
||||
class SparseMatrix
|
||||
: public SparseMatrixBase<SparseMatrix<_Scalar, _Options> >
|
||||
: public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
|
||||
{
|
||||
public:
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseMatrix)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
|
||||
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
|
||||
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
|
||||
// FIXME: why are these operators already available ???
|
||||
@ -80,7 +82,7 @@ class SparseMatrix
|
||||
Index m_outerSize;
|
||||
Index m_innerSize;
|
||||
Index* m_outerIndex;
|
||||
CompressedStorage<Scalar> m_data;
|
||||
CompressedStorage<Scalar,Index> m_data;
|
||||
|
||||
public:
|
||||
|
||||
@ -135,67 +137,41 @@ class SparseMatrix
|
||||
/** \returns the number of non zero coefficients */
|
||||
inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
|
||||
|
||||
/** \deprecated use setZero() and reserve()
|
||||
* Initializes the filling process of \c *this.
|
||||
* \param reserveSize approximate number of nonzeros
|
||||
* Note that the matrix \c *this is zeroed.
|
||||
*/
|
||||
EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
|
||||
{
|
||||
setZero();
|
||||
m_data.reserve(reserveSize);
|
||||
}
|
||||
|
||||
/** Preallocates \a reserveSize non zeros */
|
||||
inline void reserve(Index reserveSize)
|
||||
{
|
||||
m_data.reserve(reserveSize);
|
||||
}
|
||||
|
||||
/** \deprecated use insert()
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
|
||||
{
|
||||
const Index outer = IsRowMajor ? row : col;
|
||||
const Index inner = IsRowMajor ? col : row;
|
||||
|
||||
if (m_outerIndex[outer+1]==0)
|
||||
{
|
||||
// we start a new inner vector
|
||||
Index i = outer;
|
||||
while (i>=0 && m_outerIndex[i]==0)
|
||||
{
|
||||
m_outerIndex[i] = m_data.size();
|
||||
--i;
|
||||
}
|
||||
m_outerIndex[outer+1] = m_outerIndex[outer];
|
||||
}
|
||||
else
|
||||
{
|
||||
ei_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
|
||||
}
|
||||
// std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
|
||||
assert(size_t(m_outerIndex[outer+1]) == m_data.size());
|
||||
Index id = m_outerIndex[outer+1];
|
||||
++m_outerIndex[outer+1];
|
||||
|
||||
m_data.append(0, inner);
|
||||
return m_data.value(id);
|
||||
}
|
||||
|
||||
//--- low level purely coherent filling ---
|
||||
|
||||
inline Scalar& insertBack(Index outer, Index inner)
|
||||
/** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
|
||||
* - the nonzero does not already exist
|
||||
* - the new coefficient is the last one according to the storage order
|
||||
*
|
||||
* Before filling a given inner vector you must call the startVec(Index) function.
|
||||
*
|
||||
* After an insertion session, you should call the finalize() function.
|
||||
*
|
||||
* \sa insert, insertBackByOuterInner, startVec */
|
||||
inline Scalar& insertBack(Index row, Index col)
|
||||
{
|
||||
ei_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "wrong sorted insertion");
|
||||
ei_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "wrong sorted insertion");
|
||||
return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
|
||||
}
|
||||
|
||||
/** \sa insertBack, startVec */
|
||||
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
|
||||
{
|
||||
ei_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
|
||||
ei_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
|
||||
Index id = m_outerIndex[outer+1];
|
||||
++m_outerIndex[outer+1];
|
||||
m_data.append(0, inner);
|
||||
return m_data.value(id);
|
||||
}
|
||||
|
||||
inline Scalar& insertBackNoCheck(Index outer, Index inner)
|
||||
/** \warning use it only if you know what you are doing */
|
||||
inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
|
||||
{
|
||||
Index id = m_outerIndex[outer+1];
|
||||
++m_outerIndex[outer+1];
|
||||
@ -203,23 +179,16 @@ class SparseMatrix
|
||||
return m_data.value(id);
|
||||
}
|
||||
|
||||
/** \sa insertBack, insertBackByOuterInner */
|
||||
inline void startVec(Index outer)
|
||||
{
|
||||
ei_assert(m_outerIndex[outer]==int(m_data.size()) && "you must call startVec on each inner vec");
|
||||
ei_assert(m_outerIndex[outer+1]==0 && "you must call startVec on each inner vec");
|
||||
ei_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
|
||||
ei_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
|
||||
m_outerIndex[outer+1] = m_outerIndex[outer];
|
||||
}
|
||||
|
||||
//---
|
||||
|
||||
/** \deprecated use insert()
|
||||
* Like fill() but with random inner coordinates.
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
|
||||
{
|
||||
return insert(row,col);
|
||||
}
|
||||
|
||||
/** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
|
||||
* The non zero coefficient must \b not already exist.
|
||||
*
|
||||
@ -332,7 +301,8 @@ class SparseMatrix
|
||||
return (m_data.value(id) = 0);
|
||||
}
|
||||
|
||||
EIGEN_DEPRECATED void endFill() { finalize(); }
|
||||
|
||||
|
||||
|
||||
/** Must be called after inserting a set of non zero entries.
|
||||
*/
|
||||
@ -351,6 +321,7 @@ class SparseMatrix
|
||||
}
|
||||
}
|
||||
|
||||
/** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
|
||||
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
|
||||
{
|
||||
Index k = 0;
|
||||
@ -389,23 +360,29 @@ class SparseMatrix
|
||||
}
|
||||
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
|
||||
}
|
||||
|
||||
/** Low level API
|
||||
* Resize the nonzero vector to \a size */
|
||||
void resizeNonZeros(Index size)
|
||||
{
|
||||
m_data.resize(size);
|
||||
}
|
||||
|
||||
/** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
|
||||
inline SparseMatrix()
|
||||
: m_outerSize(-1), m_innerSize(0), m_outerIndex(0)
|
||||
{
|
||||
resize(0, 0);
|
||||
}
|
||||
|
||||
/** Constructs a \a rows \c x \a cols empty matrix */
|
||||
inline SparseMatrix(Index rows, Index cols)
|
||||
: m_outerSize(0), m_innerSize(0), m_outerIndex(0)
|
||||
{
|
||||
resize(rows, cols);
|
||||
}
|
||||
|
||||
/** Constructs a sparse matrix from the sparse expression \a other */
|
||||
template<typename OtherDerived>
|
||||
inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
|
||||
: m_outerSize(0), m_innerSize(0), m_outerIndex(0)
|
||||
@ -413,12 +390,14 @@ class SparseMatrix
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
/** Copy constructor */
|
||||
inline SparseMatrix(const SparseMatrix& other)
|
||||
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0)
|
||||
{
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
/** Swap the content of two sparse matrices of same type (optimization) */
|
||||
inline void swap(SparseMatrix& other)
|
||||
{
|
||||
//EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
|
||||
@ -444,11 +423,13 @@ class SparseMatrix
|
||||
return *this;
|
||||
}
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
template<typename Lhs, typename Rhs>
|
||||
inline SparseMatrix& operator=(const SparseProduct<Lhs,Rhs>& product)
|
||||
{
|
||||
return Base::operator=(product);
|
||||
}
|
||||
#endif
|
||||
|
||||
template<typename OtherDerived>
|
||||
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
|
||||
@ -534,10 +515,65 @@ class SparseMatrix
|
||||
|
||||
/** Overloaded for performance */
|
||||
Scalar sum() const;
|
||||
|
||||
public:
|
||||
|
||||
/** \deprecated use setZero() and reserve()
|
||||
* Initializes the filling process of \c *this.
|
||||
* \param reserveSize approximate number of nonzeros
|
||||
* Note that the matrix \c *this is zeroed.
|
||||
*/
|
||||
EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
|
||||
{
|
||||
setZero();
|
||||
m_data.reserve(reserveSize);
|
||||
}
|
||||
|
||||
/** \deprecated use insert()
|
||||
* Like fill() but with random inner coordinates.
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
|
||||
{
|
||||
return insert(row,col);
|
||||
}
|
||||
|
||||
/** \deprecated use insert()
|
||||
*/
|
||||
EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
|
||||
{
|
||||
const Index outer = IsRowMajor ? row : col;
|
||||
const Index inner = IsRowMajor ? col : row;
|
||||
|
||||
if (m_outerIndex[outer+1]==0)
|
||||
{
|
||||
// we start a new inner vector
|
||||
Index i = outer;
|
||||
while (i>=0 && m_outerIndex[i]==0)
|
||||
{
|
||||
m_outerIndex[i] = m_data.size();
|
||||
--i;
|
||||
}
|
||||
m_outerIndex[outer+1] = m_outerIndex[outer];
|
||||
}
|
||||
else
|
||||
{
|
||||
ei_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
|
||||
}
|
||||
// std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
|
||||
assert(size_t(m_outerIndex[outer+1]) == m_data.size());
|
||||
Index id = m_outerIndex[outer+1];
|
||||
++m_outerIndex[outer+1];
|
||||
|
||||
m_data.append(0, inner);
|
||||
return m_data.value(id);
|
||||
}
|
||||
|
||||
/** \deprecated use finalize() */
|
||||
EIGEN_DEPRECATED void endFill() { finalize(); }
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Options>
|
||||
class SparseMatrix<Scalar,_Options>::InnerIterator
|
||||
template<typename Scalar, int _Options, typename _Index>
|
||||
class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
|
||||
{
|
||||
public:
|
||||
InnerIterator(const SparseMatrix& mat, Index outer)
|
||||
|
@ -43,7 +43,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
|
||||
typedef typename ei_traits<Derived>::Scalar Scalar;
|
||||
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
|
||||
typedef typename ei_traits<Derived>::StorageKind StorageKind;
|
||||
typedef typename ei_index<StorageKind>::type Index;
|
||||
typedef typename ei_traits<Derived>::Index Index;
|
||||
|
||||
typedef SparseMatrixBase StorageBaseType;
|
||||
|
||||
@ -209,7 +209,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
|
||||
{
|
||||
Scalar v = it.value();
|
||||
if (v!=Scalar(0))
|
||||
temp.insertBack(Flip?it.index():j,Flip?j:it.index()) = v;
|
||||
temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
|
||||
}
|
||||
}
|
||||
temp.finalize();
|
||||
@ -239,7 +239,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
|
||||
{
|
||||
Scalar v = it.value();
|
||||
if (v!=Scalar(0))
|
||||
derived().insertBack(j,it.index()) = v;
|
||||
derived().insertBackByOuterInner(j,it.index()) = v;
|
||||
}
|
||||
}
|
||||
derived().finalize();
|
||||
|
@ -57,6 +57,8 @@ struct ei_traits<SparseProduct<LhsNested, RhsNested> >
|
||||
typedef typename ei_cleantype<LhsNested>::type _LhsNested;
|
||||
typedef typename ei_cleantype<RhsNested>::type _RhsNested;
|
||||
typedef typename _LhsNested::Scalar Scalar;
|
||||
typedef typename ei_promote_index_type<typename ei_traits<_LhsNested>::Index,
|
||||
typename ei_traits<_RhsNested>::Index>::type Index;
|
||||
|
||||
enum {
|
||||
LhsCoeffReadCost = _LhsNested::CoeffReadCost,
|
||||
@ -236,7 +238,7 @@ static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& r
|
||||
ei_assert(lhs.outerSize() == rhs.innerSize());
|
||||
|
||||
// allocate a temporary buffer
|
||||
AmbiVector<Scalar> tempVector(rows);
|
||||
AmbiVector<Scalar,Index> tempVector(rows);
|
||||
|
||||
// estimate the number of non zero entries
|
||||
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
|
||||
@ -264,8 +266,8 @@ static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& r
|
||||
}
|
||||
}
|
||||
res.startVec(j);
|
||||
for (typename AmbiVector<Scalar>::Iterator it(tempVector); it; ++it)
|
||||
res.insertBack(j,it.index()) = it.value();
|
||||
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector); it; ++it)
|
||||
res.insertBackByOuterInner(j,it.index()) = it.value();
|
||||
}
|
||||
res.finalize();
|
||||
}
|
||||
@ -380,9 +382,9 @@ struct ei_sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor
|
||||
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
|
||||
{
|
||||
// prevent warnings until the code is fixed
|
||||
(void) lhs;
|
||||
(void) rhs;
|
||||
(void) res;
|
||||
EIGEN_UNUSED_VARIABLE(lhs);
|
||||
EIGEN_UNUSED_VARIABLE(rhs);
|
||||
EIGEN_UNUSED_VARIABLE(res);
|
||||
|
||||
// typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
|
||||
// RowMajorMatrix rhsRow = rhs;
|
||||
|
@ -37,17 +37,17 @@ SparseMatrixBase<Derived>::sum() const
|
||||
return res;
|
||||
}
|
||||
|
||||
template<typename _Scalar, int _Options>
|
||||
typename ei_traits<SparseMatrix<_Scalar,_Options> >::Scalar
|
||||
SparseMatrix<_Scalar,_Options>::sum() const
|
||||
template<typename _Scalar, int _Options, typename _Index>
|
||||
typename ei_traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
|
||||
SparseMatrix<_Scalar,_Options,_Index>::sum() const
|
||||
{
|
||||
ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
|
||||
return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
|
||||
}
|
||||
|
||||
template<typename _Scalar, int _Options>
|
||||
typename ei_traits<SparseVector<_Scalar,_Options> >::Scalar
|
||||
SparseVector<_Scalar,_Options>::sum() const
|
||||
template<typename _Scalar, int _Options, typename _Index>
|
||||
typename ei_traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
|
||||
SparseVector<_Scalar,_Options,_Index>::sum() const
|
||||
{
|
||||
ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
|
||||
return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
|
||||
|
@ -56,30 +56,13 @@ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
|
||||
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
|
||||
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
|
||||
|
||||
#define _EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
|
||||
typedef BaseClass Base; \
|
||||
typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
|
||||
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
|
||||
typedef typename Eigen::ei_nested<Derived>::type Nested; \
|
||||
typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
|
||||
typedef typename Eigen::ei_index<StorageKind>::type Index; \
|
||||
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
|
||||
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
|
||||
Flags = Eigen::ei_traits<Derived>::Flags, \
|
||||
CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
|
||||
SizeAtCompileTime = Base::SizeAtCompileTime, \
|
||||
IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
|
||||
|
||||
#define EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived) \
|
||||
_EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
|
||||
|
||||
#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
|
||||
typedef BaseClass Base; \
|
||||
typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
|
||||
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
|
||||
typedef typename Eigen::ei_nested<Derived>::type Nested; \
|
||||
typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
|
||||
typedef typename Eigen::ei_index<StorageKind>::type Index; \
|
||||
typedef typename Eigen::ei_traits<Derived>::Index Index; \
|
||||
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
|
||||
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
|
||||
Flags = Eigen::ei_traits<Derived>::Flags, \
|
||||
@ -92,12 +75,6 @@ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
|
||||
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
|
||||
_EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
|
||||
|
||||
template<>
|
||||
struct ei_index<Sparse>
|
||||
{ typedef EIGEN_DEFAULT_SPARSE_INDEX_TYPE type; };
|
||||
|
||||
typedef ei_index<Sparse>::type SparseIndex;
|
||||
|
||||
enum SparseBackend {
|
||||
DefaultBackend,
|
||||
Taucs,
|
||||
@ -128,10 +105,10 @@ enum {
|
||||
};
|
||||
|
||||
template<typename Derived> class SparseMatrixBase;
|
||||
template<typename _Scalar, int _Flags = 0> class SparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0> class DynamicSparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0> class SparseVector;
|
||||
template<typename _Scalar, int _Flags = 0> class MappedSparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
|
||||
template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
|
||||
|
||||
template<typename MatrixType, int Size> class SparseInnerVectorSet;
|
||||
template<typename MatrixType, int Mode> class SparseTriangularView;
|
||||
|
@ -34,10 +34,11 @@
|
||||
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
|
||||
*
|
||||
*/
|
||||
template<typename _Scalar, int _Options>
|
||||
struct ei_traits<SparseVector<_Scalar, _Options> >
|
||||
template<typename _Scalar, int _Options, typename _Index>
|
||||
struct ei_traits<SparseVector<_Scalar, _Options, _Index> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
typedef _Index Index;
|
||||
typedef Sparse StorageKind;
|
||||
typedef MatrixXpr XprKind;
|
||||
enum {
|
||||
@ -53,12 +54,12 @@ struct ei_traits<SparseVector<_Scalar, _Options> >
|
||||
};
|
||||
};
|
||||
|
||||
template<typename _Scalar, int _Options>
|
||||
template<typename _Scalar, int _Options, typename _Index>
|
||||
class SparseVector
|
||||
: public SparseMatrixBase<SparseVector<_Scalar, _Options> >
|
||||
: public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
|
||||
{
|
||||
public:
|
||||
EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseVector)
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
|
||||
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
|
||||
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
|
||||
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =)
|
||||
@ -69,11 +70,11 @@ class SparseVector
|
||||
typedef SparseMatrixBase<SparseVector> SparseBase;
|
||||
enum { IsColVector = ei_traits<SparseVector>::IsColVector };
|
||||
|
||||
CompressedStorage<Scalar> m_data;
|
||||
CompressedStorage<Scalar,Index> m_data;
|
||||
Index m_size;
|
||||
|
||||
CompressedStorage<Scalar>& _data() { return m_data; }
|
||||
CompressedStorage<Scalar>& _data() const { return m_data; }
|
||||
CompressedStorage<Scalar,Index>& _data() { return m_data; }
|
||||
CompressedStorage<Scalar,Index>& _data() const { return m_data; }
|
||||
|
||||
public:
|
||||
|
||||
@ -127,7 +128,7 @@ class SparseVector
|
||||
ei_assert(outer==0);
|
||||
}
|
||||
|
||||
inline Scalar& insertBack(Index outer, Index inner)
|
||||
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
|
||||
{
|
||||
ei_assert(outer==0);
|
||||
return insertBack(inner);
|
||||
@ -138,8 +139,10 @@ class SparseVector
|
||||
return m_data.value(m_data.size()-1);
|
||||
}
|
||||
|
||||
inline Scalar& insert(Index outer, Index inner)
|
||||
inline Scalar& insert(Index row, Index col)
|
||||
{
|
||||
Index inner = IsColVector ? row : col;
|
||||
Index outer = IsColVector ? col : row;
|
||||
ei_assert(outer==0);
|
||||
return insert(inner);
|
||||
}
|
||||
@ -165,42 +168,7 @@ class SparseVector
|
||||
*/
|
||||
inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
|
||||
|
||||
/** \deprecated use setZero() and reserve() */
|
||||
EIGEN_DEPRECATED void startFill(Index reserve)
|
||||
{
|
||||
setZero();
|
||||
m_data.reserve(reserve);
|
||||
}
|
||||
|
||||
/** \deprecated use insertBack(Index,Index) */
|
||||
EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
|
||||
{
|
||||
ei_assert(r==0 || c==0);
|
||||
return fill(IsColVector ? r : c);
|
||||
}
|
||||
|
||||
/** \deprecated use insertBack(Index) */
|
||||
EIGEN_DEPRECATED Scalar& fill(Index i)
|
||||
{
|
||||
m_data.append(0, i);
|
||||
return m_data.value(m_data.size()-1);
|
||||
}
|
||||
|
||||
/** \deprecated use insert(Index,Index) */
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
|
||||
{
|
||||
ei_assert(r==0 || c==0);
|
||||
return fillrand(IsColVector ? r : c);
|
||||
}
|
||||
|
||||
/** \deprecated use insert(Index) */
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index i)
|
||||
{
|
||||
return insert(i);
|
||||
}
|
||||
|
||||
/** \deprecated use finalize() */
|
||||
EIGEN_DEPRECATED void endFill() {}
|
||||
inline void finalize() {}
|
||||
|
||||
void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
|
||||
@ -362,10 +330,49 @@ class SparseVector
|
||||
|
||||
/** Overloaded for performance */
|
||||
Scalar sum() const;
|
||||
|
||||
public:
|
||||
|
||||
/** \deprecated use setZero() and reserve() */
|
||||
EIGEN_DEPRECATED void startFill(Index reserve)
|
||||
{
|
||||
setZero();
|
||||
m_data.reserve(reserve);
|
||||
}
|
||||
|
||||
/** \deprecated use insertBack(Index,Index) */
|
||||
EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
|
||||
{
|
||||
ei_assert(r==0 || c==0);
|
||||
return fill(IsColVector ? r : c);
|
||||
}
|
||||
|
||||
/** \deprecated use insertBack(Index) */
|
||||
EIGEN_DEPRECATED Scalar& fill(Index i)
|
||||
{
|
||||
m_data.append(0, i);
|
||||
return m_data.value(m_data.size()-1);
|
||||
}
|
||||
|
||||
/** \deprecated use insert(Index,Index) */
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
|
||||
{
|
||||
ei_assert(r==0 || c==0);
|
||||
return fillrand(IsColVector ? r : c);
|
||||
}
|
||||
|
||||
/** \deprecated use insert(Index) */
|
||||
EIGEN_DEPRECATED Scalar& fillrand(Index i)
|
||||
{
|
||||
return insert(i);
|
||||
}
|
||||
|
||||
/** \deprecated use finalize() */
|
||||
EIGEN_DEPRECATED void endFill() {}
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Options>
|
||||
class SparseVector<Scalar,_Options>::InnerIterator
|
||||
template<typename Scalar, int _Options, typename _Index>
|
||||
class SparseVector<Scalar,_Options,_Index>::InnerIterator
|
||||
{
|
||||
public:
|
||||
InnerIterator(const SparseVector& vec, Index outer=0)
|
||||
@ -374,7 +381,7 @@ class SparseVector<Scalar,_Options>::InnerIterator
|
||||
ei_assert(outer==0);
|
||||
}
|
||||
|
||||
InnerIterator(const CompressedStorage<Scalar>& data)
|
||||
InnerIterator(const CompressedStorage<Scalar,Index>& data)
|
||||
: m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
|
||||
{}
|
||||
|
||||
@ -395,7 +402,7 @@ class SparseVector<Scalar,_Options>::InnerIterator
|
||||
inline operator bool() const { return (m_id < m_end); }
|
||||
|
||||
protected:
|
||||
const CompressedStorage<Scalar>& m_data;
|
||||
const CompressedStorage<Scalar,Index>& m_data;
|
||||
Index m_id;
|
||||
const Index m_end;
|
||||
};
|
||||
|
@ -267,8 +267,8 @@ SluMatrix SparseMatrixBase<Derived>::asSluMatrix()
|
||||
}
|
||||
|
||||
/** View a Super LU matrix as an Eigen expression */
|
||||
template<typename Scalar, int Flags>
|
||||
MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(SluMatrix& sluMat)
|
||||
template<typename Scalar, int Flags, typename _Index>
|
||||
MappedSparseMatrix<Scalar,Flags,_Index>::MappedSparseMatrix(SluMatrix& sluMat)
|
||||
{
|
||||
if ((Flags&RowMajorBit)==RowMajorBit)
|
||||
{
|
||||
|
@ -63,8 +63,8 @@ taucs_ccs_matrix SparseMatrixBase<Derived>::asTaucsMatrix()
|
||||
return res;
|
||||
}
|
||||
|
||||
template<typename Scalar, int Flags>
|
||||
MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(taucs_ccs_matrix& taucsMat)
|
||||
template<typename Scalar, int Flags, typename _Index>
|
||||
MappedSparseMatrix<Scalar,Flags,_Index>::MappedSparseMatrix(taucs_ccs_matrix& taucsMat)
|
||||
{
|
||||
m_innerSize = taucsMat.m;
|
||||
m_outerSize = taucsMat.n;
|
||||
|
@ -207,10 +207,12 @@ template<typename Lhs, typename Rhs, int Mode, int UpLo>
|
||||
struct ei_sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
|
||||
{
|
||||
typedef typename Rhs::Scalar Scalar;
|
||||
typedef typename ei_promote_index_type<typename ei_traits<Lhs>::Index,
|
||||
typename ei_traits<Rhs>::Index>::type Index;
|
||||
static void run(const Lhs& lhs, Rhs& other)
|
||||
{
|
||||
const bool IsLower = (UpLo==Lower);
|
||||
AmbiVector<Scalar> tempVector(other.rows()*2);
|
||||
AmbiVector<Scalar,Index> tempVector(other.rows()*2);
|
||||
tempVector.setBounds(0,other.rows());
|
||||
|
||||
Rhs res(other.rows(), other.cols());
|
||||
@ -266,7 +268,7 @@ struct ei_sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
|
||||
|
||||
int count = 0;
|
||||
// FIXME compute a reference value to filter zeros
|
||||
for (typename AmbiVector<Scalar>::Iterator it(tempVector/*,1e-12*/); it; ++it)
|
||||
for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
|
||||
{
|
||||
++ count;
|
||||
// std::cerr << "fill " << it.index() << ", " << col << "\n";
|
||||
|
@ -87,7 +87,12 @@ public:
|
||||
{
|
||||
m_times[CPU_TIMER] = getCpuTime() - m_starts[CPU_TIMER];
|
||||
m_times[REAL_TIMER] = getRealTime() - m_starts[REAL_TIMER];
|
||||
#if EIGEN_VERSION_AT_LEAST(2,90,0)
|
||||
m_bests = m_bests.cwiseMin(m_times);
|
||||
#else
|
||||
m_bests(0) = std::min(m_bests(0),m_times(0));
|
||||
m_bests(1) = std::min(m_bests(1),m_times(1));
|
||||
#endif
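// (cwiseMin keeps the elementwise minimum, i.e. the best CPU and real times seen
//  so far; the #else branch does the same by hand for older Eigen versions)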
|
||||
m_totals += m_times;
|
||||
}
|
||||
|
||||
|
@ -112,7 +112,8 @@ int main(int argc, char ** argv)
|
||||
if(procs>1)
|
||||
{
|
||||
BenchTimer tmono;
|
||||
omp_set_num_threads(1);
|
||||
//omp_set_num_threads(1);
|
||||
Eigen::setNbThreads(1);
|
||||
BENCH(tmono, tries, rep, gemm(a,b,c));
|
||||
std::cout << "eigen mono cpu " << tmono.best(CPU_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(CPU_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(CPU_TIMER) << "s)\n";
|
||||
std::cout << "eigen mono real " << tmono.best(REAL_TIMER)/rep << "s \t" << (double(m)*n*p*rep*2/tmono.best(REAL_TIMER))*1e-9 << " GFLOPS \t(" << tmono.total(REAL_TIMER) << "s)\n";
|
||||
|
@ -183,7 +183,7 @@ for(int j=0; j<1000; ++j)
|
||||
}
|
||||
mat.finalize(); // optional for a DynamicSparseMatrix
|
||||
\endcode
|
||||
|
||||
Note that there is also an insertBackByOuterInner(Index outer, Index inner) function, which allows writing code that is agnostic to the storage order.
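For instance, the filling loop can be written in a storage-order-agnostic way (a minimal sketch; the sparsity pattern and the value() helper below are hypothetical, for illustration only):

\code
SparseMatrix<double> mat(rows,cols);
mat.reserve(nnzEstimate);                           // rough guess of the number of nonzeros
for (int j=0; j<mat.outerSize(); ++j)
{
  mat.startVec(j);                                  // start outer vector j
  for (int i=j; i<mat.innerSize(); i+=3)            // arbitrary pattern, strictly increasing inner indices
    mat.insertBackByOuterInner(j,i) = value(i,j);   // value() is a hypothetical helper
}
mat.finalize();
\endcode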
|
||||
|
||||
\section TutorialSparseFeatureSet Supported operators and functions
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
MatrixXcf ones = MatrixXcf::Ones(3,3);
|
||||
ComplexEigenSolver<MatrixXcf> ces(ones);
|
||||
ComplexEigenSolver<MatrixXcf> ces(ones, /* computeEigenvectors = */ false);
|
||||
cout << "The eigenvalues of the 3x3 matrix of ones are:"
|
||||
<< endl << ces.eigenvalues() << endl;
|
||||
|
@ -1,6 +1,6 @@
|
||||
EigenSolver<MatrixXf> es;
|
||||
MatrixXf A = MatrixXf::Random(4,4);
|
||||
es.compute(A);
|
||||
es.compute(A, /* computeEigenvectors = */ false);
|
||||
cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl;
|
||||
es.compute(A + MatrixXf::Identity(4,4)); // re-use es to compute eigenvalues of A+I
|
||||
es.compute(A + MatrixXf::Identity(4,4), false); // re-use es to compute eigenvalues of A+I
|
||||
cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl;
|
||||
|
@ -1,4 +1,4 @@
|
||||
MatrixXd ones = MatrixXd::Ones(3,3);
|
||||
EigenSolver<MatrixXd> es(ones);
|
||||
EigenSolver<MatrixXd> es(ones, false);
|
||||
cout << "The eigenvalues of the 3x3 matrix of ones are:"
|
||||
<< endl << es.eigenvalues() << endl;
|
||||
|
@ -1,6 +1,6 @@
|
||||
MatrixXf A = MatrixXf::Random(4,4);
|
||||
RealSchur<MatrixXf> schur(4);
|
||||
schur.compute(A);
|
||||
schur.compute(A, /* computeU = */ false);
|
||||
cout << "The matrix T in the decomposition of A is:" << endl << schur.matrixT() << endl;
|
||||
schur.compute(A.inverse());
|
||||
schur.compute(A.inverse(), /* computeU = */ false);
|
||||
cout << "The matrix T in the decomposition of A^(-1) is:" << endl << schur.matrixT() << endl;
|
||||
|
@ -129,7 +129,7 @@ template<typename ArrayType> void comparisons(const ArrayType& m)
|
||||
VERIFY(((m1.abs()+1)>RealScalar(0.1)).count() == rows*cols);
|
||||
|
||||
typedef Array<typename ArrayType::Index, Dynamic, 1> ArrayOfIndices;
|
||||
|
||||
|
||||
// TODO allows colwise/rowwise for array
|
||||
VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayOfIndices::Constant(cols,rows).transpose());
|
||||
VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).rowwise().count(), ArrayOfIndices::Constant(rows, cols));
|
||||
@ -151,10 +151,10 @@ template<typename ArrayType> void array_real(const ArrayType& m)
|
||||
VERIFY_IS_APPROX(m1.sin(), ei_sin(m1));
|
||||
VERIFY_IS_APPROX(m1.cos(), std::cos(m1));
|
||||
VERIFY_IS_APPROX(m1.cos(), ei_cos(m1));
|
||||
|
||||
|
||||
VERIFY_IS_APPROX(ei_cos(m1+RealScalar(3)*m2), ei_cos((m1+RealScalar(3)*m2).eval()));
|
||||
VERIFY_IS_APPROX(std::cos(m1+RealScalar(3)*m2), std::cos((m1+RealScalar(3)*m2).eval()));
|
||||
|
||||
|
||||
VERIFY_IS_APPROX(m1.abs().sqrt(), std::sqrt(std::abs(m1)));
|
||||
VERIFY_IS_APPROX(m1.abs().sqrt(), ei_sqrt(ei_abs(m1)));
|
||||
VERIFY_IS_APPROX(m1.abs(), ei_sqrt(ei_abs2(m1)));
|
||||
@ -163,10 +163,10 @@ template<typename ArrayType> void array_real(const ArrayType& m)
|
||||
VERIFY_IS_APPROX(ei_abs2(std::real(m1)) + ei_abs2(std::imag(m1)), ei_abs2(m1));
|
||||
if(!NumTraits<Scalar>::IsComplex)
|
||||
VERIFY_IS_APPROX(ei_real(m1), m1);
|
||||
|
||||
|
||||
VERIFY_IS_APPROX(m1.abs().log(), std::log(std::abs(m1)));
|
||||
VERIFY_IS_APPROX(m1.abs().log(), ei_log(ei_abs(m1)));
|
||||
|
||||
|
||||
VERIFY_IS_APPROX(m1.exp(), std::exp(m1));
|
||||
VERIFY_IS_APPROX(m1.exp() * m2.exp(), std::exp(m1+m2));
|
||||
VERIFY_IS_APPROX(m1.exp(), ei_exp(m1));
|
||||
|
@ -37,10 +37,10 @@ template<typename MatrixType> void array_for_matrix(const MatrixType& m)
|
||||
MatrixType m1 = MatrixType::Random(rows, cols),
|
||||
m2 = MatrixType::Random(rows, cols),
|
||||
m3(rows, cols);
|
||||
|
||||
|
||||
ColVectorType cv1 = ColVectorType::Random(rows);
|
||||
RowVectorType rv1 = RowVectorType::Random(cols);
|
||||
|
||||
|
||||
Scalar s1 = ei_random<Scalar>(),
|
||||
s2 = ei_random<Scalar>();
|
||||
|
||||
|
@ -103,47 +103,6 @@ template<typename MatrixType> void reverse(const MatrixType& m)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
cout << "m1:" << endl << m1 << endl;
|
||||
cout << "m1c_reversed:" << endl << m1c_reversed << endl;
|
||||
|
||||
cout << "----------------" << endl;
|
||||
|
||||
for ( int i=0; i< rows*cols; i++){
|
||||
cout << m1c_reversed.coeff(i) << endl;
|
||||
}
|
||||
|
||||
cout << "----------------" << endl;
|
||||
|
||||
for ( int i=0; i< rows*cols; i++){
|
||||
cout << m1c_reversed.colwise().reverse().coeff(i) << endl;
|
||||
}
|
||||
|
||||
cout << "================" << endl;
|
||||
|
||||
cout << "m1.coeff( ind ): " << m1.coeff( ind ) << endl;
|
||||
cout << "m1c_reversed.colwise().reverse().coeff( ind ): " << m1c_reversed.colwise().reverse().coeff( ind ) << endl;
|
||||
*/
|
||||
|
||||
//MatrixType m1r_reversed = m1.rowwise().reverse();
|
||||
//VERIFY_IS_APPROX( m1r_reversed.rowwise().reverse().coeff( ind ), m1.coeff( ind ) );
|
||||
|
||||
/*
|
||||
cout << "m1" << endl << m1 << endl;
|
||||
cout << "m1 using coeff(int index)" << endl;
|
||||
for ( int i = 0; i < rows*cols; i++) {
|
||||
cout << m1.coeff(i) << " ";
|
||||
}
|
||||
cout << endl;
|
||||
|
||||
cout << "m1.transpose()" << endl << m1.transpose() << endl;
|
||||
cout << "m1.transpose() using coeff(int index)" << endl;
|
||||
for ( int i = 0; i < rows*cols; i++) {
|
||||
cout << m1.transpose().coeff(i) << " ";
|
||||
}
|
||||
cout << endl;
|
||||
*/
|
||||
/*
|
||||
Scalar x = ei_random<Scalar>();
|
||||
|
||||
int r = ei_random<int>(0, rows-1),
|
||||
@ -152,6 +111,7 @@ template<typename MatrixType> void reverse(const MatrixType& m)
|
||||
m1.reverse()(r, c) = x;
|
||||
VERIFY_IS_APPROX(x, m1(rows - 1 - r, cols - 1 - c));
|
||||
|
||||
/*
|
||||
m1.colwise().reverse()(r, c) = x;
|
||||
VERIFY_IS_APPROX(x, m1(rows - 1 - r, c));
|
||||
|
||||
|
@ -138,7 +138,8 @@ template<typename MatrixType> void basicStuffComplex(const MatrixType& m)
|
||||
VERIFY(ei_imag(s1)==ei_imag_ref(s1));
|
||||
ei_real_ref(s1) = ei_real(s2);
|
||||
ei_imag_ref(s1) = ei_imag(s2);
|
||||
VERIFY(s1==s2);
|
||||
VERIFY(ei_isApprox(s1, s2, NumTraits<RealScalar>::epsilon()));
|
||||
// extended precision in Intel FPUs means that s1 == s2 in the line above is not guaranteed.
|
||||
|
||||
RealMatrixType rm1 = RealMatrixType::Random(rows,cols),
|
||||
rm2 = RealMatrixType::Random(rows,cols);
|
||||
|
@ -26,10 +26,21 @@
|
||||
#define EIGEN_NO_ASSERTION_CHECKING
|
||||
#endif
|
||||
|
||||
static int nb_temporaries;
|
||||
|
||||
#define EIGEN_DEBUG_MATRIX_CTOR { if(size!=0) nb_temporaries++; }
|
||||
|
||||
#include "main.h"
|
||||
#include <Eigen/Cholesky>
|
||||
#include <Eigen/QR>
|
||||
|
||||
#define VERIFY_EVALUATION_COUNT(XPR,N) {\
|
||||
nb_temporaries = 0; \
|
||||
XPR; \
|
||||
if(nb_temporaries!=N) std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; \
|
||||
VERIFY( (#XPR) && nb_temporaries==N ); \
|
||||
}
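// Usage note (added for clarity, not part of this patch): EIGEN_DEBUG_MATRIX_CTOR above
// increments nb_temporaries in every non-empty matrix constructor, so a check such as
//   VERIFY_EVALUATION_COUNT( matX = ldltlo.solve(matX), 0 );
// (used further below) asserts that the expression evaluates without creating any temporary.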
|
||||
|
||||
#ifdef HAS_GSL
|
||||
#include "gsl_helper.h"
|
||||
#endif
|
||||
@ -110,20 +121,46 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
|
||||
VERIFY_IS_APPROX(symm * matX, matB);
|
||||
}
|
||||
|
||||
int sign = ei_random<int>()%2 ? 1 : -1;
|
||||
|
||||
if(sign == -1)
|
||||
// LDLT
|
||||
{
|
||||
symm = -symm; // test a negative matrix
|
||||
}
|
||||
int sign = ei_random<int>()%2 ? 1 : -1;
|
||||
|
||||
{
|
||||
LDLT<SquareMatrixType> ldlt(symm);
|
||||
VERIFY_IS_APPROX(symm, ldlt.reconstructedMatrix());
|
||||
vecX = ldlt.solve(vecB);
|
||||
if(sign == -1)
|
||||
{
|
||||
symm = -symm; // test a negative matrix
|
||||
}
|
||||
|
||||
SquareMatrixType symmUp = symm.template triangularView<Upper>();
|
||||
SquareMatrixType symmLo = symm.template triangularView<Lower>();
|
||||
|
||||
LDLT<SquareMatrixType,Lower> ldltlo(symmLo);
|
||||
VERIFY_IS_APPROX(symm, ldltlo.reconstructedMatrix());
|
||||
vecX = ldltlo.solve(vecB);
|
||||
VERIFY_IS_APPROX(symm * vecX, vecB);
|
||||
matX = ldlt.solve(matB);
|
||||
matX = ldltlo.solve(matB);
|
||||
VERIFY_IS_APPROX(symm * matX, matB);
|
||||
|
||||
LDLT<SquareMatrixType,Upper> ldltup(symmUp);
|
||||
VERIFY_IS_APPROX(symm, ldltup.reconstructedMatrix());
|
||||
vecX = ldltup.solve(vecB);
|
||||
VERIFY_IS_APPROX(symm * vecX, vecB);
|
||||
matX = ldltup.solve(matB);
|
||||
VERIFY_IS_APPROX(symm * matX, matB);
|
||||
|
||||
if(MatrixType::RowsAtCompileTime==Dynamic)
|
||||
{
|
||||
// note : each inplace permutation requires a small temporary vector (mask)
|
||||
|
||||
// check inplace solve
|
||||
matX = matB;
|
||||
VERIFY_EVALUATION_COUNT(matX = ldltlo.solve(matX), 0);
|
||||
VERIFY_IS_APPROX(matX, ldltlo.solve(matB).eval());
|
||||
|
||||
|
||||
matX = matB;
|
||||
VERIFY_EVALUATION_COUNT(matX = ldltup.solve(matX), 0);
|
||||
VERIFY_IS_APPROX(matX, ldltup.solve(matB).eval());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@ -134,6 +171,7 @@ template<typename MatrixType> void cholesky_verify_assert()
|
||||
|
||||
LLT<MatrixType> llt;
|
||||
VERIFY_RAISES_ASSERT(llt.matrixL())
|
||||
VERIFY_RAISES_ASSERT(llt.matrixU())
|
||||
VERIFY_RAISES_ASSERT(llt.solve(tmp))
|
||||
VERIFY_RAISES_ASSERT(llt.solveInPlace(&tmp))
|
||||
|
||||
|
@ -2,6 +2,7 @@
|
||||
// for linear algebra. Eigen itself is part of the KDE project.
|
||||
//
|
||||
// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
|
||||
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
@ -23,6 +24,7 @@
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#include "main.h"
|
||||
#include <limits>
|
||||
#include <Eigen/Eigenvalues>
|
||||
#include <Eigen/LU>
|
||||
|
||||
@ -31,12 +33,14 @@
|
||||
template<typename VectorType>
|
||||
void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& vec2)
|
||||
{
|
||||
typedef typename NumTraits<typename VectorType::Scalar>::Real RealScalar;
|
||||
|
||||
VERIFY(vec1.cols() == 1);
|
||||
VERIFY(vec2.cols() == 1);
|
||||
VERIFY(vec1.rows() == vec2.rows());
|
||||
for (int k = 1; k <= vec1.rows(); ++k)
|
||||
{
|
||||
VERIFY_IS_APPROX(vec1.array().pow(k).sum(), vec2.array().pow(k).sum());
|
||||
VERIFY_IS_APPROX(vec1.array().pow(RealScalar(k)).sum(), vec2.array().pow(RealScalar(k)).sum());
|
||||
}
|
||||
}
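// Rationale (comment added for clarity, not part of this patch): two length-n vectors
// agree up to a permutation of their entries iff their power sums sum_i v_i^k coincide
// for k = 1..n (Newton's identities), so comparing these sums avoids having to sort
// the complex eigenvalues before comparing them.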
|
||||
|
||||
@ -59,14 +63,20 @@ template<typename MatrixType> void eigensolver(const MatrixType& m)
|
||||
MatrixType symmA = a.adjoint() * a;
|
||||
|
||||
ComplexEigenSolver<MatrixType> ei0(symmA);
|
||||
VERIFY_IS_EQUAL(ei0.info(), Success);
|
||||
VERIFY_IS_APPROX(symmA * ei0.eigenvectors(), ei0.eigenvectors() * ei0.eigenvalues().asDiagonal());
|
||||
|
||||
ComplexEigenSolver<MatrixType> ei1(a);
|
||||
VERIFY_IS_EQUAL(ei1.info(), Success);
|
||||
VERIFY_IS_APPROX(a * ei1.eigenvectors(), ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
|
||||
// Note: If MatrixType is real then a.eigenvalues() uses EigenSolver and thus
|
||||
// another algorithm so results may differ slightly
|
||||
verify_is_approx_upto_permutation(a.eigenvalues(), ei1.eigenvalues());
|
||||
|
||||
|
||||
ComplexEigenSolver<MatrixType> eiNoEivecs(a, false);
|
||||
VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
|
||||
VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
|
||||
|
||||
// Regression test for issue #66
|
||||
MatrixType z = MatrixType::Zero(rows,cols);
|
||||
ComplexEigenSolver<MatrixType> eiz(z);
|
||||
@ -74,6 +84,25 @@ template<typename MatrixType> void eigensolver(const MatrixType& m)
|
||||
|
||||
MatrixType id = MatrixType::Identity(rows, cols);
|
||||
VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
|
||||
|
||||
if (rows > 1)
|
||||
{
|
||||
// Test matrix with NaN
|
||||
a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
|
||||
ComplexEigenSolver<MatrixType> eiNaN(a);
|
||||
VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
|
||||
{
|
||||
ComplexEigenSolver<MatrixType> eig;
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvectors());
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvalues());
|
||||
|
||||
MatrixType a = MatrixType::Random(m.rows(),m.cols());
|
||||
eig.compute(a, false);
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvectors());
|
||||
}
|
||||
|
||||
void test_eigensolver_complex()
|
||||
@ -85,6 +114,11 @@ void test_eigensolver_complex()
|
||||
CALL_SUBTEST_4( eigensolver(Matrix3f()) );
|
||||
}
|
||||
|
||||
CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4cf()) );
|
||||
CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXcd(14,14)) );
|
||||
CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<std::complex<float>, 1, 1>()) );
|
||||
CALL_SUBTEST_4( eigensolver_verify_assert(Matrix3f()) );
|
||||
|
||||
// Test problem size constructors
|
||||
CALL_SUBTEST_5(ComplexEigenSolver<MatrixXf>(10));
|
||||
}
|
||||
|
@ -2,6 +2,7 @@
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
|
||||
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
@ -23,6 +24,7 @@
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#include "main.h"
|
||||
#include <limits>
|
||||
#include <Eigen/Eigenvalues>
|
||||
|
||||
#ifdef HAS_GSL
|
||||
@ -43,36 +45,52 @@ template<typename MatrixType> void eigensolver(const MatrixType& m)
|
||||
typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
|
||||
typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;
|
||||
|
||||
// RealScalar largerEps = 10*test_precision<RealScalar>();
|
||||
|
||||
MatrixType a = MatrixType::Random(rows,cols);
|
||||
MatrixType a1 = MatrixType::Random(rows,cols);
|
||||
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
|
||||
|
||||
EigenSolver<MatrixType> ei0(symmA);
|
||||
VERIFY_IS_EQUAL(ei0.info(), Success);
|
||||
VERIFY_IS_APPROX(symmA * ei0.pseudoEigenvectors(), ei0.pseudoEigenvectors() * ei0.pseudoEigenvalueMatrix());
|
||||
VERIFY_IS_APPROX((symmA.template cast<Complex>()) * (ei0.pseudoEigenvectors().template cast<Complex>()),
|
||||
(ei0.pseudoEigenvectors().template cast<Complex>()) * (ei0.eigenvalues().asDiagonal()));
|
||||
|
||||
EigenSolver<MatrixType> ei1(a);
|
||||
VERIFY_IS_EQUAL(ei1.info(), Success);
|
||||
VERIFY_IS_APPROX(a * ei1.pseudoEigenvectors(), ei1.pseudoEigenvectors() * ei1.pseudoEigenvalueMatrix());
|
||||
VERIFY_IS_APPROX(a.template cast<Complex>() * ei1.eigenvectors(),
|
||||
ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
|
||||
VERIFY_IS_APPROX(a.eigenvalues(), ei1.eigenvalues());
|
||||
|
||||
EigenSolver<MatrixType> eiNoEivecs(a, false);
|
||||
VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
|
||||
VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
|
||||
VERIFY_IS_APPROX(ei1.pseudoEigenvalueMatrix(), eiNoEivecs.pseudoEigenvalueMatrix());
|
||||
|
||||
MatrixType id = MatrixType::Identity(rows, cols);
|
||||
VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
|
||||
|
||||
if (rows > 2)
|
||||
{
|
||||
// Test matrix with NaN
|
||||
a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
|
||||
EigenSolver<MatrixType> eiNaN(a);
|
||||
VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename MatrixType> void eigensolver_verify_assert()
|
||||
template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
|
||||
{
|
||||
MatrixType tmp;
|
||||
|
||||
EigenSolver<MatrixType> eig;
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvectors())
|
||||
VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors())
|
||||
VERIFY_RAISES_ASSERT(eig.pseudoEigenvalueMatrix())
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvalues())
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvectors());
|
||||
VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
|
||||
VERIFY_RAISES_ASSERT(eig.pseudoEigenvalueMatrix());
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvalues());
|
||||
|
||||
MatrixType a = MatrixType::Random(m.rows(),m.cols());
|
||||
eig.compute(a, false);
|
||||
VERIFY_RAISES_ASSERT(eig.eigenvectors());
|
||||
VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
|
||||
}
|
||||
|
||||
void test_eigensolver_generic()
|
||||
@ -88,11 +106,11 @@ void test_eigensolver_generic()
|
||||
CALL_SUBTEST_4( eigensolver(Matrix2d()) );
|
||||
}
|
||||
|
||||
CALL_SUBTEST_1( eigensolver_verify_assert<Matrix4f>() );
|
||||
CALL_SUBTEST_2( eigensolver_verify_assert<MatrixXd>() );
|
||||
CALL_SUBTEST_4( eigensolver_verify_assert<Matrix2d>() );
|
||||
CALL_SUBTEST_5( eigensolver_verify_assert<MatrixXf>() );
|
||||
CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4f()) );
|
||||
CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXd(17,17)) );
|
||||
CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<double,1,1>()) );
|
||||
CALL_SUBTEST_4( eigensolver_verify_assert(Matrix2d()) );
|
||||
|
||||
// Test problem size constructors
|
||||
CALL_SUBTEST_6(EigenSolver<MatrixXf>(10));
|
||||
CALL_SUBTEST_5(EigenSolver<MatrixXf>(10));
|
||||
}
|
||||
|
@ -2,6 +2,7 @@
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
|
||||
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
@ -23,6 +24,7 @@
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#include "main.h"
|
||||
#include <limits>
|
||||
#include <Eigen/Eigenvalues>
|
||||
|
||||
#ifdef HAS_GSL
|
||||
@ -48,10 +50,12 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
|
||||
MatrixType a = MatrixType::Random(rows,cols);
|
||||
MatrixType a1 = MatrixType::Random(rows,cols);
|
||||
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
|
||||
symmA.template triangularView<StrictlyUpper>().setZero();
|
||||
|
||||
MatrixType b = MatrixType::Random(rows,cols);
|
||||
MatrixType b1 = MatrixType::Random(rows,cols);
|
||||
MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1;
|
||||
symmB.template triangularView<StrictlyUpper>().setZero();
|
||||
|
||||
SelfAdjointEigenSolver<MatrixType> eiSymm(symmA);
|
||||
// generalized eigen pb
|
||||
@ -60,6 +64,9 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
|
||||
#ifdef HAS_GSL
|
||||
if (ei_is_same_type<RealScalar,double>::ret)
|
||||
{
|
||||
// restore symmA and symmB.
|
||||
symmA = MatrixType(symmA.template selfadjointView<Lower>());
|
||||
symmB = MatrixType(symmB.template selfadjointView<Lower>());
|
||||
typedef GslTraits<Scalar> Gsl;
|
||||
typename Gsl::Matrix gEvec=0, gSymmA=0, gSymmB=0;
|
||||
typename GslTraits<RealScalar>::Vector gEval=0;
|
||||
@ -89,10 +96,9 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
|
||||
VERIFY((symmA * _evec).isApprox(symmB * (_evec * _eval.asDiagonal()), largerEps));
|
||||
|
||||
// compare with eigen
|
||||
// std::cerr << _eval.transpose() << "\n" << eiSymmGen.eigenvalues().transpose() << "\n\n";
|
||||
// std::cerr << _evec.format(6) << "\n\n" << eiSymmGen.eigenvectors().format(6) << "\n\n\n";
|
||||
MatrixType normalized_eivec = eiSymmGen.eigenvectors()*eiSymmGen.eigenvectors().colwise().norm().asDiagonal().inverse();
|
||||
VERIFY_IS_APPROX(_eval, eiSymmGen.eigenvalues());
|
||||
VERIFY_IS_APPROX(_evec.cwiseAbs(), eiSymmGen.eigenvectors().cwiseAbs());
|
||||
VERIFY_IS_APPROX(_evec.cwiseAbs(), normalized_eivec.cwiseAbs());
|
||||
|
||||
Gsl::free(gSymmA);
|
||||
Gsl::free(gSymmB);
|
||||
@ -101,20 +107,46 @@ template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
|
||||
}
|
||||
#endif
|
||||
|
||||
VERIFY((symmA * eiSymm.eigenvectors()).isApprox(
|
||||
VERIFY_IS_EQUAL(eiSymm.info(), Success);
|
||||
VERIFY((symmA.template selfadjointView<Lower>() * eiSymm.eigenvectors()).isApprox(
|
||||
eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal(), largerEps));
|
||||
VERIFY_IS_APPROX(symmA.template selfadjointView<Lower>().eigenvalues(), eiSymm.eigenvalues());
|
||||
|
||||
SelfAdjointEigenSolver<MatrixType> eiSymmNoEivecs(symmA, false);
|
||||
VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success);
|
||||
VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
|
||||
|
||||
// generalized eigen problem Ax = lBx
|
||||
VERIFY((symmA * eiSymmGen.eigenvectors()).isApprox(
|
||||
symmB * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
|
||||
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
|
||||
VERIFY((symmA.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
|
||||
symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
|
||||
|
||||
MatrixType sqrtSymmA = eiSymm.operatorSqrt();
|
||||
VERIFY_IS_APPROX(symmA, sqrtSymmA*sqrtSymmA);
|
||||
VERIFY_IS_APPROX(sqrtSymmA, symmA*eiSymm.operatorInverseSqrt());
|
||||
VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
|
||||
VERIFY_IS_APPROX(sqrtSymmA, symmA.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
|
||||
|
||||
MatrixType id = MatrixType::Identity(rows, cols);
|
||||
VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));
|
||||
|
||||
SelfAdjointEigenSolver<MatrixType> eiSymmUninitialized;
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.info());
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues());
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
|
||||
|
||||
eiSymmUninitialized.compute(symmA, false);
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
|
||||
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
|
||||
|
||||
if (rows > 1)
|
||||
{
|
||||
// Test matrix with NaN
|
||||
symmA(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
|
||||
SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmA);
|
||||
VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
|
||||
}
|
||||
}
|
||||
|
||||
void test_eigensolver_selfadjoint()
|
||||
|
Some files were not shown because too many files have changed in this diff.