mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-06-04 18:54:00 +08:00)
Document SelfAdjointEigenSolver and add examples.
This commit is contained in:
parent 6ea6276f20
commit 2d74f1ac92
@@ -30,13 +30,42 @@
*
* \class SelfAdjointEigenSolver
*
* \brief Eigen values/vectors solver for selfadjoint matrix
* \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
*
* \param MatrixType the type of the matrix of which we are computing the eigen decomposition
* \tparam _MatrixType the type of the matrix of which we are computing the
* eigendecomposition; this is expected to be an instantiation of the Matrix
* class template. Currently, only real matrices are supported.
*
* \note MatrixType must be an actual Matrix type, it can't be an expression type.
* A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
* matrices, this means that the matrix is symmetric: it equals its
* transpose. This class computes the eigenvalues and eigenvectors of a
* selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
* \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a
* selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
* the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
* eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
* matrices, the matrix \f$ V \f$ is always invertible). This is called the
* eigendecomposition.
*
* \sa MatrixBase::eigenvalues(), class EigenSolver
* The algorithm exploits the fact that the matrix is selfadjoint, making it
* faster and more accurate than the general purpose eigenvalue algorithms
* implemented in EigenSolver and ComplexEigenSolver.
*
* This class can also be used to solve the generalized eigenvalue problem
* \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be
* selfadjoint and the matrix \f$ B \f$ should be positive definite.
*
* Call the function compute() to compute the eigenvalues and eigenvectors of
* a given matrix. Alternatively, you can use the
* SelfAdjointEigenSolver(const MatrixType&, bool) constructor which computes
* the eigenvalues and eigenvectors at construction time. Once the eigenvalue
* and eigenvectors are computed, they can be retrieved with the eigenvalues()
* and eigenvectors() functions.
*
* The documentation for SelfAdjointEigenSolver(const MatrixType&, bool)
* contains an example of the typical use of this class.
*
* \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType> class SelfAdjointEigenSolver
{
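As a quick way to exercise the class documented above, here is a minimal, self-contained sketch. It is not part of the commit; it assumes a working Eigen installation exposing the <Eigen/Dense> header and the interface as documented in this patch. It builds a random symmetric matrix, solves the eigenproblem, and checks the relation \f$ A = V D V^{-1} \f$ stated above.

// Minimal usage sketch for SelfAdjointEigenSolver (assumes Eigen's <Eigen/Dense> header).
#include <iostream>
#include <Eigen/Dense>

int main()
{
  // Build a random symmetric (hence selfadjoint) matrix.
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd A = X + X.transpose();

  // Compute eigenvalues and eigenvectors at construction time.
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);

  Eigen::VectorXd lambda = es.eigenvalues();   // real eigenvalues
  Eigen::MatrixXd V      = es.eigenvectors();  // eigenvectors as columns
  Eigen::MatrixXd D      = lambda.asDiagonal();

  // Check the eigendecomposition A = V D V^{-1} described above.
  std::cout << "Reconstruction error: "
            << (V * D * V.inverse() - A).norm() << "\n";
  return 0;
}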
@@ -49,13 +78,37 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
Options = MatrixType::Options,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};

/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;

/** \brief Real scalar type for \p _MatrixType.
*
* This is just \c Scalar if #Scalar is real (e.g., \c float or
* \c double), and the type of the real part of \c Scalar if #Scalar is
* complex.
*/
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef std::complex<RealScalar> Complex;

/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #RealScalar.
* The length of the vector is the size of \p _MatrixType.
*/
typedef typename ei_plain_col_type<MatrixType, RealScalar>::type RealVectorType;
typedef Tridiagonalization<MatrixType> TridiagonalizationType;
// typedef typename TridiagonalizationType::TridiagonalMatrixType TridiagonalMatrixType;
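For reference, a small sketch of the member typedefs documented above; this is an illustration, not part of the commit, and assumes <Eigen/Dense>. For a real matrix type such as MatrixXd, Scalar and RealScalar coincide, and eigenvalues() returns a RealVectorType column vector.

// Sketch: reading back the typedefs documented above (assumes <Eigen/Dense>).
#include <iostream>
#include <Eigen/Dense>

int main()
{
  typedef Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> Solver;

  Eigen::MatrixXd X = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd A = X + X.transpose();
  Solver es(A);

  // For a real matrix type, Scalar and RealScalar are the same type (double here),
  // and the eigenvalues come back as a column vector of RealScalar entries.
  Solver::RealScalar sum = es.eigenvalues().sum();
  Solver::RealVectorType evals = es.eigenvalues();

  std::cout << "sum of eigenvalues: " << sum
            << ", trace of A: " << A.trace() << "\n";
  std::cout << "eigenvalues:\n" << evals << "\n";
  return 0;
}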
/** \brief Default constructor for fixed-size matrices.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(const MatrixType&, bool) or
* compute(const MatrixType&, const MatrixType&, bool). This constructor
* can only be used if \p _MatrixType is a fixed-size matrix; use
* SelfAdjointEigenSolver(int) for dynamic-size matrices.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
*/
SelfAdjointEigenSolver()
: m_eivec(int(Size), int(Size)),
m_eivalues(int(Size)),
@@ -64,16 +117,42 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
ei_assert(Size!=Dynamic);
}

/** \brief Constructor, pre-allocates memory for dynamic-size matrices.
*
* \param [in] size Positive integer, size of the matrix whose
* eigenvalues and eigenvectors will be computed.
*
* This constructor is useful for dynamic-size matrices, when the user
* intends to perform decompositions via compute(const MatrixType&, bool)
* or compute(const MatrixType&, const MatrixType&, bool). The \p size
* parameter is only used as a hint. It is not an error to give a wrong
* \p size, but it may impair performance.
*
* \sa compute(const MatrixType&, bool) for an example
*/
SelfAdjointEigenSolver(int size)
: m_eivec(size, size),
m_eivalues(size),
m_subdiag(TridiagonalizationType::SizeMinusOne)
{}
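The two constructors above support a construct-first, compute-later workflow. The following sketch is illustrative only and assumes the API as documented in this patch; it uses the default constructor for a fixed-size Matrix3f and the size-hint constructor for a dynamic-size MatrixXd, with the sizes 3 and 50 chosen arbitrarily.

// Sketch of the construct-then-compute pattern described above (assumes <Eigen/Dense>).
#include <iostream>
#include <Eigen/Dense>

int main()
{
  // Fixed-size matrix: the default constructor is enough.
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> esFixed;
  Eigen::Matrix3f Y = Eigen::Matrix3f::Random();
  esFixed.compute(Y + Y.transpose());
  std::cout << "3x3 eigenvalues: " << esFixed.eigenvalues().transpose() << "\n";

  // Dynamic-size matrix: pass the expected size so the workspace is pre-allocated.
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> esDyn(50);
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(50, 50);
  esDyn.compute(X + X.transpose());
  std::cout << "largest of the 50 eigenvalues: "
            << esDyn.eigenvalues().maxCoeff() << "\n";
  return 0;
}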
/** Constructors computing the eigenvalues of the selfadjoint matrix \a matrix,
* as well as the eigenvectors if \a computeEigenvectors is true.
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
* be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
*
* \sa compute(MatrixType,bool), SelfAdjointEigenSolver(MatrixType,MatrixType,bool)
* This constructor calls compute(const MatrixType&, bool) to compute the
* eigenvalues of the matrix \p matrix. The eigenvectors are computed if
* \p computeEigenvectors is true.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
*
* \sa compute(const MatrixType&, bool),
* SelfAdjointEigenSolver(const MatrixType&, const MatrixType&, bool)
*/
SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
: m_eivec(matrix.rows(), matrix.cols()),
@@ -84,12 +163,26 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
compute(matrix, computeEigenvectors);
}
/** Constructors computing the eigenvalues of the generalized eigen problem
* \f$ Ax = lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$
* and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors
* are computed if \a computeEigenvectors is true.
/** \brief Constructor; computes eigendecomposition of given matrix pencil.
*
* \param[in] matA Selfadjoint matrix in matrix pencil.
* \param[in] matB Positive-definite matrix in matrix pencil.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
*
* \sa compute(MatrixType,MatrixType,bool), SelfAdjointEigenSolver(MatrixType,bool)
* This constructor calls compute(const MatrixType&, const MatrixType&, bool)
* to compute the eigenvalues and (if requested) the eigenvectors of the
* generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the
* selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix
* \f$ B \f$ . The eigenvectors are computed if \a computeEigenvectors is
* true.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out
*
* \sa compute(const MatrixType&, const MatrixType&, bool),
* SelfAdjointEigenSolver(const MatrixType&, bool)
*/
SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
: m_eivec(matA.rows(), matA.cols()),
@@ -100,12 +193,91 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
compute(matA, matB, computeEigenvectors);
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
* be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
* This function computes the eigenvalues of \p matrix. The eigenvalues()
* function can be used to retrieve them. If \p computeEigenvectors is
* true, then the eigenvectors are also computed and can be retrieved by
* calling eigenvectors().
*
* This implementation uses a symmetric QR algorithm. The matrix is first
* reduced to tridiagonal form using the Tridiagonalization class. The
* tridiagonal matrix is then brought to diagonal form with implicit
* symmetric QR steps with Wilkinson shift. Details can be found in
* Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
*
* The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
* are required and \f$ 4n^3/3 \f$ if they are not required.
*
* This method reuses the memory in the SelfAdjointEigenSolver object that
* was allocated when the object was constructed, if the size of the
* matrix does not change.
*
* Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
* Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
*
* \sa SelfAdjointEigenSolver(const MatrixType&, bool)
*/
SelfAdjointEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
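The algorithm description above (tridiagonal reduction followed by implicit symmetric QR steps) can be partly inspected from user code. The sketch below is illustrative, not part of the commit, and assumes the Tridiagonalization class exposes matrixQ() and matrixT() accessors. Since \f$ A = Q T Q^T \f$ is a similarity transform, the tridiagonal factor T has the same eigenvalues as A, which is what the second stage of compute() exploits.

// Sketch: the tridiagonal reduction used as the first stage of compute()
// (assumes <Eigen/Dense> and Tridiagonalization::matrixQ()/matrixT()).
#include <iostream>
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(6, 6);
  Eigen::MatrixXd A = X + X.transpose();

  Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
  Eigen::MatrixXd Q = tri.matrixQ();   // orthogonal factor
  Eigen::MatrixXd T = tri.matrixT();   // symmetric tridiagonal factor

  // A = Q T Q^T, so the QR iteration only has to work on the tridiagonal T.
  std::cout << "||A - Q T Q^T|| = " << (A - Q * T * Q.transpose()).norm() << "\n";

  // Eigenvalues-only mode skips accumulating the transformations
  // (roughly 4n^3/3 instead of 9n^3 flops, per the documentation above).
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A, false);
  std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";
  return 0;
}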
/** \brief Computes eigendecomposition of given matrix pencil.
*
* \param[in] matA Selfadjoint matrix in matrix pencil.
* \param[in] matB Positive-definite matrix in matrix pencil.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
* This function computes eigenvalues and (if requested) the eigenvectors
* of the generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA
* the selfadjoint matrix \f$ A \f$ and \a matB the positive definite
* matrix \f$ B \f$. The eigenvalues() function can be used to retrieve
* the eigenvalues. If \p computeEigenvectors is true, then the
* eigenvectors are also computed and can be retrieved by calling
* eigenvectors().
*
* The implementation uses LLT to compute the Cholesky decomposition
* \f$ B = LL^* \f$ and calls compute(const MatrixType&, bool) to compute
* the eigendecomposition \f$ L^{-1} A (L^*)^{-1} \f$. This solves the
* generalized eigenproblem, because any solution of the generalized
* eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution
* \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the
* eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$.
*
* Example: \include SelfAdjointEigenSolver_compute_MatrixType2.cpp
* Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out
*
* \sa SelfAdjointEigenSolver(const MatrixType&, const MatrixType&, bool)
*/
SelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true);
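The LLT-based reduction described above can be reproduced by hand to see why it works. The sketch below is illustrative, not part of the commit; it assumes the (matA, matB) interface documented in this patch and the LLT class with matrixL(). It forms \f$ C = L^{-1} A L^{-T} \f$ explicitly and checks that the ordinary eigenvalues of C agree with the generalized eigenvalues of the pencil (A, B).

// Sketch: reducing the generalized problem A x = lambda B x to a standard one via LLT,
// as described above (assumes <Eigen/Dense>; the (A, B) constructor follows this commit's API).
#include <algorithm>
#include <iostream>
#include <Eigen/Dense>

int main()
{
  const int n = 5;
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd A = X + X.transpose();                      // selfadjoint
  X = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd B = X * X.transpose()
                    + double(n) * Eigen::MatrixXd::Identity(n, n);  // safely positive definite

  // Generalized solve as provided by the class.
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A, B);
  Eigen::VectorXd gen = es.eigenvalues();

  // Manual reduction: B = L L^T, then solve the standard problem for C = L^{-1} A L^{-T}.
  Eigen::MatrixXd L = Eigen::LLT<Eigen::MatrixXd>(B).matrixL();
  Eigen::MatrixXd Linv = L.inverse();
  Eigen::MatrixXd C = Linv * A * Linv.transpose();
  Eigen::VectorXd std_evals = Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd>(C).eigenvalues();

  // Both routes must produce the same spectrum (sort to make the comparison order-independent).
  std::sort(gen.data(), gen.data() + gen.size());
  std::sort(std_evals.data(), std_evals.data() + std_evals.size());
  std::cout << "max eigenvalue difference: "
            << (gen - std_evals).cwiseAbs().maxCoeff() << "\n";
  return 0;
}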
/** \returns the computed eigen vectors as a matrix of column vectors */
MatrixType eigenvectors(void) const
/** \brief Returns the eigenvectors of given matrix (pencil).
*
* \returns %Matrix whose columns are the eigenvectors.
*
* \pre The eigenvectors have been computed before.
*
* Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
* to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
* eigenvectors are normalized to have (Euclidean) norm equal to one. If
* this object was used to solve the eigenproblem for the selfadjoint
* matrix \f$ A \f$, then the matrix returned by this function is the
* matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
*
* Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
* Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
*
* \sa eigenvalues()
*/
MatrixType eigenvectors() const
{
#ifndef NDEBUG
ei_assert(m_eigenvectorsOk);
@@ -113,21 +285,62 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
return m_eivec;
}
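The guarantees listed above, unit-norm columns and column k pairing with eigenvalue number k, can be verified directly. A minimal illustrative sketch (assuming <Eigen/Dense>):

// Sketch: checking the documented properties of eigenvectors() (assumes <Eigen/Dense>).
#include <iostream>
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd A = X + X.transpose();

  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
  Eigen::MatrixXd V = es.eigenvectors();
  Eigen::VectorXd lambda = es.eigenvalues();

  for (int k = 0; k < A.rows(); ++k)
  {
    Eigen::VectorXd v = V.col(k);
    std::cout << "column " << k
              << ": ||v|| = " << v.norm()                       // normalized to 1
              << ", ||A v - lambda v|| = "
              << (A * v - lambda[k] * v).norm() << "\n";        // eigenpair relation
  }
  return 0;
}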
/** \returns the computed eigen values */
RealVectorType eigenvalues(void) const { return m_eivalues; }

/** \returns the positive square root of the matrix
/** \brief Returns the eigenvalues of given matrix (pencil).
*
* \note the matrix itself must be positive in order for this to make sense.
* \returns Column vector containing the eigenvalues.
*
* \pre The eigenvalues have been computed before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix.
*
* Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
* Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
*
* \sa eigenvectors(), MatrixBase::eigenvalues()
*/
RealVectorType eigenvalues() const { return m_eivalues; }
/** \brief Computes the positive-definite square root of the matrix.
*
* \returns the positive-definite square root of the matrix
*
* \pre The eigenvalues and eigenvectors of a positive-definite matrix
* have been computed before.
*
* The square root of a positive-definite matrix \f$ A \f$ is the
* positive-definite matrix whose square equals \f$ A \f$. This function
* uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
* square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
*
* Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
* Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
*
* \sa operatorInverseSqrt(),
* \ref MatrixFunctions_Module "MatrixFunctions Module"
*/
MatrixType operatorSqrt() const
{
return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
}
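The formula \f$ A^{1/2} = V D^{1/2} V^{-1} \f$ quoted above can be reproduced from the public accessors and compared against operatorSqrt(); operatorInverseSqrt(), documented next, follows the same pattern with \f$ D^{-1/2} \f$. An illustrative sketch, not part of the commit, assuming <Eigen/Dense>:

// Sketch: operatorSqrt() versus the explicit V D^{1/2} V^{-1} formula above (assumes <Eigen/Dense>).
#include <iostream>
#include <Eigen/Dense>

int main()
{
  // A positive-definite matrix, so the square root is well defined.
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd A = X * X.transpose() + Eigen::MatrixXd::Identity(4, 4);

  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
  Eigen::MatrixXd V = es.eigenvectors();
  Eigen::MatrixXd sqrtD = es.eigenvalues().cwiseSqrt().asDiagonal();

  Eigen::MatrixXd sqrtA = es.operatorSqrt();

  std::cout << "||operatorSqrt() - V D^{1/2} V^{-1}|| = "
            << (sqrtA - V * sqrtD * V.inverse()).norm() << "\n";
  std::cout << "||sqrtA * sqrtA - A|| = "
            << (sqrtA * sqrtA - A).norm() << "\n";
  return 0;
}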
/** \returns the positive inverse square root of the matrix
/** \brief Computes the inverse square root of the matrix.
*
* \note the matrix itself must be positive definite in order for this to make sense.
* \returns the inverse positive-definite square root of the matrix
*
* \pre The eigenvalues and eigenvectors of a positive-definite matrix
* have been computed before.
*
* This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
* compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
* cheaper than first computing the square root with operatorSqrt() and
* then its inverse with MatrixBase::inverse().
*
* Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
* Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
*
* \sa operatorSqrt(), MatrixBase::inverse(),
* \ref MatrixFunctions_Module "MatrixFunctions Module"
*/
MatrixType operatorInverseSqrt() const
{
@@ -165,11 +378,6 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
template<typename RealScalar, typename Scalar>
static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n);

/** Computes the eigenvalues of the selfadjoint matrix \a matrix,
* as well as the eigenvectors if \a computeEigenvectors is true.
*
* \sa SelfAdjointEigenSolver(MatrixType,bool), compute(MatrixType,MatrixType,bool)
*/
template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
@@ -233,13 +441,6 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(
return *this;
}

/** Computes the eigenvalues of the generalized eigen problem
* \f$ Ax = lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$
* and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors
* are computed if \a computeEigenvectors is true.
*
* \sa SelfAdjointEigenSolver(MatrixType,MatrixType,bool), compute(MatrixType,bool)
*/
template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::
compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors)
@@ -0,0 +1,7 @@
SelfAdjointEigenSolver<Matrix4f> es;
Matrix4f X = Matrix4f::Random(4,4);
Matrix4f A = X + X.transpose();
es.compute(A);
cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl;
es.compute(A + Matrix4f::Identity(4,4)); // re-use es to compute eigenvalues of A+I
cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl;
@@ -0,0 +1,17 @@
MatrixXd X = MatrixXd::Random(5,5);
MatrixXd A = X + X.transpose();
cout << "Here is a random symmetric 5x5 matrix, A:" << endl << A << endl << endl;

SelfAdjointEigenSolver<MatrixXd> es(A);
cout << "The eigenvalues of A are:" << endl << es.eigenvalues() << endl;
cout << "The matrix of eigenvectors, V, is:" << endl << es.eigenvectors() << endl << endl;

double lambda = es.eigenvalues()[0];
cout << "Consider the first eigenvalue, lambda = " << lambda << endl;
VectorXd v = es.eigenvectors().col(0);
cout << "If v is the corresponding eigenvector, then lambda * v = " << endl << lambda * v << endl;
cout << "... and A * v = " << endl << A * v << endl << endl;

MatrixXd D = es.eigenvalues().asDiagonal();
MatrixXd V = es.eigenvectors();
cout << "Finally, V * D * V^(-1) = " << endl << V * D * V.inverse() << endl;
@@ -0,0 +1,16 @@
MatrixXd X = MatrixXd::Random(5,5);
MatrixXd A = X + X.transpose();
cout << "Here is a random symmetric matrix, A:" << endl << A << endl;
X = MatrixXd::Random(5,5);
MatrixXd B = X * X.transpose();
cout << "and a random positive-definite matrix, B:" << endl << B << endl << endl;

SelfAdjointEigenSolver<MatrixXd> es(A,B);
cout << "The eigenvalues of the pencil (A,B) are:" << endl << es.eigenvalues() << endl;
cout << "The matrix of eigenvectors, V, is:" << endl << es.eigenvectors() << endl << endl;

double lambda = es.eigenvalues()[0];
cout << "Consider the first eigenvalue, lambda = " << lambda << endl;
VectorXd v = es.eigenvectors().col(0);
cout << "If v is the corresponding eigenvector, then A * v = " << endl << A * v << endl;
cout << "... and lambda * B * v = " << endl << lambda * B * v << endl << endl;
@@ -0,0 +1,7 @@
SelfAdjointEigenSolver<MatrixXf> es(4);
MatrixXf X = MatrixXf::Random(4,4);
MatrixXf A = X + X.transpose();
es.compute(A);
cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl;
es.compute(A + MatrixXf::Identity(4,4)); // re-use es to compute eigenvalues of A+I
cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl;
@@ -0,0 +1,9 @@
MatrixXd X = MatrixXd::Random(5,5);
MatrixXd A = X * X.transpose();
X = MatrixXd::Random(5,5);
MatrixXd B = X * X.transpose();

SelfAdjointEigenSolver<MatrixXd> es(A,B,false);
cout << "The eigenvalues of the pencil (A,B) are:" << endl << es.eigenvalues() << endl;
es.compute(B,A,false);
cout << "The eigenvalues of the pencil (B,A) are:" << endl << es.eigenvalues() << endl;
doc/snippets/SelfAdjointEigenSolver_eigenvalues.cpp (new file)
@@ -0,0 +1,4 @@
MatrixXd ones = MatrixXd::Ones(3,3);
SelfAdjointEigenSolver<MatrixXd> es(ones);
cout << "The eigenvalues of the 3x3 matrix of ones are:"
<< endl << es.eigenvalues() << endl;
doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp (new file)
@@ -0,0 +1,4 @@
MatrixXd ones = MatrixXd::Ones(3,3);
SelfAdjointEigenSolver<MatrixXd> es(ones);
cout << "The first eigenvector of the 3x3 matrix of ones is:"
<< endl << es.eigenvectors().col(1) << endl;
@@ -0,0 +1,9 @@
MatrixXd X = MatrixXd::Random(4,4);
MatrixXd A = X * X.transpose();
cout << "Here is a random positive-definite matrix, A:" << endl << A << endl << endl;

SelfAdjointEigenSolver<MatrixXd> es(A);
cout << "The inverse square root of A is: " << endl;
cout << es.operatorInverseSqrt() << endl;
cout << "We can also compute it with operatorSqrt() and inverse(). That yields: " << endl;
cout << es.operatorSqrt().inverse() << endl;
doc/snippets/SelfAdjointEigenSolver_operatorSqrt.cpp (new file)
@@ -0,0 +1,8 @@
MatrixXd X = MatrixXd::Random(4,4);
MatrixXd A = X * X.transpose();
cout << "Here is a random positive-definite matrix, A:" << endl << A << endl << endl;

SelfAdjointEigenSolver<MatrixXd> es(A);
MatrixXd sqrtA = es.operatorSqrt();
cout << "The square root of A is: " << endl << sqrtA << endl;
cout << "If we square this, we get: " << endl << sqrtA*sqrtA << endl;