mirror of
https://gitlab.com/libeigen/eigen.git
synced 2025-08-10 10:49:04 +08:00
* improvements in the tutorial: triangular matrices, linear algebra
* minor fixes in Part and StaticAssert * EulerAngles: remove the FIXME as I think the current version is fine
This commit is contained in:
parent
bb33ec4ef3
commit
2b20da624a
@ -88,8 +88,10 @@ template<typename MatrixType, unsigned int Mode> class Part
|
||||
|
||||
inline Scalar coeff(int row, int col) const
|
||||
{
|
||||
// SelfAdjointBit doesn't play any role here: just because a matrix is selfadjoint doesn't say anything about
|
||||
// each individual coefficient, except for the not-very-useful-here fact that diagonal coefficients are real.
|
||||
if( ((Flags & LowerTriangularBit) && (col>row)) || ((Flags & UpperTriangularBit) && (row>col)) )
|
||||
return (Flags & SelfAdjointBit) ? ei_conj(m_matrix.coeff(col, row)) : (Scalar)0;
|
||||
return (Scalar)0;
|
||||
if(Flags & UnitDiagBit)
|
||||
return col==row ? (Scalar)1 : m_matrix.coeff(row, col);
|
||||
else if(Flags & ZeroDiagBit)
|
||||
@ -101,7 +103,7 @@ template<typename MatrixType, unsigned int Mode> class Part
|
||||
inline Scalar& coeffRef(int row, int col)
|
||||
{
|
||||
EIGEN_STATIC_ASSERT(!(Flags & UnitDiagBit), writing_to_triangular_part_with_unit_diagonal_is_not_supported)
|
||||
EIGEN_STATIC_ASSERT(!(Flags & SelfAdjointBit), default_writing_to_selfadjoint_not_supported)
|
||||
EIGEN_STATIC_ASSERT(!(Flags & SelfAdjointBit), coefficient_write_access_to_selfadjoint_not_supported)
|
||||
ei_assert( (Mode==Upper && col>=row)
|
||||
|| (Mode==Lower && col<=row)
|
||||
|| (Mode==StrictlyUpper && col>row)
|
||||
|
@ -64,13 +64,13 @@
|
||||
you_called_a_fixed_size_method_on_a_dynamic_size_matrix_or_vector,
|
||||
unaligned_load_and_store_operations_unimplemented_on_AltiVec,
|
||||
numeric_type_must_be_floating_point,
|
||||
default_writing_to_selfadjoint_not_supported,
|
||||
coefficient_write_access_to_selfadjoint_not_supported,
|
||||
writing_to_triangular_part_with_unit_diagonal_is_not_supported,
|
||||
this_method_is_only_for_fixed_size,
|
||||
invalid_matrix_product,
|
||||
invalid_vector_vector_product__if_you_wanted_a_dot_or_coeff_wise_product_you_must_use_the_explicit_functions,
|
||||
invalid_matrix_product__if_you_wanted_a_coeff_wise_product_you_must_use_the_explicit_function,
|
||||
you_mixed_different_numeric_types__you_need_to_use_the_cast_method_of_MatrixBase_to_cast_numeric_types_explicitly
|
||||
you_mixed_different_numeric_types__you_need_to_use_the_cast_method_of_MatrixBase_to_cast_numeric_types_explicitly
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -41,9 +41,6 @@
|
||||
* * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
|
||||
* This corresponds to the right-multiply conventions (with right hand side frames).
|
||||
*/
|
||||
// FIXME perhaps the triplet could be template parameters
|
||||
// and/or packed into constants: EulerXYZ, EulerXYX, etc....
|
||||
// FIXME should we support the reversed conventions ? (left multiply)
|
||||
template<typename Derived>
|
||||
inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
|
||||
MatrixBase<Derived>::eulerAngles(int a0, int a1, int a2) const
|
||||
|
@ -79,15 +79,15 @@ First of all, VectorXf is the following typedef:
|
||||
typedef Matrix<float, Dynamic, 1> VectorXf;
|
||||
\endcode
|
||||
|
||||
The class template Matrix is declared in src/Core/util/ForwardDeclarations.h with default values for the 3 last template parameters, so that the actual type is:
|
||||
The class template Matrix is declared in src/Core/util/ForwardDeclarations.h with 6 template parameters, but the last 3 are automatically determined by the first 3. So you don't need to worry about them for now. Here, Matrix\<float, Dynamic, 1\> means a matrix of floats, with a dynamic number of rows and 1 column.
|
||||
|
||||
The Matrix class inherits a base class, MatrixBase. Don't worry about it, for now it suffices to say that MatrixBase is what unifies matrices/vectors and all the expressions types -- more on that below.
|
||||
|
||||
When we do
|
||||
\code
|
||||
typedef Matrix<float, Dynamic, 1, ColMajor, Dynamic, 1> VectorXf;
|
||||
Eigen::VectorXf u(size);
|
||||
\endcode
|
||||
However, in most cases you don't have to worry about the 3 last parameters.
|
||||
|
||||
Notice that Matrix has a base class, MatrixBase. Don't worry about it, for now it suffices to say that MatrixBase is what unifies matrices/vectors and all the expressions types -- more on that below.
|
||||
|
||||
We now enter the Matrix::Matrix(int) constructor, in src/Core/Matrix.h. Besides some assertions, all it does is to construct the \a m_storage member, which is of type ei_matrix_storage\<float, Dynamic, Dynamic, 1\>.
|
||||
the constructor that is called is Matrix::Matrix(int), in src/Core/Matrix.h. Besides some assertions, all it does is to construct the \a m_storage member, which is of type ei_matrix_storage\<float, Dynamic, Dynamic, 1\>.
|
||||
|
||||
You may wonder, isn't it overengineering to have the storage in a separate class? The reason is that the Matrix class template covers all kinds of matrices and vectors: both fixed-size and dynamic-size. The storage method is not the same in these two cases. For fixed-size, the matrix coefficients are stored as a plain member array. For dynamic-size, the coefficients will be stored as a pointer to a dynamically-allocated array. Because of this, we need to abstract storage away from the Matrix class. That's ei_matrix_storage.
|
||||
|
||||
|
@ -507,7 +507,37 @@ vec1.normalize();\endcode
|
||||
|
||||
<a href="#" class="top">top</a>\section TutorialCoreTriangularMatrix Dealing with triangular matrices
|
||||
|
||||
todo
|
||||
Read/write access to special parts of a matrix can be achieved. See \link MatrixBase::part() const this \endlink for read access and \link MatrixBase::part() this \endlink for write access.
|
||||
|
||||
<table class="tutorial_code">
|
||||
<tr><td>
|
||||
Extract triangular matrices \n from a given matrix m:
|
||||
</td><td>\code
|
||||
m.part<Eigen::Upper>()
|
||||
m.part<Eigen::StrictlyUpper>()
|
||||
m.part<Eigen::UnitUpper>()
|
||||
m.part<Eigen::Lower>()
|
||||
m.part<Eigen::StrictlyLower>()
|
||||
m.part<Eigen::UnitLower>()\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Write to triangular parts \n of a matrix m:
|
||||
</td><td>\code
|
||||
m1.part<Eigen::Upper>() = m2;
|
||||
m1.part<Eigen::StrictlyUpper>() = m2;
|
||||
m1.part<Eigen::Lower>() = m2;
|
||||
m1.part<Eigen::StrictlyLower>() = m2;\endcode
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
Special: take advantage of symmetry \n (selfadjointness) when copying \n an expression into a matrix
|
||||
</td><td>\code
|
||||
m.part<Eigen::SelfAdjoint>() = someSelfadjointMatrix;
|
||||
m1.part<Eigen::SelfAdjoint>() = m2 + m2.adjoint(); // m2 + m2.adjoint() is selfadjoint \endcode
|
||||
</td></tr>
|
||||
|
||||
|
||||
</table>
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section TutorialCoreSpecialTopics Special Topics
|
||||
|
||||
|
@ -11,13 +11,13 @@ namespace Eigen {
|
||||
</div>
|
||||
|
||||
\b Table \b of \b contents
|
||||
- \ref TutorialAdvLinearSolvers
|
||||
- \ref TutorialAdvSolvers
|
||||
- \ref TutorialAdvLU
|
||||
- \ref TutorialAdvCholesky
|
||||
- \ref TutorialAdvQR
|
||||
- \ref TutorialAdvEigenProblems
|
||||
|
||||
\section TutorialAdvLinearSolvers Solving linear problems
|
||||
\section TutorialAdvSolvers Solving linear problems
|
||||
|
||||
This part of the tutorial focuses on solving linear problem of the form \f$ A \mathbf{x} = b \f$,
|
||||
where both \f$ A \f$ and \f$ b \f$ are known, and \f$ x \f$ is the unknown. Moreover, \f$ A \f$
|
||||
@ -26,7 +26,7 @@ involve the product of an inverse matrix with a vector or another matrix: \f$ A^
|
||||
Eigen offers various algorithms to this problem, and its choice mainly depends on the nature of
|
||||
the matrix \f$ A \f$, such as its shape, size and numerical properties.
|
||||
|
||||
\subsection TutorialAdv_Triangular Triangular solver
|
||||
\subsection TutorialAdvSolvers_Triangular Triangular solver
|
||||
If the matrix \f$ A \f$ is triangular (upper or lower) and invertible (the coefficients of the diagonal
|
||||
are all not zero), then the problem can be solved directly using MatrixBase::solveTriangular(), or better,
|
||||
MatrixBase::solveTriangularInPlace(). Here is an example:
|
||||
@ -41,9 +41,9 @@ output:
|
||||
See MatrixBase::solveTriangular() for more details.
|
||||
|
||||
|
||||
\subsection TutorialAdv_Inverse Direct inversion (for small matrices)
|
||||
If the matrix \f$ A \f$ is small (\f$ \leq 4 \f$) and invertible, then the problem can be solved
|
||||
by directly computing the inverse of the matrix \f$ A \f$: \f$ \mathbf{x} = A^{-1} b \f$. With Eigen,
|
||||
\subsection TutorialAdvSolvers_Inverse Direct inversion (for small matrices)
|
||||
If the matrix \f$ A \f$ is small (\f$ \leq 4 \f$) and invertible, then a good approach is to directly compute
|
||||
the inverse of the matrix \f$ A \f$, and then obtain the solution \f$ x \f$ by \f$ \mathbf{x} = A^{-1} b \f$. With Eigen,
|
||||
this can be implemented like this:
|
||||
|
||||
\code
|
||||
@ -57,10 +57,10 @@ Note that the function inverse() is defined in the LU module.
|
||||
See MatrixBase::inverse() for more details.
|
||||
|
||||
|
||||
\subsection TutorialAdv_Symmetric Cholesky (for symmetric matrices)
|
||||
If the matrix \f$ A \f$ is \b symmetric, or more generally selfadjoint, and \b positive \b definite (SPD), then
|
||||
\subsection TutorialAdvSolvers_Symmetric Cholesky (for positive definite matrices)
|
||||
If the matrix \f$ A \f$ is \b positive \b definite, then
|
||||
the best method is to use a Cholesky decomposition.
|
||||
Such SPD matrices often arise when solving overdetermined problems in a least square sense (see below).
|
||||
Such positive definite matrices often arise when solving overdetermined problems in a least square sense (see below).
|
||||
Eigen offers two different Cholesky decompositions: a \f$ LL^T \f$ decomposition where L is a lower triangular matrix,
|
||||
and a \f$ LDL^T \f$ decomposition where L is lower triangular with unit diagonal and D is a diagonal matrix.
|
||||
The latter avoids square roots and is therefore slightly more stable than the former one.
|
||||
@ -93,16 +93,16 @@ lltOfA.solveInPlace(b1);
|
||||
\sa Cholesky_Module, LLT::solve(), LLT::solveInPlace(), LDLT::solve(), LDLT::solveInPlace(), class LLT, class LDLT.
|
||||
|
||||
|
||||
\subsection TutorialAdv_LU LU decomposition (for most cases)
|
||||
If the matrix \f$ A \f$ does not fit in one of the previous category, or if you are unsure about the numerical
|
||||
stability of your problem, then you can use the LU solver based on a decomposition of the same name.
|
||||
Actually, Eigen's LU module does not implement a standard LU decomposition, but rather a so called LU decomposition
|
||||
with full pivoting and rank update which has the advantages to be numerically much more stable.
|
||||
\subsection TutorialAdvSolvers_LU LU decomposition (for most cases)
|
||||
If the matrix \f$ A \f$ does not fit in any of the previous categories, or if you are unsure about the numerical
|
||||
stability of your problem, then you can use the LU solver based on a decomposition of the same name: see the section \ref TutorialAdvLU below. Actually, Eigen's LU module does not implement a standard LU decomposition, but rather a so-called LU decomposition
|
||||
with full pivoting and rank update which has much better numerical stability.
|
||||
The API of the LU solver is the same as the Cholesky one, except that there is no \em in \em place variant:
|
||||
\code
|
||||
Matrix4f A = Matrix4f::Random();
|
||||
Vector4f b = Vector4f::Random();
|
||||
Vector4f x;
|
||||
#include <Eigen/LU>
|
||||
MatrixXf A = MatrixXf::Random(20,20);
|
||||
VectorXf b = VectorXf::Random(20);
|
||||
VectorXf x;
|
||||
A.lu().solve(b, &x);
|
||||
\endcode
|
||||
|
||||
@ -114,18 +114,21 @@ luOfA.solve(b, &x);
|
||||
// ...
|
||||
\endcode
|
||||
|
||||
See the section \ref TutorialAdvLU below.
|
||||
|
||||
\sa class LU, LU::solve(), LU_Module
|
||||
|
||||
|
||||
\subsection TutorialAdv_LU SVD solver (for singular matrices and special cases)
|
||||
\subsection TutorialAdvSolvers_SVD SVD solver (for singular matrices and special cases)
|
||||
Finally, Eigen also offers a solver based on a singular value decomposition (SVD). Again, the API is the
|
||||
same as with Cholesky or LU:
|
||||
\code
|
||||
Matrix4f A = Matrix4f::Random();
|
||||
Vector4f b = Vector4f::Random();
|
||||
Vector4f x;
|
||||
#include <Eigen/SVD>
|
||||
MatrixXf A = MatrixXf::Random(20,20);
|
||||
VectorXf b = VectorXf::Random(20);
|
||||
VectorXf x;
|
||||
A.svd().solve(b, &x);
|
||||
SVD<MatrixXf> luOfA(A);
|
||||
SVD<MatrixXf> svdOfA(A);
|
||||
svdOfA.solve(b, &x);
|
||||
\endcode
|
||||
|
||||
@ -135,7 +138,29 @@ svdOfA.solve(b, &x);
|
||||
|
||||
|
||||
<a href="#" class="top">top</a>\section TutorialAdvLU LU
|
||||
todo
|
||||
|
||||
Eigen provides a rank-revealing LU decomposition with full pivoting, which has very good numerical stability.
|
||||
|
||||
You can obtain the LU decomposition of a matrix by calling \link MatrixBase::lu() lu() \endlink, which is the easiest way if you're going to use the LU decomposition only once, as in
|
||||
\code
|
||||
#include <Eigen/LU>
|
||||
MatrixXf A = MatrixXf::Random(20,20);
|
||||
VectorXf b = VectorXf::Random(20);
|
||||
VectorXf x;
|
||||
A.lu().solve(b, &x);
|
||||
\endcode
|
||||
|
||||
Alternatively, you can construct a named LU decomposition, which allows you to reuse it for more than one operation:
|
||||
\code
|
||||
#include <Eigen/LU>
|
||||
MatrixXf A = MatrixXf::Random(20,20);
|
||||
Eigen::LU<MatrixXf> lu(A);
|
||||
cout << "The rank of A is " << lu.rank() << endl;
|
||||
if(lu.isInvertible()) {
|
||||
cout << "A is invertible, its inverse is:" << endl << lu.inverse() << endl;
|
||||
cout << "Here's a matrix whose columns form a basis of the kernel a.k.a. nullspace of A:"
|
||||
<< endl << lu.kernel() << endl;
}
|
||||
\endcode
|
||||
|
||||
\sa LU_Module, LU::solve(), class LU
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user