mirror of
https://gitlab.com/libeigen/eigen.git
synced 2025-08-12 03:39:01 +08:00
some more documentation
This commit is contained in:
parent
ac8f7d8c9c
commit
de195e0e78
@ -33,12 +33,41 @@ namespace Eigen {
|
||||
/** \ingroup Unsupported_modules
|
||||
* \defgroup NonLinearOptimization_Module Non linear optimization module
|
||||
*
|
||||
* This module provides implementation of two important algorithms in non linear
|
||||
* optimization. In both cases, we consider a system of non linear functions. Of
|
||||
* course, this should work, and even work very well if those functions are
|
||||
* actually linear. But if this is so, you should probably better use other
|
||||
 * methods better suited to this special case.
|
||||
*
|
||||
 * One algorithm allows one to find the extremum of such a system (Levenberg
|
||||
* Marquardt algorithm) and the second one is used to find
|
||||
* a zero for the system (Powell hybrid "dogleg" method).
|
||||
*
|
||||
 * This code is a port of a renowned implementation for both algorithms,
|
||||
* called minpack (http://en.wikipedia.org/wiki/MINPACK). Those
|
||||
* implementations have been carefully tuned, tested, and used for several
|
||||
* decades.
|
||||
 * The original Fortran code was automatically translated to C and then C++,
|
||||
* and then cleaned by several authors
|
||||
* (check http://devernay.free.fr/hacks/cminpack.html).
|
||||
*
|
||||
* Finally, we ported this code to Eigen, creating classes and API
|
||||
* coherent with Eigen. When possible, we switched to Eigen
|
||||
* implementation, such as most linear algebra (vectors, matrices, "good" norms).
|
||||
*
|
||||
 * Doing so, we were very careful to check the tests we set up at the very
|
||||
* beginning, which ensure that the same results are found, with the same
|
||||
* number of iterations.
|
||||
*
|
||||
* \code
|
||||
* #include <unsupported/Eigen/NonLinearOptimization>
|
||||
* \endcode
|
||||
*/
|
||||
|
||||
//@{
|
||||
|
||||
#ifndef EIGEN_PARSED_BY_DOXYGEN
|
||||
|
||||
#include "src/NonLinearOptimization/qrsolv.h"
|
||||
#include "src/NonLinearOptimization/r1updt.h"
|
||||
#include "src/NonLinearOptimization/r1mpyq.h"
|
||||
@ -52,9 +81,10 @@ namespace Eigen {
|
||||
|
||||
#include "src/NonLinearOptimization/chkder.h"
|
||||
|
||||
#endif
|
||||
|
||||
#include "src/NonLinearOptimization/HybridNonLinearSolver.h"
|
||||
#include "src/NonLinearOptimization/LevenbergMarquardt.h"
|
||||
|
||||
//@}
|
||||
|
||||
}
|
||||
|
@ -36,6 +36,22 @@ namespace Eigen {
|
||||
* Warning : this should NOT be confused with automatic differentiation, which
|
||||
* is a different method and has its own module in Eigen.
|
||||
*
|
||||
 * Currently only the "Forward" and "Central" schemes are implemented. Those
|
||||
 * are basic methods, and there exist some more elaborate ways of
|
||||
 * computing such approximations. They are implemented using both
|
||||
 * proprietary and free software, and usually require linking to an
|
||||
* external library. It is very easy for you to write a functor
|
||||
* using such software, and the purpose is quite orthogonal to what we
|
||||
* want to achieve with Eigen.
|
||||
*
|
||||
* This is why we will not provide wrappers for every great numerical
|
||||
 * differentiation software that exists, but should rather stick with those
|
||||
* basic ones, that still are useful for testing.
|
||||
*
|
||||
* Also, the module "Non linear optimization" needs this in order to
|
||||
* provide full features compatibility with the original (c)minpack
|
||||
* package.
|
||||
*
|
||||
* \code
|
||||
* #include <unsupported/Eigen/NumericalDiff>
|
||||
* \endcode
|
||||
|
@ -28,6 +28,16 @@
|
||||
#ifndef EIGEN_HYBRIDNONLINEARSOLVER_H
|
||||
#define EIGEN_HYBRIDNONLINEARSOLVER_H
|
||||
|
||||
/**
|
||||
* \brief Finds a zero of a system of n
|
||||
* nonlinear functions in n variables by a modification of the Powell
|
||||
* hybrid method ("dogleg").
|
||||
*
|
||||
* The user must provide a subroutine which calculates the
|
||||
* functions. The Jacobian is either provided by the user, or approximated
|
||||
* using a forward-difference method.
|
||||
*
|
||||
*/
|
||||
template<typename FunctorType, typename Scalar=double>
|
||||
class HybridNonLinearSolver
|
||||
{
|
||||
|
@ -35,32 +35,15 @@ enum NumericalDiffMode {
|
||||
|
||||
|
||||
/**
|
||||
 * \brief Provides numerical differentiation of a given functor.
|
||||
*
|
||||
* This class allows you to add a method df() to your functor, which will
|
||||
* use numerical differentiation to compute an approximate of the
|
||||
* derivative for the functor. Of course, if you have an analytical form
|
||||
* for the derivative, you should rather implement df() using it.
|
||||
* for the derivative, you should rather implement df() by yourself.
|
||||
*
|
||||
* More information on
|
||||
* http://en.wikipedia.org/wiki/Numerical_differentiation
|
||||
*
|
||||
 * Currently only the "Forward" and "Central" schemes are implemented. Those
|
||||
 * are basic methods, and there exist some more elaborate ways of
|
||||
 * computing such approximations. They are implemented using both
|
||||
 * proprietary and free software, and usually require linking to an
|
||||
* external library. It is very easy for you to write a functor
|
||||
* using such software, and the purpose is quite orthogonal to what we
|
||||
* want to achieve with Eigen.
|
||||
*
|
||||
* This is why we will not provide wrappers for every great numerical
|
||||
 * differentiation software that exists, but should rather stick with those
|
||||
* basic ones, that still are useful for testing.
|
||||
*
|
||||
* Also, the module "Non linear optimization" needs this in order to
|
||||
* provide full features compatibility with the original (c)minpack
|
||||
* package.
|
||||
*
|
||||
 * Currently only the "Forward" and "Central" schemes are implemented.
|
||||
*/
|
||||
template<typename Functor, NumericalDiffMode mode=Forward>
|
||||
class NumericalDiff : public Functor
|
||||
|
Loading…
x
Reference in New Issue
Block a user