use an enum for status reporting

Thomas Capricelli 2009-08-25 19:48:53 +02:00
parent d59cc0ad82
commit d38d4709bc
2 changed files with 147 additions and 181 deletions


@@ -6,11 +6,22 @@ public:
    HybridNonLinearSolver(const FunctorType &_functor)
        : functor(_functor) {}
-    int solve(
+    enum Status {
+        Running = -1,
+        ImproperInputParameters = 0,
+        RelativeErrorTooSmall = 1,
+        TooManyFunctionEvaluation = 2,
+        TolTooSmall = 3,
+        NotMakingProgressJacobian = 4,
+        NotMakingProgressIterations = 5,
+        UserAksed = 6
+    };
+    Status solve(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol = ei_sqrt(epsilon<Scalar>())
    );
-    int solve(
+    Status solve(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev, int &njev,
        Matrix< Scalar, Dynamic, 1 > &diag,
@@ -20,11 +31,11 @@ public:
        const Scalar xtol = ei_sqrt(epsilon<Scalar>())
    );
-    int solveNumericalDiff(
+    Status solveNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol = ei_sqrt(epsilon<Scalar>())
    );
-    int solveNumericalDiff(
+    Status solveNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        Matrix< Scalar, Dynamic, 1 > &diag,
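For context, here is a hedged sketch (not part of this commit) of what calling code looks like once solve() reports a Status instead of an int. MyFunctor and x0 are placeholders for a user-supplied functor providing the f()/df() members this module expects and a problem-dependent initial guess, and Scalar is assumed to default to double:

// Illustrative caller only; MyFunctor and x0 are placeholders.
MyFunctor functor;
HybridNonLinearSolver<MyFunctor> solver(functor);
Matrix<double, Dynamic, 1> x = x0;                    // problem-dependent initial guess
typedef HybridNonLinearSolver<MyFunctor> SolverType;
SolverType::Status status = solver.solve(x);          // default tol = ei_sqrt(epsilon<double>())
if (status == SolverType::ImproperInputParameters) {
    // bad n/tol, nothing was done (old return value 0)
} else if (status == SolverType::RelativeErrorTooSmall) {
    // converged: relative error between two iterates is at most xtol (old info == 1)
} else if (status == SolverType::TooManyFunctionEvaluation) {
    // stopped after exhausting the function-evaluation budget (old info == 2)
}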
@@ -48,23 +59,24 @@ private:
template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solve(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solve(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
{
    const int n = x.size();
-    int info, nfev=0, njev=0;
+    int nfev=0, njev=0;
    Matrix< Scalar, Dynamic, 1> diag;
    /* check the input parameters for errors. */
    if (n <= 0 || tol < 0.) {
        printf("HybridNonLinearSolver::solve() bad args : n,tol,...");
-        return 0;
+        return ImproperInputParameters;
    }
    diag.setConstant(n, 1.);
-    info = solve(
+    return solve(
        x,
        nfev, njev,
        diag,
@@ -73,13 +85,13 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
        100.,
        tol
    );
-    return (info==5)?4:info;
}
template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solve(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solve(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        int &njev,
@@ -115,10 +127,8 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
    int nslow1, nslow2;
    int ncfail;
    Scalar actred, prered;
-    int info;
    /* Function Body */
-    info = 0;
    iflag = 0;
    nfev = 0;
    njev = 0;
@@ -126,18 +136,18 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
    /* check the input parameters for errors. */
    if (n <= 0 || xtol < 0. || maxfev <= 0 || factor <= 0. )
-        goto algo_end;
+        return RelativeErrorTooSmall;
    if (mode == 2)
        for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return RelativeErrorTooSmall;
    /* evaluate the function at the starting point */
    /* and calculate its norm. */
    iflag = functor.f(x, fvec);
    nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAksed;
    fnorm = fvec.stableNorm();
    /* initialize iteration counter and monitors. */
@@ -158,7 +168,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
    iflag = functor.df(x, fjac);
    ++njev;
    if (iflag < 0)
-        break;
+        return UserAksed;
    /* compute the qr factorization of the jacobian. */
@@ -247,8 +257,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
    iflag = functor.f(wa2, wa4);
    ++nfev;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAksed;
    fnorm1 = wa4.stableNorm();
    /* compute the scaled actual reduction. */
@@ -321,28 +330,24 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
    /* test for convergence. */
    if (delta <= xtol * xnorm || fnorm == 0.)
-        info = 1;
-    if (info != 0)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    /* tests for termination and stringent tolerances. */
    if (nfev >= maxfev)
-        info = 2;
+        return TooManyFunctionEvaluation;
    /* Computing MAX */
    if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
-        info = 3;
+        return TolTooSmall;
    if (nslow2 == 5)
-        info = 4;
+        return NotMakingProgressJacobian;
    if (nslow1 == 10)
-        info = 5;
-    if (info != 0)
-        goto algo_end;
+        return NotMakingProgressIterations;
    /* criterion for recalculating jacobian. */
    if (ncfail == 2)
-        break;
+        break; // leave inner loop and go for the next outer loop iteration
    /* calculate the rank one modification to the jacobian */
    /* and update qtf if necessary. */
@@ -367,33 +372,30 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
    }
    /* end of the outer loop. */
    }
-algo_end:
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
}
template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
{
    const int n = x.size();
-    int info, nfev=0;
+    int nfev=0;
    Matrix< Scalar, Dynamic, 1> diag;
    /* check the input parameters for errors. */
    if (n <= 0 || tol < 0.) {
        printf("HybridNonLinearSolver::solve() bad args : n,tol,...");
-        return 0;
+        return ImproperInputParameters;
    }
    diag.setConstant(n, 1.);
-    info = solveNumericalDiff(
+    return solveNumericalDiff(
        x,
        nfev,
        diag,
@@ -403,12 +405,12 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
        100.,
        tol, Scalar(0.)
    );
-    return (info==5)?4:info;
}
template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        Matrix< Scalar, Dynamic, 1 > &diag,
@@ -448,21 +450,20 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
    int nslow1, nslow2;
    int ncfail;
    Scalar actred, prered;
-    int info;
    /* Function Body */
-    info = 0;
    iflag = 0;
    nfev = 0;
    /* check the input parameters for errors. */
    if (n <= 0 || xtol < 0. || maxfev <= 0 || nb_of_subdiagonals < 0 || nb_of_superdiagonals < 0 || factor <= 0. )
-        goto algo_end;
+        return RelativeErrorTooSmall;
    if (mode == 2)
        for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return RelativeErrorTooSmall;
    /* evaluate the function at the starting point */
    /* and calculate its norm. */
@@ -470,7 +471,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
    iflag = functor.f(x, fvec);
    nfev = 1;
    if (iflag < 0)
-        goto algo_end;
+        return UserAksed;
    fnorm = fvec.stableNorm();
    /* determine the number of calls to fcn needed to compute */
@@ -498,7 +499,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
        nb_of_subdiagonals, nb_of_superdiagonals, epsfcn);
    nfev += msum;
    if (iflag < 0)
-        break;
+        return UserAksed;
    /* compute the qr factorization of the jacobian. */
@@ -587,8 +588,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
    iflag = functor.f(wa2, wa4);
    ++nfev;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAksed;
    fnorm1 = wa4.stableNorm();
    /* compute the scaled actual reduction. */
@@ -661,29 +661,25 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
    /* test for convergence. */
    if (delta <= xtol * xnorm || fnorm == 0.)
-        info = 1;
-    if (info != 0)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    /* tests for termination and stringent tolerances. */
    if (nfev >= maxfev)
-        info = 2;
+        return TooManyFunctionEvaluation;
    /* Computing MAX */
    if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
-        info = 3;
+        return TolTooSmall;
    if (nslow2 == 5)
-        info = 4;
+        return NotMakingProgressJacobian;
    if (nslow1 == 10)
-        info = 5;
-    if (info != 0)
-        goto algo_end;
+        return NotMakingProgressIterations;
    /* criterion for recalculating jacobian approximation */
    /* by forward differences. */
    if (ncfail == 2)
-        break;
+        break; // leave inner loop and go for the next outer loop iteration
    /* calculate the rank one modification to the jacobian */
    /* and update qtf if necessary. */
@@ -708,10 +704,6 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
    }
    /* end of the outer loop. */
    }
-algo_end:
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
}


@@ -1,4 +1,5 @@
template<typename FunctorType, typename Scalar=double>
class LevenbergMarquardt
{
@@ -6,12 +7,26 @@ public:
    LevenbergMarquardt(const FunctorType &_functor)
        : functor(_functor) {}
-    int minimize(
+    enum Status {
+        Running = -1,
+        ImproperInputParameters = 0,
+        RelativeReductionTooSmall = 1,
+        RelativeErrorTooSmall = 2,
+        RelativeErrorAndReductionTooSmall = 3,
+        CosinusTooSmall = 4,
+        TooManyFunctionEvaluation = 5,
+        FtolTooSmall = 6,
+        XtolTooSmall = 7,
+        GtolTooSmall = 8,
+        UserAsked = 9
+    };
+    Status minimize(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol = ei_sqrt(epsilon<Scalar>())
    );
-    int minimize(
+    Status minimize(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        int &njev,
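Similarly, a hedged sketch of caller-side handling for the LevenbergMarquardt status values introduced above. MyFunctor is again a placeholder for a user functor, and the messages paraphrase the usual MINPACK lmder meanings of the corresponding info codes; the helper is illustrative only, not part of the commit:

// Illustrative helper only: map a status to a short diagnostic string.
typedef LevenbergMarquardt<MyFunctor> LMType;
const char* describeStatus(LMType::Status status)
{
    switch (status) {
    case LMType::ImproperInputParameters:           return "improper input parameters";
    case LMType::RelativeReductionTooSmall:         return "converged: relative reduction of the sum of squares is at most ftol";
    case LMType::RelativeErrorTooSmall:             return "converged: relative error between two iterates is at most xtol";
    case LMType::RelativeErrorAndReductionTooSmall: return "converged: both the ftol and xtol tests hold";
    case LMType::CosinusTooSmall:                   return "converged: the gtol (gradient/residual angle) test holds";
    case LMType::TooManyFunctionEvaluation:         return "stopped: maxfev function evaluations reached";
    case LMType::FtolTooSmall:                      return "ftol is too small, no further reduction of the sum of squares is possible";
    case LMType::XtolTooSmall:                      return "xtol is too small, no further improvement of x is possible";
    case LMType::GtolTooSmall:                      return "gtol is too small";
    case LMType::UserAsked:                         return "stopped: the functor asked for termination (returned a negative value)";
    default:                                        return "still running";
    }
}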
@@ -24,12 +39,12 @@ public:
        const Scalar gtol = Scalar(0.)
    );
-    int minimizeNumericalDiff(
+    Status minimizeNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol = ei_sqrt(epsilon<Scalar>())
    );
-    int minimizeNumericalDiff(
+    Status minimizeNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        Matrix< Scalar, Dynamic, 1 > &diag,
@@ -42,12 +57,12 @@ public:
        const Scalar epsfcn = Scalar(0.)
    );
-    int minimizeOptimumStorage(
+    Status minimizeOptimumStorage(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol = ei_sqrt(epsilon<Scalar>())
    );
-    int minimizeOptimumStorage(
+    Status minimizeOptimumStorage(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        int &njev,
@@ -68,16 +83,16 @@ private:
    const FunctorType &functor;
};
template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimize(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimize(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
{
    const int n = x.size();
    const int m = functor.nbOfFunctions();
-    int info, nfev=0, njev=0;
+    int nfev=0, njev=0;
    Matrix< Scalar, Dynamic, Dynamic > fjac(m, n);
    Matrix< Scalar, Dynamic, 1> diag, qtf;
    VectorXi ipvt;
@@ -85,10 +100,10 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
    /* check the input parameters for errors. */
    if (n <= 0 || m < n || tol < 0.) {
        printf("LevenbergMarquardt::minimize() bad args : m,n,tol,...");
-        return 0;
+        return ImproperInputParameters;
    }
-    info = minimize(
+    return minimize(
        x,
        nfev, njev,
        diag,
@@ -97,12 +112,12 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
        (n+1)*100,
        tol, tol, Scalar(0.)
    );
-    return (info==8)?4:info;
}
template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimize(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimize(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        int &njev,
@@ -135,10 +150,8 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
    Scalar ratio;
    Scalar fnorm, gnorm;
    Scalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-    int info;
    /* Function Body */
-    info = 0;
    iflag = 0;
    nfev = 0;
    njev = 0;
@@ -146,19 +159,19 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
    /* check the input parameters for errors. */
    if (n <= 0 || m < n || ftol < 0. || xtol < 0. || gtol < 0. || maxfev <= 0 || factor <= 0.)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    if (mode == 2)
        for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return RelativeErrorTooSmall;
    /* evaluate the function at the starting point */
    /* and calculate its norm. */
    iflag = functor.f(x, fvec);
    nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
    fnorm = fvec.stableNorm();
    /* initialize levenberg-marquardt parameter and iteration counter. */
@@ -174,8 +187,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
    iflag = functor.df(x, fjac);
    ++njev;
-    if (iflag < 0)
-        break;
+    if (iflag < 0) return UserAsked;
    /* compute the qr factorization of the jacobian. */
@@ -238,9 +250,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
    /* test for convergence of the gradient norm. */
    if (gnorm <= gtol)
-        info = 4;
-    if (info != 0)
-        break;
+        return CosinusTooSmall;
    /* rescale if necessary. */
@@ -270,8 +280,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
    iflag = functor.f(wa2, wa4);
    ++nfev;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
    fnorm1 = wa4.stableNorm();
    /* compute the scaled actual reduction. */
@@ -334,49 +343,41 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
    /* tests for convergence. */
+    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && delta <= xtol * xnorm)
+        return RelativeErrorAndReductionTooSmall;
    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1.)
-        info = 1;
+        return RelativeReductionTooSmall;
    if (delta <= xtol * xnorm)
-        info = 2;
-    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && info == 2)
-        info = 3;
-    if (info != 0)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    /* tests for termination and stringent tolerances. */
    if (nfev >= maxfev)
-        info = 5;
+        return TooManyFunctionEvaluation;
    if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
-        info = 6;
+        return FtolTooSmall;
    if (delta <= epsilon<Scalar>() * xnorm)
-        info = 7;
+        return XtolTooSmall;
    if (gnorm <= epsilon<Scalar>())
-        info = 8;
-    if (info != 0)
-        goto algo_end;
+        return GtolTooSmall;
    /* end of the inner loop. repeat if iteration unsuccessful. */
    } while (ratio < Scalar(1e-4));
    /* end of the outer loop. */
    }
-algo_end:
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
}
template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
{
    const int n = x.size();
    const int m = functor.nbOfFunctions();
-    int info, nfev=0;
+    int nfev=0;
    Matrix< Scalar, Dynamic, Dynamic > fjac(m, n);
    Matrix< Scalar, Dynamic, 1> diag, qtf;
    VectorXi ipvt;
@@ -384,10 +385,10 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
    /* check the input parameters for errors. */
    if (n <= 0 || m < n || tol < 0.) {
        printf("ei_lmder1 bad args : m,n,tol,...");
-        return 0;
+        return ImproperInputParameters;
    }
-    info = minimizeNumericalDiff(
+    return minimizeNumericalDiff(
        x,
        nfev,
        diag,
@@ -396,11 +397,11 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
        (n+1)*200,
        tol, tol, Scalar(0.), Scalar(0.)
    );
-    return (info==8)?4:info;
}
template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        Matrix< Scalar, Dynamic, 1 > &diag,
@@ -433,28 +434,26 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
    Scalar ratio;
    Scalar fnorm, gnorm;
    Scalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-    int info;
    /* Function Body */
-    info = 0;
    iflag = 0;
    nfev = 0;
    /* check the input parameters for errors. */
    if (n <= 0 || m < n || ftol < 0. || xtol < 0. || gtol < 0. || maxfev <= 0 || factor <= 0.)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    if (mode == 2)
        for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return RelativeErrorTooSmall;
    /* evaluate the function at the starting point */
    /* and calculate its norm. */
    iflag = functor.f(x, fvec);
    nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
    fnorm = fvec.stableNorm();
    /* initialize levenberg-marquardt parameter and iteration counter. */
@@ -470,8 +469,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
    iflag = ei_fdjac2(functor, x, fvec, fjac, epsfcn);
    nfev += n;
-    if (iflag < 0)
-        break;
+    if (iflag < 0) return UserAsked;
    /* compute the qr factorization of the jacobian. */
@@ -534,9 +532,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
    /* test for convergence of the gradient norm. */
    if (gnorm <= gtol)
-        info = 4;
-    if (info != 0)
-        break;
+        return CosinusTooSmall;
    /* rescale if necessary. */
@@ -566,8 +562,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
    iflag = functor.f(wa2, wa4);
    ++nfev;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
    fnorm1 = wa4.stableNorm();
    /* compute the scaled actual reduction. */
@@ -630,49 +625,42 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
    /* tests for convergence. */
+    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && delta <= xtol * xnorm)
+        return RelativeErrorAndReductionTooSmall;
    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1.)
-        info = 1;
+        return RelativeReductionTooSmall;
    if (delta <= xtol * xnorm)
-        info = 2;
-    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && info == 2)
-        info = 3;
-    if (info != 0)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    /* tests for termination and stringent tolerances. */
    if (nfev >= maxfev)
-        info = 5;
+        return TooManyFunctionEvaluation;
    if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
-        info = 6;
+        return FtolTooSmall;
    if (delta <= epsilon<Scalar>() * xnorm)
-        info = 7;
+        return XtolTooSmall;
    if (gnorm <= epsilon<Scalar>())
-        info = 8;
-    if (info != 0)
-        goto algo_end;
+        return GtolTooSmall;
    /* end of the inner loop. repeat if iteration unsuccessful. */
    } while (ratio < Scalar(1e-4));
    /* end of the outer loop. */
    }
-algo_end:
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
}
template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
{
    const int n = x.size();
    const int m = functor.nbOfFunctions();
-    int info, nfev=0, njev=0;
+    int nfev=0, njev=0;
    Matrix< Scalar, Dynamic, Dynamic > fjac(m, n);
    Matrix< Scalar, Dynamic, 1> diag, qtf;
    VectorXi ipvt;
@@ -680,10 +668,10 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
    /* check the input parameters for errors. */
    if (n <= 0 || m < n || tol < 0.) {
        printf("LevenbergMarquardtOptimumStorage::minimize() bad args : m,n,tol,...");
-        return 0;
+        return ImproperInputParameters;
    }
-    info = minimizeOptimumStorage(
+    return minimizeOptimumStorage(
        x,
        nfev, njev,
        diag,
@@ -692,11 +680,11 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
        (n+1)*100,
        tol, tol, Scalar(0.)
    );
-    return (info==8)?4:info;
}
template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        int &njev,
@@ -729,10 +717,8 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
    Scalar ratio;
    Scalar fnorm, gnorm;
    Scalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-    int info;
    /* Function Body */
-    info = 0;
    iflag = 0;
    nfev = 0;
    njev = 0;
@@ -740,19 +726,19 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
    /* check the input parameters for errors. */
    if (n <= 0 || m < n || ftol < 0. || xtol < 0. || gtol < 0. || maxfev <= 0 || factor <= 0.)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    if (mode == 2)
        for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return RelativeErrorTooSmall;
    /* evaluate the function at the starting point */
    /* and calculate its norm. */
    iflag = functor.f(x, fvec);
    nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
    fnorm = fvec.stableNorm();
    /* initialize levenberg-marquardt parameter and iteration counter. */
@@ -773,8 +759,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
    fjac.fill(0.);
    iflag = 2;
    for (i = 0; i < m; ++i) {
-        if (functor.df(x, wa3, iflag) < 0)
-            break;
+        if (functor.df(x, wa3, iflag) < 0) return UserAsked;
        temp = fvec[i];
        ei_rwupdt<Scalar>(n, fjac.data(), fjac.rows(), wa3.data(), qtf.data(), &temp, wa1.data(), wa2.data());
        ++iflag;
@@ -848,9 +833,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
    /* test for convergence of the gradient norm. */
    if (gnorm <= gtol)
-        info = 4;
-    if (info != 0)
-        break;
+        return CosinusTooSmall;
    /* rescale if necessary. */
@@ -880,8 +863,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
    iflag = functor.f(wa2, wa4);
    ++nfev;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
    fnorm1 = wa4.stableNorm();
    /* compute the scaled actual reduction. */
@@ -944,37 +926,29 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
    /* tests for convergence. */
+    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && delta <= xtol * xnorm)
+        return RelativeErrorAndReductionTooSmall;
    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1.)
-        info = 1;
+        return RelativeReductionTooSmall;
    if (delta <= xtol * xnorm)
-        info = 2;
-    if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && info == 2)
-        info = 3;
-    if (info != 0)
-        goto algo_end;
+        return RelativeErrorTooSmall;
    /* tests for termination and stringent tolerances. */
    if (nfev >= maxfev)
-        info = 5;
+        return TooManyFunctionEvaluation;
    if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
-        info = 6;
+        return FtolTooSmall;
    if (delta <= epsilon<Scalar>() * xnorm)
-        info = 7;
+        return XtolTooSmall;
    if (gnorm <= epsilon<Scalar>())
-        info = 8;
-    if (info != 0)
-        goto algo_end;
+        return GtolTooSmall;
    /* end of the inner loop. repeat if iteration unsuccessful. */
    } while (ratio < Scalar(1e-4));
    /* end of the outer loop. */
    }
-algo_end:
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
}