diff --git a/unsupported/Eigen/src/NonLinear/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinear/HybridNonLinearSolver.h
index 7836367ad..58a4da2dc 100644
--- a/unsupported/Eigen/src/NonLinear/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinear/HybridNonLinearSolver.h
@@ -6,11 +6,22 @@ public:
     HybridNonLinearSolver(const FunctorType &_functor) : functor(_functor) {}
 
-    int solve(
+    enum Status {
+        Running = -1,
+        ImproperInputParameters = 0,
+        RelativeErrorTooSmall = 1,
+        TooManyFunctionEvaluation = 2,
+        TolTooSmall = 3,
+        NotMakingProgressJacobian = 4,
+        NotMakingProgressIterations = 5,
+        UserAsked = 6
+    };
+
+    Status solve(
             Matrix< Scalar, Dynamic, 1 > &x,
             const Scalar tol = ei_sqrt(epsilon())
             );
-    int solve(
+    Status solve(
             Matrix< Scalar, Dynamic, 1 > &x,
             int &nfev,
             int &njev,
             Matrix< Scalar, Dynamic, 1 > &diag,
@@ -20,11 +31,11 @@ public:
             const Scalar xtol = ei_sqrt(epsilon())
             );
 
-    int solveNumericalDiff(
+    Status solveNumericalDiff(
             Matrix< Scalar, Dynamic, 1 > &x,
             const Scalar tol = ei_sqrt(epsilon())
             );
-    int solveNumericalDiff(
+    Status solveNumericalDiff(
             Matrix< Scalar, Dynamic, 1 > &x,
             int &nfev,
             Matrix< Scalar, Dynamic, 1 > &diag,
@@ -48,23 +59,24 @@ private:
 
 template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solve(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solve(
         Matrix< Scalar, Dynamic, 1 > &x,
         const Scalar tol
         )
 {
     const int n = x.size();
-    int info, nfev=0, njev=0;
+    int nfev=0, njev=0;
     Matrix< Scalar, Dynamic, 1> diag;
 
     /* check the input parameters for errors. */
     if (n <= 0 || tol < 0.) {
         printf("HybridNonLinearSolver::solve() bad args : n,tol,...");
-        return 0;
+        return ImproperInputParameters;
     }
 
     diag.setConstant(n, 1.);
-    info = solve(
+    return solve(
         x,
         nfev,
         njev,
         diag,
@@ -73,13 +85,13 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
         100.,
         tol
         );
-    return (info==5)?4:info;
 }
 
 
 template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solve(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solve(
         Matrix< Scalar, Dynamic, 1 > &x,
         int &nfev,
         int &njev,
@@ -115,10 +127,8 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
     int nslow1, nslow2;
     int ncfail;
     Scalar actred, prered;
-    int info;
 
     /* Function Body */
-    info = 0;
     iflag = 0;
     nfev = 0;
     njev = 0;
@@ -126,18 +136,18 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
 
     /* check the input parameters for errors. */
     if (n <= 0 || xtol < 0. || maxfev <= 0 || factor <= 0. )
-        goto algo_end;
+        return ImproperInputParameters;
     if (mode == 2)
         for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return ImproperInputParameters;
 
     /* evaluate the function at the starting point */
     /* and calculate its norm. */
 
     iflag = functor.f(x, fvec);
     nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
     fnorm = fvec.stableNorm();
 
     /* initialize iteration counter and monitors. */
@@ -158,7 +168,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
         iflag = functor.df(x, fjac);
         ++njev;
         if (iflag < 0)
-            break;
+            return UserAsked;
 
         /* compute the qr factorization of the jacobian. */
 
@@ -247,8 +257,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
             iflag = functor.f(wa2, wa4);
             ++nfev;
-            if (iflag < 0)
-                goto algo_end;
+            if (iflag < 0) return UserAsked;
             fnorm1 = wa4.stableNorm();
 
             /* compute the scaled actual reduction. */
 
@@ -321,28 +330,24 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
             /* test for convergence. */
 
             if (delta <= xtol * xnorm || fnorm == 0.)
-                info = 1;
-            if (info != 0)
-                goto algo_end;
+                return RelativeErrorTooSmall;
 
             /* tests for termination and stringent tolerances. */
 
             if (nfev >= maxfev)
-                info = 2;
+                return TooManyFunctionEvaluation;
             /* Computing MAX */
             if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon() * xnorm)
-                info = 3;
+                return TolTooSmall;
             if (nslow2 == 5)
-                info = 4;
+                return NotMakingProgressJacobian;
            if (nslow1 == 10)
-                info = 5;
-            if (info != 0)
-                goto algo_end;
+                return NotMakingProgressIterations;
 
             /* criterion for recalculating jacobian. */
 
             if (ncfail == 2)
-                break;
+                break; // leave inner loop and go for the next outer loop iteration
 
             /* calculate the rank one modification to the jacobian */
             /* and update qtf if necessary. */
@@ -367,33 +372,30 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solve(
         }
         /* end of the outer loop. */
     }
-algo_end:
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
 }
 
 template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
 {
     const int n = x.size();
-    int info, nfev=0;
+    int nfev=0;
     Matrix< Scalar, Dynamic, 1> diag;
 
     /* check the input parameters for errors. */
     if (n <= 0 || tol < 0.) {
         printf("HybridNonLinearSolver::solve() bad args : n,tol,...");
-        return 0;
+        return ImproperInputParameters;
     }
 
     diag.setConstant(n, 1.);
-    info = solveNumericalDiff(
+    return solveNumericalDiff(
        x,
        nfev,
        diag,
@@ -403,12 +405,12 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
        100.,
        tol, Scalar(0.)
        );
-    return (info==5)?4:info;
 }
 
 template<typename FunctorType, typename Scalar>
-int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
+typename HybridNonLinearSolver<FunctorType,Scalar>::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        Matrix< Scalar, Dynamic, 1 > &diag,
@@ -448,21 +450,20 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
     int nslow1, nslow2;
     int ncfail;
     Scalar actred, prered;
-    int info;
 
     /* Function Body */
-    info = 0;
     iflag = 0;
     nfev = 0;
 
     /* check the input parameters for errors. */
     if (n <= 0 || xtol < 0. || maxfev <= 0 || nb_of_subdiagonals < 0 || nb_of_superdiagonals < 0 || factor <= 0. )
-        goto algo_end;
+        return ImproperInputParameters;
     if (mode == 2)
         for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return ImproperInputParameters;
 
     /* evaluate the function at the starting point */
     /* and calculate its norm. */
@@ -470,7 +471,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
     iflag = functor.f(x, fvec);
     nfev = 1;
     if (iflag < 0)
-        goto algo_end;
+        return UserAsked;
     fnorm = fvec.stableNorm();
 
     /* determine the number of calls to fcn needed to compute */
@@ -498,7 +499,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
                 nb_of_subdiagonals, nb_of_superdiagonals, epsfcn);
         nfev += msum;
         if (iflag < 0)
-            break;
+            return UserAsked;
 
         /* compute the qr factorization of the jacobian. */
 
@@ -587,8 +588,7 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
             iflag = functor.f(wa2, wa4);
             ++nfev;
-            if (iflag < 0)
-                goto algo_end;
+            if (iflag < 0) return UserAsked;
             fnorm1 = wa4.stableNorm();
 
             /* compute the scaled actual reduction. */
 
@@ -661,29 +661,25 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
             /* test for convergence. */
 
             if (delta <= xtol * xnorm || fnorm == 0.)
-                info = 1;
-            if (info != 0)
-                goto algo_end;
+                return RelativeErrorTooSmall;
 
             /* tests for termination and stringent tolerances. */
 
             if (nfev >= maxfev)
-                info = 2;
+                return TooManyFunctionEvaluation;
             /* Computing MAX */
             if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon() * xnorm)
-                info = 3;
+                return TolTooSmall;
             if (nslow2 == 5)
-                info = 4;
+                return NotMakingProgressJacobian;
             if (nslow1 == 10)
-                info = 5;
-            if (info != 0)
-                goto algo_end;
+                return NotMakingProgressIterations;
 
             /* criterion for recalculating jacobian approximation */
             /* by forward differences. */
 
             if (ncfail == 2)
-                break;
+                break; // leave inner loop and go for the next outer loop iteration
 
             /* calculate the rank one modification to the jacobian */
             /* and update qtf if necessary. */
@@ -708,10 +704,6 @@ int HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
         }
         /* end of the outer loop. */
     }
-algo_end:
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
 }
 
diff --git a/unsupported/Eigen/src/NonLinear/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinear/LevenbergMarquardt.h
index 175c74395..5bd0223a0 100644
--- a/unsupported/Eigen/src/NonLinear/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinear/LevenbergMarquardt.h
@@ -1,4 +1,5 @@
+
 template<typename FunctorType, typename Scalar>
 class LevenbergMarquardt
 {
@@ -6,12 +7,26 @@ public:
     LevenbergMarquardt(const FunctorType &_functor) : functor(_functor) {}
 
-    int minimize(
+    enum Status {
+        Running = -1,
+        ImproperInputParameters = 0,
+        RelativeReductionTooSmall = 1,
+        RelativeErrorTooSmall = 2,
+        RelativeErrorAndReductionTooSmall = 3,
+        CosinusTooSmall = 4,
+        TooManyFunctionEvaluation = 5,
+        FtolTooSmall = 6,
+        XtolTooSmall = 7,
+        GtolTooSmall = 8,
+        UserAsked = 9
+    };
+
+    Status minimize(
            Matrix< Scalar, Dynamic, 1 > &x,
            const Scalar tol = ei_sqrt(epsilon())
            );
-    int minimize(
+    Status minimize(
            Matrix< Scalar, Dynamic, 1 > &x,
            int &nfev,
            int &njev,
@@ -24,12 +39,12 @@ public:
            const Scalar gtol = Scalar(0.)
            );
 
-    int minimizeNumericalDiff(
+    Status minimizeNumericalDiff(
            Matrix< Scalar, Dynamic, 1 > &x,
            const Scalar tol = ei_sqrt(epsilon())
            );
-    int minimizeNumericalDiff(
+    Status minimizeNumericalDiff(
            Matrix< Scalar, Dynamic, 1 > &x,
            int &nfev,
            Matrix< Scalar, Dynamic, 1 > &diag,
@@ -42,12 +57,12 @@ public:
            const Scalar epsfcn = Scalar(0.)
            );
 
-    int minimizeOptimumStorage(
+    Status minimizeOptimumStorage(
            Matrix< Scalar, Dynamic, 1 > &x,
            const Scalar tol = ei_sqrt(epsilon())
            );
-    int minimizeOptimumStorage(
+    Status minimizeOptimumStorage(
            Matrix< Scalar, Dynamic, 1 > &x,
            int &nfev,
            int &njev,
@@ -68,16 +83,16 @@ private:
     const FunctorType &functor;
 };
 
-
 template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimize(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimize(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
 {
     const int n = x.size();
     const int m = functor.nbOfFunctions();
-    int info, nfev=0, njev=0;
+    int nfev=0, njev=0;
     Matrix< Scalar, Dynamic, Dynamic > fjac(m, n);
     Matrix< Scalar, Dynamic, 1> diag, qtf;
     VectorXi ipvt;
@@ -85,10 +100,10 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
 
     /* check the input parameters for errors. */
     if (n <= 0 || m < n || tol < 0.) {
         printf("LevenbergMarquardt::minimize() bad args : m,n,tol,...");
-        return 0;
+        return ImproperInputParameters;
     }
 
-    info = minimize(
+    return minimize(
        x,
        nfev,
        njev,
        diag,
@@ -97,12 +112,12 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
        (n+1)*100,
        tol, tol, Scalar(0.)
        );
-    return (info==8)?4:info;
 }
 
 template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimize(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimize(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        int &njev,
@@ -135,10 +150,8 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
     Scalar ratio;
     Scalar fnorm, gnorm;
     Scalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-    int info;
 
     /* Function Body */
-    info = 0;
     iflag = 0;
     nfev = 0;
     njev = 0;
@@ -146,19 +159,19 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
 
     /* check the input parameters for errors. */
     if (n <= 0 || m < n || ftol < 0. || xtol < 0. || gtol < 0. || maxfev <= 0 || factor <= 0.)
-        goto algo_end;
+        return ImproperInputParameters;
     if (mode == 2)
         for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return ImproperInputParameters;
 
     /* evaluate the function at the starting point */
     /* and calculate its norm. */
 
     iflag = functor.f(x, fvec);
     nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
     fnorm = fvec.stableNorm();
 
     /* initialize levenberg-marquardt parameter and iteration counter. */
@@ -174,8 +187,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
         iflag = functor.df(x, fjac);
         ++njev;
-        if (iflag < 0)
-            break;
+        if (iflag < 0) return UserAsked;
 
         /* compute the qr factorization of the jacobian. */
 
@@ -238,9 +250,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
         /* test for convergence of the gradient norm. */
 
         if (gnorm <= gtol)
-            info = 4;
-        if (info != 0)
-            break;
+            return CosinusTooSmall;
 
         /* rescale if necessary. */
 
@@ -270,8 +280,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
             iflag = functor.f(wa2, wa4);
             ++nfev;
-            if (iflag < 0)
-                goto algo_end;
+            if (iflag < 0) return UserAsked;
             fnorm1 = wa4.stableNorm();
 
             /* compute the scaled actual reduction. */
 
@@ -334,49 +343,41 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimize(
             /* tests for convergence. */
 
+            if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && delta <= xtol * xnorm)
+                return RelativeErrorAndReductionTooSmall;
             if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1.)
-                info = 1;
+                return RelativeReductionTooSmall;
             if (delta <= xtol * xnorm)
-                info = 2;
-            if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && info == 2)
-                info = 3;
-            if (info != 0)
-                goto algo_end;
+                return RelativeErrorTooSmall;
 
             /* tests for termination and stringent tolerances. */
 
             if (nfev >= maxfev)
-                info = 5;
+                return TooManyFunctionEvaluation;
             if (ei_abs(actred) <= epsilon() && prered <= epsilon() && Scalar(.5) * ratio <= 1.)
-                info = 6;
+                return FtolTooSmall;
             if (delta <= epsilon() * xnorm)
-                info = 7;
+                return XtolTooSmall;
             if (gnorm <= epsilon())
-                info = 8;
-            if (info != 0)
-                goto algo_end;
+                return GtolTooSmall;
             /* end of the inner loop. repeat if iteration unsuccessful. */
         } while (ratio < Scalar(1e-4));
         /* end of the outer loop. */
     }
-algo_end:
-
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
 }
 
 template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
 {
     const int n = x.size();
     const int m = functor.nbOfFunctions();
-    int info, nfev=0;
+    int nfev=0;
     Matrix< Scalar, Dynamic, Dynamic > fjac(m, n);
     Matrix< Scalar, Dynamic, 1> diag, qtf;
     VectorXi ipvt;
@@ -384,10 +385,10 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
 
     /* check the input parameters for errors. */
     if (n <= 0 || m < n || tol < 0.)
     {
         printf("ei_lmder1 bad args : m,n,tol,...");
-        return 0;
+        return ImproperInputParameters;
     }
 
-    info = minimizeNumericalDiff(
+    return minimizeNumericalDiff(
        x,
        nfev,
        diag,
@@ -396,11 +397,11 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
        (n+1)*200,
        tol, tol, Scalar(0.), Scalar(0.)
        );
-    return (info==8)?4:info;
 }
 
 template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        Matrix< Scalar, Dynamic, 1 > &diag,
@@ -433,28 +434,26 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
     Scalar ratio;
     Scalar fnorm, gnorm;
     Scalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-    int info;
 
     /* Function Body */
-    info = 0;
     iflag = 0;
     nfev = 0;
 
     /* check the input parameters for errors. */
     if (n <= 0 || m < n || ftol < 0. || xtol < 0. || gtol < 0. || maxfev <= 0 || factor <= 0.)
-        goto algo_end;
+        return ImproperInputParameters;
     if (mode == 2)
         for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return ImproperInputParameters;
 
     /* evaluate the function at the starting point */
     /* and calculate its norm. */
 
     iflag = functor.f(x, fvec);
     nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
     fnorm = fvec.stableNorm();
 
     /* initialize levenberg-marquardt parameter and iteration counter. */
@@ -470,8 +469,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
         iflag = ei_fdjac2(functor, x, fvec, fjac, epsfcn);
         nfev += n;
-        if (iflag < 0)
-            break;
+        if (iflag < 0) return UserAsked;
 
         /* compute the qr factorization of the jacobian. */
 
@@ -534,9 +532,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
         /* test for convergence of the gradient norm. */
 
         if (gnorm <= gtol)
-            info = 4;
-        if (info != 0)
-            break;
+            return CosinusTooSmall;
 
         /* rescale if necessary. */
 
@@ -566,8 +562,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
             iflag = functor.f(wa2, wa4);
             ++nfev;
-            if (iflag < 0)
-                goto algo_end;
+            if (iflag < 0) return UserAsked;
             fnorm1 = wa4.stableNorm();
 
             /* compute the scaled actual reduction. */
 
@@ -630,49 +625,42 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeNumericalDiff(
             /* tests for convergence. */
 
+            if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && delta <= xtol * xnorm)
+                return RelativeErrorAndReductionTooSmall;
             if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1.)
-                info = 1;
+                return RelativeReductionTooSmall;
             if (delta <= xtol * xnorm)
-                info = 2;
-            if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && info == 2)
-                info = 3;
-            if (info != 0)
-                goto algo_end;
+                return RelativeErrorTooSmall;
 
             /* tests for termination and stringent tolerances. */
 
             if (nfev >= maxfev)
-                info = 5;
+                return TooManyFunctionEvaluation;
             if (ei_abs(actred) <= epsilon() && prered <= epsilon() && Scalar(.5) * ratio <= 1.)
-                info = 6;
+                return FtolTooSmall;
             if (delta <= epsilon() * xnorm)
-                info = 7;
+                return XtolTooSmall;
             if (gnorm <= epsilon())
-                info = 8;
-            if (info != 0)
-                goto algo_end;
+                return GtolTooSmall;
+
             /* end of the inner loop. repeat if iteration unsuccessful. */
         } while (ratio < Scalar(1e-4));
         /* end of the outer loop. */
     }
-algo_end:
-
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
 }
 
 template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
        Matrix< Scalar, Dynamic, 1 > &x,
        const Scalar tol
        )
 {
     const int n = x.size();
     const int m = functor.nbOfFunctions();
-    int info, nfev=0, njev=0;
+    int nfev=0, njev=0;
     Matrix< Scalar, Dynamic, Dynamic > fjac(m, n);
     Matrix< Scalar, Dynamic, 1> diag, qtf;
     VectorXi ipvt;
@@ -680,10 +668,10 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
 
     /* check the input parameters for errors. */
     if (n <= 0 || m < n || tol < 0.) {
         printf("LevenbergMarquardtOptimumStorage::minimize() bad args : m,n,tol,...");
-        return 0;
+        return ImproperInputParameters;
     }
 
-    info = minimizeOptimumStorage(
+    return minimizeOptimumStorage(
        x,
        nfev,
        njev,
        diag,
@@ -692,11 +680,11 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
        (n+1)*100,
        tol, tol, Scalar(0.)
        );
-    return (info==8)?4:info;
 }
 
 template<typename FunctorType, typename Scalar>
-int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
+typename LevenbergMarquardt<FunctorType,Scalar>::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
        Matrix< Scalar, Dynamic, 1 > &x,
        int &nfev,
        int &njev,
@@ -729,10 +717,8 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
     Scalar ratio;
     Scalar fnorm, gnorm;
     Scalar pnorm, xnorm, fnorm1, actred, dirder, prered;
-    int info;
 
     /* Function Body */
-    info = 0;
     iflag = 0;
     nfev = 0;
     njev = 0;
@@ -740,19 +726,19 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
 
     /* check the input parameters for errors. */
     if (n <= 0 || m < n || ftol < 0. || xtol < 0. || gtol < 0. || maxfev <= 0 || factor <= 0.)
-        goto algo_end;
+        return ImproperInputParameters;
     if (mode == 2)
         for (j = 0; j < n; ++j)
-            if (diag[j] <= 0.) goto algo_end;
+            if (diag[j] <= 0.)
+                return ImproperInputParameters;
 
     /* evaluate the function at the starting point */
     /* and calculate its norm. */
 
     iflag = functor.f(x, fvec);
     nfev = 1;
-    if (iflag < 0)
-        goto algo_end;
+    if (iflag < 0) return UserAsked;
     fnorm = fvec.stableNorm();
 
     /* initialize levenberg-marquardt parameter and iteration counter. */
@@ -773,8 +759,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
         fjac.fill(0.);
         iflag = 2;
         for (i = 0; i < m; ++i) {
-            if (functor.df(x, wa3, iflag) < 0)
-                break;
+            if (functor.df(x, wa3, iflag) < 0) return UserAsked;
             temp = fvec[i];
             ei_rwupdt(n, fjac.data(), fjac.rows(), wa3.data(), qtf.data(), &temp, wa1.data(), wa2.data());
             ++iflag;
@@ -848,9 +833,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
         /* test for convergence of the gradient norm. */
 
         if (gnorm <= gtol)
-            info = 4;
-        if (info != 0)
-            break;
+            return CosinusTooSmall;
 
         /* rescale if necessary. */
 
@@ -880,8 +863,7 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
             iflag = functor.f(wa2, wa4);
             ++nfev;
-            if (iflag < 0)
-                goto algo_end;
+            if (iflag < 0) return UserAsked;
             fnorm1 = wa4.stableNorm();
 
             /* compute the scaled actual reduction. */
 
@@ -944,37 +926,29 @@ int LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
             /* tests for convergence. */
 
+            if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && delta <= xtol * xnorm)
+                return RelativeErrorAndReductionTooSmall;
             if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1.)
-                info = 1;
+                return RelativeReductionTooSmall;
             if (delta <= xtol * xnorm)
-                info = 2;
-            if (ei_abs(actred) <= ftol && prered <= ftol && Scalar(.5) * ratio <= 1. && info == 2)
-                info = 3;
-            if (info != 0)
-                goto algo_end;
+                return RelativeErrorTooSmall;
 
             /* tests for termination and stringent tolerances. */
 
             if (nfev >= maxfev)
-                info = 5;
+                return TooManyFunctionEvaluation;
             if (ei_abs(actred) <= epsilon() && prered <= epsilon() && Scalar(.5) * ratio <= 1.)
-                info = 6;
+                return FtolTooSmall;
             if (delta <= epsilon() * xnorm)
-                info = 7;
+                return XtolTooSmall;
             if (gnorm <= epsilon())
-                info = 8;
-            if (info != 0)
-                goto algo_end;
+                return GtolTooSmall;
+
             /* end of the inner loop. repeat if iteration unsuccessful. */
         } while (ratio < Scalar(1e-4));
         /* end of the outer loop. */
     }
-algo_end:
-
-    /* termination, either normal or user imposed. */
-    if (iflag < 0)
-        info = iflag;
-    return info;
+    assert(false); // should never be reached
 }
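
Usage sketch (not part of the patch): how a caller might branch on the new Status return values instead of the old integer codes. MyFunctor below is a hypothetical functor type assumed to provide the f(x, fvec), df(x, fjac) and nbOfFunctions() members the solvers rely on above, and the Scalar template parameter is assumed to default to double; only the Status names and the minimize() signature are taken from the patch itself.

// Hypothetical caller-side sketch -- MyFunctor and the Scalar=double default
// are assumptions, not part of the patch.
MyFunctor functor;
LevenbergMarquardt<MyFunctor> lm(functor);

Matrix<double, Dynamic, 1> x(2);
x << -1.2, 1.;                      // starting point

typedef LevenbergMarquardt<MyFunctor> LM;
LM::Status status = lm.minimize(x);

if (status == LM::ImproperInputParameters) {
    // old API: return value 0 -- bad m, n or tol
} else if (status == LM::TooManyFunctionEvaluation) {
    // old API: return value 5 -- maxfev reached before convergence
} else if (status == LM::UserAsked) {
    // old API: the negative iflag from the functor was propagated
} else {
    // old API: codes 1-4 and 6-8 -- x now holds the best estimate found
}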