remove spaces

Thomas Capricelli 2010-01-26 10:50:43 +01:00
parent 113995355b
commit d791b51112
3 changed files with 0 additions and 69 deletions

View File

@@ -429,7 +429,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
/* Function Body */
nfev = 0;
njev = 0;

View File

@@ -207,7 +207,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
njev = 0;
/* check the input parameters for errors. */
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
return ImproperInputParameters;
@@ -218,14 +217,12 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
/* evaluate the function at the starting point */
/* and calculate its norm. */
nfev = 1;
if ( functor(x, fvec) < 0)
return UserAsked;
fnorm = fvec.stableNorm();
/* initialize levenberg-marquardt parameter and iteration counter. */
par = 0.;
iter = 1;
@@ -242,7 +239,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
int i, j, l;
/* calculate the jacobian matrix. */
int df_ret = functor.df(x, fjac);
if (df_ret<0)
return UserAsked;
@@ -252,7 +248,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
else njev++;
/* compute the qr factorization of the jacobian. */
wa2 = fjac.colwise().blueNorm();
ColPivHouseholderQR<JacobianType> qrfac(fjac);
fjac = qrfac.matrixQR();
@@ -264,7 +259,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
for (j = 0; j < n; ++j) {
@@ -275,7 +269,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
wa3 = diag.cwiseProduct(x);
xnorm = wa3.stableNorm();
delta = parameters.factor * xnorm;
@@ -285,7 +278,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
/* form (q transpose)*fvec and store the first n components in */
/* qtf. */
#if 0
// find a way to only compute the first n items, we have m>>n here.
wa4 = fvec;
@@ -309,7 +301,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
#endif
/* compute the norm of the scaled gradient. */
gnorm = 0.;
if (fnorm != 0.)
for (j = 0; j < n; ++j) {
@@ -324,12 +315,10 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
}
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
return CosinusTooSmall;
/* rescale if necessary. */
if (mode != 2) /* Computing MAX */
diag = diag.cwiseMax(wa2);
@@ -337,37 +326,31 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
do {
/* determine the levenberg-marquardt parameter. */
ei_lmpar2<Scalar>(qrfac, diag, qtf, delta, par, wa1);
/* store the direction p and x + p. calculate the norm of p. */
wa1 = -wa1;
wa2 = x + wa1;
wa3 = diag.cwiseProduct(wa1);
pnorm = wa3.stableNorm();
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
return UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
actred = -1.;
if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
wa3.fill(0.);
for (j = 0; j < n; ++j) {
l = ipvt[j];
@@ -383,13 +366,11 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
/* compute the ratio of the actual to the predicted */
/* reduction. */
ratio = 0.;
if (prered != 0.)
ratio = actred / prered;
/* update the step bound. */
if (ratio <= Scalar(.25)) {
if (actred >= 0.)
temp = Scalar(.5);
@@ -406,7 +387,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
}
/* test for successful iteration. */
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -418,7 +398,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
}
/* tests for convergence. */
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
return RelativeErrorAndReductionTooSmall;
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
@@ -427,7 +406,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
return RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return TooManyFunctionEvaluation;
if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)
@@ -489,7 +467,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
njev = 0;
/* check the input parameters for errors. */
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
return ImproperInputParameters;
@@ -500,14 +477,12 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
/* evaluate the function at the starting point */
/* and calculate its norm. */
nfev = 1;
if ( functor(x, fvec) < 0)
return UserAsked;
fnorm = fvec.stableNorm();
/* initialize levenberg-marquardt parameter and iteration counter. */
par = 0.;
iter = 1;
@@ -529,7 +504,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
/* calculated one row at a time, while simultaneously */
/* forming (q transpose)*fvec and storing the first */
/* n components in qtf. */
qtf.fill(0.);
fjac.fill(0.);
int rownb = 2;
@@ -543,7 +517,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
/* if the jacobian is rank deficient, call qrfac to */
/* reorder its columns and update the components of qtf. */
sing = false;
for (j = 0; j < n; ++j) {
if (fjac(j,j) == 0.) {
@@ -578,7 +551,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
for (j = 0; j < n; ++j) {
@@ -589,7 +561,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
wa3 = diag.cwiseProduct(x);
xnorm = wa3.stableNorm();
delta = parameters.factor * xnorm;
@@ -598,7 +569,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* compute the norm of the scaled gradient. */
gnorm = 0.;
if (fnorm != 0.)
for (j = 0; j < n; ++j) {
@@ -613,12 +583,10 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
return CosinusTooSmall;
/* rescale if necessary. */
if (mode != 2) /* Computing MAX */
diag = diag.cwiseMax(wa2);
@@ -626,37 +594,31 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
do {
/* determine the levenberg-marquardt parameter. */
ei_lmpar<Scalar>(fjac, ipvt, diag, qtf, delta, par, wa1);
/* store the direction p and x + p. calculate the norm of p. */
wa1 = -wa1;
wa2 = x + wa1;
wa3 = diag.cwiseProduct(wa1);
pnorm = wa3.stableNorm();
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor(wa2, wa4) < 0)
return UserAsked;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
actred = -1.;
if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
wa3.fill(0.);
for (j = 0; j < n; ++j) {
l = ipvt[j];
@@ -672,13 +634,11 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
/* compute the ratio of the actual to the predicted */
/* reduction. */
ratio = 0.;
if (prered != 0.)
ratio = actred / prered;
/* update the step bound. */
if (ratio <= Scalar(.25)) {
if (actred >= 0.)
temp = Scalar(.5);
@@ -695,7 +655,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* test for successful iteration. */
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
@@ -707,7 +666,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
/* tests for convergence. */
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
return RelativeErrorAndReductionTooSmall;
if (ei_abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
@@ -716,7 +674,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
return RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return TooManyFunctionEvaluation;
if (ei_abs(actred) <= epsilon<Scalar>() && prered <= epsilon<Scalar>() && Scalar(.5) * ratio <= 1.)

View File

@@ -30,7 +30,6 @@ void ei_lmpar(
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
int nsing = n-1;
wa1 = qtb;
for (j = 0; j < n; ++j) {
@@ -52,7 +51,6 @@ void ei_lmpar(
/* initialize the iteration counter. */
/* evaluate the function at the origin, and test */
/* for acceptance of the gauss-newton direction. */
iter = 0;
wa2 = diag.cwiseProduct(x);
dxnorm = wa2.blueNorm();
@@ -65,7 +63,6 @@ void ei_lmpar(
/* if the jacobian is not rank deficient, the newton */
/* step provides a lower bound, parl, for the zero of */
/* the function. otherwise set this bound to zero. */
parl = 0.;
if (nsing >= n-1) {
for (j = 0; j < n; ++j) {
@@ -85,7 +82,6 @@ void ei_lmpar(
}
/* calculate an upper bound, paru, for the zero of the function. */
for (j = 0; j < n; ++j)
wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]];
@@ -96,22 +92,18 @@ void ei_lmpar(
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
par = std::max(par,parl);
par = std::min(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
/* beginning of an iteration. */
while (true) {
++iter;
/* evaluate the function at the current value of par. */
if (par == 0.)
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
wa1 = ei_sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -125,12 +117,10 @@ void ei_lmpar(
/* if the function is small enough, accept the current value */
/* of par. also test for the exceptional cases where parl */
/* is zero or the number of iterations has reached 10. */
if (ei_abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
break;
/* compute the newton correction. */
for (j = 0; j < n; ++j) {
l = ipvt[j];
wa1[j] = diag[l] * (wa2[l] / dxnorm);
@@ -145,23 +135,19 @@ void ei_lmpar(
parc = fp / delta / temp / temp;
/* depending on the sign of the function, update parl or paru. */
if (fp > 0.)
parl = std::max(parl,par);
if (fp < 0.)
paru = std::min(paru,par);
/* compute an improved estimate for par. */
/* Computing MAX */
par = std::max(parl,par+parc);
/* end of an iteration. */
}
/* termination. */
if (iter == 0)
par = 0.;
return;
@@ -198,7 +184,6 @@ void ei_lmpar2(
/* compute and store in x the gauss-newton direction. if the */
/* jacobian is rank-deficient, obtain a least squares solution. */
// const int rank = qr.nonzeroPivots(); // exactly double(0.)
const int rank = qr.rank(); // use a threshold
wa1 = qtb; wa1.segment(rank,n-rank).setZero();
@@ -209,7 +194,6 @@ void ei_lmpar2(
/* initialize the iteration counter. */
/* evaluate the function at the origin, and test */
/* for acceptance of the gauss-newton direction. */
iter = 0;
wa2 = diag.cwiseProduct(x);
dxnorm = wa2.blueNorm();
@@ -222,7 +206,6 @@ void ei_lmpar2(
/* if the jacobian is not rank deficient, the newton */
/* step provides a lower bound, parl, for the zero of */
/* the function. otherwise set this bound to zero. */
parl = 0.;
if (rank==n) {
wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm;
@@ -232,7 +215,6 @@ void ei_lmpar2(
}
/* calculate an upper bound, paru, for the zero of the function. */
for (j = 0; j < n; ++j)
wa1[j] = qr.matrixQR().col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)];
@@ -243,23 +225,19 @@ void ei_lmpar2(
/* if the input par lies outside of the interval (parl,paru), */
/* set par to the closer endpoint. */
par = std::max(par,parl);
par = std::min(par,paru);
if (par == 0.)
par = gnorm / dxnorm;
/* beginning of an iteration. */
Matrix< Scalar, Dynamic, Dynamic > s = qr.matrixQR();
while (true) {
++iter;
/* evaluate the function at the current value of par. */
if (par == 0.)
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
wa1 = ei_sqrt(par)* diag;
Matrix< Scalar, Dynamic, 1 > sdiag(n);
@@ -273,12 +251,10 @@ void ei_lmpar2(
/* if the function is small enough, accept the current value */
/* of par. also test for the exceptional cases where parl */
/* is zero or the number of iterations has reached 10. */
if (ei_abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
break;
/* compute the newton correction. */
wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm);
for (j = 0; j < n; ++j) {
wa1[j] /= sdiag[j];
@@ -290,7 +266,6 @@ void ei_lmpar2(
parc = fp / delta / temp / temp;
/* depending on the sign of the function, update parl or paru. */
if (fp > 0.)
parl = std::max(parl,par);
if (fp < 0.)