split every algorithm into *Init() + while(running) { *OneStep() }

Thomas Capricelli 2009-08-25 23:43:33 +02:00
parent bbd44ef0ad
commit eac9293449
3 changed files with 1149 additions and 984 deletions
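The change applies one pattern to every algorithm in the module: the allocation and setup code of the monolithic solve() moves into a new solveInit(), the body of its former outer while loop becomes solveOneStep(), and solve() itself shrinks to a driver that keeps stepping while the returned status is Running. A minimal, self-contained sketch of that shape (the Counter class below is purely illustrative and not part of this commit):

#include <iostream>

// Illustrative stand-in for one of the solvers: only the Init/OneStep/driver
// control flow mirrors the commit; none of these names come from the diff.
struct Counter {
    enum Status { Running, Done };
    int i, limit;

    Status solveInit(int n)  { i = 0; limit = n; return Running; }    // setup / allocation
    Status solveOneStep()    { return ++i < limit ? Running : Done; } // one former loop body
    Status solve(int n) {                                             // unchanged public entry point
        Status status = solveInit(n);
        while (status == Running)
            status = solveOneStep();
        return status;                                                // termination reason
    }
};

int main() {
    Counter c;
    std::cout << (c.solve(5) == Counter::Done) << std::endl;          // prints 1
}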


@ -37,6 +37,17 @@ public:
Matrix< Scalar, Dynamic, 1 > &x,
const Scalar tol = ei_sqrt(epsilon<Scalar>())
);
Status solveInit(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode=1
);
Status solveOneStep(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode=1
);
Status solve(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
@ -47,6 +58,17 @@ public:
Matrix< Scalar, Dynamic, 1 > &x,
const Scalar tol = ei_sqrt(epsilon<Scalar>())
);
Status solveNumericalDiffInit(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode=1
);
Status solveNumericalDiffOneStep(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode=1
);
Status solveNumericalDiff(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
@ -107,11 +129,9 @@ HybridNonLinearSolver<FunctorType,Scalar>::solve(
);
}
template<typename FunctorType, typename Scalar>
typename HybridNonLinearSolver<FunctorType,Scalar>::Status
HybridNonLinearSolver<FunctorType,Scalar>::solve(
HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode
@ -124,7 +144,6 @@ HybridNonLinearSolver<FunctorType,Scalar>::solve(
qtf.resize(n);
R.resize( (n*(n+1))/2);
fjac.resize(n, n);
fvec.resize(n);
if (mode != 2)
diag.resize(n);
assert( (mode!=2 || diag.size()==n) || "When using mode==2, the caller must provide a valid 'diag'");
@ -158,220 +177,243 @@ HybridNonLinearSolver<FunctorType,Scalar>::solve(
nslow1 = 0;
nslow2 = 0;
/* beginning of the outer loop. */
return Running;
}
template<typename FunctorType, typename Scalar>
typename HybridNonLinearSolver<FunctorType,Scalar>::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode
)
{
int i, j, l, iwa[1];
jeval = true;
/* calculate the jacobian matrix. */
if ( functor.df(x, fjac) < 0)
return UserAksed;
++njev;
/* compute the qr factorization of the jacobian. */
ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, 1, wa1.data(), wa2.data());
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
for (j = 0; j < n; ++j) {
diag[j] = wa2[j];
if (wa2[j] == 0.)
diag[j] = 1.;
}
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
wa3 = diag.cwise() * x;
xnorm = wa3.stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* form (q transpose)*fvec and store in qtf. */
qtf = fvec;
for (j = 0; j < n; ++j)
if (fjac(j,j) != 0.) {
sum = 0.;
for (i = j; i < n; ++i)
sum += fjac(i,j) * qtf[i];
temp = -sum / fjac(j,j);
for (i = j; i < n; ++i)
qtf[i] += fjac(i,j) * temp;
}
/* copy the triangular factor of the qr factorization into r. */
sing = false;
for (j = 0; j < n; ++j) {
l = j;
if (j)
for (i = 0; i < j; ++i) {
R[l] = fjac(i,j);
l = l + n - i -1;
}
R[l] = wa1[j];
if (wa1[j] == 0.)
sing = true;
}
/* accumulate the orthogonal factor in fjac. */
ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
/* rescale if necessary. */
/* Computing MAX */
if (mode != 2)
diag = diag.cwise().max(wa2);
/* beginning of the inner loop. */
while (true) {
int i, j, l, iwa[1];
jeval = true;
/* calculate the jacobian matrix. */
/* determine the direction p. */
if ( functor.df(x, fjac) < 0)
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
wa1 = -wa1;
wa2 = x + wa1;
wa3 = diag.cwise() * wa1;
pnorm = wa3.stableNorm();
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor.f(wa2, wa4) < 0)
return UserAksed;
++njev;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the qr factorization of the jacobian. */
/* compute the scaled actual reduction. */
ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, 1, wa1.data(), wa2.data());
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
/* compute the scaled predicted reduction. */
if (iter == 1) {
if (mode != 2)
for (j = 0; j < n; ++j) {
diag[j] = wa2[j];
if (wa2[j] == 0.)
diag[j] = 1.;
}
l = 0;
for (i = 0; i < n; ++i) {
sum = 0.;
for (j = i; j < n; ++j) {
sum += R[l] * wa1[j];
++l;
}
wa3[i] = qtf[i] + sum;
}
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
/* compute the ratio of the actual to the predicted */
/* reduction. */
wa3 = diag.cwise() * x;
xnorm = wa3.stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
delta = Scalar(.5) * delta;
} else {
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
}
/* form (q transpose)*fvec and store in qtf. */
/* test for successful iteration. */
qtf = fvec;
for (j = 0; j < n; ++j)
if (fjac(j,j) != 0.) {
sum = 0.;
for (i = j; i < n; ++i)
sum += fjac(i,j) * qtf[i];
temp = -sum / fjac(j,j);
for (i = j; i < n; ++i)
qtf[i] += fjac(i,j) * temp;
}
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
wa2 = diag.cwise() * x;
fvec = wa4;
xnorm = wa2.stableNorm();
fnorm = fnorm1;
++iter;
}
/* copy the triangular factor of the qr factorization into r. */
/* determine the progress of the iteration. */
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
if (jeval)
++nslow2;
if (actred >= Scalar(.1))
nslow2 = 0;
/* test for convergence. */
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
return RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return TooManyFunctionEvaluation;
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
return TolTooSmall;
if (nslow2 == 5)
return NotMakingProgressJacobian;
if (nslow1 == 10)
return NotMakingProgressIterations;
/* criterion for recalculating jacobian. */
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
sing = false;
for (j = 0; j < n; ++j) {
l = j;
if (j)
for (i = 0; i < j; ++i) {
R[l] = fjac(i,j);
l = l + n - i -1;
}
R[l] = wa1[j];
if (wa1[j] == 0.)
sing = true;
sum = wa4.dot(fjac.col(j));
wa2[j] = (sum - wa3[j]) / pnorm;
wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
if (ratio >= Scalar(1e-4))
qtf[j] = sum;
}
/* accumulate the orthogonal factor in fjac. */
/* compute the qr factorization of the updated jacobian. */
ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
/* rescale if necessary. */
/* end of the inner loop. */
/* Computing MAX */
if (mode != 2)
diag = diag.cwise().max(wa2);
/* beginning of the inner loop. */
while (true) {
/* determine the direction p. */
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
wa1 = -wa1;
wa2 = x + wa1;
wa3 = diag.cwise() * wa1;
pnorm = wa3.stableNorm();
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor.f(wa2, wa4) < 0)
return UserAksed;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
l = 0;
for (i = 0; i < n; ++i) {
sum = 0.;
for (j = i; j < n; ++j) {
sum += R[l] * wa1[j];
++l;
}
wa3[i] = qtf[i] + sum;
}
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
/* compute the ratio of the actual to the predicted */
/* reduction. */
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
delta = Scalar(.5) * delta;
} else {
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
}
/* test for successful iteration. */
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
wa2 = diag.cwise() * x;
fvec = wa4;
xnorm = wa2.stableNorm();
fnorm = fnorm1;
++iter;
}
/* determine the progress of the iteration. */
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
if (jeval)
++nslow2;
if (actred >= Scalar(.1))
nslow2 = 0;
/* test for convergence. */
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
return RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return TooManyFunctionEvaluation;
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
return TolTooSmall;
if (nslow2 == 5)
return NotMakingProgressJacobian;
if (nslow1 == 10)
return NotMakingProgressIterations;
/* criterion for recalculating jacobian. */
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
for (j = 0; j < n; ++j) {
sum = wa4.dot(fjac.col(j));
wa2[j] = (sum - wa3[j]) / pnorm;
wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
if (ratio >= Scalar(1e-4))
qtf[j] = sum;
}
/* compute the qr factorization of the updated jacobian. */
ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
/* end of the inner loop. */
jeval = false;
}
/* end of the outer loop. */
jeval = false;
}
assert(false); // should never be reached
/* end of the outer loop. */
return Running;
}
template<typename FunctorType, typename Scalar>
typename HybridNonLinearSolver<FunctorType,Scalar>::Status
HybridNonLinearSolver<FunctorType,Scalar>::solve(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode
)
{
Status status = solveInit(x, parameters, mode);
while (status==Running)
status = solveOneStep(x, parameters, mode);
return status;
}
@ -403,10 +445,9 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
);
}
template<typename FunctorType, typename Scalar>
typename HybridNonLinearSolver<FunctorType,Scalar>::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode
@ -428,6 +469,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
diag.resize(n);
assert( (mode!=2 || diag.size()==n) || "When using mode==2, the caller must provide a valid 'diag'");
/* Function Body */
nfev = 0;
@ -457,220 +499,246 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
nslow1 = 0;
nslow2 = 0;
/* beginning of the outer loop. */
while (true) {
int i, j, l, iwa[1];
jeval = true;
/* calculate the jacobian matrix. */
if (ei_fdjac1(functor, x, fvec, fjac, nsub, nsup, parameters.epsfcn) <0)
return UserAksed;
nfev += std::min(nsub+ nsup+ 1, n);
/* compute the qr factorization of the jacobian. */
ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, 1, wa1.data(), wa2.data());
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
for (j = 0; j < n; ++j) {
diag[j] = wa2[j];
if (wa2[j] == 0.)
diag[j] = 1.;
}
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
wa3 = diag.cwise() * x;
xnorm = wa3.stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* form (q transpose)*fvec and store in qtf. */
qtf = fvec;
for (j = 0; j < n; ++j)
if (fjac(j,j) != 0.) {
sum = 0.;
for (i = j; i < n; ++i)
sum += fjac(i,j) * qtf[i];
temp = -sum / fjac(j,j);
for (i = j; i < n; ++i)
qtf[i] += fjac(i,j) * temp;
}
/* copy the triangular factor of the qr factorization into r. */
sing = false;
for (j = 0; j < n; ++j) {
l = j;
if (j)
for (i = 0; i < j; ++i) {
R[l] = fjac(i,j);
l = l + n - i -1;
}
R[l] = wa1[j];
if (wa1[j] == 0.)
sing = true;
}
/* accumulate the orthogonal factor in fjac. */
ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
/* rescale if necessary. */
/* Computing MAX */
if (mode != 2)
diag = diag.cwise().max(wa2);
/* beginning of the inner loop. */
while (true) {
/* determine the direction p. */
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
wa1 = -wa1;
wa2 = x + wa1;
wa3 = diag.cwise() * wa1;
pnorm = wa3.stableNorm();
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor.f(wa2, wa4) < 0)
return UserAksed;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
l = 0;
for (i = 0; i < n; ++i) {
sum = 0.;
for (j = i; j < n; ++j) {
sum += R[l] * wa1[j];
++l;
}
wa3[i] = qtf[i] + sum;
}
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
/* compute the ratio of the actual to the predicted */
/* reduction. */
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
delta = Scalar(.5) * delta;
} else {
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
}
/* test for successful iteration. */
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
wa2 = diag.cwise() * x;
fvec = wa4;
xnorm = wa2.stableNorm();
fnorm = fnorm1;
++iter;
}
/* determine the progress of the iteration. */
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
if (jeval)
++nslow2;
if (actred >= Scalar(.1))
nslow2 = 0;
/* test for convergence. */
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
return RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return TooManyFunctionEvaluation;
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
return TolTooSmall;
if (nslow2 == 5)
return NotMakingProgressJacobian;
if (nslow1 == 10)
return NotMakingProgressIterations;
/* criterion for recalculating jacobian approximation */
/* by forward differences. */
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
for (j = 0; j < n; ++j) {
sum = wa4.dot(fjac.col(j));
wa2[j] = (sum - wa3[j]) / pnorm;
wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
if (ratio >= Scalar(1e-4))
qtf[j] = sum;
}
/* compute the qr factorization of the updated jacobian. */
ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
/* end of the inner loop. */
jeval = false;
}
/* end of the outer loop. */
}
assert(false); // should never be reached
return Running;
}
template<typename FunctorType, typename Scalar>
typename HybridNonLinearSolver<FunctorType,Scalar>::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode
)
{
int i, j, l, iwa[1];
jeval = true;
int nsub = parameters.nb_of_subdiagonals;
int nsup = parameters.nb_of_superdiagonals;
if (nsub<0) nsub= n-1;
if (nsup<0) nsup= n-1;
/* calculate the jacobian matrix. */
if (ei_fdjac1(functor, x, fvec, fjac, nsub, nsup, parameters.epsfcn) <0)
return UserAksed;
nfev += std::min(nsub+ nsup+ 1, n);
/* compute the qr factorization of the jacobian. */
ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), false, iwa, 1, wa1.data(), wa2.data());
/* on the first iteration and if mode is 1, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
for (j = 0; j < n; ++j) {
diag[j] = wa2[j];
if (wa2[j] == 0.)
diag[j] = 1.;
}
/* on the first iteration, calculate the norm of the scaled x */
/* and initialize the step bound delta. */
wa3 = diag.cwise() * x;
xnorm = wa3.stableNorm();
delta = parameters.factor * xnorm;
if (delta == 0.)
delta = parameters.factor;
}
/* form (q transpose)*fvec and store in qtf. */
qtf = fvec;
for (j = 0; j < n; ++j)
if (fjac(j,j) != 0.) {
sum = 0.;
for (i = j; i < n; ++i)
sum += fjac(i,j) * qtf[i];
temp = -sum / fjac(j,j);
for (i = j; i < n; ++i)
qtf[i] += fjac(i,j) * temp;
}
/* copy the triangular factor of the qr factorization into r. */
sing = false;
for (j = 0; j < n; ++j) {
l = j;
if (j)
for (i = 0; i < j; ++i) {
R[l] = fjac(i,j);
l = l + n - i -1;
}
R[l] = wa1[j];
if (wa1[j] == 0.)
sing = true;
}
/* accumulate the orthogonal factor in fjac. */
ei_qform<Scalar>(n, n, fjac.data(), fjac.rows(), wa1.data());
/* rescale if necessary. */
/* Computing MAX */
if (mode != 2)
diag = diag.cwise().max(wa2);
/* beginning of the inner loop. */
while (true) {
/* determine the direction p. */
ei_dogleg<Scalar>(R, diag, qtf, delta, wa1);
/* store the direction p and x + p. calculate the norm of p. */
wa1 = -wa1;
wa2 = x + wa1;
wa3 = diag.cwise() * wa1;
pnorm = wa3.stableNorm();
/* on the first iteration, adjust the initial step bound. */
if (iter == 1)
delta = std::min(delta,pnorm);
/* evaluate the function at x + p and calculate its norm. */
if ( functor.f(wa2, wa4) < 0)
return UserAksed;
++nfev;
fnorm1 = wa4.stableNorm();
/* compute the scaled actual reduction. */
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
actred = 1. - ei_abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
l = 0;
for (i = 0; i < n; ++i) {
sum = 0.;
for (j = i; j < n; ++j) {
sum += R[l] * wa1[j];
++l;
}
wa3[i] = qtf[i] + sum;
}
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
prered = 1. - ei_abs2(temp / fnorm);
/* compute the ratio of the actual to the predicted */
/* reduction. */
ratio = 0.;
if (prered > 0.)
ratio = actred / prered;
/* update the step bound. */
if (ratio < Scalar(.1)) {
ncsuc = 0;
++ncfail;
delta = Scalar(.5) * delta;
} else {
ncfail = 0;
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1) /* Computing MAX */
delta = std::max(delta, pnorm / Scalar(.5));
if (ei_abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
}
/* test for successful iteration. */
if (ratio >= Scalar(1e-4)) {
/* successful iteration. update x, fvec, and their norms. */
x = wa2;
wa2 = diag.cwise() * x;
fvec = wa4;
xnorm = wa2.stableNorm();
fnorm = fnorm1;
++iter;
}
/* determine the progress of the iteration. */
++nslow1;
if (actred >= Scalar(.001))
nslow1 = 0;
if (jeval)
++nslow2;
if (actred >= Scalar(.1))
nslow2 = 0;
/* test for convergence. */
if (delta <= parameters.xtol * xnorm || fnorm == 0.)
return RelativeErrorTooSmall;
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return TooManyFunctionEvaluation;
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= epsilon<Scalar>() * xnorm)
return TolTooSmall;
if (nslow2 == 5)
return NotMakingProgressJacobian;
if (nslow1 == 10)
return NotMakingProgressIterations;
/* criterion for recalculating jacobian approximation */
/* by forward differences. */
if (ncfail == 2)
break; // leave inner loop and go for the next outer loop iteration
/* calculate the rank one modification to the jacobian */
/* and update qtf if necessary. */
for (j = 0; j < n; ++j) {
sum = wa4.dot(fjac.col(j));
wa2[j] = (sum - wa3[j]) / pnorm;
wa1[j] = diag[j] * (diag[j] * wa1[j] / pnorm);
if (ratio >= Scalar(1e-4))
qtf[j] = sum;
}
/* compute the qr factorization of the updated jacobian. */
ei_r1updt<Scalar>(n, n, R.data(), R.size(), wa1.data(), wa2.data(), wa3.data(), &sing);
ei_r1mpyq<Scalar>(n, n, fjac.data(), fjac.rows(), wa2.data(), wa3.data());
ei_r1mpyq<Scalar>(1, n, qtf.data(), 1, wa2.data(), wa3.data());
/* end of the inner loop. */
jeval = false;
}
/* end of the outer loop. */
return Running;
}
template<typename FunctorType, typename Scalar>
typename HybridNonLinearSolver<FunctorType,Scalar>::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
Matrix< Scalar, Dynamic, 1 > &x,
const Parameters &parameters,
const int mode
)
{
Status status = solveNumericalDiffInit(x, parameters, mode);
while (status==Running)
status = solveNumericalDiffOneStep(x, parameters, mode);
return status;
}
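solveNumericalDiff() keeps its old one-call behaviour, but the split also lets a caller drive the iterations and react to the returned Status directly, for example to report progress or to add its own stopping rule. A hedged caller-side sketch, assuming the unsupported NonLinearOptimization header is included and 'using namespace Eigen;' is in effect; the helper name, the default-constructed Parameters and the construction of the solver from a functor are assumptions, not part of the diff:

// Hypothetical helper: FunctorType, its construction and the default
// Parameters are assumptions; only the Init/OneStep/Status names come
// from the commit itself.
template<typename FunctorType>
typename HybridNonLinearSolver<FunctorType,double>::Status
solveStepByStep(FunctorType &functor, Matrix<double,Dynamic,1> &x)
{
    typedef HybridNonLinearSolver<FunctorType,double> SolverType;
    SolverType solver(functor);                  // construction from a functor, as assumed
    typename SolverType::Parameters parameters;  // default-constructed parameters, as assumed

    typename SolverType::Status status = solver.solveNumericalDiffInit(x, parameters);
    while (status == SolverType::Running) {
        status = solver.solveNumericalDiffOneStep(x, parameters);
        // room here for progress reporting or an extra user-defined stopping test
    }
    return status;   // e.g. RelativeErrorTooSmall or TooManyFunctionEvaluation
}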

File diff suppressed because it is too large.


@ -1836,6 +1836,7 @@ void test_NonLinear()
printf("x[1] : %.32g\n", x[1]);
printf("x[2] : %.32g\n", x[2]);
printf("x[3] : %.32g\n", x[3]);
printf("fvec.squaredNorm() : %.32g\n", fvec.squaredNorm());
printf("fvec.blueNorm() : %.32g\n", solver.fvec.blueNorm());
printf("fvec.blueNorm() : %.32g\n", lm.fvec.blueNorm());
*/
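The test change above only switches which object's fvec gets printed, but the same step-by-step drivers make it possible to log this kind of diagnostic after every iteration instead of only once at the end. A short sketch for the analytic-Jacobian variant, assuming a solver, x and parameters set up as in the previous sketch (SolverType standing for a concrete instantiation) and a functor that also provides df(); reading solver.fvec from outside follows the test code above:

// Hypothetical per-iteration logging; everything except the names used by
// the diff (solveInit, solveOneStep, Running, fvec) is assumed.
int step = 0;
SolverType::Status status = solver.solveInit(x, parameters);
while (status == SolverType::Running) {
    status = solver.solveOneStep(x, parameters);
    printf("step %d : fvec.blueNorm() : %.32g\n", ++step, solver.fvec.blueNorm());
}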