more eigenization, dropped 'ipvt' in lm
commit 69f11c08a1
parent 8a690299c6
@@ -125,7 +125,7 @@ public:
     Parameters parameters;
     FVectorType fvec, qtf, diag;
     JacobianType fjac;
-    VectorXi ipvt;
+    PermutationMatrix<Dynamic,Dynamic> permutation;
     int nfev;
     int njev;
     int iter;
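Background note (not part of the patch): a raw pivot-index vector like the old ipvt member and Eigen's PermutationMatrix carry the same information, but the latter can be applied, inverted and composed directly, and indices() recovers the raw vector when needed. A minimal standalone sketch of that correspondence, with made-up sizes and values:

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
    // The old 'ipvt' style: a bare vector of pivot indices.
    VectorXi ipvt(3);
    ipvt << 2, 0, 1;

    // The same permutation as a first-class object; indices() gives ipvt back.
    PermutationMatrix<Dynamic,Dynamic> perm(ipvt);

    MatrixXd A = MatrixXd::Random(3, 3);
    MatrixXd B = A * perm;            // apply the column permutation
    MatrixXd C = B * perm.inverse();  // undo it again

    std::cout << (C - A).norm() << "\n";              // ~0
    std::cout << perm.indices().transpose() << "\n";  // 2 0 1
}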
@@ -195,7 +195,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
     wa1.resize(n); wa2.resize(n); wa3.resize(n);
     wa4.resize(m);
     fvec.resize(m);
-    ipvt.resize(n);
     fjac.resize(m, n);
     if (mode != 2)
         diag.resize(n);
@@ -236,7 +235,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
         const int mode
         )
 {
-    int i, j, l;
+    int j;

     /* calculate the jacobian matrix. */
     int df_ret = functor.df(x, fjac);
@@ -251,21 +250,14 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
     wa2 = fjac.colwise().blueNorm();
     ColPivHouseholderQR<JacobianType> qrfac(fjac);
     fjac = qrfac.matrixQR();
-    wa1 = fjac.diagonal();
-    fjac.diagonal() = qrfac.hCoeffs();
-    ipvt = qrfac.colsPermutation().indices();
-    // TODO : avoid this:
-    for(int i=0; i< fjac.cols(); i++) fjac.col(i).segment(i+1, fjac.rows()-i-1) *= fjac(i,i); // rescale vectors
+    permutation = qrfac.colsPermutation();

     /* on the first iteration and if mode is 1, scale according */
     /* to the norms of the columns of the initial jacobian. */
     if (iter == 1) {
         if (mode != 2)
-            for (j = 0; j < n; ++j) {
-                diag[j] = wa2[j];
-                if (wa2[j] == 0.)
-                    diag[j] = 1.;
-            }
+            for (j = 0; j < n; ++j)
+                diag[j] = (wa2[j]==0.)? 1. : wa2[j];

         /* on the first iteration, calculate the norm of the scaled x */
         /* and initialize the step bound delta. */
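For context (illustration only, not part of the patch): this step factors the Jacobian with ColPivHouseholderQR and then works on the packed matrixQR() storage plus the extracted permutation. The sketch below only shows what those accessors return and the J*P = Q*R relation they satisfy; the sizes and variable names are made up for the example:

#include <Eigen/QR>
#include <iostream>
using namespace Eigen;

int main()
{
    const int m = 15, n = 3;              // tall Jacobian-like matrix, m >> n
    MatrixXd J = MatrixXd::Random(m, n);

    ColPivHouseholderQR<MatrixXd> qr(J);

    // Compact storage: R sits in the upper triangle of matrixQR(), the essential
    // parts of the Householder vectors sit below it, and hCoeffs() holds the
    // scalar coefficients of the reflectors.
    MatrixXd R = qr.matrixQR().topRows(n).triangularView<Upper>();
    VectorXd h = qr.hCoeffs();

    // The column permutation chosen by the pivoting, i.e. what ipvt used to encode.
    PermutationMatrix<Dynamic,Dynamic> P = qr.colsPermutation();

    // Sanity check of the factorization: J * P == Q * R (thin form).
    MatrixXd Q  = qr.householderQ();
    MatrixXd JP = J * P;
    std::cout << (JP - Q.leftCols(n) * R).norm() << std::endl;  // ~0
}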
@@ -278,48 +270,23 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(

     /* form (q transpose)*fvec and store the first n components in */
     /* qtf. */
-#if 0
-    // find a way to only compute the first n items, we have m>>n here.
     wa4 = fvec;
     wa4.applyOnTheLeft(qrfac.householderQ().adjoint());
-    wa4 = wa4.head(n);
-    fjac.diagonal() = wa1;
-#else
-    wa4 = fvec;
-    for (j = 0; j < n; ++j) {
-        if (fjac(j,j) != 0.) {
-            sum = 0.;
-            for (i = j; i < m; ++i)
-                sum += fjac(i,j) * wa4[i];
-            temp = -sum / fjac(j,j);
-            for (i = j; i < m; ++i)
-                wa4[i] += fjac(i,j) * temp;
-        }
-        fjac(j,j) = wa1[j];
-        qtf[j] = wa4[j];
-    }
-#endif
+    qtf = wa4.head(n);

     /* compute the norm of the scaled gradient. */
     gnorm = 0.;
     if (fnorm != 0.)
-        for (j = 0; j < n; ++j) {
-            l = ipvt[j];
-            if (wa2[l] != 0.) {
-                sum = 0.;
-                for (i = 0; i <= j; ++i)
-                    sum += fjac(i,j) * (qtf[i] / fnorm);
-                /* Computing MAX */
-                gnorm = std::max(gnorm, ei_abs(sum / wa2[l]));
-            }
-        }
+        for (j = 0; j < n; ++j)
+            if (wa2[permutation.indices()[j]] != 0.)
+                gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));

     /* test for convergence of the gradient norm. */
     if (gnorm <= parameters.gtol)
         return CosinusTooSmall;

     /* rescale if necessary. */
-    if (mode != 2) /* Computing MAX */
+    if (mode != 2)
         diag = diag.cwiseMax(wa2);

     /* beginning of the inner loop. */
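Side note (illustration only, not part of the patch): the new qtf computation chains the Householder reflectors onto the residual vector instead of running the explicit MINPACK loop, so the dense m-by-m factor Q is never formed. A small self-contained sketch of that idiom, with made-up sizes:

#include <Eigen/QR>
#include <iostream>
using namespace Eigen;

int main()
{
    const int m = 15, n = 3;
    MatrixXd J    = MatrixXd::Random(m, n);
    VectorXd fvec = VectorXd::Random(m);

    ColPivHouseholderQR<MatrixXd> qr(J);

    // Apply Q^T to the residual in place by chaining the Householder reflectors.
    VectorXd wa4 = fvec;
    wa4.applyOnTheLeft(qr.householderQ().adjoint());

    // Only the first n entries matter for the least-squares subproblem.
    VectorXd qtf = wa4.head(n);

    // Reference value computed with an explicit dense Q, for comparison only.
    MatrixXd Q   = qr.householderQ();
    VectorXd ref = Q.adjoint() * fvec;
    std::cout << (qtf - ref.head(n)).norm() << std::endl;  // ~0
}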
@@ -346,21 +313,14 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(

         /* compute the scaled actual reduction. */
         actred = -1.;
-        if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
+        if (Scalar(.1) * fnorm1 < fnorm)
             actred = 1. - ei_abs2(fnorm1 / fnorm);

         /* compute the scaled predicted reduction and */
         /* the scaled directional derivative. */
-        wa3.fill(0.);
-        for (j = 0; j < n; ++j) {
-            l = ipvt[j];
-            temp = wa1[l];
-            for (i = 0; i <= j; ++i)
-                wa3[i] += fjac(i,j) * temp;
-        }
+        wa3 = fjac.template triangularView<Upper>() * (qrfac.colsPermutation().inverse() *wa1);
         temp1 = ei_abs2(wa3.stableNorm() / fnorm);
         temp2 = ei_abs2(ei_sqrt(par) * pnorm / fnorm);
-        /* Computing 2nd power */
         prered = temp1 + temp2 / Scalar(.5);
         dirder = -(temp1 + temp2);
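Aside (not from the patch): the one-line replacement relies on two Eigen idioms, applying the inverse permutation to a vector and multiplying by a triangular view so the zero half is skipped. A minimal sketch on a small square R, with a hand-made permutation, checked against the loop it stands in for:

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
    const int n = 4;
    MatrixXd A = MatrixXd::Random(n, n);
    MatrixXd R = A.triangularView<Upper>();  // keep the upper triangle, zero the rest

    VectorXi ind(n);
    ind << 2, 0, 3, 1;                       // an arbitrary pivot vector (plays the role of ipvt)
    PermutationMatrix<Dynamic,Dynamic> P(ind);

    VectorXd x = VectorXd::Random(n);

    // Expression form: undo the permutation on x, then multiply by the upper triangle.
    VectorXd xp = P.inverse() * x;                // xp[j] = x[ind[j]]
    VectorXd y  = R.triangularView<Upper>() * xp;

    // The MINPACK-style loop it replaces: y[i] = sum over j >= i of R(i,j) * x[ipvt[j]]
    VectorXd yRef = VectorXd::Zero(n);
    for (int j = 0; j < n; ++j)
        for (int i = 0; i <= j; ++i)
            yRef[i] += R(i, j) * x[P.indices()[j]];

    std::cout << (y - yRef).norm() << std::endl;  // ~0
}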
@@ -455,7 +415,6 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
     wa1.resize(n); wa2.resize(n); wa3.resize(n);
     wa4.resize(m);
     fvec.resize(m);
-    ipvt.resize(n);
     fjac.resize(m, n);
     if (mode != 2)
         diag.resize(n);
@@ -497,7 +456,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
         const int mode
         )
 {
-    int i, j, l;
+    int i, j;
     bool sing;

     /* compute the qr factorization of the jacobian matrix */
@@ -519,20 +478,20 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
     /* reorder its columns and update the components of qtf. */
     sing = false;
     for (j = 0; j < n; ++j) {
-        if (fjac(j,j) == 0.) {
+        if (fjac(j,j) == 0.)
             sing = true;
-        }
-        ipvt[j] = j;
         wa2[j] = fjac.col(j).head(j).stableNorm();
     }
+    permutation.setIdentity(n);
     if (sing) {
         wa2 = fjac.colwise().blueNorm();
-        // TODO We have no unit test covering this branch.. untested
+        // TODO We have no unit test covering this code path, do not modify
+        // before it is carefully tested
         ColPivHouseholderQR<JacobianType> qrfac(fjac);
         fjac = qrfac.matrixQR();
         wa1 = fjac.diagonal();
         fjac.diagonal() = qrfac.hCoeffs();
-        ipvt = qrfac.colsPermutation().indices();
+        permutation = qrfac.colsPermutation();
         // TODO : avoid this:
         for(int ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
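Note for readers (not part of the patch): permutation.setIdentity(n) is the object-level counterpart of the removed ipvt[j] = j loop. A tiny sketch of what it produces, with a made-up size:

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
    PermutationMatrix<Dynamic,Dynamic> perm;
    perm.setIdentity(5);                        // same effect as ipvt[j] = j for all j

    std::cout << perm.indices().transpose() << std::endl;  // 0 1 2 3 4

    VectorXd v = VectorXd::Random(5);
    VectorXd w = perm * v;                      // the identity permutation moves nothing
    std::cout << (w - v).norm() << std::endl;   // 0
}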
@@ -553,11 +512,8 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
     /* to the norms of the columns of the initial jacobian. */
     if (iter == 1) {
         if (mode != 2)
-            for (j = 0; j < n; ++j) {
-                diag[j] = wa2[j];
-                if (wa2[j] == 0.)
-                    diag[j] = 1.;
-            }
+            for (j = 0; j < n; ++j)
+                diag[j] = (wa2[j]==0.)? 1. : wa2[j];

         /* on the first iteration, calculate the norm of the scaled x */
         /* and initialize the step bound delta. */
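Remark (illustration only): the collapsed loop keeps every nonzero column norm as a scale factor and substitutes 1 for exactly-zero norms. The same rule can also be written branch-free with Eigen's select; the patch keeps the loop, the sketch below is just an equivalent formulation with made-up data:

#include <Eigen/Core>
#include <iostream>
using namespace Eigen;

int main()
{
    // Column norms of the Jacobian; a zero norm must not become a zero scale.
    VectorXd wa2(4);
    wa2 << 3.0, 0.0, 1.5, 0.0;

    // Loop form, as in the patch.
    VectorXd diag(4);
    for (int j = 0; j < 4; ++j)
        diag[j] = (wa2[j] == 0.) ? 1. : wa2[j];

    // Equivalent vectorized form: pick 1 where the norm is zero, the norm otherwise.
    VectorXd diag2 = (wa2.array() == 0.).select(VectorXd::Ones(4), wa2);

    std::cout << (diag - diag2).norm() << std::endl;  // 0
}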
@@ -571,30 +527,23 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
     /* compute the norm of the scaled gradient. */
     gnorm = 0.;
     if (fnorm != 0.)
-        for (j = 0; j < n; ++j) {
-            l = ipvt[j];
-            if (wa2[l] != 0.) {
-                sum = 0.;
-                for (i = 0; i <= j; ++i)
-                    sum += fjac(i,j) * (qtf[i] / fnorm);
-                /* Computing MAX */
-                gnorm = std::max(gnorm, ei_abs(sum / wa2[l]));
-            }
-        }
+        for (j = 0; j < n; ++j)
+            if (wa2[permutation.indices()[j]] != 0.)
+                gnorm = std::max(gnorm, ei_abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));

     /* test for convergence of the gradient norm. */
     if (gnorm <= parameters.gtol)
         return CosinusTooSmall;

     /* rescale if necessary. */
-    if (mode != 2) /* Computing MAX */
+    if (mode != 2)
         diag = diag.cwiseMax(wa2);

     /* beginning of the inner loop. */
     do {

         /* determine the levenberg-marquardt parameter. */
-        ei_lmpar<Scalar>(fjac, ipvt, diag, qtf, delta, par, wa1);
+        ei_lmpar<Scalar>(fjac, permutation.indices(), diag, qtf, delta, par, wa1);

         /* store the direction p and x + p. calculate the norm of p. */
         wa1 = -wa1;
@@ -614,21 +563,14 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(

         /* compute the scaled actual reduction. */
         actred = -1.;
-        if (Scalar(.1) * fnorm1 < fnorm) /* Computing 2nd power */
+        if (Scalar(.1) * fnorm1 < fnorm)
             actred = 1. - ei_abs2(fnorm1 / fnorm);

         /* compute the scaled predicted reduction and */
         /* the scaled directional derivative. */
-        wa3.fill(0.);
-        for (j = 0; j < n; ++j) {
-            l = ipvt[j];
-            temp = wa1[l];
-            for (i = 0; i <= j; ++i)
-                wa3[i] += fjac(i,j) * temp;
-        }
+        wa3 = fjac.corner(TopLeft,n,n).template triangularView<Upper>() * (permutation.inverse() * wa1);
         temp1 = ei_abs2(wa3.stableNorm() / fnorm);
         temp2 = ei_abs2(ei_sqrt(par) * pnorm / fnorm);
-        /* Computing 2nd power */
         prered = temp1 + temp2 / Scalar(.5);
         dirder = -(temp1 + temp2);
@@ -178,7 +178,6 @@ void ei_lmpar2(
     const int n = qr.matrixQR().cols();
     assert(n==diag.size());
     assert(n==qtb.size());
-    assert(n==x.size());

     Matrix< Scalar, Dynamic, 1 > wa1, wa2;
@@ -216,7 +216,7 @@ void testLmder()

  // check covariance
  covfac = fnorm*fnorm/(m-n);
- ei_covar(lm.fjac, lm.ipvt); // TODO : move this as a function of lm
+ ei_covar(lm.fjac, lm.permutation.indices()); // TODO : move this as a function of lm

  MatrixXd cov_ref(n,n);
  cov_ref <<
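Note (not part of the patch): lm.permutation.indices() hands back the same integer vector the removed ipvt member used to store, which is why index-based helpers such as ei_covar can be called unchanged. A sketch of that bridge, using a hypothetical stand-in for such a helper (the name legacy_pivot_helper is made up for the example):

#include <Eigen/QR>
#include <iostream>
using namespace Eigen;

// Hypothetical stand-in for a MINPACK-style routine that still expects a raw
// pivot-index vector rather than a PermutationMatrix.
template <typename MatrixType, typename IndexVector>
void legacy_pivot_helper(const MatrixType& r, const IndexVector& ipvt)
{
    std::cout << "rows: " << r.rows() << ", first pivot: " << ipvt[0] << std::endl;
}

int main()
{
    MatrixXd J = MatrixXd::Random(10, 4);
    ColPivHouseholderQR<MatrixXd> qr(J);

    PermutationMatrix<Dynamic,Dynamic> permutation = qr.colsPermutation();

    // indices() exposes the underlying integer vector, so index-based code keeps working.
    legacy_pivot_helper(qr.matrixQR(), permutation.indices());
}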
@@ -605,7 +605,7 @@ void testLmdif()

  // check covariance
  covfac = fnorm*fnorm/(m-n);
- ei_covar(lm.fjac, lm.ipvt);
+ ei_covar(lm.fjac, lm.permutation.indices()); // TODO : move this as a function of lm

  MatrixXd cov_ref(n,n);
  cov_ref <<
@@ -1010,7 +1010,7 @@ void testNistLanczos1(void)
  VERIFY( 79 == lm.nfev);
  VERIFY( 72 == lm.njev);
  // check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.429961002287e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.430899764097e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
  // check x
  VERIFY_IS_APPROX(x[0], 9.5100000027E-02 );
  VERIFY_IS_APPROX(x[1], 1.0000000001E+00 );
@@ -1031,7 +1031,7 @@ void testNistLanczos1(void)
  VERIFY( 9 == lm.nfev);
  VERIFY( 8 == lm.njev);
  // check norm^2
- VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.43059335827267E-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.428595533845e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
  // check x
  VERIFY_IS_APPROX(x[0], 9.5100000027E-02 );
  VERIFY_IS_APPROX(x[1], 1.0000000001E+00 );
@@ -1171,8 +1171,8 @@ void testNistMGH10(void)

  // check return value
  VERIFY( 2 == info);
- VERIFY( 281 == lm.nfev);
- VERIFY( 248 == lm.njev);
+ VERIFY( 284 == lm.nfev);
+ VERIFY( 249 == lm.njev);
  // check norm^2
  VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01);
  // check x
@@ -1188,7 +1188,7 @@ void testNistMGH10(void)
  info = lm.minimize(x);

  // check return value
- VERIFY( 2 == info);
+ VERIFY( 3 == info);
  VERIFY( 126 == lm.nfev);
  VERIFY( 116 == lm.njev);
  // check norm^2
@@ -1270,7 +1270,7 @@ void testNistBoxBOD(void)

  // check return value
  VERIFY( 1 == info);
- VERIFY( 17 == lm.nfev);
+ VERIFY( 15 == lm.nfev);
  VERIFY( 14 == lm.njev);
  // check norm^2
  VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
@@ -1332,8 +1332,8 @@ void testNistMGH17(void)

  // check return value
  VERIFY( 2 == info);
- VERIFY( 605 == lm.nfev);
- VERIFY( 544 == lm.njev);
+ VERIFY( 602 == lm.nfev);
+ VERIFY( 545 == lm.njev);
  // check norm^2
  VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
  // check x
@@ -1419,15 +1419,15 @@ void testNistMGH09(void)

  // check return value
  VERIFY( 1 == info);
- VERIFY( 486 == lm.nfev);
- VERIFY( 377 == lm.njev);
+ VERIFY( 490 == lm.nfev);
+ VERIFY( 376 == lm.njev);
  // check norm^2
  VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04);
  // check x
  VERIFY_IS_APPROX(x[0], 0.1928077089); // should be 1.9280693458E-01
- VERIFY_IS_APPROX(x[1], 0.1912649346); // should be 1.9128232873E-01
- VERIFY_IS_APPROX(x[2], 0.1230532308); // should be 1.2305650693E-01
- VERIFY_IS_APPROX(x[3], 0.1360542773); // should be 1.3606233068E-01
+ VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01
+ VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01
+ VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01

  /*
   * Second try
@@ -1845,7 +1845,6 @@ void test_NonLinearOptimization()

  printf("info, nfev, njev : %d, %d, %d\n", info, lm.nfev, lm.njev);
  printf("fvec.squaredNorm() : %.13g\n", lm.fvec.squaredNorm());
- printf("fvec.squaredNorm() : %.32g\n", lm.fvec.squaredNorm());
  std::cout << x << std::endl;
  std::cout.precision(9);
  std::cout << x[0] << std::endl;