Mirror of https://gitlab.com/libeigen/eigen.git, synced 2025-08-12 03:39:01 +08:00
port unsupported modules to new API
This commit is contained in:
parent cab85218db
commit 39209edd71
@@ -207,7 +207,7 @@ MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M, MatrixType
 m_tmp2(M.rows(),M.cols()),
 m_Id(MatrixType::Identity(M.rows(), M.cols())),
 m_squarings(0),
-m_l1norm(static_cast<float>(M.cwise().abs().colwise().sum().maxCoeff()))
+m_l1norm(static_cast<float>(M.cwiseAbs().colwise().sum().maxCoeff()))
 {
 computeUV(RealScalar());
 m_tmp1 = m_U + m_V; // numerator of Pade approximant
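For context, a minimal sketch (not taken from the commit; the matrix M below is illustrative) of how the new cwiseAbs() spelling replaces the old Cwise proxy when computing the maximum absolute column sum used for m_l1norm:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      Eigen::MatrixXd M(2, 2);
      M << 1.0, -4.0,
          -2.0,  3.0;
      // Old API: M.cwise().abs().colwise().sum().maxCoeff()
      // New API: cwiseAbs() returns the coefficient-wise absolute value directly.
      double l1norm = M.cwiseAbs().colwise().sum().maxCoeff();
      std::cout << l1norm << "\n"; // maximum absolute column sum: 7
      return 0;
    }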
@@ -110,7 +110,7 @@ void MatrixFunctionAtomic<MatrixType>::computeMu()
 const MatrixType N = MatrixType::Identity(m_Arows, m_Arows) - m_Ashifted;
 VectorType e = VectorType::Ones(m_Arows);
 N.template triangularView<UpperTriangular>().solveInPlace(e);
-m_mu = e.cwise().abs().maxCoeff();
+m_mu = e.cwiseAbs().maxCoeff();
 }

 /** \brief Determine whether Taylor series has converged */
@@ -119,8 +119,8 @@ bool MatrixFunctionAtomic<MatrixType>::taylorConverged(int s, const MatrixType&
 const MatrixType& Fincr, const MatrixType& P)
 {
 const int n = F.rows();
-const RealScalar F_norm = F.cwise().abs().rowwise().sum().maxCoeff();
-const RealScalar Fincr_norm = Fincr.cwise().abs().rowwise().sum().maxCoeff();
+const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
+const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
 if (Fincr_norm < epsilon<Scalar>() * F_norm) {
 RealScalar delta = 0;
 RealScalar rfactorial = 1;
@@ -132,7 +132,7 @@ bool MatrixFunctionAtomic<MatrixType>::taylorConverged(int s, const MatrixType&
 rfactorial *= r;
 delta = std::max(delta, mx / rfactorial);
 }
-const RealScalar P_norm = P.cwise().abs().rowwise().sum().maxCoeff();
+const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
 if (m_mu * delta * P_norm < epsilon<Scalar>() * F_norm)
 return true;
 }
@@ -241,7 +241,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */
-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -285,7 +285,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

 /* Computing MAX */
 if (mode != 2)
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */

@@ -299,7 +299,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -364,7 +364,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
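A minimal sketch of the two replacements that recur throughout this solver; the vectors diag, x and wa2 below are illustrative stand-ins, not the solver's actual state:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      Eigen::Vector3d diag(1.0, 2.0, 3.0), x(4.0, -5.0, 6.0), wa2(2.5, 1.0, 3.5);
      // Old API: wa3 = diag.cwise() * x;         diag = diag.cwise().max(wa2);
      Eigen::Vector3d wa3 = diag.cwiseProduct(x); // coefficient-wise product
      diag = diag.cwiseMax(wa2);                  // coefficient-wise maximum
      double xnorm = wa3.stableNorm();            // overflow/underflow-safe Euclidean norm
      std::cout << xnorm << "\n" << diag.transpose() << "\n";
      return 0;
    }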
@@ -555,7 +555,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */
-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -599,7 +599,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

 /* Computing MAX */
 if (mode != 2)
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */

@@ -613,7 +613,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -678,7 +678,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
@@ -253,7 +253,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(

 wa2 = fjac.colwise().blueNorm();
 ei_qrfac<Scalar>(m, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
-ipvt.cwise()-=1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)

 /* on the first iteration and if mode is 1, scale according */
 /* to the norms of the columns of the initial jacobian. */
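The pivot-index conversion can be sketched in isolation like this; the values in ipvt are made up, and only the .array() scalar broadcast is the point:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      Eigen::VectorXi ipvt(4);
      ipvt << 3, 1, 4, 2;     // Fortran-style 1-based pivot indices
      // Old API: ipvt.cwise() -= 1;
      // New API: scalar broadcasts on a vector go through its .array() view.
      ipvt.array() -= 1;      // now 0-based C indices: 2 0 3 1
      std::cout << ipvt.transpose() << "\n";
      return 0;
    }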
@@ -269,7 +269,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */

-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -316,7 +316,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
 /* rescale if necessary. */

 if (mode != 2) /* Computing MAX */
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */
 do {
@@ -329,7 +329,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -395,7 +395,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
@@ -538,10 +538,10 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 wa2[j] = fjac.col(j).head(j).stableNorm();
 }
 if (sing) {
-ipvt.cwise()+=1;
+ipvt.array() += 1;
 wa2 = fjac.colwise().blueNorm();
 ei_qrfac<Scalar>(n, n, fjac.data(), fjac.rows(), true, ipvt.data(), wa1.data());
-ipvt.cwise()-=1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
+ipvt.array() -= 1; // qrfac() creates ipvt with fortran convention (1->n), convert it to c (0->n-1)
 for (j = 0; j < n; ++j) {
 if (fjac(j,j) != 0.) {
 sum = 0.;
@@ -569,7 +569,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 /* on the first iteration, calculate the norm of the scaled x */
 /* and initialize the step bound delta. */

-wa3 = diag.cwise() * x;
+wa3 = diag.cwiseProduct(x);
 xnorm = wa3.stableNorm();
 delta = parameters.factor * xnorm;
 if (delta == 0.)
@@ -599,7 +599,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 /* rescale if necessary. */

 if (mode != 2) /* Computing MAX */
-diag = diag.cwise().max(wa2);
+diag = diag.cwiseMax(wa2);

 /* beginning of the inner loop. */
 do {
@@ -612,7 +612,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(

 wa1 = -wa1;
 wa2 = x + wa1;
-wa3 = diag.cwise() * wa1;
+wa3 = diag.cwiseProduct(wa1);
 pnorm = wa3.stableNorm();

 /* on the first iteration, adjust the initial step bound. */
@@ -678,7 +678,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 if (ratio >= Scalar(1e-4)) {
 /* successful iteration. update x, fvec, and their norms. */
 x = wa2;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 fvec = wa4;
 xnorm = wa2.stableNorm();
 fnorm = fnorm1;
@@ -50,7 +50,7 @@ void ei_dogleg(
 /* test whether the gauss-newton direction is acceptable. */

 wa1.fill(0.);
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 qnorm = wa2.stableNorm();
 if (qnorm <= delta)
 return;
@@ -80,7 +80,7 @@ void ei_dogleg(
 /* calculate the point along the scaled gradient */
 /* at which the quadratic is minimized. */

-wa1.cwise() /= diag*gnorm;
+wa1.array() /= (diag*gnorm).array();
 l = 0;
 for (j = 0; j < n; ++j) {
 sum = 0.;
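A minimal sketch of the coefficient-wise division, with illustrative values for wa1, diag and gnorm:

    #include <Eigen/Core>
    #include <iostream>

    int main() {
      Eigen::Vector3d wa1(1.0, 2.0, 3.0), diag(0.5, 1.0, 2.0);
      double gnorm = 2.0;
      // Old API: wa1.cwise() /= diag * gnorm;
      // New API: coefficient-wise division needs the .array() view on both sides.
      wa1.array() /= (diag * gnorm).array();
      std::cout << wa1.transpose() << "\n"; // 1 1 0.75
      return 0;
    }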
@@ -54,7 +54,7 @@ void ei_lmpar(
 /* for acceptance of the gauss-newton direction. */

 iter = 0;
-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 dxnorm = wa2.blueNorm();
 fp = dxnorm - delta;
 if (fp <= Scalar(0.1) * delta) {
@@ -117,7 +117,7 @@ void ei_lmpar(
 Matrix< Scalar, Dynamic, 1 > sdiag(n);
 ei_qrsolv<Scalar>(r, ipvt, wa1, qtb, x, sdiag);

-wa2 = diag.cwise() * x;
+wa2 = diag.cwiseProduct(x);
 dxnorm = wa2.blueNorm();
 temp = fp;
 fp = dxnorm - delta;
@@ -45,7 +45,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim)

 template<typename Scalar, int Dim> AlignedBox<Scalar, Dim> ei_bounding_box(const Matrix<Scalar, Dim, 1> &v) { return AlignedBox<Scalar, Dim>(v); }
 template<int Dim> AlignedBox<double, Dim> ei_bounding_box(const Ball<Dim> &b)
-{ return AlignedBox<double, Dim>(b.center.cwise() - b.radius, b.center.cwise() + b.radius); }
+{ return AlignedBox<double, Dim>(b.center.array() - b.radius, b.center.array() + b.radius); }


 template<int Dim>
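The same scalar broadcast, sketched with a concrete center and radius; the .matrix() conversion back from the array view is an extra step added here for clarity, whereas the test passes the array expressions straight to AlignedBox:

    #include <Eigen/Core>
    #include <Eigen/Geometry>
    #include <iostream>

    int main() {
      Eigen::Vector3d center(1.0, 2.0, 3.0);
      double radius = 0.5;
      // Old API: b.center.cwise() - b.radius and b.center.cwise() + b.radius
      // New API: broadcast the scalar over the .array() view, then go back to a vector.
      Eigen::AlignedBox<double, 3> box((center.array() - radius).matrix(),
                                       (center.array() + radius).matrix());
      std::cout << box.min().transpose() << "\n" << box.max().transpose() << "\n";
      return 0;
    }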
@@ -36,7 +36,7 @@ double binom(int n, int k)
 template <typename Derived, typename OtherDerived>
 double relerr(const MatrixBase<Derived>& A, const MatrixBase<OtherDerived>& B)
 {
-return std::sqrt((A - B).cwise().abs2().sum() / std::min(A.cwise().abs2().sum(), B.cwise().abs2().sum()));
+return std::sqrt((A - B).cwiseAbs2().sum() / std::min(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
 }

 template <typename T>
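A minimal, self-contained version of the relerr computation with made-up 2x2 matrices, showing cwiseAbs2() in place of cwise().abs2():

    #include <Eigen/Core>
    #include <algorithm>
    #include <cmath>
    #include <iostream>

    int main() {
      Eigen::Matrix2d A, B;
      A << 1.0, 2.0, 3.0, 4.0;
      B << 1.0, 2.0, 3.0, 4.5;
      // Old API: (A - B).cwise().abs2().sum()
      // New API: cwiseAbs2() gives the squared absolute values directly.
      double relerr = std::sqrt((A - B).cwiseAbs2().sum()
                                / std::min(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
      std::cout << relerr << "\n";
      return 0;
    }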