Mirror of https://gitlab.com/libeigen/eigen.git
Synced 2025-08-14 04:35:57 +08:00
Addresses comments on Eigen pull request PR-174.
* Get rid of code-duplication for real vs. complex matrices.
* Fix flipped arguments to select.
* Make the condition estimation functions free functions.
* Use Vector::Unit() to generate canonical unit vectors.
* Misc. cleanup.
This commit is contained in:
parent 30242b7565
commit 86e0ed81f8
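
The hunks below touch LDLT, LLT, FullPivLU, PartialPivLU, the shared condition-estimation header, and the cholesky/lu tests. The public rcond() members keep their signatures; they now forward to the free function ReciprocalConditionNumberEstimate() instead of the old ConditionEstimator<...> class template. A minimal usage sketch, not part of the commit, assuming an Eigen version that already ships this rcond() API:

  #include <iostream>
  #include <Eigen/Dense>

  int main() {
    // Build a symmetric positive definite matrix so that LDLT applies.
    const int n = 50;
    Eigen::MatrixXd m = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd spd = m * m.transpose() + n * Eigen::MatrixXd::Identity(n, n);

    // rcond() is the public entry point; after this patch it forwards to
    // ReciprocalConditionNumberEstimate(m_l1_norm, *this).
    Eigen::LDLT<Eigen::MatrixXd> ldlt(spd);
    std::cout << "LDLT rcond estimate: " << ldlt.rcond() << "\n";

    Eigen::PartialPivLU<Eigen::MatrixXd> lu(spd);
    std::cout << "LU rcond estimate: " << lu.rcond() << "\n";
    return 0;
  }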
@@ -198,7 +198,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     RealScalar rcond() const
     {
       eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return ConditionEstimator<LDLT<MatrixType, UpLo>, true >::rcond(m_l1_norm, *this);
+      return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
     }

     template <typename Derived>
@@ -216,6 +216,12 @@ template<typename _MatrixType, int _UpLo> class LDLT

     MatrixType reconstructedMatrix() const;

+    /** \returns the decomposition itself to allow generic code to do
+     * ldlt.transpose().solve(rhs).
+     */
+    const LDLT<MatrixType, UpLo>& transpose() const { return *this; };
+    const LDLT<MatrixType, UpLo>& adjoint() const { return *this; };
+
     inline Index rows() const { return m_matrix.rows(); }
     inline Index cols() const { return m_matrix.cols(); }

@@ -454,14 +460,14 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>
   if (_UpLo == Lower) {
     for (int col = 0; col < size; ++col) {
       const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).cwiseAbs().sum() +
-                                     m_matrix.row(col).tail(col).cwiseAbs().sum();
+                                     m_matrix.row(col).head(col).cwiseAbs().sum();
       if (abs_col_sum > m_l1_norm) {
         m_l1_norm = abs_col_sum;
       }
     }
   } else {
     for (int col = 0; col < a.cols(); ++col) {
-      const RealScalar abs_col_sum = m_matrix.col(col).tail(col).cwiseAbs().sum() +
+      const RealScalar abs_col_sum = m_matrix.col(col).head(col).cwiseAbs().sum() +
                                      m_matrix.row(col).tail(size - col).cwiseAbs().sum();
       if (abs_col_sum > m_l1_norm) {
         m_l1_norm = abs_col_sum;
|
@@ -142,7 +142,7 @@ template<typename _MatrixType, int _UpLo> class LLT
     {
       eigen_assert(m_isInitialized && "LLT is not initialized.");
       eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative");
-      return ConditionEstimator<LLT<MatrixType, UpLo>, true >::rcond(m_l1_norm, *this);
+      return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
     }

     /** \returns the LLT decomposition matrix
@@ -169,6 +169,12 @@ template<typename _MatrixType, int _UpLo> class LLT
       return m_info;
     }

+    /** \returns the decomposition itself to allow generic code to do
+     * llt.transpose().solve(rhs).
+     */
+    const LLT<MatrixType, UpLo>& transpose() const { return *this; };
+    const LLT<MatrixType, UpLo>& adjoint() const { return *this; };
+
     inline Index rows() const { return m_matrix.rows(); }
     inline Index cols() const { return m_matrix.cols(); }

@@ -409,14 +415,14 @@ LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>
   if (_UpLo == Lower) {
     for (int col = 0; col < size; ++col) {
       const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).cwiseAbs().sum() +
-                                     m_matrix.row(col).tail(col).cwiseAbs().sum();
+                                     m_matrix.row(col).head(col).cwiseAbs().sum();
       if (abs_col_sum > m_l1_norm) {
         m_l1_norm = abs_col_sum;
       }
     }
   } else {
     for (int col = 0; col < a.cols(); ++col) {
-      const RealScalar abs_col_sum = m_matrix.col(col).tail(col).cwiseAbs().sum() +
+      const RealScalar abs_col_sum = m_matrix.col(col).head(col).cwiseAbs().sum() +
                                      m_matrix.row(col).tail(size - col).cwiseAbs().sum();
       if (abs_col_sum > m_l1_norm) {
         m_l1_norm = abs_col_sum;
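
Note on the transpose()/adjoint() members added to LDLT and LLT above: the shared estimator in the next file calls dec.adjoint().solve(...) on whatever decomposition it is given, and for a self-adjoint factorization the adjoint solve is just the ordinary solve, so both members can simply return *this. A rough illustration of that contract; adjoint_solve is a made-up helper, not part of the patch, and the snippet assumes an Eigen build containing these new members:

  #include <Eigen/Dense>

  // The only thing the estimator needs from a decomposition is solve() and
  // adjoint().solve(); for LLT/LDLT the adjoint() added above returns *this.
  template <typename Decomposition, typename Vector>
  Vector adjoint_solve(const Decomposition& dec, const Vector& rhs) {
    return dec.adjoint().solve(rhs);
  }

  int main() {
    const int n = 8;
    Eigen::MatrixXd m = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd spd = m * m.transpose() + n * Eigen::MatrixXd::Identity(n, n);
    Eigen::VectorXd b = Eigen::VectorXd::Random(n);

    Eigen::LLT<Eigen::MatrixXd> llt(spd);
    // For a self-adjoint matrix the adjoint solve equals the plain solve.
    Eigen::VectorXd x = adjoint_solve(llt, b);
    Eigen::VectorXd y = llt.solve(b);
    return (x - y).norm() < 1e-10 * b.norm() ? 0 : 1;
  }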
|
@@ -13,19 +13,35 @@
 namespace Eigen {

 namespace internal {
-template <typename Decomposition, bool IsSelfAdjoint, bool IsComplex>
-struct EstimateInverseMatrixL1NormImpl {};
+template <typename MatrixType>
+inline typename MatrixType::RealScalar MatrixL1Norm(const MatrixType& matrix) {
+  return matrix.cwiseAbs().colwise().sum().maxCoeff();
+}
+
+template <typename Vector>
+inline typename Vector::RealScalar VectorL1Norm(const Vector& v) {
+  return v.template lpNorm<1>();
+}
+
+template <typename Vector, typename RealVector, bool IsComplex>
+struct SignOrUnity {
+  static inline Vector run(const Vector& v) {
+    const RealVector v_abs = v.cwiseAbs();
+    return (v_abs.array() == 0).select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));
+  }
+};
+
+// Partial specialization to avoid elementwise division for real vectors.
+template <typename Vector>
+struct SignOrUnity<Vector, Vector, false> {
+  static inline Vector run(const Vector& v) {
+    return (v.array() < 0).select(-Vector::Ones(v.size()), Vector::Ones(v.size()));
+  }
+};
+
 } // namespace internal

-template <typename Decomposition, bool IsSelfAdjoint = false>
-class ConditionEstimator {
- public:
-  typedef typename Decomposition::MatrixType MatrixType;
-  typedef typename internal::traits<MatrixType>::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef typename internal::plain_col_type<MatrixType>::type Vector;
-
-  /** \class ConditionEstimator
+/** \class ConditionEstimator
  * \ingroup Core_Module
  *
  * \brief Condition number estimator.
@@ -41,17 +57,20 @@ class ConditionEstimator {
  *
  * \sa FullPivLU, PartialPivLU.
  */
-  static RealScalar rcond(const MatrixType& matrix, const Decomposition& dec) {
+template <typename Decomposition>
+typename Decomposition::RealScalar ReciprocalConditionNumberEstimate(
+    const typename Decomposition::MatrixType& matrix,
+    const Decomposition& dec) {
   eigen_assert(matrix.rows() == dec.rows());
   eigen_assert(matrix.cols() == dec.cols());
   eigen_assert(matrix.rows() == matrix.cols());
   if (dec.rows() == 0) {
-    return RealScalar(1);
-  }
-  return rcond(MatrixL1Norm(matrix), dec);
+    return Decomposition::RealScalar(1);
   }
+  return ReciprocalConditionNumberEstimate(MatrixL1Norm(matrix), dec);
+}

 /** \class ConditionEstimator
  * \ingroup Core_Module
  *
  * \brief Condition number estimator.
@@ -67,7 +86,9 @@ class ConditionEstimator {
  *
  * \sa FullPivLU, PartialPivLU.
  */
-  static RealScalar rcond(RealScalar matrix_norm, const Decomposition& dec) {
+template <typename Decomposition>
+typename Decomposition::RealScalar ReciprocalConditionNumberEstimate(
+    typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) {
   eigen_assert(dec.rows() == dec.cols());
   if (dec.rows() == 0) {
     return 1;
@@ -75,12 +96,11 @@ class ConditionEstimator {
   if (matrix_norm == 0) {
     return 0;
   }
-  const RealScalar inverse_matrix_norm = EstimateInverseMatrixL1Norm(dec);
-  return inverse_matrix_norm == 0 ? 0
-                                  : (1 / inverse_matrix_norm) / matrix_norm;
-}
+  const typename Decomposition::RealScalar inverse_matrix_norm = InverseMatrixL1NormEstimate(dec);
+  return inverse_matrix_norm == 0 ? 0 : (1 / inverse_matrix_norm) / matrix_norm;
+}

 /**
  * \returns an estimate of ||inv(matrix)||_1 given a decomposition of
  * matrix that implements .solve() and .adjoint().solve() methods.
  *
@@ -95,103 +115,65 @@ class ConditionEstimator {
  * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be
  * computed directly in O(n^2) operations.
  */
-  static RealScalar EstimateInverseMatrixL1Norm(const Decomposition& dec) {
+template <typename Decomposition>
+typename Decomposition::RealScalar InverseMatrixL1NormEstimate(
+    const Decomposition& dec) {
+  typedef typename Decomposition::MatrixType MatrixType;
+  typedef typename Decomposition::Scalar Scalar;
+  typedef typename Decomposition::RealScalar RealScalar;
+  typedef typename internal::plain_col_type<MatrixType>::type Vector;
+  typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;
+  const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);
+
   eigen_assert(dec.rows() == dec.cols());
-  if (dec.rows() == 0) {
+  const int n = dec.rows();
+  if (n == 0) {
     return 0;
   }
-  return internal::EstimateInverseMatrixL1NormImpl<
-      Decomposition, IsSelfAdjoint,
-      NumTraits<Scalar>::IsComplex != 0>::compute(dec);
-  }
-
-  /**
-   * \returns the induced matrix l1-norm
-   * ||matrix||_1 = sup ||matrix * v||_1 / ||v||_1, which is equal to
-   * the greatest absolute column sum.
-   */
-  static inline Scalar MatrixL1Norm(const MatrixType& matrix) {
-    return matrix.cwiseAbs().colwise().sum().maxCoeff();
-  }
-};
-
-namespace internal {
-
-template <typename Decomposition, typename Vector, bool IsSelfAdjoint = false>
-struct solve_helper {
-  static inline Vector solve_adjoint(const Decomposition& dec,
-                                     const Vector& v) {
-    return dec.adjoint().solve(v);
-  }
-};
-
-// Partial specialization for self_adjoint matrices.
-template <typename Decomposition, typename Vector>
-struct solve_helper<Decomposition, Vector, true> {
-  static inline Vector solve_adjoint(const Decomposition& dec,
-                                     const Vector& v) {
-    return dec.solve(v);
-  }
-};
-
-
-// Partial specialization for real matrices.
-template <typename Decomposition, bool IsSelfAdjoint>
-struct EstimateInverseMatrixL1NormImpl<Decomposition, IsSelfAdjoint, false> {
-  typedef typename Decomposition::MatrixType MatrixType;
-  typedef typename internal::traits<MatrixType>::Scalar Scalar;
-  typedef typename internal::plain_col_type<MatrixType>::type Vector;
-
-  // Shorthand for vector L1 norm in Eigen.
-  static inline Scalar VectorL1Norm(const Vector& v) {
-    return v.template lpNorm<1>();
-  }
-
-  static inline Scalar compute(const Decomposition& dec) {
-    const int n = dec.rows();
-    const Vector plus = Vector::Ones(n);
-    Vector v = plus / n;
+  Vector v = Vector::Ones(n) / n;
   v = dec.solve(v);
-  Scalar lower_bound = VectorL1Norm(v);
-  if (n == 1) {
-    return lower_bound;
-  }
+
   // lower_bound is a lower bound on
   // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
   // and is the objective maximized by the ("super-") gradient ascent
-  // algorithm.
-  // Basic idea: We know that the optimum is achieved at one of the simplices
-  // v = e_i, so in each iteration we follow a super-gradient to move towards
-  // the optimal one.
-  Scalar old_lower_bound = lower_bound;
-  const Vector minus = -Vector::Ones(n);
-  Vector sign_vector = (v.cwiseAbs().array() == 0).select(plus, minus);
-  Vector old_sign_vector = sign_vector;
+  // algorithm below.
+  RealScalar lower_bound = internal::VectorL1Norm(v);
+  if (n == 1) {
+    return lower_bound;
+  }
+  // Gradient ascent algorithm follows: We know that the optimum is achieved at
+  // one of the simplices v = e_i, so in each iteration we follow a
+  // super-gradient to move towards the optimal one.
+  RealScalar old_lower_bound = lower_bound;
+  Vector sign_vector(n);
+  Vector old_sign_vector;
   int v_max_abs_index = -1;
   int old_v_max_abs_index = v_max_abs_index;
   for (int k = 0; k < 4; ++k) {
-    // argmax |inv(matrix)^T * sign_vector|
-    v = solve_helper<Decomposition, Vector, IsSelfAdjoint>::solve_adjoint(dec, sign_vector);
-    v.cwiseAbs().maxCoeff(&v_max_abs_index);
+    sign_vector = internal::SignOrUnity<Vector, RealVector, is_complex>::run(v);
+    if (k > 0 && !is_complex) {
+      if (sign_vector == old_sign_vector) {
+        // Break if the solution stagnated.
+        break;
+      }
+    }
+    // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
+    v = dec.adjoint().solve(sign_vector);
+    v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
     if (v_max_abs_index == old_v_max_abs_index) {
       // Break if the solution stagnated.
       break;
     }
     // Move to the new simplex e_j, where j = v_max_abs_index.
-    v.setZero();
-    v[v_max_abs_index] = 1;
-    v = dec.solve(v); // v = inv(matrix) * e_j.
-    lower_bound = VectorL1Norm(v);
+    v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j.
+    lower_bound = internal::VectorL1Norm(v);
     if (lower_bound <= old_lower_bound) {
       // Break if the gradient step did not increase the lower_bound.
       break;
     }
-    sign_vector = (v.array() < 0).select(plus, minus);
-    if (sign_vector == old_sign_vector) {
-      // Break if the solution stagnated.
-      break;
-    }
+    if (!is_complex) {
       old_sign_vector = sign_vector;
+    }
     old_v_max_abs_index = v_max_abs_index;
     old_lower_bound = lower_bound;
   }
@@ -206,87 +188,6 @@ struct EstimateInverseMatrixL1NormImpl<Decomposition, IsSelfAdjoint, false> {
   // sequence of backsubstitutions and permutations), which could cause
   // Hager's algorithm to vastly underestimate ||matrix||_1.
   Scalar alternating_sign = 1;
-  for (int i = 0; i < n; ++i) {
-    v[i] = alternating_sign * static_cast<Scalar>(1) +
-           (static_cast<Scalar>(i) / (static_cast<Scalar>(n - 1)));
-    alternating_sign = -alternating_sign;
-  }
-  v = dec.solve(v);
-  const Scalar alternate_lower_bound =
-      (2 * VectorL1Norm(v)) / (3 * static_cast<Scalar>(n));
-  return numext::maxi(lower_bound, alternate_lower_bound);
-  }
-};
-
-// Partial specialization for complex matrices.
-template <typename Decomposition, bool IsSelfAdjoint>
-struct EstimateInverseMatrixL1NormImpl<Decomposition, IsSelfAdjoint, true> {
-  typedef typename Decomposition::MatrixType MatrixType;
-  typedef typename internal::traits<MatrixType>::Scalar Scalar;
-  typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef typename internal::plain_col_type<MatrixType>::type Vector;
-  typedef typename internal::plain_col_type<MatrixType, RealScalar>::type
-      RealVector;
-
-  // Shorthand for vector L1 norm in Eigen.
-  static inline RealScalar VectorL1Norm(const Vector& v) {
-    return v.template lpNorm<1>();
-  }
-
-  static inline RealScalar compute(const Decomposition& dec) {
-    const int n = dec.rows();
-    const Vector ones = Vector::Ones(n);
-    Vector v = ones / n;
-    v = dec.solve(v);
-    RealScalar lower_bound = VectorL1Norm(v);
-    if (n == 1) {
-      return lower_bound;
-    }
-    // lower_bound is a lower bound on
-    // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
-    // and is the objective maximized by the ("super-") gradient ascent
-    // algorithm.
-    // Basic idea: We know that the optimum is achieved at one of the simplices
-    // v = e_i, so in each iteration we follow a super-gradient to move towards
-    // the optimal one.
-    RealScalar old_lower_bound = lower_bound;
-    int v_max_abs_index = -1;
-    int old_v_max_abs_index = v_max_abs_index;
-    for (int k = 0; k < 4; ++k) {
-      // argmax |inv(matrix)^* * sign_vector|
-      RealVector abs_v = v.cwiseAbs();
-      const Vector psi =
-          (abs_v.array() == 0).select(v.cwiseQuotient(abs_v), ones);
-      v = solve_helper<Decomposition, Vector, IsSelfAdjoint>::solve_adjoint(dec, psi);
-      const RealVector z = v.real();
-      z.cwiseAbs().maxCoeff(&v_max_abs_index);
-      if (v_max_abs_index == old_v_max_abs_index) {
-        // Break if the solution stagnated.
-        break;
-      }
-      // Move to the new simplex e_j, where j = v_max_abs_index.
-      v.setZero();
-      v[v_max_abs_index] = 1;
-      v = dec.solve(v); // v = inv(matrix) * e_j.
-      lower_bound = VectorL1Norm(v);
-      if (lower_bound <= old_lower_bound) {
-        // Break if the gradient step did not increase the lower_bound.
-        break;
-      }
-      old_v_max_abs_index = v_max_abs_index;
-      old_lower_bound = lower_bound;
-    }
-    // The following calculates an independent estimate of ||matrix||_1 by
-    // multiplying matrix by a vector with entries of slowly increasing
-    // magnitude and alternating sign:
-    // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
-    // This improvement to Hager's algorithm above is due to Higham. It was
-    // added to make the algorithm more robust in certain corner cases where
-    // large elements in the matrix might otherwise escape detection due to
-    // exact cancellation (especially when op and op_adjoint correspond to a
-    // sequence of backsubstitutions and permutations), which could cause
-    // Hager's algorithm to vastly underestimate ||matrix||_1.
-    RealScalar alternating_sign = 1;
   for (int i = 0; i < n; ++i) {
     v[i] = alternating_sign * static_cast<RealScalar>(1) +
            (static_cast<RealScalar>(i) / (static_cast<RealScalar>(n - 1)));
@@ -294,12 +195,10 @@ struct EstimateInverseMatrixL1NormImpl<Decomposition, IsSelfAdjoint, true> {
   }
   v = dec.solve(v);
   const RealScalar alternate_lower_bound =
-      (2 * VectorL1Norm(v)) / (3 * static_cast<RealScalar>(n));
+      (2 * internal::VectorL1Norm(v)) / (3 * static_cast<RealScalar>(n));
   return numext::maxi(lower_bound, alternate_lower_bound);
 }
-};

-} // namespace internal
 } // namespace Eigen

 #endif
|
@@ -237,7 +237,7 @@ template<typename _MatrixType> class FullPivLU
     inline RealScalar rcond() const
     {
       eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
-      return ConditionEstimator<FullPivLU<_MatrixType> >::rcond(m_l1_norm, *this);
+      return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
     }

     /** \returns the determinant of the matrix of which
|
@@ -157,7 +157,7 @@ template<typename _MatrixType> class PartialPivLU
     inline RealScalar rcond() const
    {
      eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
-     return ConditionEstimator<PartialPivLU<_MatrixType> >::rcond(m_l1_norm, *this);
+     return ReciprocalConditionNumberEstimate(m_l1_norm, *this);
    }

     /** \returns the inverse of the matrix of which *this is the LU decomposition.
|
@@ -91,12 +91,12 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
     matX = chollo.solve(matB);
     VERIFY_IS_APPROX(symm * matX, matB);

-    // Verify that the estimated condition number is within a factor of 10 of the
-    // truth.
     const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols));
     RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) /
                        matrix_l1_norm<MatrixType, Lower>(symmLo_inverse);
     RealScalar rcond_est = chollo.rcond();
+    // Verify that the estimated condition number is within a factor of 10 of the
+    // truth.
     VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

     // test the upper mode
@@ -160,12 +160,12 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
     matX = ldltlo.solve(matB);
     VERIFY_IS_APPROX(symm * matX, matB);

-    // Verify that the estimated condition number is within a factor of 10 of the
-    // truth.
     const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols));
     RealScalar rcond = (RealScalar(1) / matrix_l1_norm<MatrixType, Lower>(symmLo)) /
                        matrix_l1_norm<MatrixType, Lower>(symmLo_inverse);
     RealScalar rcond_est = ldltlo.rcond();
+    // Verify that the estimated condition number is within a factor of 10 of the
+    // truth.
     VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);


|
@@ -151,10 +151,10 @@ template<typename MatrixType> void lu_invertible()
   MatrixType m1_inverse = lu.inverse();
   VERIFY_IS_APPROX(m2, m1_inverse*m3);

-  // Verify that the estimated condition number is within a factor of 10 of the
-  // truth.
   RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse);
   const RealScalar rcond_est = lu.rcond();
+  // Verify that the estimated condition number is within a factor of 10 of the
+  // truth.
   VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

   // test solve with transposed
@@ -197,10 +197,9 @@ template<typename MatrixType> void lu_partial_piv()
   MatrixType m1_inverse = plu.inverse();
   VERIFY_IS_APPROX(m2, m1_inverse*m3);

-  // Test condition number estimation.
   RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse);
-  // Verify that the estimate is within a factor of 10 of the truth.
   const RealScalar rcond_est = plu.rcond();
+  // Verify that the estimate is within a factor of 10 of the truth.
   VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10);

   // test solve with transposed
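
The test changes above only move the explanatory comments next to the VERIFY they belong to. For reference, a stand-alone version of the same factor-of-10 check outside the test framework; this is an illustrative sketch, with l1_norm standing in for the test helper matrix_l1_norm:

  #include <cassert>
  #include <Eigen/Dense>

  // Induced L1 norm: the largest absolute column sum, the norm used by the estimator.
  static double l1_norm(const Eigen::MatrixXd& m) {
    return m.cwiseAbs().colwise().sum().maxCoeff();
  }

  int main() {
    const int n = 30;
    // A diagonally dominant matrix keeps the condition number moderate, so the
    // factor-of-10 tolerance below is comfortable.
    Eigen::MatrixXd a =
        Eigen::MatrixXd::Random(n, n) + 2.0 * n * Eigen::MatrixXd::Identity(n, n);
    Eigen::PartialPivLU<Eigen::MatrixXd> lu(a);

    // Exact reciprocal condition number from the explicit inverse (O(n^3),
    // acceptable only as a reference, exactly as in the tests above).
    Eigen::MatrixXd a_inv = lu.inverse();
    const double rcond = (1.0 / l1_norm(a)) / l1_norm(a_inv);

    // Cheap estimate from the decomposition.
    const double rcond_est = lu.rcond();
    assert(rcond_est > rcond / 10 && rcond_est < rcond * 10);
    return 0;
  }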
|