From 1aa89fb85548dc425d54d2cbe7f28915c29db13a Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Fri, 1 Apr 2016 10:27:59 -0700 Subject: [PATCH 01/14] Add matrix condition estimator module that implements the Higham/Hager algorithm from http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf used in LPACK. Add rcond() methods to FullPivLU and PartialPivLU. --- Eigen/Core | 9 +- Eigen/src/Core/ConditionEstimator.h | 279 ++++++++++++++++++++++++++++ Eigen/src/LU/FullPivLU.h | 13 +- Eigen/src/LU/PartialPivLU.h | 19 +- test/lu.cpp | 22 ++- 5 files changed, 332 insertions(+), 10 deletions(-) create mode 100644 Eigen/src/Core/ConditionEstimator.h diff --git a/Eigen/Core b/Eigen/Core index 8428c51e4..0a196c814 100644 --- a/Eigen/Core +++ b/Eigen/Core @@ -33,13 +33,13 @@ #ifdef EIGEN_EXCEPTIONS #undef EIGEN_EXCEPTIONS #endif - + // All functions callable from CUDA code must be qualified with __device__ #define EIGEN_DEVICE_FUNC __host__ __device__ - + #else #define EIGEN_DEVICE_FUNC - + #endif #if defined(__CUDA_ARCH__) @@ -282,7 +282,7 @@ inline static const char *SimdInstructionSetsInUse(void) { // we use size_t frequently and we'll never remember to prepend it with std:: everytime just to // ensure QNX/QCC support using std::size_t; -// gcc 4.6.0 wants std:: for ptrdiff_t +// gcc 4.6.0 wants std:: for ptrdiff_t using std::ptrdiff_t; /** \defgroup Core_Module Core module @@ -422,6 +422,7 @@ using std::ptrdiff_t; #include "src/Core/products/TriangularSolverVector.h" #include "src/Core/BandMatrix.h" #include "src/Core/CoreIterators.h" +#include "src/Core/ConditionEstimator.h" #include "src/Core/BooleanRedux.h" #include "src/Core/Select.h" diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h new file mode 100644 index 000000000..ab6f59319 --- /dev/null +++ b/Eigen/src/Core/ConditionEstimator.h @@ -0,0 +1,279 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONDITIONESTIMATOR_H +#define EIGEN_CONDITIONESTIMATOR_H + +namespace Eigen { + +namespace internal { +template +struct EstimateInverseL1NormImpl {}; +} // namespace internal + +template +class ConditionEstimator { + public: + typedef typename Decomposition::MatrixType MatrixType; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename internal::plain_col_type::type Vector; + + /** \class ConditionEstimator + * \ingroup Core_Module + * + * \brief Condition number estimator. + * + * Computing a decomposition of a dense matrix takes O(n^3) operations, while + * this method estimates the condition number quickly and reliably in O(n^2) + * operations. + * + * \returns an estimate of the reciprocal condition number + * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given the matrix and + * its decomposition. Supports the following decompositions: FullPivLU, + * PartialPivLU. + * + * \sa FullPivLU, PartialPivLU. 
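+ *
+ * Note: the matrix argument is only used to compute ||matrix||_1; the
+ * estimate of ||inv(matrix)||_1 is obtained purely from the decomposition's
+ * solve methods.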
+ */ + static RealScalar rcond(const MatrixType& matrix, const Decomposition& dec) { + eigen_assert(matrix.rows() == dec.rows()); + eigen_assert(matrix.cols() == dec.cols()); + eigen_assert(matrix.rows() == matrix.cols()); + if (dec.rows() == 0) { + return RealScalar(1); + } + RealScalar matrix_l1_norm = matrix.cwiseAbs().colwise().sum().maxCoeff(); + return rcond(MatrixL1Norm(matrix), dec); + } + + /** \class ConditionEstimator + * \ingroup Core_Module + * + * \brief Condition number estimator. + * + * Computing a decomposition of a dense matrix takes O(n^3) operations, while + * this method estimates the condition number quickly and reliably in O(n^2) + * operations. + * + * \returns an estimate of the reciprocal condition number + * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and + * its decomposition. Supports the following decompositions: FullPivLU, + * PartialPivLU. + * + * \sa FullPivLU, PartialPivLU. + */ + static RealScalar rcond(RealScalar matrix_norm, const Decomposition& dec) { + eigen_assert(dec.rows() == dec.cols()); + if (dec.rows() == 0) { + return 1; + } + if (matrix_norm == 0) { + return 0; + } + const RealScalar inverse_matrix_norm = EstimateInverseL1Norm(dec); + return inverse_matrix_norm == 0 ? 0 + : (1 / inverse_matrix_norm) / matrix_norm; + } + + /* + * Fast algorithm for computing a lower bound estimate on the L1 norm of + * the inverse of the matrix using at most 10 calls to the solve method on its + * decomposition. This is an implementation of Algorithm 4.1 in + * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf + * The most common usage of this algorithm is in estimating the condition + * number ||A||_1 * ||A^{-1}||_1 of a matrix A. While ||A||_1 can be computed + * directly in O(dims^2) operations (see MatrixL1Norm() below), while + * there is no cheap closed-form expression for ||A^{-1}||_1. + * Given a decompostion of A, this algorithm estimates ||A^{-1}|| in O(dims^2) + * operations. This is done by providing operators that use the decomposition + * to solve systems of the form A x = b or A^* z = c by back-substitution, + * each costing O(dims^2) operations. Since at most 10 calls are performed, + * the total cost is O(dims^2), as opposed to O(dims^3) if the inverse matrix + * B^{-1} was formed explicitly. + */ + static RealScalar EstimateInverseL1Norm(const Decomposition& dec) { + eigen_assert(dec.rows() == dec.cols()); + const int n = dec.rows(); + if (n == 0) { + return 0; + } + return internal::EstimateInverseL1NormImpl< + Decomposition, NumTraits::IsComplex>::compute(dec); + } +}; + +namespace internal { +// Partial specialization for real matrices. +template +struct EstimateInverseL1NormImpl { + typedef typename Decomposition::MatrixType MatrixType; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::plain_col_type::type Vector; + + // Shorthand for vector L1 norm in Eigen. + inline static Scalar VectorL1Norm(const Vector& v) { + return v.template lpNorm<1>(); + } + + static inline Scalar compute(const Decomposition& dec) { + const int n = dec.rows(); + const Vector plus = Vector::Ones(n); + Vector v = plus / n; + v = dec.solve(v); + Scalar lower_bound = VectorL1Norm(v); + if (n == 1) { + return lower_bound; + } + // lower_bound is a lower bound on ||inv(A)||_1 = sup_v ||inv(A) v||_1 / + // ||v||_1 and is the objective maximized by the ("super-") gradient ascent + // algorithm. 
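+ // (This is Hager's estimator; see Algorithm 4.1 in the report cited above.)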
+ // Basic idea: We know that the optimum is achieved at one of the simplices + // v = e_i, so in each iteration we follow a super-gradient to move towards + // the optimal one. + Scalar old_lower_bound = lower_bound; + const Vector minus = -Vector::Ones(n); + Vector sign_vector = (v.cwiseAbs().array() == 0).select(plus, minus); + Vector old_sign_vector = sign_vector; + int v_max_abs_index = -1; + int old_v_max_abs_index = v_max_abs_index; + for (int k = 0; k < 4; ++k) { + // argmax |inv(A)^T * sign_vector| + v = dec.transpose().solve(sign_vector); + v.cwiseAbs().maxCoeff(&v_max_abs_index); + if (v_max_abs_index == old_v_max_abs_index) { + // Break if the solution stagnated. + break; + } + // Move to the new simplex e_j, where j = v_max_abs_index. + v.setZero(); + v[v_max_abs_index] = 1; + v = dec.solve(v); // v = inv(A) * e_j. + lower_bound = VectorL1Norm(v); + if (lower_bound <= old_lower_bound) { + // Break if the gradient step did not increase the lower_bound. + break; + } + sign_vector = (v.array() < 0).select(plus, minus); + if (sign_vector == old_sign_vector) { + // Break if the solution stagnated. + break; + } + old_sign_vector = sign_vector; + old_v_max_abs_index = v_max_abs_index; + old_lower_bound = lower_bound; + } + // The following calculates an independent estimate of ||A||_1 by + // multiplying + // A by a vector with entries of slowly increasing magnitude and alternating + // sign: v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. This + // improvement + // to Hager's algorithm above is due to Higham. It was added to make the + // algorithm more robust in certain corner cases where large elements in + // the matrix might otherwise escape detection due to exact cancellation + // (especially when op and op_adjoint correspond to a sequence of + // backsubstitutions and permutations), which could cause Hager's algorithm + // to vastly underestimate ||A||_1. + Scalar alternating_sign = 1; + for (int i = 0; i < n; ++i) { + v[i] = alternating_sign * static_cast(1) + + (static_cast(i) / (static_cast(n - 1))); + alternating_sign = -alternating_sign; + } + v = dec.solve(v); + const Scalar alternate_lower_bound = + (2 * VectorL1Norm(v)) / (3 * static_cast(n)); + return numext::maxi(lower_bound, alternate_lower_bound); + } +}; + +// Partial specialization for complex matrices. +template +struct EstimateInverseL1NormImpl { + typedef typename Decomposition::MatrixType MatrixType; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename internal::plain_col_type::type Vector; + typedef typename internal::plain_col_type::type + RealVector; + + // Shorthand for vector L1 norm in Eigen. + inline static RealScalar VectorL1Norm(const Vector& v) { + return v.template lpNorm<1>(); + } + + static inline RealScalar compute(const Decomposition& dec) { + const int n = dec.rows(); + const Vector ones = Vector::Ones(n); + Vector v = ones / n; + v = dec.solve(v); + RealScalar lower_bound = VectorL1Norm(v); + if (n == 1) { + return lower_bound; + } + // lower_bound is a lower bound on ||inv(A)||_1 = sup_v ||inv(A) v||_1 / + // ||v||_1 and is the objective maximized by the ("super-") gradient ascent + // algorithm. + // Basic idea: We know that the optimum is achieved at one of the simplices + // v = e_i, so in each iteration we follow a super-gradient to move towards + // the optimal one. 
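+ // The iteration below parallels the real specialization above, with the
+ // +/-1 sign vector generalized to a vector of unit-modulus complex phases.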
+ RealScalar old_lower_bound = lower_bound; + int v_max_abs_index = -1; + int old_v_max_abs_index = v_max_abs_index; + for (int k = 0; k < 4; ++k) { + // argmax |inv(A)^* * sign_vector| + RealVector abs_v = v.cwiseAbs(); + const Vector psi = + (abs_v.array() == 0).select(v.cwiseQuotient(abs_v), ones); + v = dec.adjoint().solve(psi); + const RealVector z = v.real(); + z.cwiseAbs().maxCoeff(&v_max_abs_index); + if (v_max_abs_index == old_v_max_abs_index) { + // Break if the solution stagnated. + break; + } + // Move to the new simplex e_j, where j = v_max_abs_index. + v.setZero(); + v[v_max_abs_index] = 1; + v = dec.solve(v); // v = inv(A) * e_j. + lower_bound = VectorL1Norm(v); + if (lower_bound <= old_lower_bound) { + // Break if the gradient step did not increase the lower_bound. + break; + } + old_v_max_abs_index = v_max_abs_index; + old_lower_bound = lower_bound; + } + // The following calculates an independent estimate of ||A||_1 by + // multiplying + // A by a vector with entries of slowly increasing magnitude and alternating + // sign: v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. This + // improvement + // to Hager's algorithm above is due to Higham. It was added to make the + // algorithm more robust in certain corner cases where large elements in + // the matrix might otherwise escape detection due to exact cancellation + // (especially when op and op_adjoint correspond to a sequence of + // backsubstitutions and permutations), which could cause Hager's algorithm + // to vastly underestimate ||A||_1. + RealScalar alternating_sign = 1; + for (int i = 0; i < n; ++i) { + v[i] = alternating_sign * static_cast(1) + + (static_cast(i) / (static_cast(n - 1))); + alternating_sign = -alternating_sign; + } + v = dec.solve(v); + const RealScalar alternate_lower_bound = + (2 * VectorL1Norm(v)) / (3 * static_cast(n)); + return numext::maxi(lower_bound, alternate_lower_bound); + } +}; + +} // namespace internal +} // namespace Eigen + +#endif diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h index 1721213d6..ff0b78c35 100644 --- a/Eigen/src/LU/FullPivLU.h +++ b/Eigen/src/LU/FullPivLU.h @@ -231,6 +231,15 @@ template class FullPivLU return Solve(*this, b.derived()); } + /** \returns an estimate of the reciprocal condition number of the matrix of which *this is + the LU decomposition. + */ + inline RealScalar rcond() const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return ConditionEstimator >::rcond(m_l1_norm, *this); + } + /** \returns the determinant of the matrix of which * *this is the LU decomposition. 
It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) @@ -410,6 +419,7 @@ template class FullPivLU IntColVectorType m_rowsTranspositions; IntRowVectorType m_colsTranspositions; Index m_det_pq, m_nonzero_pivots; + RealScalar m_l1_norm; RealScalar m_maxpivot, m_prescribedThreshold; bool m_isInitialized, m_usePrescribedThreshold; }; @@ -455,11 +465,12 @@ FullPivLU& FullPivLU::compute(const EigenBase // the permutations are stored as int indices, so just to be sure: eigen_assert(matrix.rows()<=NumTraits::highest() && matrix.cols()<=NumTraits::highest()); - m_isInitialized = true; m_lu = matrix.derived(); + m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff(); computeInPlace(); + m_isInitialized = true; return *this; } diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index ab7797d2a..5d71a66d0 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -76,7 +76,6 @@ template class PartialPivLU typedef Transpositions TranspositionType; typedef typename MatrixType::PlainObject PlainObject; - /** * \brief Default Constructor. * @@ -152,6 +151,15 @@ template class PartialPivLU return Solve(*this, b.derived()); } + /** \returns an estimate of the reciprocal condition number of the matrix of which *this is + the LU decomposition. + */ + inline RealScalar rcond() const + { + eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); + return ConditionEstimator >::rcond(m_l1_norm, *this); + } + /** \returns the inverse of the matrix of which *this is the LU decomposition. * * \warning The matrix being decomposed here is assumed to be invertible. If you need to check for @@ -178,7 +186,7 @@ template class PartialPivLU * * \sa MatrixBase::determinant() */ - typename internal::traits::Scalar determinant() const; + Scalar determinant() const; MatrixType reconstructedMatrix() const; @@ -247,6 +255,7 @@ template class PartialPivLU PermutationType m_p; TranspositionType m_rowsTranspositions; Index m_det_p; + RealScalar m_l1_norm; bool m_isInitialized; }; @@ -256,6 +265,7 @@ PartialPivLU::PartialPivLU() m_p(), m_rowsTranspositions(), m_det_p(0), + m_l1_norm(0), m_isInitialized(false) { } @@ -266,6 +276,7 @@ PartialPivLU::PartialPivLU(Index size) m_p(size), m_rowsTranspositions(size), m_det_p(0), + m_l1_norm(0), m_isInitialized(false) { } @@ -277,6 +288,7 @@ PartialPivLU::PartialPivLU(const EigenBase& matrix) m_p(matrix.rows()), m_rowsTranspositions(matrix.rows()), m_det_p(0), + m_l1_norm(0), m_isInitialized(false) { compute(matrix.derived()); @@ -467,6 +479,7 @@ PartialPivLU& PartialPivLU::compute(const EigenBase::highest()); m_lu = matrix.derived(); + m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff(); eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices"); const Index size = matrix.rows(); @@ -484,7 +497,7 @@ PartialPivLU& PartialPivLU::compute(const EigenBase -typename internal::traits::Scalar PartialPivLU::determinant() const +typename PartialPivLU::Scalar PartialPivLU::determinant() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); return Scalar(m_det_p) * m_lu.diagonal().prod(); diff --git a/test/lu.cpp b/test/lu.cpp index f14435114..31991520d 100644 --- a/test/lu.cpp +++ b/test/lu.cpp @@ -11,6 +11,11 @@ #include using namespace std; +template +typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { + return m.cwiseAbs().colwise().sum().maxCoeff(); +} + template void lu_non_invertible() { typedef typename 
MatrixType::Index Index; @@ -143,7 +148,13 @@ template void lu_invertible() m3 = MatrixType::Random(size,size); m2 = lu.solve(m3); VERIFY_IS_APPROX(m3, m1*m2); - VERIFY_IS_APPROX(m2, lu.inverse()*m3); + MatrixType m1_inverse = lu.inverse(); + VERIFY_IS_APPROX(m2, m1_inverse*m3); + + // Test condition number estimation. + RealScalar rcond = RealScalar(1) / matrix_l1_norm(m1) / matrix_l1_norm(m1_inverse); + // Verify that the estimate is within a factor of 10 of the truth. + VERIFY(lu.rcond() > rcond / 10 && lu.rcond() < rcond * 10); // test solve with transposed lu.template _solve_impl_transposed(m3, m2); @@ -170,6 +181,7 @@ template void lu_partial_piv() PartialPivLU.h */ typedef typename MatrixType::Index Index; + typedef typename NumTraits::Real RealScalar; Index size = internal::random(1,4); MatrixType m1(size, size), m2(size, size), m3(size, size); @@ -181,7 +193,13 @@ template void lu_partial_piv() m3 = MatrixType::Random(size,size); m2 = plu.solve(m3); VERIFY_IS_APPROX(m3, m1*m2); - VERIFY_IS_APPROX(m2, plu.inverse()*m3); + MatrixType m1_inverse = plu.inverse(); + VERIFY_IS_APPROX(m2, m1_inverse*m3); + + // Test condition number estimation. + RealScalar rcond = RealScalar(1) / matrix_l1_norm(m1) / matrix_l1_norm(m1_inverse); + // Verify that the estimate is within a factor of 10 of the truth. + VERIFY(plu.rcond() > rcond / 10 && plu.rcond() < rcond * 10); // test solve with transposed plu.template _solve_impl_transposed(m3, m2); From 91414e0042779b1b9d312d9255f389e67aa38106 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Fri, 1 Apr 2016 11:58:17 -0700 Subject: [PATCH 02/14] Fix comments in ConditionEstimator and minor cleanup. --- Eigen/src/Core/ConditionEstimator.h | 119 +++++++++++++++------------- test/lu.cpp | 4 +- 2 files changed, 65 insertions(+), 58 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index ab6f59319..68e4535aa 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -14,7 +14,7 @@ namespace Eigen { namespace internal { template -struct EstimateInverseL1NormImpl {}; +struct EstimateInverseMatrixL1NormImpl {}; } // namespace internal template @@ -48,7 +48,6 @@ class ConditionEstimator { if (dec.rows() == 0) { return RealScalar(1); } - RealScalar matrix_l1_norm = matrix.cwiseAbs().colwise().sum().maxCoeff(); return rcond(MatrixL1Norm(matrix), dec); } @@ -76,42 +75,50 @@ class ConditionEstimator { if (matrix_norm == 0) { return 0; } - const RealScalar inverse_matrix_norm = EstimateInverseL1Norm(dec); + const RealScalar inverse_matrix_norm = EstimateInverseMatrixL1Norm(dec); return inverse_matrix_norm == 0 ? 0 : (1 / inverse_matrix_norm) / matrix_norm; } - /* - * Fast algorithm for computing a lower bound estimate on the L1 norm of - * the inverse of the matrix using at most 10 calls to the solve method on its - * decomposition. This is an implementation of Algorithm 4.1 in - * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf - * The most common usage of this algorithm is in estimating the condition - * number ||A||_1 * ||A^{-1}||_1 of a matrix A. While ||A||_1 can be computed - * directly in O(dims^2) operations (see MatrixL1Norm() below), while - * there is no cheap closed-form expression for ||A^{-1}||_1. - * Given a decompostion of A, this algorithm estimates ||A^{-1}|| in O(dims^2) - * operations. 
This is done by providing operators that use the decomposition - * to solve systems of the form A x = b or A^* z = c by back-substitution, - * each costing O(dims^2) operations. Since at most 10 calls are performed, - * the total cost is O(dims^2), as opposed to O(dims^3) if the inverse matrix - * B^{-1} was formed explicitly. - */ - static RealScalar EstimateInverseL1Norm(const Decomposition& dec) { + /** + * \returns an estimate of ||inv(matrix)||_1 given a decomposition of + * matrix that implements .solve() and .adjoint().solve() methods. + * + * The method implements Algorithms 4.1 and 5.1 from + * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf + * which also forms the basis for the condition number estimators in + * LAPACK. Since at most 10 calls to the solve method of dec are + * performed, the total cost is O(dims^2), as opposed to O(dims^3) + * needed to compute the inverse matrix explicitly. + * + * The most common usage is in estimating the condition number + * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be + * computed directly in O(n^2) operations. + */ + static RealScalar EstimateInverseMatrixL1Norm(const Decomposition& dec) { eigen_assert(dec.rows() == dec.cols()); - const int n = dec.rows(); - if (n == 0) { + if (dec.rows() == 0) { return 0; } - return internal::EstimateInverseL1NormImpl< + return internal::EstimateInverseMatrixL1NormImpl< Decomposition, NumTraits::IsComplex>::compute(dec); } + + /** + * \returns the induced matrix l1-norm + * ||matrix||_1 = sup ||matrix * v||_1 / ||v||_1, which is equal to + * the greatest absolute column sum. + */ + inline static Scalar MatrixL1Norm(const MatrixType& matrix) { + return matrix.cwiseAbs().colwise().sum().maxCoeff(); + } }; namespace internal { + // Partial specialization for real matrices. template -struct EstimateInverseL1NormImpl { +struct EstimateInverseMatrixL1NormImpl { typedef typename Decomposition::MatrixType MatrixType; typedef typename internal::traits::Scalar Scalar; typedef typename internal::plain_col_type::type Vector; @@ -130,8 +137,9 @@ struct EstimateInverseL1NormImpl { if (n == 1) { return lower_bound; } - // lower_bound is a lower bound on ||inv(A)||_1 = sup_v ||inv(A) v||_1 / - // ||v||_1 and is the objective maximized by the ("super-") gradient ascent + // lower_bound is a lower bound on + // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 + // and is the objective maximized by the ("super-") gradient ascent // algorithm. // Basic idea: We know that the optimum is achieved at one of the simplices // v = e_i, so in each iteration we follow a super-gradient to move towards @@ -143,8 +151,8 @@ struct EstimateInverseL1NormImpl { int v_max_abs_index = -1; int old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { - // argmax |inv(A)^T * sign_vector| - v = dec.transpose().solve(sign_vector); + // argmax |inv(matrix)^T * sign_vector| + v = dec.adjoint().solve(sign_vector); v.cwiseAbs().maxCoeff(&v_max_abs_index); if (v_max_abs_index == old_v_max_abs_index) { // Break if the solution stagnated. @@ -153,7 +161,7 @@ struct EstimateInverseL1NormImpl { // Move to the new simplex e_j, where j = v_max_abs_index. v.setZero(); v[v_max_abs_index] = 1; - v = dec.solve(v); // v = inv(A) * e_j. + v = dec.solve(v); // v = inv(matrix) * e_j. lower_bound = VectorL1Norm(v); if (lower_bound <= old_lower_bound) { // Break if the gradient step did not increase the lower_bound. 
@@ -168,17 +176,16 @@ struct EstimateInverseL1NormImpl { old_v_max_abs_index = v_max_abs_index; old_lower_bound = lower_bound; } - // The following calculates an independent estimate of ||A||_1 by - // multiplying - // A by a vector with entries of slowly increasing magnitude and alternating - // sign: v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. This - // improvement - // to Hager's algorithm above is due to Higham. It was added to make the - // algorithm more robust in certain corner cases where large elements in - // the matrix might otherwise escape detection due to exact cancellation - // (especially when op and op_adjoint correspond to a sequence of - // backsubstitutions and permutations), which could cause Hager's algorithm - // to vastly underestimate ||A||_1. + // The following calculates an independent estimate of ||matrix||_1 by + // multiplying matrix by a vector with entries of slowly increasing + // magnitude and alternating sign: + // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. + // This improvement to Hager's algorithm above is due to Higham. It was + // added to make the algorithm more robust in certain corner cases where + // large elements in the matrix might otherwise escape detection due to + // exact cancellation (especially when op and op_adjoint correspond to a + // sequence of backsubstitutions and permutations), which could cause + // Hager's algorithm to vastly underestimate ||matrix||_1. Scalar alternating_sign = 1; for (int i = 0; i < n; ++i) { v[i] = alternating_sign * static_cast(1) + @@ -194,7 +201,7 @@ struct EstimateInverseL1NormImpl { // Partial specialization for complex matrices. template -struct EstimateInverseL1NormImpl { +struct EstimateInverseMatrixL1NormImpl { typedef typename Decomposition::MatrixType MatrixType; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; @@ -216,8 +223,9 @@ struct EstimateInverseL1NormImpl { if (n == 1) { return lower_bound; } - // lower_bound is a lower bound on ||inv(A)||_1 = sup_v ||inv(A) v||_1 / - // ||v||_1 and is the objective maximized by the ("super-") gradient ascent + // lower_bound is a lower bound on + // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 + // and is the objective maximized by the ("super-") gradient ascent // algorithm. // Basic idea: We know that the optimum is achieved at one of the simplices // v = e_i, so in each iteration we follow a super-gradient to move towards @@ -226,7 +234,7 @@ struct EstimateInverseL1NormImpl { int v_max_abs_index = -1; int old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { - // argmax |inv(A)^* * sign_vector| + // argmax |inv(matrix)^* * sign_vector| RealVector abs_v = v.cwiseAbs(); const Vector psi = (abs_v.array() == 0).select(v.cwiseQuotient(abs_v), ones); @@ -240,7 +248,7 @@ struct EstimateInverseL1NormImpl { // Move to the new simplex e_j, where j = v_max_abs_index. v.setZero(); v[v_max_abs_index] = 1; - v = dec.solve(v); // v = inv(A) * e_j. + v = dec.solve(v); // v = inv(matrix) * e_j. lower_bound = VectorL1Norm(v); if (lower_bound <= old_lower_bound) { // Break if the gradient step did not increase the lower_bound. @@ -249,17 +257,16 @@ struct EstimateInverseL1NormImpl { old_v_max_abs_index = v_max_abs_index; old_lower_bound = lower_bound; } - // The following calculates an independent estimate of ||A||_1 by - // multiplying - // A by a vector with entries of slowly increasing magnitude and alternating - // sign: v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. 
This - // improvement - // to Hager's algorithm above is due to Higham. It was added to make the - // algorithm more robust in certain corner cases where large elements in - // the matrix might otherwise escape detection due to exact cancellation - // (especially when op and op_adjoint correspond to a sequence of - // backsubstitutions and permutations), which could cause Hager's algorithm - // to vastly underestimate ||A||_1. + // The following calculates an independent estimate of ||matrix||_1 by + // multiplying matrix by a vector with entries of slowly increasing + // magnitude and alternating sign: + // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. + // This improvement to Hager's algorithm above is due to Higham. It was + // added to make the algorithm more robust in certain corner cases where + // large elements in the matrix might otherwise escape detection due to + // exact cancellation (especially when op and op_adjoint correspond to a + // sequence of backsubstitutions and permutations), which could cause + // Hager's algorithm to vastly underestimate ||matrix||_1. RealScalar alternating_sign = 1; for (int i = 0; i < n; ++i) { v[i] = alternating_sign * static_cast(1) + diff --git a/test/lu.cpp b/test/lu.cpp index 31991520d..9e8059f58 100644 --- a/test/lu.cpp +++ b/test/lu.cpp @@ -152,7 +152,7 @@ template void lu_invertible() VERIFY_IS_APPROX(m2, m1_inverse*m3); // Test condition number estimation. - RealScalar rcond = RealScalar(1) / matrix_l1_norm(m1) / matrix_l1_norm(m1_inverse); + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); // Verify that the estimate is within a factor of 10 of the truth. VERIFY(lu.rcond() > rcond / 10 && lu.rcond() < rcond * 10); @@ -197,7 +197,7 @@ template void lu_partial_piv() VERIFY_IS_APPROX(m2, m1_inverse*m3); // Test condition number estimation. - RealScalar rcond = RealScalar(1) / matrix_l1_norm(m1) / matrix_l1_norm(m1_inverse); + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); // Verify that the estimate is within a factor of 10 of the truth. VERIFY(plu.rcond() > rcond / 10 && plu.rcond() < rcond * 10); From fb8dccc23e5f717319c230c2701a5fbf1d3c3975 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Fri, 1 Apr 2016 12:48:18 -0700 Subject: [PATCH 03/14] Replace "inline static" with "static inline" for consistency. --- Eigen/src/Core/ConditionEstimator.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index 68e4535aa..b65306d56 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -109,7 +109,7 @@ class ConditionEstimator { * ||matrix||_1 = sup ||matrix * v||_1 / ||v||_1, which is equal to * the greatest absolute column sum. */ - inline static Scalar MatrixL1Norm(const MatrixType& matrix) { + static inline Scalar MatrixL1Norm(const MatrixType& matrix) { return matrix.cwiseAbs().colwise().sum().maxCoeff(); } }; @@ -124,7 +124,7 @@ struct EstimateInverseMatrixL1NormImpl { typedef typename internal::plain_col_type::type Vector; // Shorthand for vector L1 norm in Eigen. - inline static Scalar VectorL1Norm(const Vector& v) { + static inline Scalar VectorL1Norm(const Vector& v) { return v.template lpNorm<1>(); } @@ -210,7 +210,7 @@ struct EstimateInverseMatrixL1NormImpl { RealVector; // Shorthand for vector L1 norm in Eigen. 
- inline static RealScalar VectorL1Norm(const Vector& v) { + static inline RealScalar VectorL1Norm(const Vector& v) { return v.template lpNorm<1>(); } From f54137606eb6d68cbafd10d90013e254b26137ed Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Fri, 1 Apr 2016 16:19:45 -0700 Subject: [PATCH 04/14] Add condition estimation to Cholesky (LLT) factorization. --- Eigen/src/Cholesky/LLT.h | 53 +++++++++++++++++++++++------ Eigen/src/Core/ConditionEstimator.h | 37 +++++++++++++++----- test/cholesky.cpp | 47 ++++++++++++++++++------- test/lu.cpp | 10 +++--- 4 files changed, 111 insertions(+), 36 deletions(-) diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index 74cf5bfe1..b55c5bebf 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -10,7 +10,7 @@ #ifndef EIGEN_LLT_H #define EIGEN_LLT_H -namespace Eigen { +namespace Eigen { namespace internal{ template struct LLT_Traits; @@ -40,7 +40,7 @@ template struct LLT_Traits; * * Example: \include LLT_example.cpp * Output: \verbinclude LLT_example.out - * + * * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT */ /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH) @@ -135,6 +135,16 @@ template class LLT template LLT& compute(const EigenBase& matrix); + /** \returns an estimate of the reciprocal condition number of the matrix of + * which *this is the Cholesky decomposition. + */ + RealScalar rcond() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative"); + return ConditionEstimator, true >::rcond(m_l1_norm, *this); + } + /** \returns the LLT decomposition matrix * * TODO: document the storage layout @@ -164,7 +174,7 @@ template class LLT template LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); - + #ifndef EIGEN_PARSED_BY_DOXYGEN template EIGEN_DEVICE_FUNC @@ -172,17 +182,18 @@ template class LLT #endif protected: - + static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } - + /** \internal * Used to compute and store L * The strict upper part is not used and even not initialized. */ MatrixType m_matrix; + RealScalar m_l1_norm; bool m_isInitialized; ComputationInfo m_info; }; @@ -268,7 +279,7 @@ template struct llt_inplace static Index unblocked(MatrixType& mat) { using std::sqrt; - + eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); for(Index k = 0; k < size; ++k) @@ -328,7 +339,7 @@ template struct llt_inplace return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } }; - + template struct llt_inplace { typedef typename NumTraits::Real RealScalar; @@ -387,12 +398,32 @@ template LLT& LLT::compute(const EigenBase& a) { check_template_parameters(); - + eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix.resize(size, size); m_matrix = a.derived(); + // Compute matrix L1 norm = max abs column sum. 
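+ // Only the triangle selected by _UpLo is stored, so each column sum of the
+ // full self-adjoint matrix combines the stored part of the column with the
+ // mirrored entries taken from the corresponding row.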
+ m_l1_norm = RealScalar(0); + if (_UpLo == Lower) { + for (int col = 0; col < size; ++col) { + const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).cwiseAbs().sum() + + m_matrix.row(col).tail(col).cwiseAbs().sum(); + if (abs_col_sum > m_l1_norm) { + m_l1_norm = abs_col_sum; + } + } + } else { + for (int col = 0; col < a.cols(); ++col) { + const RealScalar abs_col_sum = m_matrix.col(col).tail(col).cwiseAbs().sum() + + m_matrix.row(col).tail(size - col).cwiseAbs().sum(); + if (abs_col_sum > m_l1_norm) { + m_l1_norm = abs_col_sum; + } + } + } + m_isInitialized = true; bool ok = Traits::inplace_decomposition(m_matrix); m_info = ok ? Success : NumericalIssue; @@ -419,7 +450,7 @@ LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, c return *this; } - + #ifndef EIGEN_PARSED_BY_DOXYGEN template template @@ -431,7 +462,7 @@ void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const #endif /** \internal use x = llt_object.solve(x); - * + * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. @@ -483,7 +514,7 @@ SelfAdjointView::llt() const return LLT(m_matrix); } #endif // __CUDACC__ - + } // end namespace Eigen #endif // EIGEN_LLT_H diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index b65306d56..dca3da417 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -13,11 +13,11 @@ namespace Eigen { namespace internal { -template +template struct EstimateInverseMatrixL1NormImpl {}; } // namespace internal -template +template class ConditionEstimator { public: typedef typename Decomposition::MatrixType MatrixType; @@ -101,7 +101,8 @@ class ConditionEstimator { return 0; } return internal::EstimateInverseMatrixL1NormImpl< - Decomposition, NumTraits::IsComplex>::compute(dec); + Decomposition, IsSelfAdjoint, + NumTraits::IsComplex != 0>::compute(dec); } /** @@ -116,9 +117,27 @@ class ConditionEstimator { namespace internal { +template +struct solve_helper { + static inline Vector solve_adjoint(const Decomposition& dec, + const Vector& v) { + return dec.adjoint().solve(v); + } +}; + +// Partial specialization for self_adjoint matrices. +template +struct solve_helper { + static inline Vector solve_adjoint(const Decomposition& dec, + const Vector& v) { + return dec.solve(v); + } +}; + + // Partial specialization for real matrices. -template -struct EstimateInverseMatrixL1NormImpl { +template +struct EstimateInverseMatrixL1NormImpl { typedef typename Decomposition::MatrixType MatrixType; typedef typename internal::traits::Scalar Scalar; typedef typename internal::plain_col_type::type Vector; @@ -152,7 +171,7 @@ struct EstimateInverseMatrixL1NormImpl { int old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { // argmax |inv(matrix)^T * sign_vector| - v = dec.adjoint().solve(sign_vector); + v = solve_helper::solve_adjoint(dec, sign_vector); v.cwiseAbs().maxCoeff(&v_max_abs_index); if (v_max_abs_index == old_v_max_abs_index) { // Break if the solution stagnated. @@ -200,8 +219,8 @@ struct EstimateInverseMatrixL1NormImpl { }; // Partial specialization for complex matrices. 
-template -struct EstimateInverseMatrixL1NormImpl { +template +struct EstimateInverseMatrixL1NormImpl { typedef typename Decomposition::MatrixType MatrixType; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; @@ -238,7 +257,7 @@ struct EstimateInverseMatrixL1NormImpl { RealVector abs_v = v.cwiseAbs(); const Vector psi = (abs_v.array() == 0).select(v.cwiseQuotient(abs_v), ones); - v = dec.adjoint().solve(psi); + v = solve_helper::solve_adjoint(dec, psi); const RealVector z = v.real(); z.cwiseAbs().maxCoeff(&v_max_abs_index); if (v_max_abs_index == old_v_max_abs_index) { diff --git a/test/cholesky.cpp b/test/cholesky.cpp index d652af5bf..8a21cdbd5 100644 --- a/test/cholesky.cpp +++ b/test/cholesky.cpp @@ -17,6 +17,12 @@ #include #include +template +typename MatrixType::RealScalar matrix_l1_norm(const MatrixType& m) { + MatrixType symm = m.template selfadjointView(); + return symm.cwiseAbs().colwise().sum().maxCoeff(); +} + template class CholType> void test_chol_update(const MatrixType& symm) { typedef typename MatrixType::Scalar Scalar; @@ -77,7 +83,7 @@ template void cholesky(const MatrixType& m) { SquareMatrixType symmUp = symm.template triangularView(); SquareMatrixType symmLo = symm.template triangularView(); - + LLT chollo(symmLo); VERIFY_IS_APPROX(symm, chollo.reconstructedMatrix()); vecX = chollo.solve(vecB); @@ -85,6 +91,14 @@ template void cholesky(const MatrixType& m) matX = chollo.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. + const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols)); + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / + matrix_l1_norm(symmLo_inverse); + RealScalar rcond_est = chollo.rcond(); + VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + // test the upper mode LLT cholup(symmUp); VERIFY_IS_APPROX(symm, cholup.reconstructedMatrix()); @@ -93,6 +107,15 @@ template void cholesky(const MatrixType& m) matX = cholup.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. 
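+ // For verification purposes the inverse is formed explicitly to obtain a
+ // reference value for rcond.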
+ const MatrixType symmUp_inverse = cholup.solve(MatrixType::Identity(rows,cols)); + rcond = (RealScalar(1) / matrix_l1_norm(symmUp)) / + matrix_l1_norm(symmUp_inverse); + rcond_est = cholup.rcond(); + VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + + MatrixType neg = -symmLo; chollo.compute(neg); VERIFY(chollo.info()==NumericalIssue); @@ -101,7 +124,7 @@ template void cholesky(const MatrixType& m) VERIFY_IS_APPROX(MatrixType(chollo.matrixU().transpose().conjugate()), MatrixType(chollo.matrixL())); VERIFY_IS_APPROX(MatrixType(cholup.matrixL().transpose().conjugate()), MatrixType(cholup.matrixU())); VERIFY_IS_APPROX(MatrixType(cholup.matrixU().transpose().conjugate()), MatrixType(cholup.matrixL())); - + // test some special use cases of SelfCwiseBinaryOp: MatrixType m1 = MatrixType::Random(rows,cols), m2(rows,cols); m2 = m1; @@ -167,7 +190,7 @@ template void cholesky(const MatrixType& m) // restore if(sign == -1) symm = -symm; - + // check matrices coming from linear constraints with Lagrange multipliers if(rows>=3) { @@ -183,7 +206,7 @@ template void cholesky(const MatrixType& m) vecX = ldltlo.solve(vecB); VERIFY_IS_APPROX(A * vecX, vecB); } - + // check non-full rank matrices if(rows>=3) { @@ -199,7 +222,7 @@ template void cholesky(const MatrixType& m) vecX = ldltlo.solve(vecB); VERIFY_IS_APPROX(A * vecX, vecB); } - + // check matrices with a wide spectrum if(rows>=3) { @@ -225,7 +248,7 @@ template void cholesky(const MatrixType& m) { RealScalar large_tol = std::sqrt(test_precision()); VERIFY((A * vecX).isApprox(vecB, large_tol)); - + ++g_test_level; VERIFY_IS_APPROX(A * vecX,vecB); --g_test_level; @@ -314,14 +337,14 @@ template void cholesky_bug241(const MatrixType& m) } // LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if matrix is diagonal. -// This test checks that LDLT reports correctly that matrix is indefinite. +// This test checks that LDLT reports correctly that matrix is indefinite. // See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736 template void cholesky_definiteness(const MatrixType& m) { eigen_assert(m.rows() == 2 && m.cols() == 2); MatrixType mat; LDLT ldlt(2); - + { mat << 1, 0, 0, -1; ldlt.compute(mat); @@ -384,11 +407,11 @@ void test_cholesky() CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) ); CALL_SUBTEST_4( cholesky(Matrix3f()) ); CALL_SUBTEST_5( cholesky(Matrix4d()) ); - - s = internal::random(1,EIGEN_TEST_MAX_SIZE); + + s = internal::random(1,EIGEN_TEST_MAX_SIZE); CALL_SUBTEST_2( cholesky(MatrixXd(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) - + s = internal::random(1,EIGEN_TEST_MAX_SIZE/2); CALL_SUBTEST_6( cholesky_cplx(MatrixXcd(s,s)) ); TEST_SET_BUT_UNUSED_VARIABLE(s) @@ -402,6 +425,6 @@ void test_cholesky() // Test problem size constructors CALL_SUBTEST_9( LLT(10) ); CALL_SUBTEST_9( LDLT(10) ); - + TEST_SET_BUT_UNUSED_VARIABLE(nb_temporaries) } diff --git a/test/lu.cpp b/test/lu.cpp index 9e8059f58..53b3fcee4 100644 --- a/test/lu.cpp +++ b/test/lu.cpp @@ -151,10 +151,11 @@ template void lu_invertible() MatrixType m1_inverse = lu.inverse(); VERIFY_IS_APPROX(m2, m1_inverse*m3); - // Test condition number estimation. + // Verify that the estimated condition number is within a factor of 10 of the + // truth. RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); - // Verify that the estimate is within a factor of 10 of the truth. 
- VERIFY(lu.rcond() > rcond / 10 && lu.rcond() < rcond * 10); + const RealScalar rcond_est = lu.rcond(); + VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); // test solve with transposed lu.template _solve_impl_transposed(m3, m2); @@ -199,7 +200,8 @@ template void lu_partial_piv() // Test condition number estimation. RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); // Verify that the estimate is within a factor of 10 of the truth. - VERIFY(plu.rcond() > rcond / 10 && plu.rcond() < rcond * 10); + const RealScalar rcond_est = plu.rcond(); + VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); // test solve with transposed plu.template _solve_impl_transposed(m3, m2); From 9d51f7c457671bfcbab9a1d62d416e1a83e6ad8a Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Fri, 1 Apr 2016 16:48:38 -0700 Subject: [PATCH 05/14] Add rcond method to LDLT. --- Eigen/src/Cholesky/LDLT.h | 54 ++++++++++++++++++++++++++++++--------- test/cholesky.cpp | 17 ++++++++++++ 2 files changed, 59 insertions(+), 12 deletions(-) diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index c3cc3746c..9753c84d8 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -13,7 +13,7 @@ #ifndef EIGEN_LDLT_H #define EIGEN_LDLT_H -namespace Eigen { +namespace Eigen { namespace internal { template struct LDLT_Traits; @@ -73,11 +73,11 @@ template class LDLT * The default constructor is useful in cases in which the user intends to * perform decompositions via LDLT::compute(const MatrixType&). */ - LDLT() - : m_matrix(), - m_transpositions(), + LDLT() + : m_matrix(), + m_transpositions(), m_sign(internal::ZeroSign), - m_isInitialized(false) + m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation @@ -168,7 +168,7 @@ template class LDLT * \note_about_checking_solutions * * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ - * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, + * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function @@ -192,6 +192,15 @@ template class LDLT template LDLT& compute(const EigenBase& matrix); + /** \returns an estimate of the reciprocal condition number of the matrix of + * which *this is the LDLT decomposition. + */ + RealScalar rcond() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return ConditionEstimator, true >::rcond(m_l1_norm, *this); + } + template LDLT& rankUpdate(const MatrixBase& w, const RealScalar& alpha=1); @@ -220,7 +229,7 @@ template class LDLT eigen_assert(m_isInitialized && "LDLT is not initialized."); return Success; } - + #ifndef EIGEN_PARSED_BY_DOXYGEN template EIGEN_DEVICE_FUNC @@ -228,7 +237,7 @@ template class LDLT #endif protected: - + static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); @@ -241,6 +250,7 @@ template class LDLT * is not stored), and the diagonal entries correspond to D. 
*/ MatrixType m_matrix; + RealScalar m_l1_norm; TranspositionType m_transpositions; TmpMatrixType m_temporary; internal::SignMatrix m_sign; @@ -314,7 +324,7 @@ template<> struct ldlt_inplace if(rs>0) A21.noalias() -= A20 * temp.head(k); } - + // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot // was smaller than the cutoff value. However, since LDLT is not rank-revealing // we should only make sure that we do not introduce INF or NaN values. @@ -433,12 +443,32 @@ template LDLT& LDLT::compute(const EigenBase& a) { check_template_parameters(); - + eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix = a.derived(); + // Compute matrix L1 norm = max abs column sum. + m_l1_norm = RealScalar(0); + if (_UpLo == Lower) { + for (int col = 0; col < size; ++col) { + const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).cwiseAbs().sum() + + m_matrix.row(col).tail(col).cwiseAbs().sum(); + if (abs_col_sum > m_l1_norm) { + m_l1_norm = abs_col_sum; + } + } + } else { + for (int col = 0; col < a.cols(); ++col) { + const RealScalar abs_col_sum = m_matrix.col(col).tail(col).cwiseAbs().sum() + + m_matrix.row(col).tail(size - col).cwiseAbs().sum(); + if (abs_col_sum > m_l1_norm) { + m_l1_norm = abs_col_sum; + } + } + } + m_transpositions.resize(size); m_isInitialized = false; m_temporary.resize(size); @@ -466,7 +496,7 @@ LDLT& LDLT::rankUpdate(const MatrixBase::_solve_impl(const RhsType &rhs, DstType &dst) cons // diagonal element is not well justified and leads to numerical issues in some cases. // Moreover, Lapack's xSYTRS routines use 0 for the tolerance. RealScalar tolerance = RealScalar(1) / NumTraits::highest(); - + for (Index i = 0; i < vecD.size(); ++i) { if(abs(vecD(i)) > tolerance) diff --git a/test/cholesky.cpp b/test/cholesky.cpp index 8a21cdbd5..148a0b388 100644 --- a/test/cholesky.cpp +++ b/test/cholesky.cpp @@ -160,6 +160,15 @@ template void cholesky(const MatrixType& m) matX = ldltlo.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. + const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols)); + RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / + matrix_l1_norm(symmLo_inverse); + RealScalar rcond_est = ldltlo.rcond(); + VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + + LDLT ldltup(symmUp); VERIFY_IS_APPROX(symm, ldltup.reconstructedMatrix()); vecX = ldltup.solve(vecB); @@ -167,6 +176,14 @@ template void cholesky(const MatrixType& m) matX = ldltup.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. + const MatrixType symmUp_inverse = ldltup.solve(MatrixType::Identity(rows,cols)); + rcond = (RealScalar(1) / matrix_l1_norm(symmUp)) / + matrix_l1_norm(symmUp_inverse); + rcond_est = ldltup.rcond(); + VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); + VERIFY_IS_APPROX(MatrixType(ldltlo.matrixL().transpose().conjugate()), MatrixType(ldltlo.matrixU())); VERIFY_IS_APPROX(MatrixType(ldltlo.matrixU().transpose().conjugate()), MatrixType(ldltlo.matrixL())); VERIFY_IS_APPROX(MatrixType(ldltup.matrixL().transpose().conjugate()), MatrixType(ldltup.matrixU())); From 86e0ed81f8db5a0c9562b62a67a9ba60ec58dec0 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Mon, 4 Apr 2016 14:20:01 -0700 Subject: [PATCH 06/14] Addresses comments on Eigen pull request PR-174. * Get rid of code-duplication for real vs. 
complex matrices. * Fix flipped arguments to select. * Make the condition estimation functions free functions. * Use Vector::Unit() to generate canonical unit vectors. * Misc. cleanup. --- Eigen/src/Cholesky/LDLT.h | 12 +- Eigen/src/Cholesky/LLT.h | 12 +- Eigen/src/Core/ConditionEstimator.h | 401 +++++++++++----------------- Eigen/src/LU/FullPivLU.h | 2 +- Eigen/src/LU/PartialPivLU.h | 2 +- test/cholesky.cpp | 8 +- test/lu.cpp | 7 +- 7 files changed, 177 insertions(+), 267 deletions(-) diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 9753c84d8..80f18977c 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -198,7 +198,7 @@ template class LDLT RealScalar rcond() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); - return ConditionEstimator, true >::rcond(m_l1_norm, *this); + return ReciprocalConditionNumberEstimate(m_l1_norm, *this); } template @@ -216,6 +216,12 @@ template class LDLT MatrixType reconstructedMatrix() const; + /** \returns the decomposition itself to allow generic code to do + * ldlt.transpose().solve(rhs). + */ + const LDLT& transpose() const { return *this; }; + const LDLT& adjoint() const { return *this; }; + inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } @@ -454,14 +460,14 @@ LDLT& LDLT::compute(const EigenBase m_l1_norm) { m_l1_norm = abs_col_sum; } } } else { for (int col = 0; col < a.cols(); ++col) { - const RealScalar abs_col_sum = m_matrix.col(col).tail(col).cwiseAbs().sum() + + const RealScalar abs_col_sum = m_matrix.col(col).head(col).cwiseAbs().sum() + m_matrix.row(col).tail(size - col).cwiseAbs().sum(); if (abs_col_sum > m_l1_norm) { m_l1_norm = abs_col_sum; diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index b55c5bebf..94da1d52d 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -142,7 +142,7 @@ template class LLT { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative"); - return ConditionEstimator, true >::rcond(m_l1_norm, *this); + return ReciprocalConditionNumberEstimate(m_l1_norm, *this); } /** \returns the LLT decomposition matrix @@ -169,6 +169,12 @@ template class LLT return m_info; } + /** \returns the decomposition itself to allow generic code to do + * llt.transpose().solve(rhs). 
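+ *
+ * For a self-adjoint matrix the adjoint system coincides with the original
+ * one (and with the transposed system in the real case), so the
+ * decomposition can simply be reused.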
+ */ + const LLT& transpose() const { return *this; }; + const LLT& adjoint() const { return *this; }; + inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } @@ -409,14 +415,14 @@ LLT& LLT::compute(const EigenBase if (_UpLo == Lower) { for (int col = 0; col < size; ++col) { const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).cwiseAbs().sum() + - m_matrix.row(col).tail(col).cwiseAbs().sum(); + m_matrix.row(col).head(col).cwiseAbs().sum(); if (abs_col_sum > m_l1_norm) { m_l1_norm = abs_col_sum; } } } else { for (int col = 0; col < a.cols(); ++col) { - const RealScalar abs_col_sum = m_matrix.col(col).tail(col).cwiseAbs().sum() + + const RealScalar abs_col_sum = m_matrix.col(col).head(col).cwiseAbs().sum() + m_matrix.row(col).tail(size - col).cwiseAbs().sum(); if (abs_col_sum > m_l1_norm) { m_l1_norm = abs_col_sum; diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index dca3da417..12b4ae648 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -13,19 +13,35 @@ namespace Eigen { namespace internal { -template -struct EstimateInverseMatrixL1NormImpl {}; +template +inline typename MatrixType::RealScalar MatrixL1Norm(const MatrixType& matrix) { + return matrix.cwiseAbs().colwise().sum().maxCoeff(); +} + +template +inline typename Vector::RealScalar VectorL1Norm(const Vector& v) { + return v.template lpNorm<1>(); +} + +template +struct SignOrUnity { + static inline Vector run(const Vector& v) { + const RealVector v_abs = v.cwiseAbs(); + return (v_abs.array() == 0).select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); + } +}; + +// Partial specialization to avoid elementwise division for real vectors. +template +struct SignOrUnity { + static inline Vector run(const Vector& v) { + return (v.array() < 0).select(-Vector::Ones(v.size()), Vector::Ones(v.size())); + } +}; + } // namespace internal -template -class ConditionEstimator { - public: - typedef typename Decomposition::MatrixType MatrixType; - typedef typename internal::traits::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename internal::plain_col_type::type Vector; - - /** \class ConditionEstimator +/** \class ConditionEstimator * \ingroup Core_Module * * \brief Condition number estimator. @@ -41,265 +57,148 @@ class ConditionEstimator { * * \sa FullPivLU, PartialPivLU. */ - static RealScalar rcond(const MatrixType& matrix, const Decomposition& dec) { - eigen_assert(matrix.rows() == dec.rows()); - eigen_assert(matrix.cols() == dec.cols()); - eigen_assert(matrix.rows() == matrix.cols()); - if (dec.rows() == 0) { - return RealScalar(1); - } - return rcond(MatrixL1Norm(matrix), dec); +template +typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( + const typename Decomposition::MatrixType& matrix, + const Decomposition& dec) { + eigen_assert(matrix.rows() == dec.rows()); + eigen_assert(matrix.cols() == dec.cols()); + eigen_assert(matrix.rows() == matrix.cols()); + if (dec.rows() == 0) { + return Decomposition::RealScalar(1); } + return ReciprocalConditionNumberEstimate(MatrixL1Norm(matrix), dec); +} - /** \class ConditionEstimator - * \ingroup Core_Module - * - * \brief Condition number estimator. - * - * Computing a decomposition of a dense matrix takes O(n^3) operations, while - * this method estimates the condition number quickly and reliably in O(n^2) - * operations. 
- * - * \returns an estimate of the reciprocal condition number - * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and - * its decomposition. Supports the following decompositions: FullPivLU, - * PartialPivLU. - * - * \sa FullPivLU, PartialPivLU. - */ - static RealScalar rcond(RealScalar matrix_norm, const Decomposition& dec) { - eigen_assert(dec.rows() == dec.cols()); - if (dec.rows() == 0) { - return 1; - } - if (matrix_norm == 0) { - return 0; - } - const RealScalar inverse_matrix_norm = EstimateInverseMatrixL1Norm(dec); - return inverse_matrix_norm == 0 ? 0 - : (1 / inverse_matrix_norm) / matrix_norm; +/** \class ConditionEstimator + * \ingroup Core_Module + * + * \brief Condition number estimator. + * + * Computing a decomposition of a dense matrix takes O(n^3) operations, while + * this method estimates the condition number quickly and reliably in O(n^2) + * operations. + * + * \returns an estimate of the reciprocal condition number + * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and + * its decomposition. Supports the following decompositions: FullPivLU, + * PartialPivLU. + * + * \sa FullPivLU, PartialPivLU. + */ +template +typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( + typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { + eigen_assert(dec.rows() == dec.cols()); + if (dec.rows() == 0) { + return 1; } - - /** - * \returns an estimate of ||inv(matrix)||_1 given a decomposition of - * matrix that implements .solve() and .adjoint().solve() methods. - * - * The method implements Algorithms 4.1 and 5.1 from - * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf - * which also forms the basis for the condition number estimators in - * LAPACK. Since at most 10 calls to the solve method of dec are - * performed, the total cost is O(dims^2), as opposed to O(dims^3) - * needed to compute the inverse matrix explicitly. - * - * The most common usage is in estimating the condition number - * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be - * computed directly in O(n^2) operations. - */ - static RealScalar EstimateInverseMatrixL1Norm(const Decomposition& dec) { - eigen_assert(dec.rows() == dec.cols()); - if (dec.rows() == 0) { - return 0; - } - return internal::EstimateInverseMatrixL1NormImpl< - Decomposition, IsSelfAdjoint, - NumTraits::IsComplex != 0>::compute(dec); + if (matrix_norm == 0) { + return 0; } + const typename Decomposition::RealScalar inverse_matrix_norm = InverseMatrixL1NormEstimate(dec); + return inverse_matrix_norm == 0 ? 0 : (1 / inverse_matrix_norm) / matrix_norm; +} - /** - * \returns the induced matrix l1-norm - * ||matrix||_1 = sup ||matrix * v||_1 / ||v||_1, which is equal to - * the greatest absolute column sum. - */ - static inline Scalar MatrixL1Norm(const MatrixType& matrix) { - return matrix.cwiseAbs().colwise().sum().maxCoeff(); - } -}; - -namespace internal { - -template -struct solve_helper { - static inline Vector solve_adjoint(const Decomposition& dec, - const Vector& v) { - return dec.adjoint().solve(v); - } -}; - -// Partial specialization for self_adjoint matrices. -template -struct solve_helper { - static inline Vector solve_adjoint(const Decomposition& dec, - const Vector& v) { - return dec.solve(v); - } -}; - - -// Partial specialization for real matrices. 
-template -struct EstimateInverseMatrixL1NormImpl { +/** + * \returns an estimate of ||inv(matrix)||_1 given a decomposition of + * matrix that implements .solve() and .adjoint().solve() methods. + * + * The method implements Algorithms 4.1 and 5.1 from + * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf + * which also forms the basis for the condition number estimators in + * LAPACK. Since at most 10 calls to the solve method of dec are + * performed, the total cost is O(dims^2), as opposed to O(dims^3) + * needed to compute the inverse matrix explicitly. + * + * The most common usage is in estimating the condition number + * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be + * computed directly in O(n^2) operations. + */ +template +typename Decomposition::RealScalar InverseMatrixL1NormEstimate( + const Decomposition& dec) { typedef typename Decomposition::MatrixType MatrixType; - typedef typename internal::traits::Scalar Scalar; + typedef typename Decomposition::Scalar Scalar; + typedef typename Decomposition::RealScalar RealScalar; typedef typename internal::plain_col_type::type Vector; + typedef typename internal::plain_col_type::type RealVector; + const bool is_complex = (NumTraits::IsComplex != 0); - // Shorthand for vector L1 norm in Eigen. - static inline Scalar VectorL1Norm(const Vector& v) { - return v.template lpNorm<1>(); + eigen_assert(dec.rows() == dec.cols()); + const int n = dec.rows(); + if (n == 0) { + return 0; } + Vector v = Vector::Ones(n) / n; + v = dec.solve(v); - static inline Scalar compute(const Decomposition& dec) { - const int n = dec.rows(); - const Vector plus = Vector::Ones(n); - Vector v = plus / n; - v = dec.solve(v); - Scalar lower_bound = VectorL1Norm(v); - if (n == 1) { - return lower_bound; - } - // lower_bound is a lower bound on - // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 - // and is the objective maximized by the ("super-") gradient ascent - // algorithm. - // Basic idea: We know that the optimum is achieved at one of the simplices - // v = e_i, so in each iteration we follow a super-gradient to move towards - // the optimal one. - Scalar old_lower_bound = lower_bound; - const Vector minus = -Vector::Ones(n); - Vector sign_vector = (v.cwiseAbs().array() == 0).select(plus, minus); - Vector old_sign_vector = sign_vector; - int v_max_abs_index = -1; - int old_v_max_abs_index = v_max_abs_index; - for (int k = 0; k < 4; ++k) { - // argmax |inv(matrix)^T * sign_vector| - v = solve_helper::solve_adjoint(dec, sign_vector); - v.cwiseAbs().maxCoeff(&v_max_abs_index); - if (v_max_abs_index == old_v_max_abs_index) { - // Break if the solution stagnated. - break; - } - // Move to the new simplex e_j, where j = v_max_abs_index. - v.setZero(); - v[v_max_abs_index] = 1; - v = dec.solve(v); // v = inv(matrix) * e_j. - lower_bound = VectorL1Norm(v); - if (lower_bound <= old_lower_bound) { - // Break if the gradient step did not increase the lower_bound. - break; - } - sign_vector = (v.array() < 0).select(plus, minus); + // lower_bound is a lower bound on + // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 + // and is the objective maximized by the ("super-") gradient ascent + // algorithm below. + RealScalar lower_bound = internal::VectorL1Norm(v); + if (n == 1) { + return lower_bound; + } + // Gradient ascent algorithm follows: We know that the optimum is achieved at + // one of the simplices v = e_i, so in each iteration we follow a + // super-gradient to move towards the optimal one. 
+ RealScalar old_lower_bound = lower_bound; + Vector sign_vector(n); + Vector old_sign_vector; + int v_max_abs_index = -1; + int old_v_max_abs_index = v_max_abs_index; + for (int k = 0; k < 4; ++k) { + sign_vector = internal::SignOrUnity::run(v); + if (k > 0 && !is_complex) { if (sign_vector == old_sign_vector) { // Break if the solution stagnated. break; } + } + // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )| + v = dec.adjoint().solve(sign_vector); + v.real().cwiseAbs().maxCoeff(&v_max_abs_index); + if (v_max_abs_index == old_v_max_abs_index) { + // Break if the solution stagnated. + break; + } + // Move to the new simplex e_j, where j = v_max_abs_index. + v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j. + lower_bound = internal::VectorL1Norm(v); + if (lower_bound <= old_lower_bound) { + // Break if the gradient step did not increase the lower_bound. + break; + } + if (!is_complex) { old_sign_vector = sign_vector; - old_v_max_abs_index = v_max_abs_index; - old_lower_bound = lower_bound; } - // The following calculates an independent estimate of ||matrix||_1 by - // multiplying matrix by a vector with entries of slowly increasing - // magnitude and alternating sign: - // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. - // This improvement to Hager's algorithm above is due to Higham. It was - // added to make the algorithm more robust in certain corner cases where - // large elements in the matrix might otherwise escape detection due to - // exact cancellation (especially when op and op_adjoint correspond to a - // sequence of backsubstitutions and permutations), which could cause - // Hager's algorithm to vastly underestimate ||matrix||_1. - Scalar alternating_sign = 1; - for (int i = 0; i < n; ++i) { - v[i] = alternating_sign * static_cast(1) + - (static_cast(i) / (static_cast(n - 1))); - alternating_sign = -alternating_sign; - } - v = dec.solve(v); - const Scalar alternate_lower_bound = - (2 * VectorL1Norm(v)) / (3 * static_cast(n)); - return numext::maxi(lower_bound, alternate_lower_bound); + old_v_max_abs_index = v_max_abs_index; + old_lower_bound = lower_bound; } -}; - -// Partial specialization for complex matrices. -template -struct EstimateInverseMatrixL1NormImpl { - typedef typename Decomposition::MatrixType MatrixType; - typedef typename internal::traits::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename internal::plain_col_type::type Vector; - typedef typename internal::plain_col_type::type - RealVector; - - // Shorthand for vector L1 norm in Eigen. - static inline RealScalar VectorL1Norm(const Vector& v) { - return v.template lpNorm<1>(); + // The following calculates an independent estimate of ||matrix||_1 by + // multiplying matrix by a vector with entries of slowly increasing + // magnitude and alternating sign: + // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. + // This improvement to Hager's algorithm above is due to Higham. It was + // added to make the algorithm more robust in certain corner cases where + // large elements in the matrix might otherwise escape detection due to + // exact cancellation (especially when op and op_adjoint correspond to a + // sequence of backsubstitutions and permutations), which could cause + // Hager's algorithm to vastly underestimate ||matrix||_1. 
+ Scalar alternating_sign = 1; + for (int i = 0; i < n; ++i) { + v[i] = alternating_sign * static_cast(1) + + (static_cast(i) / (static_cast(n - 1))); + alternating_sign = -alternating_sign; } + v = dec.solve(v); + const RealScalar alternate_lower_bound = + (2 * internal::VectorL1Norm(v)) / (3 * static_cast(n)); + return numext::maxi(lower_bound, alternate_lower_bound); +} - static inline RealScalar compute(const Decomposition& dec) { - const int n = dec.rows(); - const Vector ones = Vector::Ones(n); - Vector v = ones / n; - v = dec.solve(v); - RealScalar lower_bound = VectorL1Norm(v); - if (n == 1) { - return lower_bound; - } - // lower_bound is a lower bound on - // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 - // and is the objective maximized by the ("super-") gradient ascent - // algorithm. - // Basic idea: We know that the optimum is achieved at one of the simplices - // v = e_i, so in each iteration we follow a super-gradient to move towards - // the optimal one. - RealScalar old_lower_bound = lower_bound; - int v_max_abs_index = -1; - int old_v_max_abs_index = v_max_abs_index; - for (int k = 0; k < 4; ++k) { - // argmax |inv(matrix)^* * sign_vector| - RealVector abs_v = v.cwiseAbs(); - const Vector psi = - (abs_v.array() == 0).select(v.cwiseQuotient(abs_v), ones); - v = solve_helper::solve_adjoint(dec, psi); - const RealVector z = v.real(); - z.cwiseAbs().maxCoeff(&v_max_abs_index); - if (v_max_abs_index == old_v_max_abs_index) { - // Break if the solution stagnated. - break; - } - // Move to the new simplex e_j, where j = v_max_abs_index. - v.setZero(); - v[v_max_abs_index] = 1; - v = dec.solve(v); // v = inv(matrix) * e_j. - lower_bound = VectorL1Norm(v); - if (lower_bound <= old_lower_bound) { - // Break if the gradient step did not increase the lower_bound. - break; - } - old_v_max_abs_index = v_max_abs_index; - old_lower_bound = lower_bound; - } - // The following calculates an independent estimate of ||matrix||_1 by - // multiplying matrix by a vector with entries of slowly increasing - // magnitude and alternating sign: - // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. - // This improvement to Hager's algorithm above is due to Higham. It was - // added to make the algorithm more robust in certain corner cases where - // large elements in the matrix might otherwise escape detection due to - // exact cancellation (especially when op and op_adjoint correspond to a - // sequence of backsubstitutions and permutations), which could cause - // Hager's algorithm to vastly underestimate ||matrix||_1. 
- RealScalar alternating_sign = 1; - for (int i = 0; i < n; ++i) { - v[i] = alternating_sign * static_cast(1) + - (static_cast(i) / (static_cast(n - 1))); - alternating_sign = -alternating_sign; - } - v = dec.solve(v); - const RealScalar alternate_lower_bound = - (2 * VectorL1Norm(v)) / (3 * static_cast(n)); - return numext::maxi(lower_bound, alternate_lower_bound); - } -}; - -} // namespace internal } // namespace Eigen #endif diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h index ff0b78c35..978a54eff 100644 --- a/Eigen/src/LU/FullPivLU.h +++ b/Eigen/src/LU/FullPivLU.h @@ -237,7 +237,7 @@ template class FullPivLU inline RealScalar rcond() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); - return ConditionEstimator >::rcond(m_l1_norm, *this); + return ReciprocalConditionNumberEstimate(m_l1_norm, *this); } /** \returns the determinant of the matrix of which diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index 5d71a66d0..b22dd75fe 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -157,7 +157,7 @@ template class PartialPivLU inline RealScalar rcond() const { eigen_assert(m_isInitialized && "PartialPivLU is not initialized."); - return ConditionEstimator >::rcond(m_l1_norm, *this); + return ReciprocalConditionNumberEstimate(m_l1_norm, *this); } /** \returns the inverse of the matrix of which *this is the LU decomposition. diff --git a/test/cholesky.cpp b/test/cholesky.cpp index 148a0b388..b7abc230b 100644 --- a/test/cholesky.cpp +++ b/test/cholesky.cpp @@ -91,12 +91,12 @@ template void cholesky(const MatrixType& m) matX = chollo.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); - // Verify that the estimated condition number is within a factor of 10 of the - // truth. const MatrixType symmLo_inverse = chollo.solve(MatrixType::Identity(rows,cols)); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / matrix_l1_norm(symmLo_inverse); RealScalar rcond_est = chollo.rcond(); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); // test the upper mode @@ -160,12 +160,12 @@ template void cholesky(const MatrixType& m) matX = ldltlo.solve(matB); VERIFY_IS_APPROX(symm * matX, matB); - // Verify that the estimated condition number is within a factor of 10 of the - // truth. const MatrixType symmLo_inverse = ldltlo.solve(MatrixType::Identity(rows,cols)); RealScalar rcond = (RealScalar(1) / matrix_l1_norm(symmLo)) / matrix_l1_norm(symmLo_inverse); RealScalar rcond_est = ldltlo.rcond(); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); diff --git a/test/lu.cpp b/test/lu.cpp index 53b3fcee4..9787f4d86 100644 --- a/test/lu.cpp +++ b/test/lu.cpp @@ -151,10 +151,10 @@ template void lu_invertible() MatrixType m1_inverse = lu.inverse(); VERIFY_IS_APPROX(m2, m1_inverse*m3); - // Verify that the estimated condition number is within a factor of 10 of the - // truth. RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); const RealScalar rcond_est = lu.rcond(); + // Verify that the estimated condition number is within a factor of 10 of the + // truth. 
VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); // test solve with transposed @@ -197,10 +197,9 @@ template void lu_partial_piv() MatrixType m1_inverse = plu.inverse(); VERIFY_IS_APPROX(m2, m1_inverse*m3); - // Test condition number estimation. RealScalar rcond = (RealScalar(1) / matrix_l1_norm(m1)) / matrix_l1_norm(m1_inverse); - // Verify that the estimate is within a factor of 10 of the truth. const RealScalar rcond_est = plu.rcond(); + // Verify that the estimate is within a factor of 10 of the truth. VERIFY(rcond_est > rcond / 10 && rcond_est < rcond * 10); // test solve with transposed From 513c3729605a8d447d6f4b494851601601db70e4 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Mon, 4 Apr 2016 14:34:59 -0700 Subject: [PATCH 07/14] Fix docstrings to list all supported decompositions. --- Eigen/src/Core/ConditionEstimator.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index 12b4ae648..35ec56128 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -53,9 +53,9 @@ struct SignOrUnity { * \returns an estimate of the reciprocal condition number * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given the matrix and * its decomposition. Supports the following decompositions: FullPivLU, - * PartialPivLU. + * PartialPivLU, LDLT, and LLT. * - * \sa FullPivLU, PartialPivLU. + * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ template typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( @@ -82,9 +82,9 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( * \returns an estimate of the reciprocal condition number * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and * its decomposition. Supports the following decompositions: FullPivLU, - * PartialPivLU. + * PartialPivLU, LDLT, and LLT. * - * \sa FullPivLU, PartialPivLU. + * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ template typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( @@ -114,6 +114,10 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( * The most common usage is in estimating the condition number * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be * computed directly in O(n^2) operations. + * + * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and LLT. + * + * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ template typename Decomposition::RealScalar InverseMatrixL1NormEstimate( From 4d07064a3d357453aab7fe668065ce89a31ee4ab Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Tue, 5 Apr 2016 16:40:48 -0700 Subject: [PATCH 08/14] Fix bug in alternate lower bound calculation due to missing parentheses. Make a few expressions more concise. 
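To make the effect of the missing parentheses concrete, here is a minimal standalone sketch (not part of the patch; higham_test_vector is a made-up helper that mirrors the corrected loop in InverseMatrixL1NormEstimate) of the alternating test vector v_i = (-1)^i * (1 + i/(n-1)):

  #include <Eigen/Dense>
  #include <iostream>

  // Mirrors the corrected loop: the sign multiplies the whole term
  // 1 + i/(n-1). Assumes n >= 2 so that n-1 is nonzero.
  Eigen::VectorXd higham_test_vector(int n) {
    Eigen::VectorXd v(n);
    double alternating_sign = 1.0;
    for (int i = 0; i < n; ++i) {
      v[i] = alternating_sign * (1.0 + double(i) / double(n - 1));
      alternating_sign = -alternating_sign;
    }
    return v;
  }

  int main() {
    // Prints "1 -1.5 2". The old expression, alternating_sign * 1 + i/(n-1),
    // would have yielded "1 -0.5 2", i.e. entries of the wrong magnitude.
    std::cout << higham_test_vector(3).transpose() << std::endl;
    return 0;
  }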
--- Eigen/src/Core/ConditionEstimator.h | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index 35ec56128..9027aa2f7 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -134,8 +134,7 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( if (n == 0) { return 0; } - Vector v = Vector::Ones(n) / n; - v = dec.solve(v); + Vector v = dec.solve(Vector::Ones(n) / n); // lower_bound is a lower bound on // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 @@ -155,11 +154,9 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( int old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { sign_vector = internal::SignOrUnity::run(v); - if (k > 0 && !is_complex) { - if (sign_vector == old_sign_vector) { - // Break if the solution stagnated. - break; - } + if (k > 0 && !is_complex && sign_vector == old_sign_vector) { + // Break if the solution stagnated. + break; } // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )| v = dec.adjoint().solve(sign_vector); @@ -193,8 +190,9 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( // Hager's algorithm to vastly underestimate ||matrix||_1. Scalar alternating_sign = 1; for (int i = 0; i < n; ++i) { - v[i] = alternating_sign * static_cast(1) + - (static_cast(i) / (static_cast(n - 1))); + v[i] = alternating_sign * + (static_cast(1) + + (static_cast(i) / (static_cast(n - 1)))); alternating_sign = -alternating_sign; } v = dec.solve(v); From 0b5546d182928acb428daba0787755180da4b3fc Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Thu, 7 Apr 2016 15:49:30 -0700 Subject: [PATCH 09/14] Use lpNorm<1>() to compute l1 norms in LLT and LDLT. 
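This change is behaviour preserving: for any vector or block expression, lpNorm<1>() computes the same value as cwiseAbs().sum(). A tiny independent sketch (not part of the patch), using the same kind of column-tail segment as the LLT/LDLT loops below:

  #include <Eigen/Dense>
  #include <cassert>
  #include <cmath>

  int main() {
    const Eigen::MatrixXd m = Eigen::MatrixXd::Random(5, 5);
    // Absolute sum of the lower part of column 2, written both ways.
    const double a = m.col(2).tail(3).cwiseAbs().sum();
    const double b = m.col(2).tail(3).lpNorm<1>();
    assert(std::abs(a - b) < 1e-12);
    return 0;
  }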
--- Eigen/src/Cholesky/LDLT.h | 8 ++++---- Eigen/src/Cholesky/LLT.h | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 80f18977c..902376fd6 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -459,16 +459,16 @@ LDLT& LDLT::compute(const EigenBase() + + m_matrix.row(col).head(col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) { m_l1_norm = abs_col_sum; } } } else { for (int col = 0; col < a.cols(); ++col) { - const RealScalar abs_col_sum = m_matrix.col(col).head(col).cwiseAbs().sum() + - m_matrix.row(col).tail(size - col).cwiseAbs().sum(); + const RealScalar abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) { m_l1_norm = abs_col_sum; } diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index 94da1d52d..dc2ccd6a4 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -414,16 +414,16 @@ LLT& LLT::compute(const EigenBase m_l1_norm = RealScalar(0); if (_UpLo == Lower) { for (int col = 0; col < size; ++col) { - const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).cwiseAbs().sum() + - m_matrix.row(col).head(col).cwiseAbs().sum(); + const RealScalar abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + + m_matrix.row(col).head(col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) { m_l1_norm = abs_col_sum; } } } else { for (int col = 0; col < a.cols(); ++col) { - const RealScalar abs_col_sum = m_matrix.col(col).head(col).cwiseAbs().sum() + - m_matrix.row(col).tail(size - col).cwiseAbs().sum(); + const RealScalar abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) { m_l1_norm = abs_col_sum; } From fd872aefb3a9a0e4be08e66671494babdb921c2b Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Thu, 7 Apr 2016 16:28:44 -0700 Subject: [PATCH 10/14] Remove transpose() method from LLT and LDLT classes as it would imply conjugation. Explicitly cast constants to RealScalar in ConditionEstimator.h. --- Eigen/src/Cholesky/LDLT.h | 3 +-- Eigen/src/Cholesky/LLT.h | 3 +-- Eigen/src/Core/ConditionEstimator.h | 31 +++++++++++++++++++---------- 3 files changed, 22 insertions(+), 15 deletions(-) diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 902376fd6..d246a459c 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -217,9 +217,8 @@ template class LDLT MatrixType reconstructedMatrix() const; /** \returns the decomposition itself to allow generic code to do - * ldlt.transpose().solve(rhs). + * ldlt.adjoint().solve(rhs). */ - const LDLT& transpose() const { return *this; }; const LDLT& adjoint() const { return *this; }; inline Index rows() const { return m_matrix.rows(); } diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index dc2ccd6a4..f88afe8b5 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -170,9 +170,8 @@ template class LLT } /** \returns the decomposition itself to allow generic code to do - * llt.transpose().solve(rhs). + * llt.adjoint().solve(rhs). 
*/ - const LLT& transpose() const { return *this; }; const LLT& adjoint() const { return *this; }; inline Index rows() const { return m_matrix.rows(); } diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index 9027aa2f7..19c6102f7 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -27,7 +27,8 @@ template struct SignOrUnity { static inline Vector run(const Vector& v) { const RealVector v_abs = v.cwiseAbs(); - return (v_abs.array() == 0).select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); + return (v_abs.array() == static_cast(0)) + .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); } }; @@ -35,7 +36,8 @@ struct SignOrUnity { template struct SignOrUnity { static inline Vector run(const Vector& v) { - return (v.array() < 0).select(-Vector::Ones(v.size()), Vector::Ones(v.size())); + return (v.array() < static_cast(0)) + .select(-Vector::Ones(v.size()), Vector::Ones(v.size())); } }; @@ -65,7 +67,7 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( eigen_assert(matrix.cols() == dec.cols()); eigen_assert(matrix.rows() == matrix.cols()); if (dec.rows() == 0) { - return Decomposition::RealScalar(1); + return static_cast(1); } return ReciprocalConditionNumberEstimate(MatrixL1Norm(matrix), dec); } @@ -89,15 +91,20 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( template typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { + typedef typename Decomposition::RealScalar RealScalar; eigen_assert(dec.rows() == dec.cols()); if (dec.rows() == 0) { - return 1; + return static_cast(1); } - if (matrix_norm == 0) { - return 0; + if (matrix_norm == static_cast(0)) { + return static_cast(0); } - const typename Decomposition::RealScalar inverse_matrix_norm = InverseMatrixL1NormEstimate(dec); - return inverse_matrix_norm == 0 ? 0 : (1 / inverse_matrix_norm) / matrix_norm; + const typename Decomposition::RealScalar inverse_matrix_norm = + InverseMatrixL1NormEstimate(dec); + return (inverse_matrix_norm == static_cast(0) + ? static_cast(0) + : (static_cast(1) / inverse_matrix_norm) / + matrix_norm); } /** @@ -115,7 +122,8 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be * computed directly in O(n^2) operations. * - * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and LLT. + * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and + * LLT. * * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ @@ -126,7 +134,8 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( typedef typename Decomposition::Scalar Scalar; typedef typename Decomposition::RealScalar RealScalar; typedef typename internal::plain_col_type::type Vector; - typedef typename internal::plain_col_type::type RealVector; + typedef typename internal::plain_col_type::type + RealVector; const bool is_complex = (NumTraits::IsComplex != 0); eigen_assert(dec.rows() == dec.cols()); @@ -188,7 +197,7 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( // exact cancellation (especially when op and op_adjoint correspond to a // sequence of backsubstitutions and permutations), which could cause // Hager's algorithm to vastly underestimate ||matrix||_1. 
- Scalar alternating_sign = 1; + Scalar alternating_sign(static_cast(1)); for (int i = 0; i < n; ++i) { v[i] = alternating_sign * (static_cast(1) + From d51803a728c95bf32ff9b920db95f5106749f719 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Thu, 7 Apr 2016 16:39:48 -0700 Subject: [PATCH 11/14] Use Index instead of int for indexing and sizes. --- Eigen/src/Core/ConditionEstimator.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index 19c6102f7..d7f6ef53d 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -139,11 +139,11 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( const bool is_complex = (NumTraits::IsComplex != 0); eigen_assert(dec.rows() == dec.cols()); - const int n = dec.rows(); + const Index n = dec.rows(); if (n == 0) { return 0; } - Vector v = dec.solve(Vector::Ones(n) / n); + Vector v = dec.solve(Vector::Ones(n) / static_cast(n)); // lower_bound is a lower bound on // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 @@ -159,8 +159,8 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( RealScalar old_lower_bound = lower_bound; Vector sign_vector(n); Vector old_sign_vector; - int v_max_abs_index = -1; - int old_v_max_abs_index = v_max_abs_index; + Index v_max_abs_index = -1; + Index old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { sign_vector = internal::SignOrUnity::run(v); if (k > 0 && !is_complex && sign_vector == old_sign_vector) { @@ -198,7 +198,7 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( // sequence of backsubstitutions and permutations), which could cause // Hager's algorithm to vastly underestimate ||matrix||_1. Scalar alternating_sign(static_cast(1)); - for (int i = 0; i < n; ++i) { + for (Index i = 0; i < n; ++i) { v[i] = alternating_sign * (static_cast(1) + (static_cast(i) / (static_cast(n - 1)))); From 283c51cd5edd5d706635b0bd05593f84cb8d618a Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Thu, 7 Apr 2016 16:45:40 -0700 Subject: [PATCH 12/14] Widen short-circuiting ReciprocalConditionNumberEstimate so we don't call InverseMatrixL1NormEstimate for dec.rows() <= 1. --- Eigen/src/Core/ConditionEstimator.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index d7f6ef53d..c4c073fa2 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -66,7 +66,7 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( eigen_assert(matrix.rows() == dec.rows()); eigen_assert(matrix.cols() == dec.cols()); eigen_assert(matrix.rows() == matrix.cols()); - if (dec.rows() == 0) { + if (dec.rows() <= 1) { return static_cast(1); } return ReciprocalConditionNumberEstimate(MatrixL1Norm(matrix), dec); @@ -93,7 +93,7 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { typedef typename Decomposition::RealScalar RealScalar; eigen_assert(dec.rows() == dec.cols()); - if (dec.rows() == 0) { + if (dec.rows() <= 1) { return static_cast(1); } if (matrix_norm == static_cast(0)) { From ee6c69733aeb06942cabf3bccd12715ef0e43ecf Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Sat, 9 Apr 2016 12:45:49 -0700 Subject: [PATCH 13/14] A few tiny adjustments to short-circuit logic. 
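For reference, a quick sketch of the short-circuit behaviour after this patch (not part of the series, and it assumes a 0x0 PartialPivLU is well formed): empty and 1x1 decompositions report a reciprocal condition number of exactly 1, and an exactly zero matrix reports 0.

  #include <Eigen/Dense>
  #include <cassert>

  int main() {
    // dec.rows() == 0: returns RealScalar(1) without estimating anything.
    Eigen::MatrixXd empty(0, 0);
    assert(empty.partialPivLu().rcond() == 1.0);

    // dec.rows() == 1: the true rcond of any nonzero 1x1 matrix is 1.
    Eigen::MatrixXd one(1, 1);
    one << 5.0;
    assert(one.partialPivLu().rcond() == 1.0);

    // matrix_norm == 0: an exactly zero matrix is reported as singular.
    const Eigen::MatrixXd zero = Eigen::MatrixXd::Zero(3, 3);
    assert(zero.partialPivLu().rcond() == 0.0);
    return 0;
  }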
--- Eigen/src/Core/ConditionEstimator.h | 31 +++++++++++++---------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index c4c073fa2..5fb0cdbd5 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -65,10 +65,6 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( const Decomposition& dec) { eigen_assert(matrix.rows() == dec.rows()); eigen_assert(matrix.cols() == dec.cols()); - eigen_assert(matrix.rows() == matrix.cols()); - if (dec.rows() <= 1) { - return static_cast(1); - } return ReciprocalConditionNumberEstimate(MatrixL1Norm(matrix), dec); } @@ -93,18 +89,20 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { typedef typename Decomposition::RealScalar RealScalar; eigen_assert(dec.rows() == dec.cols()); - if (dec.rows() <= 1) { - return static_cast(1); + if (dec.rows() == 0) { + return RealScalar(1); } - if (matrix_norm == static_cast(0)) { - return static_cast(0); + if (matrix_norm == RealScalar(0)) { + return RealScalar(0); + } + if (dec.rows() == 1) { + return RealScalar(1); } const typename Decomposition::RealScalar inverse_matrix_norm = InverseMatrixL1NormEstimate(dec); - return (inverse_matrix_norm == static_cast(0) - ? static_cast(0) - : (static_cast(1) / inverse_matrix_norm) / - matrix_norm); + return (inverse_matrix_norm == RealScalar(0) + ? RealScalar(0) + : (RealScalar(1) / inverse_matrix_norm) / matrix_norm); } /** @@ -143,7 +141,7 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( if (n == 0) { return 0; } - Vector v = dec.solve(Vector::Ones(n) / static_cast(n)); + Vector v = dec.solve(Vector::Ones(n) / Scalar(n)); // lower_bound is a lower bound on // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 @@ -197,16 +195,15 @@ typename Decomposition::RealScalar InverseMatrixL1NormEstimate( // exact cancellation (especially when op and op_adjoint correspond to a // sequence of backsubstitutions and permutations), which could cause // Hager's algorithm to vastly underestimate ||matrix||_1. - Scalar alternating_sign(static_cast(1)); + Scalar alternating_sign(RealScalar(1)); for (Index i = 0; i < n; ++i) { v[i] = alternating_sign * - (static_cast(1) + - (static_cast(i) / (static_cast(n - 1)))); + (RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1)))); alternating_sign = -alternating_sign; } v = dec.solve(v); const RealScalar alternate_lower_bound = - (2 * internal::VectorL1Norm(v)) / (3 * static_cast(n)); + (2 * internal::VectorL1Norm(v)) / (3 * RealScalar(n)); return numext::maxi(lower_bound, alternate_lower_bound); } From 096e355f8e1b12d3c1f50a8f69dfd7b01def54c5 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Sat, 9 Apr 2016 15:29:56 -0700 Subject: [PATCH 14/14] Add short-circuit to avoid calling matrix norm for empty matrix. 
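As a usage sketch of the rcond() API this series converges on (the matrix size and the factor-of-10 expectation are taken from the tests, not guaranteed bounds), the O(n^2) estimate can be checked against the exact reciprocal condition number computed from the explicit inverse:

  #include <Eigen/Dense>
  #include <iostream>

  int main() {
    const int n = 50;
    const Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
    const Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);

    // Exact value via the explicit inverse: O(n^3), for validation only.
    const Eigen::MatrixXd A_inv = lu.inverse();
    const double a_norm = A.cwiseAbs().colwise().sum().maxCoeff();
    const double ainv_norm = A_inv.cwiseAbs().colwise().sum().maxCoeff();
    const double rcond_exact = 1.0 / (a_norm * ainv_norm);

    // Estimate from the existing factorization: O(n^2).
    const double rcond_est = lu.rcond();

    // The tests expect rcond_est to lie within a factor of 10 of rcond_exact.
    std::cout << "exact: " << rcond_exact << "  estimate: " << rcond_est << std::endl;
    return 0;
  }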
--- Eigen/src/Core/ConditionEstimator.h | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/Eigen/src/Core/ConditionEstimator.h b/Eigen/src/Core/ConditionEstimator.h index 5fb0cdbd5..f53c2a837 100644 --- a/Eigen/src/Core/ConditionEstimator.h +++ b/Eigen/src/Core/ConditionEstimator.h @@ -65,6 +65,7 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( const Decomposition& dec) { eigen_assert(matrix.rows() == dec.rows()); eigen_assert(matrix.cols() == dec.cols()); + if (dec.rows() == 0) return typename Decomposition::RealScalar(1); return ReciprocalConditionNumberEstimate(MatrixL1Norm(matrix), dec); } @@ -89,15 +90,9 @@ typename Decomposition::RealScalar ReciprocalConditionNumberEstimate( typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { typedef typename Decomposition::RealScalar RealScalar; eigen_assert(dec.rows() == dec.cols()); - if (dec.rows() == 0) { - return RealScalar(1); - } - if (matrix_norm == RealScalar(0)) { - return RealScalar(0); - } - if (dec.rows() == 1) { - return RealScalar(1); - } + if (dec.rows() == 0) return RealScalar(1); + if (matrix_norm == RealScalar(0)) return RealScalar(0); + if (dec.rows() == 1) return RealScalar(1); const typename Decomposition::RealScalar inverse_matrix_norm = InverseMatrixL1NormEstimate(dec); return (inverse_matrix_norm == RealScalar(0)