diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h index 2bfc5ebd9..454f9ce52 100644 --- a/Eigen/src/Core/MathFunctions.h +++ b/Eigen/src/Core/MathFunctions.h @@ -332,37 +332,45 @@ inline NewType cast(const OldType& x) * Implementation of atanh2 * ****************************************************************************/ -template -struct atanh2_default_impl +template +struct atanh2_impl { - typedef Scalar retval; - typedef typename NumTraits::Real RealScalar; - static inline Scalar run(const Scalar& x, const Scalar& y) + static inline Scalar run(const Scalar& x, const Scalar& r) + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + #if __cplusplus >= 201103L + using std::log1p; + return log1p(2 * x / (r - x)) / 2; + #else + using std::abs; + using std::log; + using std::sqrt; + Scalar z = x / r; + if (r == 0 || abs(z) > sqrt(NumTraits::epsilon())) + return log((r + x) / (r - x)) / 2; + else + return z + z*z*z / 3; + #endif + } +}; + +template +struct atanh2_impl > +{ + typedef std::complex Scalar; + static inline Scalar run(const Scalar& x, const Scalar& r) { - using std::abs; using std::log; + using std::norm; using std::sqrt; - Scalar z = x / y; - if (y == Scalar(0) || abs(z) > sqrt(NumTraits::epsilon())) - return RealScalar(0.5) * log((y + x) / (y - x)); + Scalar z = x / r; + if (r == Scalar(0) || norm(z) > NumTraits::epsilon()) + return RealScalar(0.5) * log((r + x) / (r - x)); else return z + z*z*z / RealScalar(3); } }; -template -struct atanh2_default_impl -{ - static inline Scalar run(const Scalar&, const Scalar&) - { - EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) - return Scalar(0); - } -}; - -template -struct atanh2_impl : atanh2_default_impl::IsInteger> {}; - template struct atanh2_retval { diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index 9193b6abb..fbed47233 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -162,9 +162,6 @@ template class MatrixBase #ifndef EIGEN_PARSED_BY_DOXYGEN template Derived& lazyAssign(const ProductBase& other); - - template - Derived& lazyAssign(const MatrixPowerProduct& other); #endif // not EIGEN_PARSED_BY_DOXYGEN template @@ -458,6 +455,7 @@ template class MatrixBase const MatrixSquareRootReturnValue sqrt() const; const MatrixLogarithmReturnValue log() const; const MatrixPowerReturnValue pow(const RealScalar& p) const; + const MatrixComplexPowerReturnValue pow(const std::complex& p) const; #ifdef EIGEN2_SUPPORT template diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h index d6a814586..cbedb7da4 100644 --- a/Eigen/src/Core/util/ForwardDeclarations.h +++ b/Eigen/src/Core/util/ForwardDeclarations.h @@ -271,7 +271,7 @@ template class MatrixFunctionReturnValue; template class MatrixSquareRootReturnValue; template class MatrixLogarithmReturnValue; template class MatrixPowerReturnValue; -template class MatrixPowerProduct; +template class MatrixComplexPowerReturnValue; namespace internal { template diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions index 0991817d5..0b12aaffb 100644 --- a/unsupported/Eigen/MatrixFunctions +++ b/unsupported/Eigen/MatrixFunctions @@ -13,8 +13,6 @@ #include #include -#include -#include #include #include @@ -230,22 +228,65 @@ const MatrixPowerReturnValue MatrixBase::pow(RealScalar p) con \endcode \param[in] M base of the matrix power, should be a square matrix. -\param[in] p exponent of the matrix power, should be real. +\param[in] p exponent of the matrix power. 
The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$, where exp denotes the matrix exponential, and log denotes the matrix logarithm. -The matrix \f$ M \f$ should meet the conditions to be an argument of -matrix logarithm. If \p p is not of the real scalar type of \p M, it -is casted into the real scalar type of \p M. +If \p p is complex, the scalar type of \p M should be the same +complex type as \p p. In this case, \f$ M^p \f$ simply evaluates to \f$ \exp(p \log(M)) \f$. +Therefore, the matrix \f$ M \f$ should meet the conditions to be an +argument of the matrix logarithm. -This function computes the matrix power using the Schur-Padé +If \p p is real, it is cast to the real scalar type of \p M. Then +this function computes the matrix power using the Schur-Padé algorithm as implemented by class MatrixPower. The exponent is split into integral part and fractional part, where the fractional part is in the interval \f$ (-1, 1) \f$. The main diagonal and the first super-diagonal is directly computed. +If \p M is singular with a semisimple zero eigenvalue and \p p is +positive, the Schur factor \f$ T \f$ is reordered with Givens +rotations, i.e. + +\f[ T = \left[ \begin{array}{cc} + T_1 & T_2 \\ + 0 & 0 + \end{array} \right] \f] + +where \f$ T_1 \f$ is invertible. Then \f$ T^p \f$ is given by + +\f[ T^p = \left[ \begin{array}{cc} + T_1^p & T_1^{-1} T_1^p T_2 \\ + 0 & 0 + \end{array} \right]. \f] + +\warning The fractional power of a matrix with a non-semisimple zero +eigenvalue is not well-defined. Rather than returning an inaccurate +result, an assertion fails in this case, e.g. \code +#include <unsupported/Eigen/MatrixFunctions> +#include <iostream> + +int main() +{ + Eigen::Matrix4d A; + A << 0, 0, 2, 3, + 0, 0, 4, 5, + 0, 0, 6, 7, + 0, 0, 8, 9; + std::cout << A.pow(0.37) << std::endl; + + // Setting this entry to 1 makes the zero eigenvalue non-semisimple. + A.coeffRef(0, 1) = 1; + + // The assertion fails here unless EIGEN_NO_DEBUG is defined. + std::cout << A.pow(0.37) << std::endl; + + return 0; +} +\endcode + Details of the algorithm can be found in: Nicholas J. Higham and Lijing Lin, "A Schur-Padé algorithm for fractional powers of a matrix," SIAM J. %Matrix Anal. Applic., diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h index 532896c3b..80be9fd7a 100644 --- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -12,11 +12,64 @@ #ifndef KRONECKER_TENSOR_PRODUCT_H #define KRONECKER_TENSOR_PRODUCT_H -namespace Eigen { - -template class SparseMatrix; +namespace Eigen { /*! + * \ingroup KroneckerProduct_Module + * + * \brief The base class of dense and sparse Kronecker products. + * + * \tparam Derived is the derived type. + */ +template +class KroneckerProductBase : public ReturnByValue +{ + private: + typedef typename internal::traits Traits; + typedef typename Traits::Lhs Lhs; + typedef typename Traits::Rhs Rhs; + typedef typename Traits::Scalar Scalar; + + protected: + typedef typename Traits::Index Index; + + public: + /*! \brief Constructor. */ + KroneckerProductBase(const Lhs& A, const Rhs& B) + : m_A(A), m_B(B) + {} + + inline Index rows() const { return m_A.rows() * m_B.rows(); } + inline Index cols() const { return m_A.cols() * m_B.cols(); } + + /*! + * This overrides ReturnByValue::coeff because this function is + * efficient enough. + */ + Scalar coeff(Index row, Index col) const + { + return m_A.coeff(row / m_B.rows(), col / m_B.cols()) * + m_B.coeff(row % m_B.rows(), col % m_B.cols()); + } + + /*! 
+ * This overrides ReturnByValue::coeff because this function is + * efficient enough. + */ + Scalar coeff(Index i) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size()); + } + + protected: + typename Lhs::Nested m_A; + typename Rhs::Nested m_B; +}; + +/*! + * \ingroup KroneckerProduct_Module + * * \brief Kronecker tensor product helper class for dense matrices * * This class is the return value of kroneckerProduct(MatrixBase, @@ -27,43 +80,26 @@ template class SparseMatrix; * \tparam Rhs Type of the rignt-hand side, a matrix expression. */ template -class KroneckerProduct : public ReturnByValue > +class KroneckerProduct : public KroneckerProductBase > { private: - typedef ReturnByValue Base; - typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef KroneckerProductBase Base; + using Base::m_A; + using Base::m_B; public: /*! \brief Constructor. */ KroneckerProduct(const Lhs& A, const Rhs& B) - : m_A(A), m_B(B) + : Base(A, B) {} /*! \brief Evaluate the Kronecker tensor product. */ template void evalTo(Dest& dst) const; - - inline Index rows() const { return m_A.rows() * m_B.rows(); } - inline Index cols() const { return m_A.cols() * m_B.cols(); } - - Scalar coeff(Index row, Index col) const - { - return m_A.coeff(row / m_B.rows(), col / m_B.cols()) * - m_B.coeff(row % m_B.rows(), col % m_B.cols()); - } - - Scalar coeff(Index i) const - { - EIGEN_STATIC_ASSERT_VECTOR_ONLY(KroneckerProduct); - return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size()); - } - - private: - typename Lhs::Nested m_A; - typename Rhs::Nested m_B; }; /*! + * \ingroup KroneckerProduct_Module + * * \brief Kronecker tensor product helper class for sparse matrices * * If at least one of the operands is a sparse matrix expression, @@ -77,40 +113,28 @@ class KroneckerProduct : public ReturnByValue > * \tparam Rhs Type of the rignt-hand side, a matrix expression. */ template -class KroneckerProductSparse : public EigenBase > +class KroneckerProductSparse : public KroneckerProductBase > { private: - typedef typename internal::traits::Index Index; + typedef KroneckerProductBase Base; + using Base::m_A; + using Base::m_B; public: /*! \brief Constructor. */ KroneckerProductSparse(const Lhs& A, const Rhs& B) - : m_A(A), m_B(B) + : Base(A, B) {} /*! \brief Evaluate the Kronecker tensor product. 
*/ template void evalTo(Dest& dst) const; - - inline Index rows() const { return m_A.rows() * m_B.rows(); } - inline Index cols() const { return m_A.cols() * m_B.cols(); } - - template - operator SparseMatrix() - { - SparseMatrix result; - evalTo(result.derived()); - return result; - } - - private: - typename Lhs::Nested m_A; - typename Rhs::Nested m_B; }; template template void KroneckerProduct::evalTo(Dest& dst) const { + typedef typename Base::Index Index; const int BlockRows = Rhs::RowsAtCompileTime, BlockCols = Rhs::ColsAtCompileTime; const Index Br = m_B.rows(), @@ -124,9 +148,10 @@ template template void KroneckerProductSparse::evalTo(Dest& dst) const { + typedef typename Base::Index Index; const Index Br = m_B.rows(), Bc = m_B.cols(); - dst.resize(rows(),cols()); + dst.resize(this->rows(), this->cols()); dst.resizeNonZeros(0); dst.reserve(m_A.nonZeros() * m_B.nonZeros()); @@ -155,6 +180,7 @@ struct traits > typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; + typedef typename promote_index_type::type Index; enum { Rows = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, @@ -193,6 +219,8 @@ struct traits > | EvalBeforeNestingBit | EvalBeforeAssigningBit, CoeffReadCost = Dynamic }; + + typedef SparseMatrix ReturnType; }; } // end namespace internal @@ -228,6 +256,16 @@ KroneckerProduct kroneckerProduct(const MatrixBase& a, const MatrixBase< * Computes Kronecker tensor product of two matrices, at least one of * which is sparse * + * \warning If you want to replace a matrix by its Kronecker product + * with some matrix, do \b NOT do this: + * \code + * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect + * \endcode + * instead, use eval() to work around this: + * \code + * A = kroneckerProduct(A,B).eval(); + * \endcode + * * \param a Dense/sparse matrix a * \param b Dense/sparse matrix b * \return Kronecker tensor product of a and b, stored in a sparse diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h index 6825a7882..30b36a179 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h @@ -21,8 +21,8 @@ namespace Eigen { * expected to be an instantiation of the Matrix class template. */ template -class MatrixExponential { - +class MatrixExponential : internal::noncopyable +{ public: /** \brief Constructor. @@ -32,7 +32,7 @@ class MatrixExponential { * * \param[in] M matrix whose exponential is to be computed. */ - MatrixExponential(const MatrixType &M); + explicit MatrixExponential(const MatrixType &M); /** \brief Computes the matrix exponential. * @@ -43,10 +43,6 @@ class MatrixExponential { private: - // Prevent copying - MatrixExponential(const MatrixExponential&); - MatrixExponential& operator=(const MatrixExponential&); - /** \brief Compute the (3,3)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé @@ -130,6 +126,7 @@ class MatrixExponential { */ void computeUV(long double); + struct ScalingOp; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename std::complex ComplexScalar; @@ -159,6 +156,43 @@ class MatrixExponential { RealScalar m_l1norm; }; +/** \brief Scaling operator. + * + * This struct is used by CwiseUnaryOp to scale a matrix by \f$ 2^{-s} \f$. 
+ */ +template +struct MatrixExponential::ScalingOp +{ + /** \brief Constructor. + * + * \param[in] squarings The integer \f$ s \f$ in this document. + */ + ScalingOp(int squarings) : m_squarings(squarings) { } + + /** \brief Scale a matrix coefficient. + * + * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. + */ + inline const RealScalar operator() (const RealScalar& x) const + { + using std::ldexp; + return ldexp(x, -m_squarings); + } + + /** \brief Scale a matrix coefficient. + * + * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. + */ + inline const ComplexScalar operator() (const ComplexScalar& x) const + { + using std::ldexp; + return ComplexScalar(ldexp(x.real(), -m_squarings), ldexp(x.imag(), -m_squarings)); + } + + private: + int m_squarings; +}; + template MatrixExponential::MatrixExponential(const MatrixType &M) : m_M(M), @@ -284,7 +318,6 @@ template void MatrixExponential::computeUV(float) { using std::frexp; - using std::pow; if (m_l1norm < 4.258730016922831e-001) { pade3(m_M); } else if (m_l1norm < 1.880152677804762e+000) { @@ -293,7 +326,7 @@ void MatrixExponential::computeUV(float) const float maxnorm = 3.925724783138660f; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = CwiseUnaryOp(m_M, m_squarings); pade7(A); } } @@ -302,7 +335,6 @@ template void MatrixExponential::computeUV(double) { using std::frexp; - using std::pow; if (m_l1norm < 1.495585217958292e-002) { pade3(m_M); } else if (m_l1norm < 2.539398330063230e-001) { @@ -315,7 +347,7 @@ void MatrixExponential::computeUV(double) const double maxnorm = 5.371920351148152; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = CwiseUnaryOp(m_M, m_squarings); pade13(A); } } @@ -324,7 +356,6 @@ template void MatrixExponential::computeUV(long double) { using std::frexp; - using std::pow; #if LDBL_MANT_DIG == 53 // double precision computeUV(double()); #elif LDBL_MANT_DIG <= 64 // extended precision @@ -340,7 +371,7 @@ void MatrixExponential::computeUV(long double) const long double maxnorm = 4.0246098906697353063L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = CwiseUnaryOp(m_M, m_squarings); pade13(A); } #elif LDBL_MANT_DIG <= 106 // double-double @@ -358,7 +389,7 @@ void MatrixExponential::computeUV(long double) const long double maxnorm = 3.2579440895405400856599663723517L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = CwiseUnaryOp(m_M, m_squarings); pade17(A); } #elif LDBL_MANT_DIG <= 112 // quadruple precison @@ -376,7 +407,7 @@ void MatrixExponential::computeUV(long double) const long double maxnorm = 2.884233277829519311757165057717815L; frexp(m_l1norm / maxnorm, &m_squarings); if (m_squarings < 0) m_squarings = 0; - MatrixType A = m_M / pow(Scalar(2), m_squarings); + MatrixType A = CwiseUnaryOp(m_M, m_squarings); pade17(A); } #else @@ -407,7 +438,7 @@ template struct MatrixExponentialReturnValue * \param[in] src %Matrix (expression) forming the argument of the * matrix exponential. */ - MatrixExponentialReturnValue(const Derived& src) : m_src(src) { } + explicit MatrixExponentialReturnValue(const Derived& src) : m_src(src) { } /** \brief Compute the matrix exponential. 
* @@ -427,8 +458,6 @@ template struct MatrixExponentialReturnValue protected: const Derived& m_src; - private: - MatrixExponentialReturnValue& operator=(const MatrixExponentialReturnValue&); }; namespace internal { diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h index 7d426640c..ad54d8ed0 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h @@ -34,7 +34,7 @@ namespace Eigen { template ::Scalar>::IsComplex> -class MatrixFunction +class MatrixFunction : internal::noncopyable { public: @@ -65,7 +65,7 @@ class MatrixFunction * \brief Partial specialization of MatrixFunction for real matrices */ template -class MatrixFunction +class MatrixFunction : internal::noncopyable { private: @@ -111,8 +111,6 @@ class MatrixFunction private: typename internal::nested::type m_A; /**< \brief Reference to argument of matrix function. */ AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */ - - MatrixFunction& operator=(const MatrixFunction&); }; @@ -120,7 +118,7 @@ class MatrixFunction * \brief Partial specialization of MatrixFunction for complex matrices */ template -class MatrixFunction +class MatrixFunction : internal::noncopyable { private: @@ -176,8 +174,6 @@ class MatrixFunction * separation constant is set to 0.1, a value taken from the * paper by Davies and Higham. */ static const RealScalar separation() { return static_cast(0.1); } - - MatrixFunction& operator=(const MatrixFunction&); }; /** \brief Constructor. @@ -531,8 +527,6 @@ template class MatrixFunctionReturnValue private: typename internal::nested::type m_A; StemFunction *m_f; - - MatrixFunctionReturnValue& operator=(const MatrixFunctionReturnValue&); }; namespace internal { diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h index efe332c48..d6ff5f1ce 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h @@ -21,7 +21,7 @@ namespace Eigen { * entries are close to each other. */ template -class MatrixFunctionAtomic +class MatrixFunctionAtomic : internal::noncopyable { public: @@ -44,10 +44,6 @@ class MatrixFunctionAtomic private: - // Prevent copying - MatrixFunctionAtomic(const MatrixFunctionAtomic&); - MatrixFunctionAtomic& operator=(const MatrixFunctionAtomic&); - void computeMu(); bool taylorConverged(Index s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P); diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h index c744fc05f..33cfadfb4 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h @@ -28,7 +28,7 @@ namespace Eigen { * \sa class MatrixFunctionAtomic, MatrixBase::log() */ template -class MatrixLogarithmAtomic +class MatrixLogarithmAtomic : internal::noncopyable { public: @@ -71,10 +71,6 @@ private: std::numeric_limits::digits<= 64? 8: // extended precision std::numeric_limits::digits<=106? 10: // double-double 11; // quadruple precision - - // Prevent copying - MatrixLogarithmAtomic(const MatrixLogarithmAtomic&); - MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&); }; /** \brief Compute logarithm of triangular matrix with clustered eigenvalues. 
*/ @@ -429,7 +425,7 @@ public: * * \param[in] A %Matrix (expression) forming the argument of the matrix logarithm. */ - MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { } + explicit MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { } /** \brief Compute the matrix logarithm. * @@ -458,8 +454,6 @@ public: private: typename internal::nested::type m_A; - - MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&); }; namespace internal { diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h index c32437281..5548bd95c 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h @@ -14,16 +14,48 @@ namespace Eigen { template class MatrixPower; +/** + * \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix power of some matrix. + * + * \tparam MatrixType type of the base, a matrix. + * + * This class holds the arguments to the matrix power until it is + * assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). It is the return type of + * MatrixPower::operator() and related functions and most of the + * time this is the only way it is used. + */ +/* TODO This class is only used by MatrixPower, so it should be nested + * into MatrixPower, like MatrixPower::ReturnValue. However, my + * compiler complained about unused template parameter in the + * following declaration in namespace internal. + * + * template + * struct traits::ReturnValue>; + */ template -class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval > +class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParenthesesReturnValue > { public: typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; - MatrixPowerRetval(MatrixPower& pow, RealScalar p) : m_pow(pow), m_p(p) + /** + * \brief Constructor. + * + * \param[in] pow %MatrixPower storing the base. + * \param[in] p scalar, the exponent of the matrix power. + */ + MatrixPowerParenthesesReturnValue(MatrixPower& pow, RealScalar p) : m_pow(pow), m_p(p) { } + /** + * \brief Compute the matrix power. + * + * \param[out] result + */ template inline void evalTo(ResultType& res) const { m_pow.compute(res, m_p); } @@ -34,11 +66,25 @@ class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval > private: MatrixPower& m_pow; const RealScalar m_p; - MatrixPowerRetval& operator=(const MatrixPowerRetval&); }; +/** + * \ingroup MatrixFunctions_Module + * + * \brief Class for computing matrix powers. + * + * \tparam MatrixType type of the base, expected to be an instantiation + * of the Matrix class template. + * + * This class is capable of computing triangular real/complex matrices + * raised to a power in the interval \f$ (-1, 1) \f$. + * + * \note Currently this class is only used by MatrixPower. One may + * insist that this be nested into MatrixPower. This class is here to + * faciliate future development of triangular matrix functions. 
+ */ template -class MatrixPowerAtomic +class MatrixPowerAtomic : internal::noncopyable { private: enum { @@ -49,14 +95,14 @@ class MatrixPowerAtomic typedef typename MatrixType::RealScalar RealScalar; typedef std::complex ComplexScalar; typedef typename MatrixType::Index Index; - typedef Array ArrayType; + typedef Block ResultType; const MatrixType& m_A; RealScalar m_p; - void computePade(int degree, const MatrixType& IminusT, MatrixType& res) const; - void compute2x2(MatrixType& res, RealScalar p) const; - void computeBig(MatrixType& res) const; + void computePade(int degree, const MatrixType& IminusT, ResultType& res) const; + void compute2x2(ResultType& res, RealScalar p) const; + void computeBig(ResultType& res) const; static int getPadeDegree(float normIminusT); static int getPadeDegree(double normIminusT); static int getPadeDegree(long double normIminusT); @@ -64,24 +110,45 @@ class MatrixPowerAtomic static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p); public: + /** + * \brief Constructor. + * + * \param[in] T the base of the matrix power. + * \param[in] p the exponent of the matrix power, should be in + * \f$ (-1, 1) \f$. + * + * The class stores a reference to T, so it should not be changed + * (or destroyed) before evaluation. Only the upper triangular + * part of T is read. + */ MatrixPowerAtomic(const MatrixType& T, RealScalar p); - void compute(MatrixType& res) const; + + /** + * \brief Compute the matrix power. + * + * \param[out] res \f$ A^p \f$ where A and p are specified in the + * constructor. + */ + void compute(ResultType& res) const; }; template MatrixPowerAtomic::MatrixPowerAtomic(const MatrixType& T, RealScalar p) : m_A(T), m_p(p) -{ eigen_assert(T.rows() == T.cols()); } +{ + eigen_assert(T.rows() == T.cols()); + eigen_assert(p > -1 && p < 1); +} template -void MatrixPowerAtomic::compute(MatrixType& res) const +void MatrixPowerAtomic::compute(ResultType& res) const { - res.resizeLike(m_A); + using std::pow; switch (m_A.rows()) { case 0: break; case 1: - res(0,0) = std::pow(m_A(0,0), m_p); + res(0,0) = pow(m_A(0,0), m_p); break; case 2: compute2x2(res, m_p); @@ -92,25 +159,24 @@ void MatrixPowerAtomic::compute(MatrixType& res) const } template -void MatrixPowerAtomic::computePade(int degree, const MatrixType& IminusT, MatrixType& res) const +void MatrixPowerAtomic::computePade(int degree, const MatrixType& IminusT, ResultType& res) const { - int i = degree<<1; - res = (m_p-degree) / ((i-1)<<1) * IminusT; + int i = 2*degree; + res = (m_p-degree) / (2*i-2) * IminusT; + for (--i; i; --i) { res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView() - .solve((i==1 ? -m_p : i&1 ? (-m_p-(i>>1))/(i<<1) : (m_p-(i>>1))/((i-1)<<1)) * IminusT).eval(); + .solve((i==1 ? -m_p : i&1 ? 
(-m_p-i/2)/(2*i) : (m_p-i/2)/(2*i-2)) * IminusT).eval(); } res += MatrixType::Identity(IminusT.rows(), IminusT.cols()); } // This function assumes that res has the correct size (see bug 614) template -void MatrixPowerAtomic::compute2x2(MatrixType& res, RealScalar p) const +void MatrixPowerAtomic::compute2x2(ResultType& res, RealScalar p) const { using std::abs; using std::pow; - - ArrayType logTdiag = m_A.diagonal().array().log(); res.coeffRef(0,0) = pow(m_A.coeff(0,0), p); for (Index i=1; i < m_A.cols(); ++i) { @@ -126,8 +192,9 @@ void MatrixPowerAtomic::compute2x2(MatrixType& res, RealScalar p) co } template -void MatrixPowerAtomic::computeBig(MatrixType& res) const +void MatrixPowerAtomic::computeBig(ResultType& res) const { + using std::ldexp; const int digits = std::numeric_limits::digits; const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1f: // sigle precision digits <= 53? 2.789358995219730e-1: // double precision @@ -139,19 +206,6 @@ void MatrixPowerAtomic::computeBig(MatrixType& res) const int degree, degree2, numberOfSquareRoots = 0; bool hasExtraSquareRoot = false; - /* FIXME - * For singular T, norm(I - T) >= 1 but maxNormForPade < 1, leads to infinite - * loop. We should move 0 eigenvalues to bottom right corner. We need not - * worry about tiny values (e.g. 1e-300) because they will reach 1 if - * repetitively sqrt'ed. - * - * If the 0 eigenvalues are semisimple, they can form a 0 matrix at the - * bottom right corner. - * - * [ T A ]^p [ T^p (T^-1 T^p A) ] - * [ ] = [ ] - * [ 0 0 ] [ 0 0 ] - */ for (Index i=0; i < m_A.cols(); ++i) eigen_assert(m_A(i,i) != RealScalar(0)); @@ -172,7 +226,7 @@ void MatrixPowerAtomic::computeBig(MatrixType& res) const computePade(degree, IminusT, res); for (; numberOfSquareRoots; --numberOfSquareRoots) { - compute2x2(res, std::ldexp(m_p, -numberOfSquareRoots)); + compute2x2(res, ldexp(m_p, -numberOfSquareRoots)); res = res.template triangularView() * res; } compute2x2(res, m_p); @@ -237,19 +291,28 @@ template inline typename MatrixPowerAtomic::ComplexScalar MatrixPowerAtomic::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p) { - ComplexScalar logCurr = std::log(curr); - ComplexScalar logPrev = std::log(prev); - int unwindingNumber = std::ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI)); + using std::ceil; + using std::exp; + using std::log; + using std::sinh; + + ComplexScalar logCurr = log(curr); + ComplexScalar logPrev = log(prev); + int unwindingNumber = ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI)); ComplexScalar w = numext::atanh2(curr - prev, curr + prev) + ComplexScalar(0, M_PI*unwindingNumber); - return RealScalar(2) * std::exp(RealScalar(0.5) * p * (logCurr + logPrev)) * std::sinh(p * w) / (curr - prev); + return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / (curr - prev); } template inline typename MatrixPowerAtomic::RealScalar MatrixPowerAtomic::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p) { + using std::exp; + using std::log; + using std::sinh; + RealScalar w = numext::atanh2(curr - prev, curr + prev); - return 2 * std::exp(p * (std::log(curr) + std::log(prev)) / 2) * std::sinh(p * w) / (curr - prev); + return 2 * exp(p * (log(curr) + log(prev)) / 2) * sinh(p * w) / (curr - prev); } /** @@ -272,15 +335,9 @@ MatrixPowerAtomic::computeSuperDiag(RealScalar curr, RealScalar prev * Output: \verbinclude MatrixPower_optimal.out */ template -class MatrixPower +class MatrixPower : internal::noncopyable { private: - enum { - 
RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime - }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; @@ -294,7 +351,11 @@ class MatrixPower * The class stores a reference to A, so it should not be changed * (or destroyed) before evaluation. */ - explicit MatrixPower(const MatrixType& A) : m_A(A), m_conditionNumber(0) + explicit MatrixPower(const MatrixType& A) : + m_A(A), + m_conditionNumber(0), + m_rank(A.cols()), + m_nulls(0) { eigen_assert(A.rows() == A.cols()); } /** @@ -304,8 +365,8 @@ class MatrixPower * \return The expression \f$ A^p \f$, where A is specified in the * constructor. */ - const MatrixPowerRetval operator()(RealScalar p) - { return MatrixPowerRetval(*this, p); } + const MatrixPowerParenthesesReturnValue operator()(RealScalar p) + { return MatrixPowerParenthesesReturnValue(*this, p); } /** * \brief Compute the matrix power. @@ -322,21 +383,54 @@ class MatrixPower private: typedef std::complex ComplexScalar; - typedef Matrix ComplexMatrix; + typedef Matrix ComplexMatrix; + /** \brief Reference to the base of matrix power. */ typename MatrixType::Nested m_A; + + /** \brief Temporary storage. */ MatrixType m_tmp; - ComplexMatrix m_T, m_U, m_fT; + + /** \brief Store the result of Schur decomposition. */ + ComplexMatrix m_T, m_U; + + /** \brief Store fractional power of m_T. */ + ComplexMatrix m_fT; + + /** + * \brief Condition number of m_A. + * + * It is initialized as 0 to avoid performing unnecessary Schur + * decomposition, which is the bottleneck. + */ RealScalar m_conditionNumber; - RealScalar modfAndInit(RealScalar, RealScalar*); + /** \brief Rank of m_A. */ + Index m_rank; + + /** \brief Rank deficiency of m_A. */ + Index m_nulls; + + /** + * \brief Split p into integral part and fractional part. + * + * \param[in] p The exponent. + * \param[out] p The fractional part ranging in \f$ (-1, 1) \f$. + * \param[out] intpart The integral part. + * + * Only if the fractional part is nonzero, it calls initialize(). + */ + void split(RealScalar& p, RealScalar& intpart); + + /** \brief Perform Schur decomposition for fractional power. 
*/ + void initialize(); template - void computeIntPower(ResultType&, RealScalar); + void computeIntPower(ResultType& res, RealScalar p); template - void computeFracPower(ResultType&, RealScalar); + void computeFracPower(ResultType& res, RealScalar p); template static void revertSchur( @@ -355,59 +449,102 @@ template template void MatrixPower::compute(ResultType& res, RealScalar p) { + using std::pow; switch (cols()) { case 0: break; case 1: - res(0,0) = std::pow(m_A.coeff(0,0), p); + res(0,0) = pow(m_A.coeff(0,0), p); break; default: - RealScalar intpart, x = modfAndInit(p, &intpart); + RealScalar intpart; + split(p, intpart); + + res = MatrixType::Identity(rows(), cols()); computeIntPower(res, intpart); - computeFracPower(res, x); + if (p) computeFracPower(res, p); } } template -typename MatrixPower::RealScalar -MatrixPower::modfAndInit(RealScalar x, RealScalar* intpart) +void MatrixPower::split(RealScalar& p, RealScalar& intpart) { - typedef Array RealArray; + using std::floor; + using std::pow; - *intpart = std::floor(x); - RealScalar res = x - *intpart; + intpart = floor(p); + p -= intpart; - if (!m_conditionNumber && res) { - const ComplexSchur schurOfA(m_A); - m_T = schurOfA.matrixT(); - m_U = schurOfA.matrixU(); - - const RealArray absTdiag = m_T.diagonal().array().abs(); - m_conditionNumber = absTdiag.maxCoeff() / absTdiag.minCoeff(); + // Perform Schur decomposition if it is not yet performed and the power is + // not an integer. + if (!m_conditionNumber && p) + initialize(); + + // Choose the more stable of intpart = floor(p) and intpart = ceil(p). + if (p > RealScalar(0.5) && p > (1-p) * pow(m_conditionNumber, p)) { + --p; + ++intpart; + } +} + +template +void MatrixPower::initialize() +{ + const ComplexSchur schurOfA(m_A); + JacobiRotation rot; + ComplexScalar eigenvalue; + + m_fT.resizeLike(m_A); + m_T = schurOfA.matrixT(); + m_U = schurOfA.matrixU(); + m_conditionNumber = m_T.diagonal().array().abs().maxCoeff() / m_T.diagonal().array().abs().minCoeff(); + + // Move zero eigenvalues to the bottom right corner. 
+ for (Index i = cols()-1; i>=0; --i) { + if (m_rank <= 2) + return; + if (m_T.coeff(i,i) == RealScalar(0)) { + for (Index j=i+1; j < m_rank; ++j) { + eigenvalue = m_T.coeff(j,j); + rot.makeGivens(m_T.coeff(j-1,j), eigenvalue); + m_T.applyOnTheRight(j-1, j, rot); + m_T.applyOnTheLeft(j-1, j, rot.adjoint()); + m_T.coeffRef(j-1,j-1) = eigenvalue; + m_T.coeffRef(j,j) = RealScalar(0); + m_U.applyOnTheRight(j-1, j, rot); + } + --m_rank; + } } - if (res>RealScalar(0.5) && res>(1-res)*std::pow(m_conditionNumber, res)) { - --res; - ++*intpart; + m_nulls = rows() - m_rank; + if (m_nulls) { + eigen_assert(m_T.bottomRightCorner(m_nulls, m_nulls).isZero() + && "Base of matrix power should be invertible or with a semisimple zero eigenvalue."); + m_fT.bottomRows(m_nulls).fill(RealScalar(0)); } - return res; } template template void MatrixPower::computeIntPower(ResultType& res, RealScalar p) { - RealScalar pp = std::abs(p); + using std::abs; + using std::fmod; + RealScalar pp = abs(p); - if (p<0) m_tmp = m_A.inverse(); - else m_tmp = m_A; + if (p<0) + m_tmp = m_A.inverse(); + else + m_tmp = m_A; - res = MatrixType::Identity(rows(), cols()); - while (pp >= 1) { - if (std::fmod(pp, 2) >= 1) + while (true) { + if (fmod(pp, 2) >= 1) res = m_tmp * res; - m_tmp *= m_tmp; pp /= 2; + if (pp < 1) + break; + m_tmp *= m_tmp; } } @@ -415,12 +552,17 @@ template template void MatrixPower::computeFracPower(ResultType& res, RealScalar p) { - if (p) { - eigen_assert(m_conditionNumber); - MatrixPowerAtomic(m_T, p).compute(m_fT); - revertSchur(m_tmp, m_fT, m_U); - res = m_tmp * res; + Block blockTp(m_fT, 0, 0, m_rank, m_rank); + eigen_assert(m_conditionNumber); + eigen_assert(m_rank + m_nulls == rows()); + + MatrixPowerAtomic(m_T.topLeftCorner(m_rank, m_rank), p).compute(blockTp); + if (m_nulls) { + m_fT.topRightCorner(m_rank, m_nulls) = m_T.topLeftCorner(m_rank, m_rank).template triangularView() + .solve(blockTp * m_T.topRightCorner(m_rank, m_nulls)); } + revertSchur(m_tmp, m_fT, m_U); + res = m_tmp * res; } template @@ -464,7 +606,7 @@ class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue +class MatrixComplexPowerReturnValue : public ReturnByValue< MatrixComplexPowerReturnValue > +{ + public: + typedef typename Derived::PlainObject PlainObject; + typedef typename std::complex ComplexScalar; + typedef typename Derived::Index Index; + + /** + * \brief Constructor. + * + * \param[in] A %Matrix (expression), the base of the matrix power. + * \param[in] p complex scalar, the exponent of the matrix power. + */ + MatrixComplexPowerReturnValue(const Derived& A, const ComplexScalar& p) : m_A(A), m_p(p) + { } + + /** + * \brief Compute the matrix power. + * + * Because \p p is complex, \f$ A^p \f$ is simply evaluated as \f$ + * \exp(p \log(A)) \f$. + * + * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the + * constructor. 
+ */ + template + inline void evalTo(ResultType& res) const + { res = (m_p * m_A.log()).exp(); } + + Index rows() const { return m_A.rows(); } + Index cols() const { return m_A.cols(); } + + private: + const Derived& m_A; + const ComplexScalar m_p; }; namespace internal { template -struct traits< MatrixPowerRetval > +struct traits< MatrixPowerParenthesesReturnValue > { typedef typename MatrixPowerType::PlainObject ReturnType; }; template struct traits< MatrixPowerReturnValue > { typedef typename Derived::PlainObject ReturnType; }; +template +struct traits< MatrixComplexPowerReturnValue > +{ typedef typename Derived::PlainObject ReturnType; }; + } template const MatrixPowerReturnValue MatrixBase::pow(const RealScalar& p) const { return MatrixPowerReturnValue(derived(), p); } +template +const MatrixComplexPowerReturnValue MatrixBase::pow(const std::complex& p) const +{ return MatrixComplexPowerReturnValue(derived(), p); } + } // namespace Eigen #endif // EIGEN_MATRIX_POWER diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h index b48ea9d46..0cd39ebe4 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h @@ -24,7 +24,7 @@ namespace Eigen { * \sa MatrixSquareRoot, MatrixSquareRootTriangular */ template -class MatrixSquareRootQuasiTriangular +class MatrixSquareRootQuasiTriangular : internal::noncopyable { public: @@ -36,7 +36,7 @@ class MatrixSquareRootQuasiTriangular * The class stores a reference to \p A, so it should not be * changed (or destroyed) before compute() is called. */ - MatrixSquareRootQuasiTriangular(const MatrixType& A) + explicit MatrixSquareRootQuasiTriangular(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); @@ -253,10 +253,10 @@ void MatrixSquareRootQuasiTriangular * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular */ template -class MatrixSquareRootTriangular +class MatrixSquareRootTriangular : internal::noncopyable { public: - MatrixSquareRootTriangular(const MatrixType& A) + explicit MatrixSquareRootTriangular(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); @@ -321,7 +321,7 @@ class MatrixSquareRoot * The class stores a reference to \p A, so it should not be * changed (or destroyed) before compute() is called. */ - MatrixSquareRoot(const MatrixType& A); + explicit MatrixSquareRoot(const MatrixType& A); /** \brief Compute the matrix square root * @@ -341,7 +341,7 @@ class MatrixSquareRoot { public: - MatrixSquareRoot(const MatrixType& A) + explicit MatrixSquareRoot(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); @@ -370,11 +370,11 @@ class MatrixSquareRoot // ********** Partial specialization for complex matrices ********** template -class MatrixSquareRoot +class MatrixSquareRoot : internal::noncopyable { public: - MatrixSquareRoot(const MatrixType& A) + explicit MatrixSquareRoot(const MatrixType& A) : m_A(A) { eigen_assert(A.rows() == A.cols()); @@ -422,7 +422,7 @@ template class MatrixSquareRootReturnValue * \param[in] src %Matrix (expression) forming the argument of the * matrix square root. */ - MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { } + explicit MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { } /** \brief Compute the matrix square root. 
* @@ -442,8 +442,6 @@ template class MatrixSquareRootReturnValue protected: const Derived& m_src; - private: - MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&); }; namespace internal { diff --git a/unsupported/Eigen/src/MatrixFunctions/StemFunction.h b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h index 724e55c1d..0a815d834 100644 --- a/unsupported/Eigen/src/MatrixFunctions/StemFunction.h +++ b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h @@ -16,7 +16,7 @@ namespace Eigen { * \brief Stem functions corresponding to standard mathematical functions. */ template -class StdStemFunctions +class StdStemFunctions : internal::noncopyable { public: diff --git a/unsupported/test/kronecker_product.cpp b/unsupported/test/kronecker_product.cpp index 8ddc6ec28..c68a07de8 100644 --- a/unsupported/test/kronecker_product.cpp +++ b/unsupported/test/kronecker_product.cpp @@ -107,31 +107,34 @@ void test_kronecker_product() SparseMatrix SM_row_a(SM_a), SM_row_b(SM_b); - // test kroneckerProduct(DM_block,DM,DM_fixedSize) + // test DM_fixedSize = kroneckerProduct(DM_block,DM) Matrix DM_fix_ab = kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b); CALL_SUBTEST(check_kronecker_product(DM_fix_ab)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b))); for(int i=0;i(2,5) = kroneckerProduct(DM_a,DM_b); CALL_SUBTEST(check_kronecker_product(DM_block_ab.block<6,6>(2,5))); - // test kroneckerProduct(DM,DM,DM) + // test DM = kroneckerProduct(DM,DM) MatrixXd DM_ab = kroneckerProduct(DM_a,DM_b); CALL_SUBTEST(check_kronecker_product(DM_ab)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,DM_b))); - // test kroneckerProduct(SM,DM,SM) + // test SM = kroneckerProduct(SM,DM) SparseMatrix SM_ab = kroneckerProduct(SM_a,DM_b); CALL_SUBTEST(check_kronecker_product(SM_ab)); SparseMatrix SM_ab2 = kroneckerProduct(SM_a,DM_b); CALL_SUBTEST(check_kronecker_product(SM_ab2)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,DM_b))); - // test kroneckerProduct(DM,SM,SM) + // test SM = kroneckerProduct(DM,SM) SM_ab.setZero(); SM_ab.insert(0,0)=37.0; SM_ab = kroneckerProduct(DM_a,SM_b); @@ -140,8 +143,9 @@ void test_kronecker_product() SM_ab2.insert(0,0)=37.0; SM_ab2 = kroneckerProduct(DM_a,SM_b); CALL_SUBTEST(check_kronecker_product(SM_ab2)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,SM_b))); - // test kroneckerProduct(SM,SM,SM) + // test SM = kroneckerProduct(SM,SM) SM_ab.resize(2,33); SM_ab.insert(0,0)=37.0; SM_ab = kroneckerProduct(SM_a,SM_b); @@ -150,8 +154,9 @@ void test_kronecker_product() SM_ab2.insert(0,0)=37.0; SM_ab2 = kroneckerProduct(SM_a,SM_b); CALL_SUBTEST(check_kronecker_product(SM_ab2)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,SM_b))); - // test kroneckerProduct(SM,SM,SM) with sparse pattern + // test SM = kroneckerProduct(SM,SM) with sparse pattern SM_a.resize(4,5); SM_b.resize(3,2); SM_a.resizeNonZeros(0); @@ -169,7 +174,7 @@ void test_kronecker_product() SM_ab = kroneckerProduct(SM_a,SM_b); CALL_SUBTEST(check_sparse_kronecker_product(SM_ab)); - // test dimension of result of kroneckerProduct(DM,DM,DM) + // test dimension of result of DM = kroneckerProduct(DM,DM) MatrixXd DM_a2(2,1); MatrixXd DM_b2(5,4); MatrixXd DM_ab2 = kroneckerProduct(DM_a2,DM_b2); diff --git a/unsupported/test/matrix_functions.h b/unsupported/test/matrix_functions.h index 5817caef6..295da16b6 100644 --- a/unsupported/test/matrix_functions.h +++ b/unsupported/test/matrix_functions.h @@ -10,27 +10,48 @@ #include 
"main.h" #include +// For complex matrices, any matrix is fine. +template::Scalar>::IsComplex> +struct processTriangularMatrix +{ + static void run(MatrixType&, MatrixType&, const MatrixType&) + { } +}; + +// For real matrices, make sure none of the eigenvalues are negative. +template +struct processTriangularMatrix +{ + static void run(MatrixType& m, MatrixType& T, const MatrixType& U) + { + typedef typename MatrixType::Index Index; + const Index size = m.cols(); + + for (Index i=0; i < size; ++i) { + if (i == size - 1 || T.coeff(i+1,i) == 0) + T.coeffRef(i,i) = std::abs(T.coeff(i,i)); + else + ++i; + } + m = U * T * U.transpose(); + } +}; + template ::Scalar>::IsComplex> struct generateTestMatrix; -// for real matrices, make sure none of the eigenvalues are negative template struct generateTestMatrix { static void run(MatrixType& result, typename MatrixType::Index size) { - MatrixType mat = MatrixType::Random(size, size); - EigenSolver es(mat); - typename EigenSolver::EigenvalueType eivals = es.eigenvalues(); - for (typename MatrixType::Index i = 0; i < size; ++i) { - if (eivals(i).imag() == 0 && eivals(i).real() < 0) - eivals(i) = -eivals(i); - } - result = (es.eigenvectors() * eivals.asDiagonal() * es.eigenvectors().inverse()).real(); + result = MatrixType::Random(size, size); + RealSchur schur(result); + MatrixType T = schur.matrixT(); + processTriangularMatrix::run(result, T, schur.matrixU()); } }; -// for complex matrices, any matrix is fine template struct generateTestMatrix { diff --git a/unsupported/test/matrix_power.cpp b/unsupported/test/matrix_power.cpp index b9d513b45..849e4287b 100644 --- a/unsupported/test/matrix_power.cpp +++ b/unsupported/test/matrix_power.cpp @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2012 Chen-Pang He +// Copyright (C) 2012, 2013 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
If a copy of the MPL was not distributed @@ -9,33 +9,6 @@ #include "matrix_functions.h" -template ::IsComplex> -struct generateTriangularMatrix; - -// for real matrices, make sure none of the eigenvalues are negative -template -struct generateTriangularMatrix -{ - static void run(MatrixType& result, typename MatrixType::Index size) - { - result.resize(size, size); - result.template triangularView() = MatrixType::Random(size, size); - for (typename MatrixType::Index i = 0; i < size; ++i) - result.coeffRef(i,i) = std::abs(result.coeff(i,i)); - } -}; - -// for complex matrices, any matrix is fine -template -struct generateTriangularMatrix -{ - static void run(MatrixType& result, typename MatrixType::Index size) - { - result.resize(size, size); - result.template triangularView() = MatrixType::Random(size, size); - } -}; - template void test2dRotation(double tol) { @@ -53,7 +26,7 @@ void test2dRotation(double tol) C = Apow(std::ldexp(angle,1) / M_PI); std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n'; - VERIFY(C.isApprox(B, static_cast(tol))); + VERIFY(C.isApprox(B, tol)); } } @@ -75,12 +48,26 @@ void test2dHyperbolicRotation(double tol) C = Apow(angle); std::cout << "test2dHyperbolicRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n'; - VERIFY(C.isApprox(B, static_cast(tol))); + VERIFY(C.isApprox(B, tol)); + } +} + +template +void test3dRotation(double tol) +{ + Matrix v; + T angle; + + for (int i=0; i<=20; ++i) { + v = Matrix::Random(); + v.normalize(); + angle = pow(10, (i-10) / 5.); + VERIFY(AngleAxis(angle, v).matrix().isApprox(AngleAxis(1,v).matrix().pow(angle), tol)); } } template -void testExponentLaws(const MatrixType& m, double tol) +void testGeneral(const MatrixType& m, double tol) { typedef typename MatrixType::RealScalar RealScalar; MatrixType m1, m2, m3, m4, m5; @@ -97,19 +84,64 @@ void testExponentLaws(const MatrixType& m, double tol) m4 = mpow(x+y); m5.noalias() = m2 * m3; - VERIFY(m4.isApprox(m5, static_cast(tol))); + VERIFY(m4.isApprox(m5, tol)); m4 = mpow(x*y); m5 = m2.pow(y); - VERIFY(m4.isApprox(m5, static_cast(tol))); + VERIFY(m4.isApprox(m5, tol)); m4 = (std::abs(x) * m1).pow(y); m5 = std::pow(std::abs(x), y) * m3; - VERIFY(m4.isApprox(m5, static_cast(tol))); + VERIFY(m4.isApprox(m5, tol)); + } +} + +template +void testSingular(MatrixType m, double tol) +{ + const int IsComplex = NumTraits::Scalar>::IsComplex; + typedef typename internal::conditional< IsComplex, MatrixSquareRootTriangular, + MatrixSquareRootQuasiTriangular >::type SquareRootType; + typedef typename internal::conditional, const MatrixType&>::type TriangularType; + typename internal::conditional< IsComplex, ComplexSchur, RealSchur >::type schur; + MatrixType T; + + for (int i=0; i < g_repeat; ++i) { + m.setRandom(); + m.col(0).fill(0); + + schur.compute(m); + T = schur.matrixT(); + const MatrixType& U = schur.matrixU(); + processTriangularMatrix::run(m, T, U); + MatrixPower mpow(m); + + SquareRootType(T).compute(T); + VERIFY(mpow(0.5).isApprox(U * (TriangularType(T) * U.adjoint()), tol)); + + SquareRootType(T).compute(T); + VERIFY(mpow(0.25).isApprox(U * (TriangularType(T) * U.adjoint()), tol)); + + SquareRootType(T).compute(T); + VERIFY(mpow(0.125).isApprox(U * (TriangularType(T) * U.adjoint()), tol)); + } +} + +template +void testLogThenExp(MatrixType m, double tol) +{ + typedef typename MatrixType::Scalar Scalar; + Scalar x; + + for (int i=0; i < g_repeat; ++i) { + generateTestMatrix::run(m, m.rows()); + x = internal::random(); + 
VERIFY(m.pow(x).isApprox((x * m.log()).exp(), tol)); } } typedef Matrix Matrix3dRowMajor; +typedef Matrix Matrix3e; typedef Matrix MatrixXe; void test_matrix_power() @@ -121,13 +153,46 @@ void test_matrix_power() CALL_SUBTEST_1(test2dHyperbolicRotation(1e-5)); CALL_SUBTEST_9(test2dHyperbolicRotation(1e-14)); - CALL_SUBTEST_2(testExponentLaws(Matrix2d(), 1e-13)); - CALL_SUBTEST_7(testExponentLaws(Matrix3dRowMajor(), 1e-13)); - CALL_SUBTEST_3(testExponentLaws(Matrix4cd(), 1e-13)); - CALL_SUBTEST_4(testExponentLaws(MatrixXd(8,8), 2e-12)); - CALL_SUBTEST_1(testExponentLaws(Matrix2f(), 1e-4)); - CALL_SUBTEST_5(testExponentLaws(Matrix3cf(), 1e-4)); - CALL_SUBTEST_8(testExponentLaws(Matrix4f(), 1e-4)); - CALL_SUBTEST_6(testExponentLaws(MatrixXf(2,2), 1e-3)); // see bug 614 - CALL_SUBTEST_9(testExponentLaws(MatrixXe(7,7), 1e-13)); + CALL_SUBTEST_10(test3dRotation(1e-13)); + CALL_SUBTEST_11(test3dRotation(1e-5)); + CALL_SUBTEST_12(test3dRotation(1e-13)); + + CALL_SUBTEST_2(testGeneral(Matrix2d(), 1e-13)); + CALL_SUBTEST_7(testGeneral(Matrix3dRowMajor(), 1e-13)); + CALL_SUBTEST_3(testGeneral(Matrix4cd(), 1e-13)); + CALL_SUBTEST_4(testGeneral(MatrixXd(8,8), 2e-12)); + CALL_SUBTEST_1(testGeneral(Matrix2f(), 1e-4)); + CALL_SUBTEST_5(testGeneral(Matrix3cf(), 1e-4)); + CALL_SUBTEST_8(testGeneral(Matrix4f(), 1e-4)); + CALL_SUBTEST_6(testGeneral(MatrixXf(2,2), 1e-3)); // see bug 614 + CALL_SUBTEST_9(testGeneral(MatrixXe(7,7), 1e-13)); + CALL_SUBTEST_10(testGeneral(Matrix3d(), 1e-13)); + CALL_SUBTEST_11(testGeneral(Matrix3f(), 1e-4)); + CALL_SUBTEST_12(testGeneral(Matrix3e(), 1e-13)); + + CALL_SUBTEST_2(testSingular(Matrix2d(), 1e-13)); + CALL_SUBTEST_7(testSingular(Matrix3dRowMajor(), 1e-13)); + CALL_SUBTEST_3(testSingular(Matrix4cd(), 1e-13)); + CALL_SUBTEST_4(testSingular(MatrixXd(8,8), 2e-12)); + CALL_SUBTEST_1(testSingular(Matrix2f(), 1e-4)); + CALL_SUBTEST_5(testSingular(Matrix3cf(), 1e-4)); + CALL_SUBTEST_8(testSingular(Matrix4f(), 1e-4)); + CALL_SUBTEST_6(testSingular(MatrixXf(2,2), 1e-3)); + CALL_SUBTEST_9(testSingular(MatrixXe(7,7), 1e-13)); + CALL_SUBTEST_10(testSingular(Matrix3d(), 1e-13)); + CALL_SUBTEST_11(testSingular(Matrix3f(), 1e-4)); + CALL_SUBTEST_12(testSingular(Matrix3e(), 1e-13)); + + CALL_SUBTEST_2(testLogThenExp(Matrix2d(), 1e-13)); + CALL_SUBTEST_7(testLogThenExp(Matrix3dRowMajor(), 1e-13)); + CALL_SUBTEST_3(testLogThenExp(Matrix4cd(), 1e-13)); + CALL_SUBTEST_4(testLogThenExp(MatrixXd(8,8), 2e-12)); + CALL_SUBTEST_1(testLogThenExp(Matrix2f(), 1e-4)); + CALL_SUBTEST_5(testLogThenExp(Matrix3cf(), 1e-4)); + CALL_SUBTEST_8(testLogThenExp(Matrix4f(), 1e-4)); + CALL_SUBTEST_6(testLogThenExp(MatrixXf(2,2), 1e-3)); + CALL_SUBTEST_9(testLogThenExp(MatrixXe(7,7), 1e-13)); + CALL_SUBTEST_10(testLogThenExp(Matrix3d(), 1e-13)); + CALL_SUBTEST_11(testLogThenExp(Matrix3f(), 1e-4)); + CALL_SUBTEST_12(testLogThenExp(Matrix3e(), 1e-13)); }
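Usage sketch (not part of the patch): the snippet below illustrates the interfaces exercised above, namely MatrixBase::pow with a real exponent, the new overload taking a complex exponent, and class MatrixPower for repeated powers of the same base. It assumes the module is included through the usual unsupported header path; the matrices are made symmetric/Hermitian positive definite only so that every call is well-defined, and all names are illustrative.

\code
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>
#include <complex>

int main()
{
  // A real symmetric positive definite base, so log() and pow() are well-defined.
  Eigen::Matrix3d M = Eigen::Matrix3d::Random();
  Eigen::Matrix3d A = M * M.transpose() + Eigen::Matrix3d::Identity();

  // Real exponent: computed with the Schur-Padé algorithm (class MatrixPower).
  std::cout << A.pow(0.37) << "\n\n";

  // Several exponents of the same base: MatrixPower reuses the Schur decomposition.
  Eigen::MatrixPower<Eigen::Matrix3d> Apow(A);
  std::cout << Apow(0.25) << "\n\n" << Apow(0.75) << "\n\n";

  // Complex exponent on a complex matrix: evaluated as exp(p * log(B)).
  Eigen::Matrix3cd N = Eigen::Matrix3cd::Random();
  Eigen::Matrix3cd B = N * N.adjoint() + Eigen::Matrix3cd::Identity();
  std::cout << B.pow(std::complex<double>(0.5, 1.0)) << std::endl;

  return 0;
}
\endcode

MatrixPower pays for one Schur decomposition up front and caches it, so it is preferable to repeated calls to pow() when many powers of the same matrix are needed.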