diff --git a/CMakeLists.txt b/CMakeLists.txt index 6d9a999c4..5d06f616a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,7 @@ project(Eigen) +cmake_minimum_required(VERSION 2.6.2) + # automatically parse the version number file(READ "${CMAKE_SOURCE_DIR}/Eigen/src/Core/util/Macros.h" _eigen2_version_header LIMIT 5000 OFFSET 1000) string(REGEX MATCH "define *EIGEN_WORLD_VERSION ([0-9]*)" _eigen2_world_version_match "${_eigen2_version_header}") @@ -10,21 +12,22 @@ string(REGEX MATCH "define *EIGEN_MINOR_VERSION ([0-9]*)" _eigen2_minor_version_ set(EIGEN2_MINOR_VERSION "${CMAKE_MATCH_1}") set(EIGEN_VERSION_NUMBER ${EIGEN2_WORLD_VERSION}.${EIGEN2_MAJOR_VERSION}.${EIGEN2_MINOR_VERSION}) -# if the mercurial program is absent, this will leave the EIGEN_HG_REVISION string empty, +# if the mercurial program is absent, this will leave the EIGEN_HG_CHANGESET string empty, # but won't stop CMake. execute_process(COMMAND hg tip -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_HGTIP_OUTPUT) +execute_process(COMMAND hg branch -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_BRANCH_OUTPUT) -# extract the mercurial revision number from the hg tip output -string(REGEX MATCH "^changeset: *[0-9]*:([0-9;a-f]+).*" EIGEN_HG_REVISION_MATCH "${EIGEN_HGTIP_OUTPUT}") -set(EIGEN_HG_REVISION "${CMAKE_MATCH_1}") - -if(EIGEN_HG_REVISION) - set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER} (mercurial revision ${EIGEN_HG_REVISION})") -else(EIGEN_HG_REVISION) +# if this is the default (aka development) branch, extract the mercurial changeset number from the hg tip output... +if(EIGEN_BRANCH_OUTPUT MATCHES "default") +string(REGEX MATCH "^changeset: *[0-9]*:([0-9;a-f]+).*" EIGEN_HG_CHANGESET_MATCH "${EIGEN_HGTIP_OUTPUT}") +set(EIGEN_HG_CHANGESET "${CMAKE_MATCH_1}") +endif(EIGEN_BRANCH_OUTPUT MATCHES "default") +#...and show it next to the version number +if(EIGEN_HG_CHANGESET) + set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER} (mercurial changeset ${EIGEN_HG_CHANGESET})") +else(EIGEN_HG_CHANGESET) set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER}") -endif(EIGEN_HG_REVISION) - -cmake_minimum_required(VERSION 2.6.2) +endif(EIGEN_HG_CHANGESET) include(CheckCXXCompilerFlag) diff --git a/Eigen/Core b/Eigen/Core index 28ed09035..854f737d6 100644 --- a/Eigen/Core +++ b/Eigen/Core @@ -150,10 +150,12 @@ namespace Eigen { #include "src/Core/Assign.h" #endif +#include "src/Core/util/BlasUtil.h" #include "src/Core/MatrixStorage.h" #include "src/Core/NestByValue.h" #include "src/Core/ReturnByValue.h" #include "src/Core/Flagged.h" +#include "src/Core/NoAlias.h" #include "src/Core/Matrix.h" #include "src/Core/Cwise.h" #include "src/Core/CwiseBinaryOp.h" @@ -177,7 +179,6 @@ namespace Eigen { #include "src/Core/IO.h" #include "src/Core/Swap.h" #include "src/Core/CommaInitializer.h" -#include "src/Core/util/BlasUtil.h" #include "src/Core/ProductBase.h" #include "src/Core/Product.h" #include "src/Core/TriangularMatrix.h" diff --git a/Eigen/Geometry b/Eigen/Geometry index 9cae3459c..c931f28fe 100644 --- a/Eigen/Geometry +++ b/Eigen/Geometry @@ -7,6 +7,7 @@ #include "Array" #include "SVD" +#include "LU" #include #ifndef M_PI diff --git a/Eigen/Jacobi b/Eigen/Jacobi new file mode 100644 index 000000000..33a6b757e --- /dev/null +++ b/Eigen/Jacobi @@ -0,0 +1,24 @@ +#ifndef EIGEN_JACOBI_MODULE_H +#define EIGEN_JACOBI_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +namespace Eigen { + +/** \defgroup Jacobi_Module Jacobi module + * This module provides Jacobi rotations. 
+ * + * \code + * #include + * \endcode + */ + +#include "src/Jacobi/Jacobi.h" + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_JACOBI_MODULE_H diff --git a/Eigen/QR b/Eigen/QR index 5f36d0987..151c0b31b 100644 --- a/Eigen/QR +++ b/Eigen/QR @@ -6,6 +6,8 @@ #include "src/Core/util/DisableMSVCWarnings.h" #include "Cholesky" +#include "Jacobi" +#include "Householder" // Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module #if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2) diff --git a/Eigen/SVD b/Eigen/SVD index eef05564b..625071a75 100644 --- a/Eigen/SVD +++ b/Eigen/SVD @@ -2,6 +2,8 @@ #define EIGEN_SVD_MODULE_H #include "Core" +#include "Householder" +#include "Jacobi" #include "src/Core/util/DisableMSVCWarnings.h" @@ -21,6 +23,7 @@ namespace Eigen { */ #include "src/SVD/SVD.h" +#include "src/SVD/JacobiSquareSVD.h" } // namespace Eigen diff --git a/Eigen/src/Array/Random.h b/Eigen/src/Array/Random.h index 61c76bbd3..15cc6ae7c 100644 --- a/Eigen/src/Array/Random.h +++ b/Eigen/src/Array/Random.h @@ -35,13 +35,13 @@ struct ei_functor_traits > /** \array_module * - * \returns a random matrix (not an expression, the matrix is immediately evaluated). + * \returns a random matrix expression * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, - * it is redundant to pass \a rows and \a cols as arguments, so ei_random() should be used + * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used * instead. * * \addexample RandomExample \label How to create a matrix with random coefficients @@ -49,6 +49,10 @@ struct ei_functor_traits > * Example: \include MatrixBase_random_int_int.cpp * Output: \verbinclude MatrixBase_random_int_int.out * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * * \sa MatrixBase::setRandom(), MatrixBase::Random(int), MatrixBase::Random() */ template @@ -60,7 +64,7 @@ MatrixBase::Random(int rows, int cols) /** \array_module * - * \returns a random vector (not an expression, the vector is immediately evaluated). + * \returns a random vector expression * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. @@ -68,12 +72,16 @@ MatrixBase::Random(int rows, int cols) * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, - * it is redundant to pass \a size as argument, so ei_random() should be used + * it is redundant to pass \a size as argument, so Random() should be used * instead. * * Example: \include MatrixBase_random_int.cpp * Output: \verbinclude MatrixBase_random_int.out * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary vector whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random() */ template @@ -85,8 +93,7 @@ MatrixBase::Random(int size) /** \array_module * - * \returns a fixed-size random matrix or vector - * (not an expression, the matrix is immediately evaluated). 
+ * \returns a fixed-size random matrix or vector expression * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. @@ -94,6 +101,10 @@ MatrixBase::Random(int size) * Example: \include MatrixBase_random.cpp * Output: \verbinclude MatrixBase_random.out * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random(int) */ template diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index e652fd213..adca9fe2e 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -123,7 +123,7 @@ template class LDLT template bool solveInPlace(MatrixBase &bAndX) const; - void compute(const MatrixType& matrix); + LDLT& compute(const MatrixType& matrix); protected: /** \internal @@ -142,7 +142,7 @@ template class LDLT /** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix */ template -void LDLT::compute(const MatrixType& a) +LDLT& LDLT::compute(const MatrixType& a) { ei_assert(a.rows()==a.cols()); const int size = a.rows(); @@ -154,7 +154,7 @@ void LDLT::compute(const MatrixType& a) m_transpositions.setZero(); m_sign = ei_real(a.coeff(0,0))>0 ? 1:-1; m_isInitialized = true; - return; + return *this; } RealScalar cutoff = 0, biggest_in_corner; @@ -180,7 +180,7 @@ void LDLT::compute(const MatrixType& a) // in "Analysis of the Cholesky Decomposition of a Semi-definite Matrix" by // Nicholas J. Higham. Also see "Accuracy and Stability of Numerical // Algorithms" page 217, also by Higham. - cutoff = ei_abs(machine_epsilon() * size * biggest_in_corner); + cutoff = ei_abs(epsilon() * size * biggest_in_corner); m_sign = ei_real(m_matrix.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1; } @@ -235,6 +235,7 @@ void LDLT::compute(const MatrixType& a) } m_isInitialized = true; + return *this; } /** Computes the solution x of \f$ A x = b \f$ using the current decomposition of A. diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index ef04b8fe4..3ce85b2d9 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -105,7 +105,7 @@ template class LLT template bool solveInPlace(MatrixBase &bAndX) const; - void compute(const MatrixType& matrix); + LLT& compute(const MatrixType& matrix); protected: /** \internal @@ -213,9 +213,12 @@ template struct LLT_Traits }; /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix + * + * + * \returns a reference to *this */ template -void LLT::compute(const MatrixType& a) +LLT& LLT::compute(const MatrixType& a) { assert(a.rows()==a.cols()); const int size = a.rows(); @@ -223,6 +226,7 @@ void LLT::compute(const MatrixType& a) m_matrix = a; m_isInitialized = Traits::inplace_decomposition(m_matrix); + return *this; } /** Computes the solution x of \f$ A x = b \f$ using the current decomposition of A. 
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h index f3521d3dd..4bd1046a7 100644 --- a/Eigen/src/Core/Assign.h +++ b/Eigen/src/Core/Assign.h @@ -57,7 +57,10 @@ private: && ((int(Derived::Flags)&RowMajorBit)==(int(OtherDerived::Flags)&RowMajorBit)), MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0 && int(DstIsAligned) && int(SrcIsAligned), - MayLinearVectorize = MightVectorize && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit), + MayLinearVectorize = MightVectorize && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit) + && (DstIsAligned || InnerMaxSize == Dynamic),/* If the destination isn't aligned, + we have to do runtime checks and we don't unroll, so it's only good for large enough sizes. See remark below + about InnerMaxSize. */ MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize /* slice vectorization can be slow, so we only want it if the slices are big, which is indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block in a fixed-size matrix */ @@ -90,6 +93,25 @@ public: ? ( int(MayUnrollCompletely) && int(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) ) : int(NoUnrolling) }; + + static void debug() + { + EIGEN_DEBUG_VAR(DstIsAligned) + EIGEN_DEBUG_VAR(SrcIsAligned) + EIGEN_DEBUG_VAR(SrcAlignment) + EIGEN_DEBUG_VAR(InnerSize) + EIGEN_DEBUG_VAR(InnerMaxSize) + EIGEN_DEBUG_VAR(PacketSize) + EIGEN_DEBUG_VAR(MightVectorize) + EIGEN_DEBUG_VAR(MayInnerVectorize) + EIGEN_DEBUG_VAR(MayLinearVectorize) + EIGEN_DEBUG_VAR(MaySliceVectorize) + EIGEN_DEBUG_VAR(Vectorization) + EIGEN_DEBUG_VAR(UnrollingLimit) + EIGEN_DEBUG_VAR(MayUnrollCompletely) + EIGEN_DEBUG_VAR(MayUnrollInner) + EIGEN_DEBUG_VAR(Unrolling) + } }; /*************************************************************************** @@ -405,6 +427,9 @@ EIGEN_STRONG_INLINE Derived& MatrixBase YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) ei_assert(rows() == other.rows() && cols() == other.cols()); ei_assign_impl::run(derived(),other.derived()); +#ifdef EIGEN_DEBUG_ASSIGN + ei_assign_traits::debug(); +#endif return derived(); } diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h index c22696992..841d1786a 100644 --- a/Eigen/src/Core/BandMatrix.h +++ b/Eigen/src/Core/BandMatrix.h @@ -130,14 +130,14 @@ class BandMatrix : public AnyMatrixBase struct DiagonalIntReturnType { enum { - ReturnOpposite = (Options&SelfAdjoint) && (Index>0 && Supers==0 || Index<0 && Subs==0), + ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)), Conjugate = ReturnOpposite && NumTraits::IsComplex, ActualIndex = ReturnOpposite ? -Index : Index, - DiagonalSize = RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic + DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic) ? Dynamic - : ActualIndex<0 + : (ActualIndex<0 ? 
EIGEN_ENUM_MIN(ColsAtCompileTime, RowsAtCompileTime + ActualIndex) - : EIGEN_ENUM_MIN(RowsAtCompileTime, ColsAtCompileTime - ActualIndex) + : EIGEN_ENUM_MIN(RowsAtCompileTime, ColsAtCompileTime - ActualIndex)) }; typedef Block BuildType; typedef typename ei_meta_if::LhsNested LhsNested; typedef typename ei_traits::RhsNested RhsNested; + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::_RhsNested _RhsNested; EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp()) : m_lhs(lhs), m_rhs(rhs), m_functor(func) @@ -130,6 +132,10 @@ class CwiseBinaryOp : ei_no_assignment_operator, return m_functor.packetOp(m_lhs.template packet(index), m_rhs.template packet(index)); } + const _LhsNested& lhs() const { return m_lhs; } + const _RhsNested& rhs() const { return m_rhs; } + const BinaryOp& functor() const { return m_functor; } + protected: const LhsNested m_lhs; const RhsNested m_rhs; diff --git a/Eigen/src/Core/Flagged.h b/Eigen/src/Core/Flagged.h index d5135e0be..754eaf6c5 100644 --- a/Eigen/src/Core/Flagged.h +++ b/Eigen/src/Core/Flagged.h @@ -25,7 +25,9 @@ #ifndef EIGEN_FLAGGED_H #define EIGEN_FLAGGED_H -/** \class Flagged +/** \deprecated it is only used by lazy() which is deprecated + * + * \class Flagged * * \brief Expression with modified flags * @@ -111,9 +113,9 @@ template clas ExpressionTypeNested m_matrix; }; -/** \returns an expression of *this with added flags +/** \deprecated it is only used by lazy() which is deprecated * - * \addexample MarkExample \label How to mark a triangular matrix as triangular + * \returns an expression of *this with added flags * * Example: \include MatrixBase_marked.cpp * Output: \verbinclude MatrixBase_marked.out @@ -128,8 +130,9 @@ MatrixBase::marked() const return derived(); } -/** \returns an expression of *this with the following flags removed: - * EvalBeforeNestingBit and EvalBeforeAssigningBit. +/** \deprecated use MatrixBase::noalias() + * + * \returns an expression of *this with the EvalBeforeAssigningBit flag removed. * * Example: \include MatrixBase_lazy.cpp * Output: \verbinclude MatrixBase_lazy.out @@ -137,7 +140,7 @@ MatrixBase::marked() const * \sa class Flagged, marked() */ template -inline const Flagged +inline const Flagged MatrixBase::lazy() const { return derived(); diff --git a/Eigen/src/Core/IO.h b/Eigen/src/Core/IO.h index 38ac3eef3..a44802ed2 100644 --- a/Eigen/src/Core/IO.h +++ b/Eigen/src/Core/IO.h @@ -26,15 +26,22 @@ #ifndef EIGEN_IO_H #define EIGEN_IO_H -enum { Raw, AlignCols }; +enum { DontAlignCols = 1 }; +enum { StreamPrecision = -1, + FullPrecision = -2 }; /** \class IOFormat * * \brief Stores a set of parameters controlling the way matrices are printed * * List of available parameters: - * - \b precision number of digits for floating point values - * - \b flags can be either Raw (default) or AlignCols which aligns all the columns + * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision. + * The default is the special value \c StreamPrecision which means to use the + * stream's own precision setting, as set for instance using \c cout.precision(3). The other special value + * \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point + * type. 
+ * - \b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \c DontAlignCols which + * allows to disable the alignment of columns, resulting in faster code. * - \b coeffSeparator string printed between two coefficients of the same row * - \b rowSeparator string printed between two rows * - \b rowPrefix string printed at the beginning of each row @@ -50,7 +57,7 @@ enum { Raw, AlignCols }; struct IOFormat { /** Default contructor, see class IOFormat for the meaning of the parameters */ - IOFormat(int _precision=4, int _flags=Raw, + IOFormat(int _precision = StreamPrecision, int _flags = 0, const std::string& _coeffSeparator = " ", const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="", const std::string& _matPrefix="", const std::string& _matSuffix="") @@ -125,21 +132,41 @@ template std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) { const typename Derived::Nested m = _m; - + typedef typename Derived::Scalar Scalar; + int width = 0; - if (fmt.flags & AlignCols) + + std::streamsize explicit_precision; + if(fmt.precision == StreamPrecision) + { + explicit_precision = 0; + } + else if(fmt.precision == FullPrecision) + { + explicit_precision = NumTraits::HasFloatingPoint + ? std::ceil(-ei_log(epsilon())/ei_log(10.0)) + : 0; + } + else + { + explicit_precision = fmt.precision; + } + + bool align_cols = !(fmt.flags & DontAlignCols); + if(align_cols) { // compute the largest width for(int j = 1; j < m.cols(); ++j) for(int i = 0; i < m.rows(); ++i) { std::stringstream sstr; - sstr.precision(fmt.precision); + if(explicit_precision) sstr.precision(explicit_precision); sstr << m.coeff(i,j); width = std::max(width, int(sstr.str().length())); } } - s.precision(fmt.precision); + std::streamsize old_precision = 0; + if(explicit_precision) old_precision = s.precision(explicit_precision); s << fmt.matPrefix; for(int i = 0; i < m.rows(); ++i) { @@ -159,6 +186,7 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm s << fmt.rowSeparator; } s << fmt.matSuffix; + if(explicit_precision) s.precision(old_precision); return s; } diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h index 0151cc67f..9cb085b61 100644 --- a/Eigen/src/Core/MapBase.h +++ b/Eigen/src/Core/MapBase.h @@ -66,11 +66,11 @@ template class MapBase inline const Scalar* data() const { return m_data; } template struct force_aligned_impl { - AlignedDerivedType static run(MapBase& a) { return a.derived(); } + static AlignedDerivedType run(MapBase& a) { return a.derived(); } }; template struct force_aligned_impl { - AlignedDerivedType static run(MapBase& a) { return a.derived()._convertToForceAligned(); } + static AlignedDerivedType run(MapBase& a) { return a.derived()._convertToForceAligned(); } }; /** \returns an expression equivalent to \c *this but having the \c PacketAccess constant @@ -179,11 +179,11 @@ template class MapBase // explicitly add these two overloads. // Maybe there exists a better solution though. 
template - Derived& operator+=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) + Derived& operator+=(const Flagged, 0, EvalBeforeAssigningBit>& other) { return Base::operator+=(other); } template - Derived& operator-=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) + Derived& operator-=(const Flagged, 0, EvalBeforeAssigningBit>& other) { return Base::operator-=(other); } template diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h index d68c483b2..1570f01e0 100644 --- a/Eigen/src/Core/MathFunctions.h +++ b/Eigen/src/Core/MathFunctions.h @@ -1,7 +1,7 @@ // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // -// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2006-2009 Benoit Jacob // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -25,8 +25,13 @@ #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H +template inline typename NumTraits::Real epsilon() +{ + return std::numeric_limits::Real>::epsilon(); +} + template inline typename NumTraits::Real precision(); -template inline typename NumTraits::Real machine_epsilon(); + template inline T ei_random(T a, T b); template inline T ei_random(); template inline T ei_random_amplitude() @@ -51,7 +56,6 @@ template inline typename NumTraits::Real ei_hypot(T x, T y) **************/ template<> inline int precision() { return 0; } -template<> inline int machine_epsilon() { return 0; } inline int ei_real(int x) { return x; } inline int& ei_real_ref(int& x) { return x; } inline int ei_imag(int) { return 0; } @@ -106,7 +110,6 @@ inline bool ei_isApproxOrLessThan(int a, int b, int = precision()) **************/ template<> inline float precision() { return 1e-5f; } -template<> inline float machine_epsilon() { return 1.192e-07f; } inline float ei_real(float x) { return x; } inline float& ei_real_ref(float& x) { return x; } inline float ei_imag(float) { return 0.f; } @@ -154,7 +157,6 @@ inline bool ei_isApproxOrLessThan(float a, float b, float prec = precision inline double precision() { return 1e-12; } -template<> inline double machine_epsilon() { return 2.220e-16; } inline double ei_real(double x) { return x; } inline double& ei_real_ref(double& x) { return x; } @@ -203,7 +205,6 @@ inline bool ei_isApproxOrLessThan(double a, double b, double prec = precision inline float precision >() { return precision(); } -template<> inline float machine_epsilon >() { return machine_epsilon(); } inline float ei_real(const std::complex& x) { return std::real(x); } inline float ei_imag(const std::complex& x) { return std::imag(x); } inline float& ei_real_ref(std::complex& x) { return reinterpret_cast(&x)[0]; } @@ -240,7 +241,6 @@ inline bool ei_isApprox(const std::complex& a, const std::complex& **********************/ template<> inline double precision >() { return precision(); } -template<> inline double machine_epsilon >() { return machine_epsilon(); } inline double ei_real(const std::complex& x) { return std::real(x); } inline double ei_imag(const std::complex& x) { return std::imag(x); } inline double& ei_real_ref(std::complex& x) { return reinterpret_cast(&x)[0]; } @@ -278,7 +278,6 @@ inline bool ei_isApprox(const std::complex& a, const std::complex inline long double precision() { return precision(); } -template<> inline long double machine_epsilon() { return 1.084e-19l; } inline long double ei_real(long double x) { return x; } inline long double& ei_real_ref(long double& 
x) { return x; } inline long double ei_imag(long double) { return 0.; } diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index 6ec7ddbb7..1f4c6bf7a 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -326,9 +326,10 @@ template class MatrixBase template Derived& lazyAssign(const MatrixBase& other); - /** Overloaded for cache friendly product evaluation */ + /** \deprecated because .lazy() is deprecated + * Overloaded for cache friendly product evaluation */ template - Derived& lazyAssign(const Flagged& other) + Derived& lazyAssign(const Flagged& other) { return lazyAssign(other._expression()); } template @@ -336,11 +337,11 @@ template class MatrixBase template Derived& operator+=(const Flagged, 0, - EvalBeforeNestingBit | EvalBeforeAssigningBit>& other); + EvalBeforeAssigningBit>& other); template Derived& operator-=(const Flagged, 0, - EvalBeforeNestingBit | EvalBeforeAssigningBit>& other); + EvalBeforeAssigningBit>& other); #endif // not EIGEN_PARSED_BY_DOXYGEN CommaInitializer operator<< (const Scalar& s); @@ -456,6 +457,21 @@ template class MatrixBase void transposeInPlace(); const AdjointReturnType adjoint() const; void adjointInPlace(); + #ifndef EIGEN_NO_DEBUG + template + Derived& lazyAssign(const Transpose& other); + template + Derived& lazyAssign(const CwiseBinaryOp,Transpose,DerivedB>& other); + template + Derived& lazyAssign(const CwiseBinaryOp,DerivedA,Transpose >& other); + + template + Derived& lazyAssign(const CwiseUnaryOp, NestByValue > >& other); + template + Derived& lazyAssign(const CwiseBinaryOp,CwiseUnaryOp, NestByValue > >,DerivedB>& other); + template + Derived& lazyAssign(const CwiseBinaryOp,DerivedA,CwiseUnaryOp, NestByValue > > >& other); + #endif RowXpr row(int i); const RowXpr row(int i) const; @@ -614,7 +630,9 @@ template class MatrixBase template const Flagged marked() const; - const Flagged lazy() const; + const Flagged lazy() const; + + NoAlias noalias(); /** \returns number of elements to skip to pass from one row (resp. column) to another * for a row-major (resp. column-major) matrix. @@ -768,16 +786,26 @@ template class MatrixBase ////////// Householder module /////////// + void makeHouseholderInPlace(Scalar *tau, RealScalar *beta); template void makeHouseholder(EssentialPart *essential, - RealScalar *beta) const; + Scalar *tau, RealScalar *beta) const; template void applyHouseholderOnTheLeft(const EssentialPart& essential, - const RealScalar& beta); + const Scalar& tau, + Scalar* workspace); template void applyHouseholderOnTheRight(const EssentialPart& essential, - const RealScalar& beta); + const Scalar& tau, + Scalar* workspace); +///////// Jacobi module ///////// + + void applyJacobiOnTheLeft(int p, int q, Scalar c, Scalar s); + void applyJacobiOnTheRight(int p, int q, Scalar c, Scalar s); + bool makeJacobi(int p, int q, Scalar *c, Scalar *s) const; + bool makeJacobiForAtA(int p, int q, Scalar *c, Scalar *s) const; + bool makeJacobiForAAt(int p, int q, Scalar *c, Scalar *s) const; #ifdef EIGEN_MATRIXBASE_PLUGIN #include EIGEN_MATRIXBASE_PLUGIN diff --git a/Eigen/src/Core/NoAlias.h b/Eigen/src/Core/NoAlias.h new file mode 100644 index 000000000..66d8d834d --- /dev/null +++ b/Eigen/src/Core/NoAlias.h @@ -0,0 +1,112 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+//
+// Copyright (C) 2009 Gael Guennebaud
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_NOALIAS_H
+#define EIGEN_NOALIAS_H
+
+/** \class NoAlias
+  *
+  * \brief Pseudo expression providing an operator = assuming no aliasing
+  *
+  * \param ExpressionType the type of the object on which to do the lazy assignment
+  *
+  * This class represents an expression with special assignment operators
+  * assuming no aliasing between the target expression and the source expression.
+  * More precisely, it allows bypassing the EvalBeforeAssigningBit flag of the source expression.
+  * It is the return type of MatrixBase::noalias()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::noalias()
+  */
+template
+class NoAlias
+{
+  public:
+    NoAlias(ExpressionType& expression) : m_expression(expression) {}
+
+    /** Behaves like MatrixBase::lazyAssign(other)
+      * \sa MatrixBase::lazyAssign() */
+    template
+    EIGEN_STRONG_INLINE ExpressionType& operator=(const MatrixBase& other)
+    { return m_expression.lazyAssign(other.derived()); }
+
+    /** \sa MatrixBase::operator+= */
+    template
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const MatrixBase& other)
+    { return m_expression.lazyAssign(m_expression + other.derived()); }
+
+    /** \sa MatrixBase::operator-= */
+    template
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const MatrixBase& other)
+    { return m_expression.lazyAssign(m_expression - other.derived()); }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    template
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const ProductBase& other)
+    { other.derived().addTo(m_expression); return m_expression; }
+
+    template
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase& other)
+    { other.derived().subTo(m_expression); return m_expression; }
+#endif
+
+  protected:
+    ExpressionType& m_expression;
+};
+
+/** \returns a pseudo expression of \c *this with an operator= assuming
+  * no aliasing between \c *this and the source expression.
+  *
+  * More precisely, noalias() allows bypassing the EvalBeforeAssigningBit flag.
+  * Currently, even though several expressions may alias, only product
+  * expressions have this flag. Therefore, noalias() is only useful when
+  * the source expression contains a matrix product.
+  *
+  * Here are some examples where noalias is useful:
+  * \code
+  * D.noalias()  = A * B;
+  * D.noalias() += A.transpose() * B;
+  * D.noalias() -= 2 * A * B.adjoint();
+  * \endcode
+  *
+  * On the other hand the following example will lead to a \b wrong result:
+  * \code
+  * A.noalias() = A * B;
+  * \endcode
+  * because the result matrix A is also an operand of the matrix product.
Therefore, + * there is no alternative than evaluating A * B in a temporary, that is the default + * behavior when you write: + * \code + * A = A * B; + * \endcode + * + * \sa class NoAlias + */ +template +NoAlias MatrixBase::noalias() +{ + return derived(); +} + +#endif // EIGEN_NOALIAS_H diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index 18f14f75a..e1e106b80 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -153,10 +153,10 @@ class GeneralProduct GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void addTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dst, Scalar alpha) const { ei_assert(dst.rows()==1 && dst.cols()==1); - dst.coeffRef(0,0) += alpha * (m_lhs.cwise()*m_rhs).sum(); + dst.coeffRef(0,0) += alpha * (m_lhs.transpose().cwise()*m_rhs).sum(); } }; @@ -179,7 +179,7 @@ class GeneralProduct GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void addTo(Dest& dest, Scalar alpha) const + template void scaleAndAddTo(Dest& dest, Scalar alpha) const { ei_outer_product_selector::run(*this, dest, alpha); } @@ -187,7 +187,7 @@ class GeneralProduct template<> struct ei_outer_product_selector { template - static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { // FIXME make sure lhs is sequentially stored const int cols = dest.cols(); for (int j=0; j struct ei_outer_product_selector { template<> struct ei_outer_product_selector { template - static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { // FIXME make sure rhs is sequentially stored const int rows = dest.rows(); for (int i=0; i enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; typedef typename ei_meta_if::ret MatrixType; - template void addTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dst, Scalar alpha) const { ei_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); ei_gemv_selector inline int cols() const { return m_rhs.cols(); } template - inline void evalTo(Dest& dst) const { dst.setZero(); addTo(dst,1); } + inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,1); } template - inline void addTo(Dest& dst) const { addTo(dst,1); } + inline void addTo(Dest& dst) const { scaleAndAddTo(dst,1); } template - inline void subTo(Dest& dst) const { addTo(dst,-1); } + inline void subTo(Dest& dst) const { scaleAndAddTo(dst,-1); } template - inline void addTo(Dest& dst,Scalar alpha) const { derived().addTo(dst,alpha); } + inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); } PlainMatrixType eval() const { PlainMatrixType res(rows(), cols()); res.setZero(); - evalTo(res); + derived().evalTo(res); return res; } - const Flagged lazy() const - { - return *this; - } + EIGEN_DEPRECATED const Flagged lazy() const + { return *this; } const _LhsNested& lhs() const { return m_lhs; } const _RhsNested& rhs() const { return m_rhs; } @@ -141,13 +139,86 @@ class ProductBase : public MatrixBase void coeffRef(int); }; +template +class ScaledProduct; + +// Note that these two operator* functions are not defined as member +// functions of ProductBase, because, otherwise we would have to +// define all overloads defined in MatrixBase. 
Furthermore, Using +// "using Base::operator*" would not work with MSVC. +// +// Also note that here we accept any compatible scalar types +template +const ScaledProduct +operator*(const ProductBase& prod, typename Derived::Scalar x) +{ return ScaledProduct(prod.derived(), x); } + +template +typename ei_enable_if::ret, + const ScaledProduct >::type +operator*(const ProductBase& prod, typename Derived::RealScalar x) +{ return ScaledProduct(prod.derived(), x); } + + +template +const ScaledProduct +operator*(typename Derived::Scalar x,const ProductBase& prod) +{ return ScaledProduct(prod.derived(), x); } + +template +typename ei_enable_if::ret, + const ScaledProduct >::type +operator*(typename Derived::RealScalar x,const ProductBase& prod) +{ return ScaledProduct(prod.derived(), x); } + + +template +struct ei_traits > + : ei_traits, + typename NestedProduct::_LhsNested, + typename NestedProduct::_RhsNested> > +{}; + +template +class ScaledProduct + : public ProductBase, + typename NestedProduct::_LhsNested, + typename NestedProduct::_RhsNested> +{ + public: + typedef ProductBase, + typename NestedProduct::_LhsNested, + typename NestedProduct::_RhsNested> Base; + typedef typename Base::Scalar Scalar; +// EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct) + + ScaledProduct(const NestedProduct& prod, Scalar x) + : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {} + + template + inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,m_alpha); } + + template + inline void addTo(Dest& dst) const { scaleAndAddTo(dst,m_alpha); } + + template + inline void subTo(Dest& dst) const { scaleAndAddTo(dst,-m_alpha); } + + template + inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha); } + + protected: + const NestedProduct& m_prod; + Scalar m_alpha; +}; + /** \internal * Overloaded to perform an efficient C = (A*B).lazy() */ template template Derived& MatrixBase::lazyAssign(const ProductBase& other) { - other.evalTo(derived()); return derived(); + other.derived().evalTo(derived()); return derived(); } /** \internal @@ -155,9 +226,9 @@ Derived& MatrixBase::lazyAssign(const ProductBase template Derived& MatrixBase::operator+=(const Flagged, 0, - EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) + EvalBeforeAssigningBit>& other) { - other._expression().addTo(derived()); return derived(); + other._expression().derived().addTo(derived()); return derived(); } /** \internal @@ -165,9 +236,9 @@ Derived& MatrixBase::operator+=(const Flagged template Derived& MatrixBase::operator-=(const Flagged, 0, - EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) + EvalBeforeAssigningBit>& other) { - other._expression().subTo(derived()); return derived(); + other._expression().derived().subTo(derived()); return derived(); } #endif // EIGEN_PRODUCTBASE_H diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h index 22809633d..77fe79782 100644 --- a/Eigen/src/Core/StableNorm.h +++ b/Eigen/src/Core/StableNorm.h @@ -115,20 +115,20 @@ MatrixBase::blueNorm() const ei_assert(false && "the algorithm cannot be guaranteed on this computer"); } iexp = -((1-iemin)/2); - b1 = std::pow(double(ibeta),iexp); // lower boundary of midrange + b1 = RealScalar(std::pow(double(ibeta),iexp)); // lower boundary of midrange iexp = (iemax + 1 - it)/2; - b2 = std::pow(double(ibeta),iexp); // upper boundary of midrange + b2 = RealScalar(std::pow(double(ibeta),iexp)); // upper boundary of midrange iexp = (2-iemin)/2; - s1m = std::pow(double(ibeta),iexp); // scaling 
factor for lower range
+  s1m = RealScalar(std::pow(double(ibeta),iexp));  // scaling factor for lower range
   iexp = - ((iemax+it)/2);
-  s2m = std::pow(double(ibeta),iexp);  // scaling factor for upper range
+  s2m = RealScalar(std::pow(double(ibeta),iexp));  // scaling factor for upper range
   overfl = rbig*s2m;  // overfow boundary for abig
-  eps = std::pow(double(ibeta), 1-it);
+  eps = RealScalar(std::pow(double(ibeta), 1-it));
   relerr = ei_sqrt(eps);  // tolerance for neglecting asml
   abig = 1.0/eps - 1.0;
-  if (RealScalar(nbig)>abig) nmax = abig;  // largest safe n
+  if (RealScalar(nbig)>abig) nmax = int(abig);  // largest safe n
   else nmax = nbig;
 }
 int n = size();
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
index 0787daa61..8527edc2a 100644
--- a/Eigen/src/Core/Transpose.h
+++ b/Eigen/src/Core/Transpose.h
@@ -267,4 +267,92 @@ inline void MatrixBase::adjointInPlace()
   derived() = adjoint().eval();
 }
 
+#ifndef EIGEN_NO_DEBUG
+
+// The following is to detect aliasing problems in the following common cases:
+// a = a.transpose()
+// a = a.transpose() + X
+// a = X + a.transpose()
+// a = a.adjoint()
+// a = a.adjoint() + X
+// a = X + a.adjoint()
+
+template::ActualAccess>
+struct ei_extract_data_selector {
+  static typename T::Scalar* run(const T& m)
+  {
+    return &ei_blas_traits::extract(m).const_cast_derived().coeffRef(0,0);
+  }
+};
+
+template
+struct ei_extract_data_selector {
+  static typename T::Scalar* run(const T&) { return 0; }
+};
+
+template typename T::Scalar* ei_extract_data(const T& m)
+{
+  return ei_extract_data_selector::run(m);
+}
+
+template
+template
+Derived& MatrixBase::lazyAssign(const Transpose& other)
+{
+  ei_assert(ei_extract_data(other) != ei_extract_data(derived())
+            && "aliasing detected during transposition, please use transposeInPlace()");
+  return lazyAssign(static_cast >& >(other));
+}
+
+template
+template
+Derived& MatrixBase::
+lazyAssign(const CwiseBinaryOp,Transpose,DerivedB>& other)
+{
+  ei_assert(ei_extract_data(derived()) != ei_extract_data(other.lhs())
+            && "aliasing detected during transposition, please evaluate your expression");
+  return lazyAssign(static_cast,Transpose,DerivedB> >& >(other));
+}
+
+template
+template
+Derived& MatrixBase::
+lazyAssign(const CwiseBinaryOp,DerivedA,Transpose >& other)
+{
+  ei_assert(ei_extract_data(derived()) != ei_extract_data(other.rhs())
+            && "aliasing detected during transposition, please evaluate your expression");
+  return lazyAssign(static_cast,DerivedA,Transpose > >& >(other));
+}
+
+template
+template Derived&
+MatrixBase::
+lazyAssign(const CwiseUnaryOp, NestByValue > >& other)
+{
+  ei_assert(ei_extract_data(other) != ei_extract_data(derived())
+            && "aliasing detected during transposition, please use adjointInPlace()");
+  return lazyAssign(static_cast, NestByValue > > >& >(other));
+}
+
+template
+template
+Derived& MatrixBase::
+lazyAssign(const CwiseBinaryOp,CwiseUnaryOp, NestByValue > >,DerivedB>& other)
+{
+  ei_assert(ei_extract_data(derived()) != ei_extract_data(other.lhs())
+            && "aliasing detected during transposition, please evaluate your expression");
+  return lazyAssign(static_cast,CwiseUnaryOp, NestByValue > >,DerivedB> >& >(other));
+}
+
+template
+template
+Derived& MatrixBase::
+lazyAssign(const CwiseBinaryOp,DerivedA,CwiseUnaryOp, NestByValue > > >& other)
+{
+  ei_assert(ei_extract_data(derived()) != ei_extract_data(other.rhs())
+            && "aliasing detected during transposition, please evaluate your expression");
+  return lazyAssign(static_cast,DerivedA,CwiseUnaryOp, NestByValue > > > >&
>(other)); +} +#endif + #endif // EIGEN_TRANSPOSE_H diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h index c262ea7a7..b0362f20c 100644 --- a/Eigen/src/Core/TriangularMatrix.h +++ b/Eigen/src/Core/TriangularMatrix.h @@ -234,14 +234,14 @@ template class TriangularView inline TriangularView,TransposeMode> adjoint() { return m_matrix.adjoint().nestByValue(); } /** \sa MatrixBase::adjoint() const */ - const inline TriangularView,TransposeMode> adjoint() const + inline const TriangularView,TransposeMode> adjoint() const { return m_matrix.adjoint().nestByValue(); } /** \sa MatrixBase::transpose() */ inline TriangularView >,TransposeMode> transpose() { return m_matrix.transpose().nestByValue(); } /** \sa MatrixBase::transpose() const */ - const inline TriangularView >,TransposeMode> transpose() const + inline const TriangularView >,TransposeMode> transpose() const { return m_matrix.transpose().nestByValue(); } PlainMatrixType toDense() const diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h index 3fd33afbf..52e666d2f 100644 --- a/Eigen/src/Core/arch/SSE/PacketMath.h +++ b/Eigen/src/Core/arch/SSE/PacketMath.h @@ -191,22 +191,22 @@ template<> EIGEN_STRONG_INLINE Packet4i ei_ploadu(const int* from) { return template<> EIGEN_STRONG_INLINE Packet4f ei_ploadu(const float* from) { __m128 res; - asm("movsd %[from0], %[r]" : [r] "=x" (res) : [from0] "m" (*from), [dummy] "m" (*(from+1)) ); - asm("movhps %[from2], %[r]" : [r] "+x" (res) : [from2] "m" (*(from+2)), [dummy] "m" (*(from+3)) ); + asm volatile ("movsd %[from0], %[r]" : [r] "=x" (res) : [from0] "m" (*from), [dummy] "m" (*(from+1)) ); + asm volatile ("movhps %[from2], %[r]" : [r] "+x" (res) : [from2] "m" (*(from+2)), [dummy] "m" (*(from+3)) ); return res; } template<> EIGEN_STRONG_INLINE Packet2d ei_ploadu(const double* from) { __m128d res; - asm("movsd %[from0], %[r]" : [r] "=x" (res) : [from0] "m" (*from) ); - asm("movhpd %[from1], %[r]" : [r] "+x" (res) : [from1] "m" (*(from+1)) ); + asm volatile ("movsd %[from0], %[r]" : [r] "=x" (res) : [from0] "m" (*from) ); + asm volatile ("movhpd %[from1], %[r]" : [r] "+x" (res) : [from1] "m" (*(from+1)) ); return res; } template<> EIGEN_STRONG_INLINE Packet4i ei_ploadu(const int* from) { __m128i res; - asm("movsd %[from0], %[r]" : [r] "=x" (res) : [from0] "m" (*from), [dummy] "m" (*(from+1)) ); - asm("movhps %[from2], %[r]" : [r] "+x" (res) : [from2] "m" (*(from+2)), [dummy] "m" (*(from+3)) ); + asm volatile ("movsd %[from0], %[r]" : [r] "=x" (res) : [from0] "m" (*from), [dummy] "m" (*(from+1)) ); + asm volatile ("movhps %[from2], %[r]" : [r] "+x" (res) : [from2] "m" (*(from+2)), [dummy] "m" (*(from+3)) ); return res; } #endif diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h index ff0f2c1b4..8b3b13266 100644 --- a/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -137,7 +137,7 @@ class GeneralProduct GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void addTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dst, Scalar alpha) const { ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h index 358da3752..5e025b90b 100644 --- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h @@ -375,7 +375,7 @@ struct 
SelfadjointProductMatrix RhsIsSelfAdjoint = (RhsMode&SelfAdjointBit)==SelfAdjointBit }; - template void addTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dst, Scalar alpha) const { ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h index f0004cdb9..c2c33d5b8 100644 --- a/Eigen/src/Core/products/SelfadjointMatrixVector.h +++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h @@ -175,7 +175,7 @@ struct SelfadjointProductMatrix SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void addTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dst, Scalar alpha) const { ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h index c2ee39e79..701ccb644 100644 --- a/Eigen/src/Core/products/TriangularMatrixMatrix.h +++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h @@ -333,7 +333,7 @@ struct TriangularProduct TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void addTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dst, Scalar alpha) const { const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs); const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs); diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h index a21afa2f6..620b090b9 100644 --- a/Eigen/src/Core/products/TriangularMatrixVector.h +++ b/Eigen/src/Core/products/TriangularMatrixVector.h @@ -130,7 +130,7 @@ struct TriangularProduct TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template void addTo(Dest& dst, Scalar alpha) const + template void scaleAndAddTo(Dest& dst, Scalar alpha) const { ei_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); diff --git a/Eigen/src/Core/util/BlasUtil.h b/Eigen/src/Core/util/BlasUtil.h index f4690c0ca..94154108c 100644 --- a/Eigen/src/Core/util/BlasUtil.h +++ b/Eigen/src/Core/util/BlasUtil.h @@ -182,7 +182,7 @@ struct ei_blas_traits, NestedXpr> > enum { IsComplex = NumTraits::IsComplex, - NeedToConjugate = IsComplex + NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex }; static inline ExtractType extract(const XprType& x) { return Base::extract(x._expression()); } static inline Scalar extractScalarFactor(const XprType& x) { return ei_conj(Base::extractScalarFactor(x._expression())); } diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h index 98b3bd2e4..21e7f62c3 100644 --- a/Eigen/src/Core/util/Constants.h +++ b/Eigen/src/Core/util/Constants.h @@ -2,7 +2,7 @@ // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud -// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2007-2009 Benoit Jacob // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -39,10 +39,9 @@ * Also, disabling compiler warnings for integer overflow, sounds like a bad idea. * - It should be a prime number, because for example the old value 10000 led to bugs with 100x100 matrices. * - * If you wish to port Eigen to a platform where sizeof(int)==2, it is perfectly possible to set Dynamic to, say, 97. - * However, changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix. 
+ * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix. */ -const int Dynamic = 33331; +const int Dynamic = sizeof(int) >= 4 ? 33331 : 101; /** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm(). * The value Infinity there means the L-infinity norm. diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h index c9ce174d2..6f3e6a788 100644 --- a/Eigen/src/Core/util/ForwardDeclarations.h +++ b/Eigen/src/Core/util/ForwardDeclarations.h @@ -35,6 +35,7 @@ template class Matrix; template class Flagged; +template class NoAlias; template class NestByValue; template class SwapWrapper; template class Minor; diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index 956b609a0..dc3b3ee0a 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -111,6 +111,8 @@ #define EIGEN_FAST_MATH 1 #endif +#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl; + #define USING_PART_OF_NAMESPACE_EIGEN \ EIGEN_USING_MATRIX_TYPEDEFS \ using Eigen::Matrix; \ @@ -242,7 +244,7 @@ using Eigen::ei_cos; // format used in Eigen's documentation // needed to define it here as escaping characters in CMake add_definition's argument seems very problematic. -#define EIGEN_DOCS_IO_FORMAT IOFormat(3, AlignCols, " ", "\n", "", "") +#define EIGEN_DOCS_IO_FORMAT IOFormat(3, 0, " ", "\n", "", "") #define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ using Base::operator =; \ diff --git a/Eigen/src/Core/util/Meta.h b/Eigen/src/Core/util/Meta.h index 029d966fd..3a960bea6 100644 --- a/Eigen/src/Core/util/Meta.h +++ b/Eigen/src/Core/util/Meta.h @@ -155,12 +155,7 @@ template class ei_meta_sqrt { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; }; /** \internal determines whether the product of two numeric types is allowed and what the return type is */ -template struct ei_scalar_product_traits -{ - // dummy general case where T and U aren't compatible -- not allowed anyway but we catch it elsewhere - //enum { Cost = NumTraits::MulCost }; - typedef T ReturnType; -}; +template struct ei_scalar_product_traits; template struct ei_scalar_product_traits { diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h index d392e27ba..871259b08 100644 --- a/Eigen/src/Core/util/XprHelper.h +++ b/Eigen/src/Core/util/XprHelper.h @@ -90,7 +90,7 @@ class ei_compute_matrix_flags : MaxRows==1 ? MaxCols : row_major_bit ? MaxCols : MaxRows, is_big = inner_max_size == Dynamic, - is_packet_size_multiple = Rows==Dynamic || Cols==Dynamic || ((Cols*Rows) % ei_packet_traits::size) == 0, + is_packet_size_multiple = MaxRows==Dynamic || MaxCols==Dynamic || ((MaxCols*MaxRows) % ei_packet_traits::size) == 0, aligned_bit = (((Options&DontAlign)==0) && (is_big || is_packet_size_multiple)) ? AlignedBit : 0, packet_access_bit = ei_packet_traits::size > 1 && aligned_bit ? 
PacketAccessBit : 0 }; diff --git a/Eigen/src/Geometry/Rotation2D.h b/Eigen/src/Geometry/Rotation2D.h index 594d5272b..b80fcace0 100644 --- a/Eigen/src/Geometry/Rotation2D.h +++ b/Eigen/src/Geometry/Rotation2D.h @@ -85,7 +85,7 @@ public: /** Concatenates two rotations */ inline Rotation2D& operator*=(const Rotation2D& other) - { return m_angle += other.m_angle; } + { return m_angle += other.m_angle; return *this; } /** Applies the rotation to a 2D vector */ Vector2 operator* (const Vector2& vec) const diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index 642f307ed..9eb8ed535 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -460,6 +460,11 @@ public: inline const Block translationExt() const { return m_matrix.template block(0,Dim); } + + #ifdef EIGEN_TRANSFORM_PLUGIN + #include EIGEN_TRANSFORM_PLUGIN + #endif + }; /** \ingroup Geometry_Module */ diff --git a/Eigen/src/Householder/Householder.h b/Eigen/src/Householder/Householder.h index 8972806de..769ba3d43 100644 --- a/Eigen/src/Householder/Householder.h +++ b/Eigen/src/Householder/Householder.h @@ -32,54 +32,93 @@ template struct ei_decrement_size }; }; +template +void makeTrivialHouseholder( + EssentialPart *essential, + typename EssentialPart::RealScalar *beta) +{ + *beta = typename EssentialPart::RealScalar(0); + essential->setZero(); +} + +template +void MatrixBase::makeHouseholderInPlace(Scalar *tau, RealScalar *beta) +{ + VectorBlock::ret> essentialPart(derived(), 1, size()-1); + makeHouseholder(&essentialPart, tau, beta); +} + +/** Computes the elementary reflector H such that: + * \f$ H *this = [ beta 0 ... 0]^T \f$ + * where the transformation H is: + * \f$ H = I - tau v v^*\f$ + * and the vector v is: + * \f$ v^T = [1 essential^T] \f$ + * + * On output: + * \param essential the essential part of the vector \c v + * \param tau the scaling factor of the householder transformation + * \param beta the result of H * \c *this + * + * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(), + * MatrixBase::applyHouseholderOnTheRight() + */ template template void MatrixBase::makeHouseholder( EssentialPart *essential, + Scalar *tau, RealScalar *beta) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) - RealScalar _squaredNorm = squaredNorm(); - Scalar c0; - if(ei_abs2(coeff(0)) <= ei_abs2(precision()) * _squaredNorm) + VectorBlock tail(derived(), 1, size()-1); + + RealScalar tailSqNorm; + Scalar c0 = coeff(0); + + if( (size()==1 || (tailSqNorm=tail.squaredNorm()) == RealScalar(0)) && ei_imag(c0)==RealScalar(0)) { - c0 = ei_sqrt(_squaredNorm); + *tau = 0; + *beta = ei_real(c0); } else { - Scalar sign = coeff(0) / ei_abs(coeff(0)); - c0 = coeff(0) + sign * ei_sqrt(_squaredNorm); + *beta = ei_sqrt(ei_abs2(c0) + tailSqNorm); + if (ei_real(c0)>=0.) 
+ *beta = -*beta; + *essential = tail / (c0 - *beta); + *tau = ei_conj((*beta - c0) / *beta); } - VectorBlock tail(derived(), 1, size()-1); - *essential = tail / c0; - const RealScalar c0abs2 = ei_abs2(c0); - *beta = RealScalar(2) * c0abs2 / (c0abs2 + _squaredNorm - ei_abs2(coeff(0))); } template template void MatrixBase::applyHouseholderOnTheLeft( const EssentialPart& essential, - const RealScalar& beta) + const Scalar& tau, + Scalar* workspace) { - Matrix tmp(cols()); + Map > tmp(workspace,cols()); Block bottom(derived(), 1, 0, rows()-1, cols()); - tmp = row(0) + essential.adjoint() * bottom; - row(0) -= beta * tmp; - bottom -= beta * essential * tmp; + tmp.noalias() = essential.adjoint() * bottom; + tmp += row(0); + row(0) -= tau * tmp; + bottom.noalias() -= tau * essential * tmp; } template template void MatrixBase::applyHouseholderOnTheRight( const EssentialPart& essential, - const RealScalar& beta) + const Scalar& tau, + Scalar* workspace) { - Matrix tmp(rows()); + Map > tmp(workspace,rows()); Block right(derived(), 0, 1, rows(), cols()-1); - tmp = col(0) + right * essential.conjugate(); - col(0) -= beta * tmp; - right -= beta * tmp * essential.transpose(); + tmp.noalias() = right * essential.conjugate(); + tmp += col(0); + col(0) -= tau * tmp; + right.noalias() -= tau * tmp * essential.transpose(); } #endif // EIGEN_HOUSEHOLDER_H diff --git a/Eigen/src/Jacobi/CMakeLists.txt b/Eigen/src/Jacobi/CMakeLists.txt new file mode 100644 index 000000000..490dac626 --- /dev/null +++ b/Eigen/src/Jacobi/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_Jacobi_SRCS "*.h") + +INSTALL(FILES + ${Eigen_Jacobi_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Jacobi COMPONENT Devel + ) diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h new file mode 100644 index 000000000..b5940c74b --- /dev/null +++ b/Eigen/src/Jacobi/Jacobi.h @@ -0,0 +1,202 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#ifndef EIGEN_JACOBI_H +#define EIGEN_JACOBI_H + +template +void ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, typename VectorX::Scalar c, typename VectorY::Scalar s); + +template +inline void MatrixBase::applyJacobiOnTheLeft(int p, int q, Scalar c, Scalar s) +{ + RowXpr x(row(p)); + RowXpr y(row(q)); + ei_apply_rotation_in_the_plane(x, y, c, s); +} + +template +inline void MatrixBase::applyJacobiOnTheRight(int p, int q, Scalar c, Scalar s) +{ + ColXpr x(col(p)); + ColXpr y(col(q)); + ei_apply_rotation_in_the_plane(x, y, c, s); +} + +template +bool ei_makeJacobi(Scalar x, Scalar y, Scalar z, Scalar *c, Scalar *s) +{ + if(y == 0) + { + *c = Scalar(1); + *s = Scalar(0); + return false; + } + else + { + Scalar tau = (z - x) / (2 * y); + Scalar w = ei_sqrt(1 + ei_abs2(tau)); + Scalar t; + if(tau>0) + t = Scalar(1) / (tau + w); + else + t = Scalar(1) / (tau - w); + *c = Scalar(1) / ei_sqrt(1 + ei_abs2(t)); + *s = *c * t; + return true; + } +} + +template +inline bool MatrixBase::makeJacobi(int p, int q, Scalar *c, Scalar *s) const +{ + return ei_makeJacobi(coeff(p,p), coeff(p,q), coeff(q,q), c, s); +} + +template +inline bool MatrixBase::makeJacobiForAtA(int p, int q, Scalar *c, Scalar *s) const +{ + return ei_makeJacobi(ei_abs2(coeff(p,p)) + ei_abs2(coeff(q,p)), + ei_conj(coeff(p,p))*coeff(p,q) + ei_conj(coeff(q,p))*coeff(q,q), + ei_abs2(coeff(p,q)) + ei_abs2(coeff(q,q)), + c,s); +} + +template +inline bool MatrixBase::makeJacobiForAAt(int p, int q, Scalar *c, Scalar *s) const +{ + return ei_makeJacobi(ei_abs2(coeff(p,p)) + ei_abs2(coeff(p,q)), + ei_conj(coeff(q,p))*coeff(p,p) + ei_conj(coeff(q,q))*coeff(p,q), + ei_abs2(coeff(q,p)) + ei_abs2(coeff(q,q)), + c,s); +} + +template +inline void ei_normalizeJacobi(Scalar *c, Scalar *s, const Scalar& x, const Scalar& y) +{ + Scalar a = x * *c - y * *s; + Scalar b = x * *s + y * *c; + if(ei_abs(b)>ei_abs(a)) { + Scalar x = *c; + *c = -*s; + *s = x; + } +} + +template +void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, typename VectorX::Scalar c, typename VectorY::Scalar s) +{ + typedef typename VectorX::Scalar Scalar; + ei_assert(_x.size() == _y.size()); + int size = _x.size(); + int incrx = size ==1 ? 1 : &_x.coeffRef(1) - &_x.coeffRef(0); + int incry = size ==1 ? 1 : &_y.coeffRef(1) - &_y.coeffRef(0); + + Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0); + Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0); + + if (incrx==1 && incry==1) + { + // both vectors are sequentially stored in memory => vectorization + typedef typename ei_packet_traits::type Packet; + enum { PacketSize = ei_packet_traits::size, Peeling = 2 }; + + int alignedStart = ei_alignmentOffset(y, size); + int alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; + + const Packet pc = ei_pset1(c); + const Packet ps = ei_pset1(s); + + for(int i=0; i class LU * * \param matrix the matrix of which to compute the LU decomposition. * It is required to be nonzero. 
+ * + * \returns a reference to *this */ - void compute(const MatrixType& matrix); + LU& compute(const MatrixType& matrix); /** \returns the LU decomposition matrix: the upper-triangular part is U, the * unit-lower-triangular part is L (at least for square matrices; in the non-square @@ -377,7 +379,7 @@ LU::LU(const MatrixType& matrix) } template -void LU::compute(const MatrixType& matrix) +LU& LU::compute(const MatrixType& matrix) { m_originalMatrix = &matrix; m_lu = matrix; @@ -390,7 +392,7 @@ void LU::compute(const MatrixType& matrix) // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. - m_precision = machine_epsilon() * size; + m_precision = epsilon() * size; IntColVectorType rows_transpositions(matrix.rows()); IntRowVectorType cols_transpositions(matrix.cols()); @@ -435,7 +437,7 @@ void LU::compute(const MatrixType& matrix) if(k::compute(const MatrixType& matrix) std::swap(m_q.coeffRef(k), m_q.coeffRef(cols_transpositions.coeff(k))); m_det_pq = (number_of_transpositions%2) ? -1 : 1; + return *this; } template @@ -561,7 +564,7 @@ bool LU::solve( if(!isSurjective()) { // is c is in the image of U ? - RealScalar biggest_in_c = c.corner(TopLeft, m_rank, c.cols()).cwise().abs().maxCoeff(); + RealScalar biggest_in_c = m_rank>0 ? c.corner(TopLeft, m_rank, c.cols()).cwise().abs().maxCoeff() : 0; for(int col = 0; col < c.cols(); ++col) for(int row = m_rank; row < c.rows(); ++row) if(!ei_isMuchSmallerThan(c.coeff(row,col), biggest_in_c, m_precision)) diff --git a/Eigen/src/LU/PartialLU.h b/Eigen/src/LU/PartialLU.h index f5c9b8ae9..0ef59bac7 100644 --- a/Eigen/src/LU/PartialLU.h +++ b/Eigen/src/LU/PartialLU.h @@ -87,7 +87,7 @@ template class PartialLU */ PartialLU(const MatrixType& matrix); - void compute(const MatrixType& matrix); + PartialLU& compute(const MatrixType& matrix); /** \returns the LU decomposition matrix: the upper-triangular part is U, the * unit-lower-triangular part is L (at least for square matrices; in the non-square @@ -248,7 +248,7 @@ struct ei_partial_lu_impl int rrows = rows-k-1; int rsize = size-k-1; lu.col(k).end(rrows) /= lu.coeff(k,k); - lu.corner(BottomRight,rrows,rsize) -= (lu.col(k).end(rrows) * lu.row(k).end(rsize)).lazy(); + lu.corner(BottomRight,rrows,rsize).noalias() -= lu.col(k).end(rrows) * lu.row(k).end(rsize); } } } @@ -350,7 +350,7 @@ void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, int& n } template -void PartialLU::compute(const MatrixType& matrix) +PartialLU& PartialLU::compute(const MatrixType& matrix) { m_lu = matrix; m_p.resize(matrix.rows()); @@ -369,6 +369,7 @@ void PartialLU::compute(const MatrixType& matrix) std::swap(m_p.coeffRef(k), m_p.coeffRef(rows_transpositions.coeff(k))); m_isInitialized = true; + return *this; } template diff --git a/Eigen/src/QR/EigenSolver.h b/Eigen/src/QR/EigenSolver.h index dbe1aca89..bd7a76c49 100644 --- a/Eigen/src/QR/EigenSolver.h +++ b/Eigen/src/QR/EigenSolver.h @@ -118,7 +118,7 @@ template class EigenSolver return m_eivalues; } - void compute(const MatrixType& matrix); + EigenSolver& compute(const MatrixType& matrix); private: @@ -189,7 +189,7 @@ typename EigenSolver::EigenvectorType EigenSolver::eigen } template -void EigenSolver::compute(const MatrixType& matrix) +EigenSolver& EigenSolver::compute(const MatrixType& matrix) { assert(matrix.cols() == matrix.rows()); int n = matrix.cols(); @@ -205,6 +205,7 @@ void EigenSolver::compute(const MatrixType& matrix) hqr2(matH); 
m_isInitialized = true; + return *this; } // Nonsymmetric reduction to Hessenberg form. diff --git a/Eigen/src/QR/HessenbergDecomposition.h b/Eigen/src/QR/HessenbergDecomposition.h index 6ba90fb3a..6b261a8a7 100644 --- a/Eigen/src/QR/HessenbergDecomposition.h +++ b/Eigen/src/QR/HessenbergDecomposition.h @@ -93,7 +93,7 @@ template class HessenbergDecomposition * * \sa packedMatrix() */ - CoeffVectorType householderCoefficients(void) const { return m_hCoeffs; } + CoeffVectorType householderCoefficients() const { return m_hCoeffs; } /** \returns the internal result of the decomposition. * @@ -112,8 +112,8 @@ template class HessenbergDecomposition */ const MatrixType& packedMatrix(void) const { return m_matrix; } - MatrixType matrixQ(void) const; - MatrixType matrixH(void) const; + MatrixType matrixQ() const; + MatrixType matrixH() const; private: @@ -143,87 +143,42 @@ void HessenbergDecomposition::_compute(MatrixType& matA, CoeffVector { assert(matA.rows()==matA.cols()); int n = matA.rows(); - for (int i = 0; i temp(n); + for (int i = 0; i(1))) - { - hCoeffs.coeffRef(i) = 0.; - } - else - { - Scalar v0 = matA.col(i).coeff(i+1); - RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2); - if (ei_real(v0)>=0.) - beta = -beta; - matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta)); - matA.col(i).coeffRef(i+1) = beta; - Scalar h = (beta - v0) / beta; - // end of the householder transformation - - // Apply similarity transformation to remaining columns, - // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1) - matA.col(i).coeffRef(i+1) = 1; - - // first let's do A = H' A - matA.corner(BottomRight,n-i-1,n-i-1) -= ((ei_conj(h) * matA.col(i).end(n-i-1)) * - (matA.col(i).end(n-i-1).adjoint() * matA.corner(BottomRight,n-i-1,n-i-1))).lazy(); - - // now let's do A = A H - matA.corner(BottomRight,n,n-i-1) -= ((matA.corner(BottomRight,n,n-i-1) * matA.col(i).end(n-i-1)) - * (h * matA.col(i).end(n-i-1).adjoint())).lazy(); - - matA.col(i).coeffRef(i+1) = beta; - hCoeffs.coeffRef(i) = h; - } - } - if (NumTraits::IsComplex) - { - // Householder transformation on the remaining single scalar - int i = n-2; - Scalar v0 = matA.coeff(i+1,i); - - RealScalar beta = ei_sqrt(ei_abs2(v0)); - if (ei_real(v0)>=0.) 
- beta = -beta; - Scalar h = (beta - v0) / beta; + int remainingSize = n-i-1; + RealScalar beta; + Scalar h; + matA.col(i).end(remainingSize).makeHouseholderInPlace(&h, &beta); + matA.col(i).coeffRef(i+1) = beta; hCoeffs.coeffRef(i) = h; - // A = H* A - matA.corner(BottomRight,n-i-1,n-i) -= ei_conj(h) * matA.corner(BottomRight,n-i-1,n-i); + // Apply similarity transformation to remaining columns, + // i.e., compute A = H A H' + + // A = H A + matA.corner(BottomRight, remainingSize, remainingSize) + .applyHouseholderOnTheLeft(matA.col(i).end(remainingSize-1), h, &temp.coeffRef(0)); - // A = A H - matA.col(n-1) -= h * matA.col(n-1); - } - else - { - hCoeffs.coeffRef(n-2) = 0; + // A = A H' + matA.corner(BottomRight, n, remainingSize) + .applyHouseholderOnTheRight(matA.col(i).end(remainingSize-1).conjugate(), ei_conj(h), &temp.coeffRef(0)); } } /** reconstructs and returns the matrix Q */ template typename HessenbergDecomposition::MatrixType -HessenbergDecomposition::matrixQ(void) const +HessenbergDecomposition::matrixQ() const { int n = m_matrix.rows(); MatrixType matQ = MatrixType::Identity(n,n); Matrix temp(n); for (int i = n-2; i>=0; i--) { - Scalar tmp = m_matrix.coeff(i+1,i); - m_matrix.const_cast_derived().coeffRef(i+1,i) = 1; - - int rs = n-i-1; - temp.end(rs) = (m_matrix.col(i).end(rs).adjoint() * matQ.corner(BottomRight,rs,rs)).lazy(); - matQ.corner(BottomRight,rs,rs) -= (m_hCoeffs.coeff(i) * m_matrix.col(i).end(rs) * temp.end(rs)).lazy(); - - m_matrix.const_cast_derived().coeffRef(i+1,i) = tmp; + matQ.corner(BottomRight,n-i-1,n-i-1) + .applyHouseholderOnTheLeft(m_matrix.col(i).end(n-i-2), ei_conj(m_hCoeffs.coeff(i)), &temp.coeffRef(0,0)); } return matQ; } @@ -237,7 +192,7 @@ HessenbergDecomposition::matrixQ(void) const */ template typename HessenbergDecomposition::MatrixType -HessenbergDecomposition::matrixH(void) const +HessenbergDecomposition::matrixH() const { // FIXME should this function (and other similar) rather take a matrix as argument // and fill it (to avoid temporaries) diff --git a/Eigen/src/QR/QR.h b/Eigen/src/QR/QR.h index 98ef4da25..90e6f8132 100644 --- a/Eigen/src/QR/QR.h +++ b/Eigen/src/QR/QR.h @@ -45,11 +45,15 @@ template class HouseholderQR { public: + enum { + MinSizeAtCompileTime = EIGEN_ENUM_MIN(MatrixType::ColsAtCompileTime,MatrixType::RowsAtCompileTime) + }; + typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef Block MatrixRBlockType; typedef Matrix MatrixTypeR; - typedef Matrix VectorType; + typedef Matrix HCoeffsType; /** * \brief Default Constructor. 
@@ -61,7 +65,7 @@ template class HouseholderQR HouseholderQR(const MatrixType& matrix) : m_qr(matrix.rows(), matrix.cols()), - m_hCoeffs(matrix.cols()), + m_hCoeffs(std::min(matrix.rows(),matrix.cols())), m_isInitialized(false) { compute(matrix); @@ -101,80 +105,43 @@ template class HouseholderQR */ const MatrixType& matrixQR() const { return m_qr; } - void compute(const MatrixType& matrix); + HouseholderQR& compute(const MatrixType& matrix); protected: MatrixType m_qr; - VectorType m_hCoeffs; + HCoeffsType m_hCoeffs; bool m_isInitialized; }; #ifndef EIGEN_HIDE_HEAVY_CODE template -void HouseholderQR::compute(const MatrixType& matrix) +HouseholderQR& HouseholderQR::compute(const MatrixType& matrix) { - m_qr = matrix; - m_hCoeffs.resize(matrix.cols()); - int rows = matrix.rows(); int cols = matrix.cols(); - RealScalar eps2 = precision()*precision(); + int size = std::min(rows,cols); + + m_qr = matrix; + m_hCoeffs.resize(size); + Matrix temp(cols); - for (int k = 0; k < cols; ++k) + for (int k = 0; k < size; ++k) { - int remainingSize = rows-k; + int remainingRows = rows - k; + int remainingCols = cols - k -1; RealScalar beta; - Scalar v0 = m_qr.col(k).coeff(k); + m_qr.col(k).end(remainingRows).makeHouseholderInPlace(&m_hCoeffs.coeffRef(k), &beta); + m_qr.coeffRef(k,k) = beta; - if (remainingSize==1) - { - if (NumTraits::IsComplex) - { - // Householder transformation on the remaining single scalar - beta = ei_abs(v0); - if (ei_real(v0)>0) - beta = -beta; - m_qr.coeffRef(k,k) = beta; - m_hCoeffs.coeffRef(k) = (beta - v0) / beta; - } - else - { - m_hCoeffs.coeffRef(k) = 0; - } - } - else if ((beta=m_qr.col(k).end(remainingSize-1).squaredNorm())>eps2) - // FIXME what about ei_imag(v0) ?? - { - // form k-th Householder vector - beta = ei_sqrt(ei_abs2(v0)+beta); - if (ei_real(v0)>=0.) - beta = -beta; - m_qr.col(k).end(remainingSize-1) /= v0-beta; - m_qr.coeffRef(k,k) = beta; - Scalar h = m_hCoeffs.coeffRef(k) = (beta - v0) / beta; - - // apply the Householder transformation (I - h v v') to remaining columns, i.e., - // R <- (I - h v v') * R where v = [1,m_qr(k+1,k), m_qr(k+2,k), ...] - int remainingCols = cols - k -1; - if (remainingCols>0) - { - m_qr.coeffRef(k,k) = Scalar(1); - temp.end(remainingCols) = (m_qr.col(k).end(remainingSize).adjoint() - * m_qr.corner(BottomRight, remainingSize, remainingCols)).lazy(); - m_qr.corner(BottomRight, remainingSize, remainingCols) -= (ei_conj(h) * m_qr.col(k).end(remainingSize) - * temp.end(remainingCols)).lazy(); - m_qr.coeffRef(k,k) = beta; - } - } - else - { - m_hCoeffs.coeffRef(k) = 0; - } + // apply H to remaining part of m_qr from the left + m_qr.corner(BottomRight, remainingRows, remainingCols) + .applyHouseholderOnTheLeft(m_qr.col(k).end(remainingRows-1), m_hCoeffs.coeffRef(k), &temp.coeffRef(k+1)); } m_isInitialized = true; + return *this; } template @@ -186,12 +153,20 @@ void HouseholderQR::solve( { ei_assert(m_isInitialized && "HouseholderQR is not initialized."); const int rows = m_qr.rows(); + const int cols = b.cols(); ei_assert(b.rows() == rows); - result->resize(rows, b.cols()); + result->resize(rows, cols); - // TODO(keir): There is almost certainly a faster way to multiply by - // Q^T without explicitly forming matrixQ(). Investigate. 
- *result = matrixQ().transpose()*b; + *result = b; + + Matrix temp(cols); + for (int k = 0; k < cols; ++k) + { + int remainingSize = rows-k; + + result->corner(BottomRight, remainingSize, cols) + .applyHouseholderOnTheLeft(m_qr.col(k).end(remainingSize-1), m_hCoeffs.coeff(k), &temp.coeffRef(0)); + } const int rank = std::min(result->rows(), result->cols()); m_qr.corner(TopLeft, rank, rank) @@ -204,8 +179,8 @@ template MatrixType HouseholderQR::matrixQ() const { ei_assert(m_isInitialized && "HouseholderQR is not initialized."); - // compute the product Q_0 Q_1 ... Q_n-1, - // where Q_k is the k-th Householder transformation I - h_k v_k v_k' + // compute the product H'_0 H'_1 ... H'_n-1, + // where H_k is the k-th Householder transformation I - h_k v_k v_k' // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] int rows = m_qr.rows(); int cols = m_qr.cols(); @@ -213,15 +188,9 @@ MatrixType HouseholderQR::matrixQ() const Matrix temp(cols); for (int k = cols-1; k >= 0; k--) { - // to make easier the computation of the transformation, let's temporarily - // overwrite m_qr(k,k) such that the end of m_qr.col(k) is exactly our Householder vector. - Scalar beta = m_qr.coeff(k,k); - m_qr.const_cast_derived().coeffRef(k,k) = 1; - int endLength = rows-k; - - temp.end(cols-k) = (m_qr.col(k).end(endLength).adjoint() * res.corner(BottomRight,endLength, cols-k)).lazy(); - res.corner(BottomRight,endLength, cols-k) -= ((m_hCoeffs.coeff(k) * m_qr.col(k).end(endLength)) * temp.end(cols-k)).lazy(); - m_qr.const_cast_derived().coeffRef(k,k) = beta; + int remainingSize = rows-k; + res.corner(BottomRight, remainingSize, cols-k) + .applyHouseholderOnTheLeft(m_qr.col(k).end(remainingSize-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k)); } return res; } diff --git a/Eigen/src/QR/SelfAdjointEigenSolver.h b/Eigen/src/QR/SelfAdjointEigenSolver.h index b003fb4d7..809a717b9 100644 --- a/Eigen/src/QR/SelfAdjointEigenSolver.h +++ b/Eigen/src/QR/SelfAdjointEigenSolver.h @@ -90,9 +90,9 @@ template class SelfAdjointEigenSolver compute(matA, matB, computeEigenvectors); } - void compute(const MatrixType& matrix, bool computeEigenvectors = true); + SelfAdjointEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true); - void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true); + SelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true); /** \returns the computed eigen vectors as a matrix of column vectors */ MatrixType eigenvectors(void) const @@ -182,7 +182,7 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int st * \sa SelfAdjointEigenSolver(MatrixType,bool), compute(MatrixType,MatrixType,bool) */ template -void SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) +SelfAdjointEigenSolver& SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) { #ifndef NDEBUG m_eigenvectorsOk = computeEigenvectors; @@ -195,7 +195,7 @@ void SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool { m_eivalues.coeffRef(0,0) = ei_real(matrix.coeff(0,0)); m_eivec.setOnes(); - return; + return *this; } m_eivec = matrix; @@ -240,6 +240,7 @@ void SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool m_eivec.col(i).swap(m_eivec.col(k+i)); } } + return *this; } /** Computes the eigenvalues of the generalized eigen problem @@ -250,7 +251,7 @@ void SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool * \sa 
SelfAdjointEigenSolver(MatrixType,MatrixType,bool), compute(MatrixType,bool) */ template -void SelfAdjointEigenSolver:: +SelfAdjointEigenSolver& SelfAdjointEigenSolver:: compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors) { ei_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows()); @@ -282,6 +283,7 @@ compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors for (int i=0; i > q(matrixQ,n,n); + q.applyJacobiOnTheRight(k,k+1,c,s); } } } diff --git a/Eigen/src/QR/Tridiagonalization.h b/Eigen/src/QR/Tridiagonalization.h index d98322fac..2053505f6 100644 --- a/Eigen/src/QR/Tridiagonalization.h +++ b/Eigen/src/QR/Tridiagonalization.h @@ -198,65 +198,28 @@ void Tridiagonalization::_compute(MatrixType& matA, CoeffVectorType& { assert(matA.rows()==matA.cols()); int n = matA.rows(); - for (int i = 0; i aux(n); + for (int i = 0; i(1))) - { - hCoeffs.coeffRef(i) = 0.; - } - else - { - Scalar v0 = matA.col(i).coeff(i+1); - RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2); - if (ei_real(v0)>=0.) - beta = -beta; - matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta)); - matA.col(i).coeffRef(i+1) = beta; - Scalar h = (beta - v0) / beta; - // end of the householder transformation + hCoeffs.end(n-i-1) = (matA.corner(BottomRight,remainingSize,remainingSize).template selfadjointView() + * (ei_conj(h) * matA.col(i).end(remainingSize))); - // Apply similarity transformation to remaining columns, - // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1) - matA.col(i).coeffRef(i+1) = 1; + hCoeffs.end(n-i-1) += (ei_conj(h)*Scalar(-0.5)*(hCoeffs.end(remainingSize).dot(matA.col(i).end(remainingSize)))) * matA.col(i).end(n-i-1); - hCoeffs.end(n-i-1) = (matA.corner(BottomRight,n-i-1,n-i-1).template selfadjointView() - * (h * matA.col(i).end(n-i-1))); + matA.corner(BottomRight, remainingSize, remainingSize).template selfadjointView() + .rankUpdate(matA.col(i).end(remainingSize), hCoeffs.end(remainingSize), -1); - hCoeffs.end(n-i-1) += (h*Scalar(-0.5)*(hCoeffs.end(n-i-1).dot(matA.col(i).end(n-i-1)))) * matA.col(i).end(n-i-1); - - matA.corner(BottomRight, n-i-1, n-i-1).template selfadjointView() - .rankUpdate(matA.col(i).end(n-i-1), hCoeffs.end(n-i-1), -1); - - // note: at that point matA(i+1,i+1) is the (i+1)-th element of the final diagonal - // note: the sequence of the beta values leads to the subdiagonal entries - matA.col(i).coeffRef(i+1) = beta; - - hCoeffs.coeffRef(i) = h; - } - } - if (NumTraits::IsComplex) - { - // Householder transformation on the remaining single scalar - int i = n-2; - Scalar v0 = matA.col(i).coeff(i+1); - RealScalar beta = ei_abs(v0); - if (ei_real(v0)>=RealScalar(0)) - beta = -beta; matA.col(i).coeffRef(i+1) = beta; - // FIXME comparing against 1 - if(ei_isMuchSmallerThan(beta, Scalar(1))) hCoeffs.coeffRef(i) = Scalar(0); - else hCoeffs.coeffRef(i) = (beta - v0) / beta; - } - else - { - hCoeffs.coeffRef(n-2) = 0; + hCoeffs.coeffRef(i) = h; } } @@ -280,16 +243,8 @@ void Tridiagonalization::matrixQInPlace(MatrixBase* q) con Matrix aux(n); for (int i = n-2; i>=0; i--) { - Scalar tmp = m_matrix.coeff(i+1,i); - m_matrix.const_cast_derived().coeffRef(i+1,i) = 1; - - aux.end(n-i-1) = (m_hCoeffs.coeff(i) * m_matrix.col(i).end(n-i-1).adjoint() * matQ.corner(BottomRight,n-i-1,n-i-1)).lazy(); - // rank one update, TODO ! 
make it works efficiently as expected - for (int j=i+1;j +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_JACOBISQUARESVD_H +#define EIGEN_JACOBISQUARESVD_H + +/** \ingroup SVD_Module + * \nonstableyet + * + * \class JacobiSquareSVD + * + * \brief Jacobi SVD decomposition of a square matrix + * + * \param MatrixType the type of the matrix of which we are computing the SVD decomposition + * \param ComputeU whether the U matrix should be computed + * \param ComputeV whether the V matrix should be computed + * + * \sa MatrixBase::jacobiSvd() + */ +template class JacobiSquareSVD +{ + private: + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + Options = MatrixType::Options + }; + + typedef Matrix DummyMatrixType; + typedef typename ei_meta_if, + DummyMatrixType>::ret MatrixUType; + typedef typename Diagonal::PlainMatrixType SingularValuesType; + typedef Matrix RowType; + typedef Matrix ColType; + + public: + + JacobiSquareSVD() : m_isInitialized(false) {} + + JacobiSquareSVD(const MatrixType& matrix) : m_isInitialized(false) + { + compute(matrix); + } + + JacobiSquareSVD& compute(const MatrixType& matrix); + + const MatrixUType& matrixU() const + { + ei_assert(m_isInitialized && "SVD is not initialized."); + return m_matrixU; + } + + const SingularValuesType& singularValues() const + { + ei_assert(m_isInitialized && "SVD is not initialized."); + return m_singularValues; + } + + const MatrixUType& matrixV() const + { + ei_assert(m_isInitialized && "SVD is not initialized."); + return m_matrixV; + } + + protected: + MatrixUType m_matrixU; + MatrixUType m_matrixV; + SingularValuesType m_singularValues; + bool m_isInitialized; +}; + +template +JacobiSquareSVD& JacobiSquareSVD::compute(const MatrixType& matrix) +{ + MatrixType work_matrix(matrix); + int size = matrix.rows(); + if(ComputeU) m_matrixU = MatrixUType::Identity(size,size); + if(ComputeV) m_matrixV = MatrixUType::Identity(size,size); + m_singularValues.resize(size); + const RealScalar precision = 2 * epsilon(); + +sweep_again: + for(int p = 1; p < size; ++p) + { + for(int q = 0; q < p; ++q) + { + Scalar c, s; + while(std::max(ei_abs(work_matrix.coeff(p,q)),ei_abs(work_matrix.coeff(q,p))) + > std::max(ei_abs(work_matrix.coeff(p,p)),ei_abs(work_matrix.coeff(q,q)))*precision) + { + if(work_matrix.makeJacobiForAtA(p,q,&c,&s)) + { + work_matrix.applyJacobiOnTheRight(p,q,c,s); + if(ComputeV) m_matrixV.applyJacobiOnTheRight(p,q,c,s); + } + 
if(work_matrix.makeJacobiForAAt(p,q,&c,&s)) + { + ei_normalizeJacobi(&c, &s, work_matrix.coeff(p,p), work_matrix.coeff(q,p)), + work_matrix.applyJacobiOnTheLeft(p,q,c,s); + if(ComputeU) m_matrixU.applyJacobiOnTheRight(p,q,c,s); + } + } + } + } + + RealScalar biggestOnDiag = work_matrix.diagonal().cwise().abs().maxCoeff(); + RealScalar maxAllowedOffDiag = biggestOnDiag * precision; + for(int p = 0; p < size; ++p) + { + for(int q = 0; q < p; ++q) + if(ei_abs(work_matrix.coeff(p,q)) > maxAllowedOffDiag) + goto sweep_again; + for(int q = p+1; q < size; ++q) + if(ei_abs(work_matrix.coeff(p,q)) > maxAllowedOffDiag) + goto sweep_again; + } + + m_singularValues = work_matrix.diagonal().cwise().abs(); + RealScalar biggestSingularValue = m_singularValues.maxCoeff(); + + for(int i = 0; i < size; ++i) + { + RealScalar a = ei_abs(work_matrix.coeff(i,i)); + m_singularValues.coeffRef(i) = a; + if(ComputeU && !ei_isMuchSmallerThan(a, biggestSingularValue)) m_matrixU.col(i) *= work_matrix.coeff(i,i)/a; + } + + for(int i = 0; i < size; i++) + { + int pos; + m_singularValues.end(size-i).maxCoeff(&pos); + if(pos) + { + pos += i; + std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos)); + if(ComputeU) m_matrixU.col(pos).swap(m_matrixU.col(i)); + if(ComputeV) m_matrixV.col(pos).swap(m_matrixV.col(i)); + } + } + + m_isInitialized = true; + return *this; +} +#endif // EIGEN_JACOBISQUARESVD_H diff --git a/Eigen/src/SVD/SVD.h b/Eigen/src/SVD/SVD.h index 9b7d955c7..1a7e6c49a 100644 --- a/Eigen/src/SVD/SVD.h +++ b/Eigen/src/SVD/SVD.h @@ -97,8 +97,7 @@ template class SVD return m_matV; } - void compute(const MatrixType& matrix); - SVD& sort(); + SVD& compute(const MatrixType& matrix); template void computeUnitaryPositive(UnitaryType *unitary, PositiveType *positive) const; @@ -139,9 +138,11 @@ template class SVD /** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix * * \note this code has been adapted from Numerical Recipes, third edition. + * + * \returns a reference to *this */ template -void SVD::compute(const MatrixType& matrix) +SVD& SVD::compute(const MatrixType& matrix) { const int m = matrix.rows(); const int n = matrix.cols(); @@ -158,7 +159,7 @@ void SVD::compute(const MatrixType& matrix) SingularValuesType& W = m_sigma; bool flag; - int i,its,j,jj,k,l,nm; + int i,its,j,k,l,nm; Scalar anorm, c, f, g, h, s, scale, x, y, z; bool convergence = true; Scalar eps = precision(); @@ -308,13 +309,7 @@ void SVD::compute(const MatrixType& matrix) h = Scalar(1.0)/h; c = g*h; s = -f*h; - for (j=0; j::compute(const MatrixType& matrix) y = W[i]; h = s*g; g = c*g; + z = pythag(f,h); rv1[j] = z; c = f/z; @@ -355,13 +351,8 @@ void SVD::compute(const MatrixType& matrix) g = g*c - x*s; h = y*s; y *= c; - for (jj=0; jj::compute(const MatrixType& matrix) } f = c*g + s*y; x = c*y - s*g; - for (jj=0; jj::compute(const MatrixType& matrix) for (int i=0; i::compute(const MatrixType& matrix) m_matU = A.block(0,0,m,m); m_isInitialized = true; -} - -template -SVD& SVD::sort() -{ - ei_assert(m_isInitialized && "SVD is not initialized."); - - int mu = m_matU.rows(); - int mv = m_matV.rows(); - int n = m_matU.cols(); - - for (int i=0; i p) - { - k = j; - p = m_sigma.coeff(j); - } - } - if (k != i) - { - m_sigma.coeffRef(k) = m_sigma.coeff(i); // i.e. 
- m_sigma.coeffRef(i) = p; // swaps the i-th and the k-th elements - - int j = mu; - for(int s=0; j!=0; ++s, --j) - std::swap(m_matU.coeffRef(s,i), m_matU.coeffRef(s,k)); - - j = mv; - for (int s=0; j!=0; ++s, --j) - std::swap(m_matV.coeffRef(s,i), m_matV.coeffRef(s,k)); - } - } return *this; } diff --git a/bench/btl/actions/action_rot.hh b/bench/btl/actions/action_rot.hh new file mode 100644 index 000000000..df822a6d6 --- /dev/null +++ b/bench/btl/actions/action_rot.hh @@ -0,0 +1,116 @@ + +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License +// as published by the Free Software Foundation; either version 2 +// of the License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +// +#ifndef ACTION_ROT +#define ACTION_ROT +#include "utilities.h" +#include "STL_interface.hh" +#include +#include "init/init_function.hh" +#include "init/init_vector.hh" +#include "init/init_matrix.hh" + +using namespace std; + +template +class Action_rot { + +public : + + // Ctor + BTL_DONT_INLINE Action_rot( int size ):_size(size) + { + MESSAGE("Action_rot Ctor"); + + // STL matrix and vector initialization + typename Interface::stl_matrix tmp; + init_vector(A_stl,_size); + init_vector(B_stl,_size); + + // generic matrix and vector initialization + Interface::vector_from_stl(A_ref,A_stl); + Interface::vector_from_stl(A,A_stl); + Interface::vector_from_stl(B_ref,B_stl); + Interface::vector_from_stl(B,B_stl); + } + + // invalidate copy ctor + Action_rot( const Action_rot & ) + { + INFOS("illegal call to Action_rot Copy Ctor"); + exit(1); + } + + // Dtor + BTL_DONT_INLINE ~Action_rot( void ){ + MESSAGE("Action_rot Dtor"); + Interface::free_vector(A); + Interface::free_vector(B); + Interface::free_vector(A_ref); + Interface::free_vector(B_ref); + } + + // action name + static inline std::string name( void ) + { + return "rot_" + Interface::name(); + } + + double nb_op_base( void ){ + return 6.0*_size; + } + + BTL_DONT_INLINE void initialize( void ){ + Interface::copy_vector(A_ref,A,_size); + Interface::copy_vector(B_ref,B,_size); + } + + BTL_DONT_INLINE void calculate( void ) { + BTL_ASM_COMMENT("#begin rot"); + Interface::rot(A,B,0.5,0.6,_size); + BTL_ASM_COMMENT("end rot"); + } + + BTL_DONT_INLINE void check_result( void ){ + // calculation check +// Interface::vector_to_stl(X,resu_stl); + +// STL_interface::rot(A_stl,B_stl,X_stl,_size); + +// typename Interface::real_type error= +// STL_interface::norm_diff(X_stl,resu_stl); + +// if (error>1.e-3){ +// INFOS("WRONG CALCULATION...residual=" << error); +// exit(0); +// } + + } + +private : + + typename Interface::stl_vector A_stl; + typename Interface::stl_vector B_stl; + + typename Interface::gene_vector A_ref; + typename Interface::gene_vector B_ref; + + typename Interface::gene_vector A; + typename Interface::gene_vector B; + + int _size; +}; + + +#endif diff --git a/bench/btl/actions/basic_actions.hh b/bench/btl/actions/basic_actions.hh index 06a382be6..beaeea8c3 100644 --- a/bench/btl/actions/basic_actions.hh +++ 
b/bench/btl/actions/basic_actions.hh @@ -15,6 +15,7 @@ // #include "action_symm.hh" #include "action_syr2.hh" #include "action_ger.hh" +#include "action_rot.hh" // #include "action_lu_solve.hh" diff --git a/bench/btl/data/action_settings.txt b/bench/btl/data/action_settings.txt index e2d931ddc..9bee1651c 100644 --- a/bench/btl/data/action_settings.txt +++ b/bench/btl/data/action_settings.txt @@ -14,4 +14,5 @@ tridiagonalization ; "{/*1.5 Tridiagonalization}" ; "matrix size" ; 4:3000 hessenberg ; "{/*1.5 Hessenberg decomposition}" ; "matrix size" ; 4:3000 symv ; "{/*1.5 symmetric matrix vector product}" ; "matrix size" ; 4:3000 syr2 ; "{/*1.5 symmretric rank-2 update (A += u^T v + u v^T)}" ; "matrix size" ; 4:3000 -ger ; "{/*1.5 general rank-1 update (A += u v^T)}" ; "matrix size" ; 4:3000 \ No newline at end of file +ger ; "{/*1.5 general rank-1 update (A += u v^T)}" ; "matrix size" ; 4:3000 +rot ; "{/*1.5 apply rotation in the plane}" ; "vector size" ; 4:1000000 \ No newline at end of file diff --git a/bench/btl/data/go_mean b/bench/btl/data/go_mean index 5ff3a3b98..4e4fd295a 100755 --- a/bench/btl/data/go_mean +++ b/bench/btl/data/go_mean @@ -48,6 +48,7 @@ source mk_mean_script.sh hessenberg $1 11 100 300 1000 $mode $prefix source mk_mean_script.sh symv $1 11 50 300 1000 $mode $prefix source mk_mean_script.sh syr2 $1 11 50 300 1000 $mode $prefix source mk_mean_script.sh ger $1 11 50 300 1000 $mode $prefix +source mk_mean_script.sh rot $1 11 2500 100000 250000 $mode $prefix fi diff --git a/bench/btl/libs/C_BLAS/C_BLAS_interface.hh b/bench/btl/libs/C_BLAS/C_BLAS_interface.hh index 725f35944..f07df1033 100644 --- a/bench/btl/libs/C_BLAS/C_BLAS_interface.hh +++ b/bench/btl/libs/C_BLAS/C_BLAS_interface.hh @@ -179,6 +179,14 @@ public : #endif } + static inline void rot(gene_vector & A, gene_vector & B, float c, float s, int N){ + #ifdef PUREBLAS + srot_(&N,A,&intone,B,&intone,&c,&s); + #else + cblas_srot(N,A,1,B,1,c,s); + #endif + } + static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ #ifdef PUREBLAS sgemv_(&trans,&N,&N,&fone,A,&N,B,&intone,&fzero,X,&intone); diff --git a/bench/btl/libs/C_BLAS/main.cpp b/bench/btl/libs/C_BLAS/main.cpp index 6fd75812c..fdffdc979 100644 --- a/bench/btl/libs/C_BLAS/main.cpp +++ b/bench/btl/libs/C_BLAS/main.cpp @@ -45,6 +45,7 @@ int main() bench > >(MIN_MV,MAX_MV,NB_POINT); bench > >(MIN_MV,MAX_MV,NB_POINT); + bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); bench > >(MIN_MM,MAX_MM,NB_POINT); diff --git a/bench/btl/libs/eigen2/eigen2_interface.hh b/bench/btl/libs/eigen2/eigen2_interface.hh index 6a1bc5d61..1a5f89834 100644 --- a/bench/btl/libs/eigen2/eigen2_interface.hh +++ b/bench/btl/libs/eigen2/eigen2_interface.hh @@ -168,6 +168,10 @@ public : A.col(j) += X * Y[j]; } + static EIGEN_DONT_INLINE void rot(gene_vector & A, gene_vector & B, real c, real s, int N){ + ei_apply_rotation_in_the_plane(A, B, c, s); + } + static inline void atv_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){ X = (A.transpose()*B).lazy(); } diff --git a/bench/btl/libs/eigen2/main_linear.cpp b/bench/btl/libs/eigen2/main_linear.cpp index e79927b0f..e73fe2043 100644 --- a/bench/btl/libs/eigen2/main_linear.cpp +++ b/bench/btl/libs/eigen2/main_linear.cpp @@ -27,6 +27,7 @@ int main() bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); + bench > >(MIN_AXPY,MAX_AXPY,NB_POINT); return 0; } diff --git a/doc/I01_TopicLazyEvaluation.dox b/doc/I01_TopicLazyEvaluation.dox index 
16fe79e70..f40afa06d 100644 --- a/doc/I01_TopicLazyEvaluation.dox +++ b/doc/I01_TopicLazyEvaluation.dox @@ -4,7 +4,7 @@ namespace Eigen { Executive summary: Eigen has intelligent compile-time mechanisms to enable lazy evaluation and removing temporaries where appropriate. It will handle aliasing automatically in most cases, for example with matrix products. The automatic behavior can be overridden -manually by using the MatrixBase::eval() and MatrixBase::lazy() methods. +manually by using the MatrixBase::eval() and MatrixBase::noalias() methods. When you write a line of code involving a complex expression such as @@ -42,11 +42,11 @@ Eigen chooses lazy evaluation at every stage in that example, which is clearly t Eigen first evaluates matrix * matrix into a temporary matrix, and then copies it into the original \c matrix. This guarantees a correct result as we saw above that lazy evaluation gives wrong results with matrix products. It also doesn't cost much, as the cost of the matrix product itself is much higher. -What if you know what you are doing and want to force lazy evaluation? Then use \link MatrixBase::lazy() .lazy()\endlink instead. Here is an example: +What if you know that the result does no alias the operand of the product and want to force lazy evaluation? Then use \link MatrixBase::noalias() .noalias()\endlink instead. Here is an example: -\code matrix1 = (matrix2 * matrix2).lazy(); \endcode +\code matrix1.noalias() = matrix2 * matrix2; \endcode -Here, since we know that matrix2 is not the same matrix as matrix1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of lazy() here is to remove the evaluate-before-assigning \link flags flag\endlink and also the evaluate-before-nesting \link flags flag\endlink which we now discuss. +Here, since we know that matrix2 is not the same matrix as matrix1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of noalias() here is to bypass the evaluate-before-assigning \link flags flag\endlink. The second circumstance in which Eigen chooses immediate evaluation, is when it sees a nested expression such as a + b where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink. Again, the most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do @@ -54,8 +54,6 @@ Here, since we know that matrix2 is not the same matrix as matrix1, we know that the product matrix3 * matrix4 gets evaluated immediately into a temporary matrix. Indeed, experiments showed that it is often beneficial for performance to evaluate immediately matrix products when they are nested into bigger expressions. -Again, \link MatrixBase::lazy() .lazy()\endlink can be used to force lazy evaluation here. - The third circumstance in which Eigen chooses immediate evaluation, is when its cost model shows that the total cost of an operation is reduced if a sub-expression gets evaluated into a temporary. Indeed, in certain cases, an intermediate result is sufficiently costly to compute and is reused sufficiently many times, that is worth "caching". Here is an example: \code matrix1 = matrix2 * (matrix3 + matrix4); \endcode diff --git a/doc/I02_HiPerformance.dox b/doc/I02_HiPerformance.dox index 2c77f87b3..c2148ab4d 100644 --- a/doc/I02_HiPerformance.dox +++ b/doc/I02_HiPerformance.dox @@ -9,13 +9,13 @@ for small fixed size matrices. 
For large matrices, however, it might useful to take some care when writing your expressions in order to minimize useless evaluations and optimize the performance. In this page we will give a brief overview of the Eigen's internal mechanism to simplify -and evaluate complex expressions, and discuss the current limitations. +and evaluate complex product expressions, and discuss the current limitations. In particular we will focus on expressions matching level 2 and 3 BLAS routines, i.e, all kind of matrix products and triangular solvers. Indeed, in Eigen we have implemented a set of highly optimized routines which are very similar to BLAS's ones. Unlike BLAS, those routines are made available to user via a high level and -natural API. Each of these routines can perform in a single evaluation a wide variety of expressions. +natural API. Each of these routines can compute in a single evaluation a wide variety of expressions. Given an expression, the challenge is then to map it to a minimal set of primitives. As explained latter, this mechanism has some limitations, and knowing them will allow you to write faster code by making your expressions more Eigen friendly. @@ -25,18 +25,18 @@ you to write faster code by making your expressions more Eigen friendly. Let's start with the most common primitive: the matrix product of general dense matrices. In the BLAS world this corresponds to the GEMM routine. Our equivalent primitive can perform the following operation: -\f$ C += \alpha op1(A) * op2(B) \f$ +\f$ C.noalias() += \alpha op1(A) op2(B) \f$ where A, B, and C are column and/or row major matrices (or sub-matrices), alpha is a scalar value, and op1, op2 can be transpose, adjoint, conjugate, or the identity. When Eigen detects a matrix product, it analyzes both sides of the product to extract a unique scalar factor alpha, and for each side its effective storage (order and shape) and conjugate state. More precisely each side is simplified by iteratively removing trivial expressions such as scalar multiple, -negate and conjugate. Transpose and Block expressions are not evaluated and only modify the storage order +negate and conjugate. Transpose and Block expressions are not evaluated and they only modify the storage order and shape. All other expressions are immediately evaluated. For instance, the following expression: -\code m1 -= (s1 * m2.adjoint() * (-(s3*m3).conjugate()*s2)).lazy() \endcode +\code m1.noalias() -= s1 * m2.adjoint() * (-(s3*m3).conjugate()*s2) \endcode is automatically simplified to: -\code m1 += (s1*s2*conj(s3)) * m2.adjoint() * m3.conjugate() \endcode +\code m1.noalias() += (s1*s2*conj(s3)) * m2.adjoint() * m3.conjugate() \endcode which exactly matches our GEMM routine. \subsection GEMM_Limitations Limitations @@ -52,23 +52,26 @@ handled by a single GEMM-like call are correctly detected. \code m1 += m2 * m3; \endcode \code temp = m2 * m3; m1 += temp; \endcode -\code m1 += (m2 * m3).lazy(); \endcode -Use .lazy() to tell Eigen the result and right-hand-sides do not alias. +\code m1.noalias() += m2 * m3; \endcode +Use .noalias() to tell Eigen the result and right-hand-sides do not alias. + Otherwise the product m2 * m3 is evaluated into a temporary. -\code m1 += (s1 * (m2 * m3)).lazy(); \endcode -\code temp = (m2 * m3).lazy(); m1 += s1 * temp; \endcode -\code m1 += (s1 * m2 * m3).lazy(); \endcode -This is because m2 * m3 is immediately evaluated by the scalar product.
- Make sure the matrix product is the top most expression. + + +\code m1.noalias() += s1 * (m2 * m3); \endcode +This is a special feature of Eigen. Here the product between a scalar + and a matrix product does not evaluate the matrix product but instead it + returns a matrix product expression tracking the scalar scaling factor.
+ Without this optimization, the matrix product would be evaluated into a + temporary as in the next example. -\code m1 += s1 * (m2 * m3).lazy(); \endcode -\code m1 += s1 * m2 * m3; // using a naive product \endcode -\code m1 += (s1 * m2 * m3).lazy(); \endcode -Even though this expression is evaluated without temporary, it is actually even - worse than the previous case because here the .lazy() enforces Eigen to use a - naive (and slow) evaluation of the product. +\code m1.noalias() += (m2 * m3).transpose(); \endcode +\code temp = m2 * m3; m1 += temp.transpose(); \endcode +\code m1.noalias() += m3.adjoint() * m3.adjoint(); \endcode +This is because the product expression has the EvalBeforeNesting bit which + enforces the evaluation of the product by the Tranpose expression. \code m1 = m1 + m2 * m3; \endcode @@ -78,112 +81,25 @@ handled by a single GEMM-like call are correctly detected. and so the matrix product will be immediately evaluated. -\code m1 += ((s1*m2).block(....) * m3).lazy(); \endcode -\code temp = (s1*m2).block(....); m1 += (temp * m3).lazy(); \endcode -\code m1 += (s1 * m2.block(....) * m3).lazy(); \endcode +\code m1.noalias() = m4 + m2 * m3; \endcode +\code temp = m2 * m3; m1 = m4 + temp; \endcode +\code m1 = m4; m1.noalias() += m2 * m3; \endcode +First of all, here the .noalias() in the first expression is useless because + m2*m3 will be evaluated anyway. However, note how this expression can be rewritten + so that no temporary is evaluated. (tip: for very small fixed size matrix + it is slighlty better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4; + + +\code m1.noalias() += ((s1*m2).block(....) * m3); \endcode +\code temp = (s1*m2).block(....); m1 += temp * m3; \endcode +\code m1.noalias() += s1 * m2.block(....) * m3; \endcode This is because our expression analyzer is currently not able to extract trivial expressions nested in a Block expression. Therefore the nested scalar multiple cannot be properly extracted. -Of course all these remarks hold for all other kind of products that we will describe in the following paragraphs. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<table>
<tr><td>BLAS equivalent routine</td>
    <td>Efficient version <br> (compile to a single optimized evaluation)</td>
    <td>Less efficient equivalent version <br> (requires multiple evaluations)</td>
    <td>comments</td></tr>
<tr><td>GEMM</td><td>m1 = s1 * m2 * m3</td><td>m1 = s1 * (m2 * m3)</td>
    <td>This is because m2 * m3 is evaluated by the scalar product.</td></tr>
<tr><td>GEMM</td><td>m1 += s1 * m2.adjoint() * m3</td><td>m1 += (s1 * m2).adjoint() * m3</td>
    <td>This is because our expression analyzer stops at the first transpose expression and cannot extract the nested scalar multiple.</td></tr>
<tr><td>GEMM</td><td>m1 += m2.adjoint() * m3</td><td>m1 += m2.conjugate().transpose() * m3</td>
    <td>For the same reason. Use .adjoint() or .transpose().conjugate().</td></tr>
<tr><td>GEMM</td><td>m1 -= (-(s0*m2).conjugate()*s1) * (s2 * m3.adjoint() * s3)</td><td></td>
    <td>Note that s0 is automatically conjugated during the simplification of the expression.</td></tr>
<tr><td>SYR</td><td>m.selfadjointView<...>().rankUpdate(v,s)</td><td></td>
    <td>Computes m += s * v * v.adjoint()</td></tr>
<tr><td>SYR2</td><td>m.selfadjointView<...>().rankUpdate(u,v,s)</td><td></td>
    <td>Computes m += s * u * v.adjoint() + s * v * u.adjoint()</td></tr>
<tr><td>SYRK</td><td>m1.selfadjointView<...>().rankUpdate(m2.adjoint(),s)</td><td></td>
    <td>Computes m1 += s * m2.adjoint() * m2</td></tr>
<tr><td>SYMM/HEMM</td><td>m3 -= s1 * m1.selfadjointView<...>() * m2.adjoint()</td><td></td><td></td></tr>
<tr><td>SYMM/HEMM</td><td>m3 += s1 * m2.transpose() * m1.conjugate().selfadjointView<...>()</td><td></td><td></td></tr>
<tr><td>TRMM</td><td>m3 -= s1 * m1.triangularView<...>() * m2.adjoint()</td><td></td><td></td></tr>
<tr><td>TRSV / TRSM</td><td>m1.adjoint().triangularView<...>().solveInPlace(m2)</td><td></td><td></td></tr>
</table>
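[Editor's aside: for concreteness, here is a minimal, self-contained sketch of how a GEMM-like and a SYRK-like primitive from the table above are triggered from user code. It is plain C++ against the API names shown in the table; the header path and the Lower template argument are assumptions (the template arguments were lost in the table extraction), and the matrix names and sizes are purely illustrative.]

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXd m1 = MatrixXd::Random(4,4);
  MatrixXd m2 = MatrixXd::Random(4,4);
  MatrixXd m3 = MatrixXd::Random(4,4);
  double s1 = 0.5;

  // GEMM-like primitive: C += alpha * op(A) * op(B), computed in a single pass;
  // .noalias() tells Eigen the destination does not alias the operands, so no temporary is created.
  m1.noalias() += s1 * m2.adjoint() * m3;

  // SYRK-like primitive: rank update of the selfadjoint (here lower-triangular) part,
  // i.e. m1 += s1 * m2.adjoint() * m2.
  m1.selfadjointView<Lower>().rankUpdate(m2.adjoint(), s1);

  return 0;
}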
+Of course all these remarks hold for all other kind of products involving triangular or selfadjoint matrices. */ diff --git a/doc/snippets/IOFormat.cpp b/doc/snippets/IOFormat.cpp index 9db70980c..735f5dd85 100644 --- a/doc/snippets/IOFormat.cpp +++ b/doc/snippets/IOFormat.cpp @@ -1,11 +1,11 @@ std::string sep = "\n----------------------------------------\n"; -Matrix3f m1; +Matrix3d m1; m1 << 1.111111, 2, 3.33333, 4, 5, 6, 7, 8.888888, 9; -IOFormat CommaInitFmt(4, Raw, ", ", ", ", "", "", " << ", ";"); -IOFormat CleanFmt(4, AlignCols, ", ", "\n", "[", "]"); -IOFormat OctaveFmt(4, AlignCols, ", ", ";\n", "", "", "[", "]"); -IOFormat HeavyFmt(4, AlignCols, ", ", ";\n", "[", "]", "[", "]"); +IOFormat CommaInitFmt(StreamPrecision, DontAlignCols, ", ", ", ", "", "", " << ", ";"); +IOFormat CleanFmt(4, 0, ", ", "\n", "[", "]"); +IOFormat OctaveFmt(StreamPrecision, 0, ", ", ";\n", "", "", "[", "]"); +IOFormat HeavyFmt(FullPrecision, 0, ", ", ";\n", "[", "]", "[", "]"); std::cout << m1 << sep; std::cout << m1.format(CommaInitFmt) << sep; diff --git a/doc/snippets/MatrixBase_eval.cpp b/doc/snippets/MatrixBase_eval.cpp index 732293043..d70424562 100644 --- a/doc/snippets/MatrixBase_eval.cpp +++ b/doc/snippets/MatrixBase_eval.cpp @@ -4,7 +4,7 @@ m = M; cout << "Here is the matrix m:" << endl << m << endl; cout << "Now we want to replace m by its own transpose." << endl; cout << "If we do m = m.transpose(), then m becomes:" << endl; -m = m.transpose(); +m = m.transpose() * 1; cout << m << endl << "which is wrong!" << endl; cout << "Now let us instead do m = m.transpose().eval(). Then m becomes" << endl; m = M; diff --git a/doc/unsupported_modules.dox b/doc/unsupported_modules.dox index 61ea9ada2..856b05831 100644 --- a/doc/unsupported_modules.dox +++ b/doc/unsupported_modules.dox @@ -14,9 +14,21 @@ namespace Eigen { * \defgroup AdolcForward_Module Adolc forward module */ /** \ingroup Unsupported_modules - * \defgroup IterativeSolvers_Module Iterative solvers module */ + * \defgroup AlignedVector3_Module Aligned vector3 module */ + +/** \ingroup Unsupported_modules + * \defgroup AutoDiff_Module Auto Diff module */ /** \ingroup Unsupported_modules * \defgroup BVH_Module BVH module */ -} // namespace Eigen \ No newline at end of file +/** \ingroup Unsupported_modules + * \defgroup IterativeSolvers_Module Iterative solvers module */ + +/** \ingroup Unsupported_modules + * \defgroup MatrixFunctions_Module Matrix functions module */ + +/** \ingroup Unsupported_modules + * \defgroup MoreVectorization More vectorization module */ + +} // namespace Eigen diff --git a/scripts/eigen_gen_docs b/scripts/eigen_gen_docs new file mode 100644 index 000000000..3cdacc1a8 --- /dev/null +++ b/scripts/eigen_gen_docs @@ -0,0 +1,24 @@ +#!/bin/sh + +# configuration +USER='orzel' + +# step 1 : update +hg pull -u || (echo "update failed"; exit 1) + +# step 2 : build +# todo if 'build is not there, create one: +#mkdir build +(cd build && cmake .. 
&& make -j3 doc) || (echo "make failed"; exit 1) +#todo: n+1 where n = number of cpus + +#step 3 : upload +BRANCH=`hg branch` +if [ $BRANCH == "default" ] +then + BRANCH='devel' +fi +# (the '/' at the end of path are very important, see rsync documentation) +rsync -az build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-$BRANCH/ || (echo "upload failed"; exit 1) + + diff --git a/test/adjoint.cpp b/test/adjoint.cpp index 300650338..964658c65 100644 --- a/test/adjoint.cpp +++ b/test/adjoint.cpp @@ -21,7 +21,7 @@ // You should have received a copy of the GNU Lesser General Public // License and a copy of the GNU General Public License along with // Eigen. If not, see . -#define EIGEN_NO_ASSERTION_CHECKING + #include "main.h" template void adjoint(const MatrixType& m) @@ -110,7 +110,6 @@ template void adjoint(const MatrixType& m) m3.transposeInPlace(); VERIFY_IS_APPROX(m3,m1.conjugate()); - } void test_adjoint() @@ -125,5 +124,16 @@ void test_adjoint() } // test a large matrix only once CALL_SUBTEST( adjoint(Matrix()) ); + + { + MatrixXcf a(10,10), b(10,10); + VERIFY_RAISES_ASSERT(a = a.transpose()); + VERIFY_RAISES_ASSERT(a = a.transpose() + b); + VERIFY_RAISES_ASSERT(a = b + a.transpose()); + VERIFY_RAISES_ASSERT(a = a.conjugate().transpose()); + VERIFY_RAISES_ASSERT(a = a.adjoint()); + VERIFY_RAISES_ASSERT(a = a.adjoint() + b); + VERIFY_RAISES_ASSERT(a = b + a.adjoint()); + } } diff --git a/test/eigensolver_selfadjoint.cpp b/test/eigensolver_selfadjoint.cpp index 1f10b131b..2571dbf43 100644 --- a/test/eigensolver_selfadjoint.cpp +++ b/test/eigensolver_selfadjoint.cpp @@ -120,8 +120,8 @@ void test_eigensolver_selfadjoint() CALL_SUBTEST( selfadjointeigensolver(Matrix3f()) ); CALL_SUBTEST( selfadjointeigensolver(Matrix4d()) ); CALL_SUBTEST( selfadjointeigensolver(MatrixXf(10,10)) ); - CALL_SUBTEST( selfadjointeigensolver(MatrixXcd(17,17)) ); CALL_SUBTEST( selfadjointeigensolver(MatrixXd(19,19)) ); + CALL_SUBTEST( selfadjointeigensolver(MatrixXcd(17,17)) ); // some trivial but implementation-wise tricky cases CALL_SUBTEST( selfadjointeigensolver(MatrixXd(1,1)) ); diff --git a/test/householder.cpp b/test/householder.cpp index d8b11c9f1..7d300899f 100644 --- a/test/householder.cpp +++ b/test/householder.cpp @@ -27,6 +27,8 @@ template void householder(const MatrixType& m) { + static bool even = true; + even = !even; /* this test covers the following files: Householder.h */ @@ -38,46 +40,55 @@ template void householder(const MatrixType& m) typedef Matrix VectorType; typedef Matrix::ret, 1> EssentialVectorType; typedef Matrix SquareMatrixType; + + Matrix _tmp(std::max(rows,cols)); + Scalar* tmp = &_tmp.coeffRef(0,0); - RealScalar beta; + Scalar beta; + RealScalar alpha; EssentialVectorType essential; VectorType v1 = VectorType::Random(rows), v2; v2 = v1; - v1.makeHouseholder(&essential, &beta); - v1.applyHouseholderOnTheLeft(essential,beta); - + v1.makeHouseholder(&essential, &beta, &alpha); + v1.applyHouseholderOnTheLeft(essential,beta,tmp); VERIFY_IS_APPROX(v1.norm(), v2.norm()); VERIFY_IS_MUCH_SMALLER_THAN(v1.end(rows-1).norm(), v1.norm()); v1 = VectorType::Random(rows); v2 = v1; - v1.applyHouseholderOnTheLeft(essential,beta); + v1.applyHouseholderOnTheLeft(essential,beta,tmp); VERIFY_IS_APPROX(v1.norm(), v2.norm()); MatrixType m1(rows, cols), m2(rows, cols); v1 = VectorType::Random(rows); + if(even) v1.end(rows-1).setZero(); m1.colwise() = v1; m2 = m1; - m1.col(0).makeHouseholder(&essential, &beta); - m1.applyHouseholderOnTheLeft(essential,beta); + 
m1.col(0).makeHouseholder(&essential, &beta, &alpha); + m1.applyHouseholderOnTheLeft(essential,beta,tmp); VERIFY_IS_APPROX(m1.norm(), m2.norm()); VERIFY_IS_MUCH_SMALLER_THAN(m1.block(1,0,rows-1,cols).norm(), m1.norm()); + VERIFY_IS_MUCH_SMALLER_THAN(ei_imag(m1(0,0)), ei_real(m1(0,0))); + VERIFY_IS_APPROX(ei_real(m1(0,0)), alpha); v1 = VectorType::Random(rows); + if(even) v1.end(rows-1).setZero(); SquareMatrixType m3(rows,rows), m4(rows,rows); m3.rowwise() = v1.transpose(); m4 = m3; - m3.row(0).makeHouseholder(&essential, &beta); - m3.applyHouseholderOnTheRight(essential,beta); + m3.row(0).makeHouseholder(&essential, &beta, &alpha); + m3.applyHouseholderOnTheRight(essential,beta,tmp); VERIFY_IS_APPROX(m3.norm(), m4.norm()); VERIFY_IS_MUCH_SMALLER_THAN(m3.block(0,1,rows,rows-1).norm(), m3.norm()); + VERIFY_IS_MUCH_SMALLER_THAN(ei_imag(m3(0,0)), ei_real(m3(0,0))); + VERIFY_IS_APPROX(ei_real(m3(0,0)), alpha); } void test_householder() { - for(int i = 0; i < g_repeat; i++) { + for(int i = 0; i < 2*g_repeat; i++) { CALL_SUBTEST( householder(Matrix()) ); CALL_SUBTEST( householder(Matrix()) ); CALL_SUBTEST( householder(Matrix()) ); diff --git a/test/main.h b/test/main.h index 3947451cc..9ad014b64 100644 --- a/test/main.h +++ b/test/main.h @@ -45,7 +45,7 @@ namespace Eigen #define EI_PP_MAKE_STRING2(S) #S #define EI_PP_MAKE_STRING(S) EI_PP_MAKE_STRING2(S) -#define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, AlignCols, " ", "\n", "", "", "", "") +#define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "") #ifndef EIGEN_NO_ASSERTION_CHECKING diff --git a/test/product_notemporary.cpp b/test/product_notemporary.cpp index f4311e495..f6e8f4101 100644 --- a/test/product_notemporary.cpp +++ b/test/product_notemporary.cpp @@ -71,15 +71,19 @@ template void product_notemporary(const MatrixType& m) VERIFY_EVALUATION_COUNT( m3 = (m1 * m2.adjoint()), 1); VERIFY_EVALUATION_COUNT( m3 = (m1 * m2.adjoint()).lazy(), 0); + VERIFY_EVALUATION_COUNT( m3.noalias() = m1 * m2.adjoint(), 0); + // NOTE in this case the slow product is used: // FIXME: -// VERIFY_EVALUATION_COUNT( m3 = s1 * (m1 * m2.transpose()).lazy(), 0); + VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * (m1 * m2.transpose()), 0); VERIFY_EVALUATION_COUNT( m3 = (s1 * m1 * s2 * m2.adjoint()).lazy(), 0); VERIFY_EVALUATION_COUNT( m3 = (s1 * m1 * s2 * (m1*s3+m2*s2).adjoint()).lazy(), 1); VERIFY_EVALUATION_COUNT( m3 = ((s1 * m1).adjoint() * s2 * m2).lazy(), 0); - VERIFY_EVALUATION_COUNT( m3 -= (s1 * (-m1*s3).adjoint() * (s2 * m2 * s3)).lazy(), 0); - VERIFY_EVALUATION_COUNT( m3 -= (s1 * (m1.transpose() * m2)).lazy(), 1); + VERIFY_EVALUATION_COUNT( m3 += (s1 * (-m1*s3).adjoint() * (s2 * m2 * s3)).lazy(), 0); + VERIFY_EVALUATION_COUNT( m3.noalias() += s1 * (-m1*s3).adjoint() * (s2 * m2 * s3), 0); + VERIFY_EVALUATION_COUNT( m3 -= (s1 * (m1.transpose() * m2)).lazy(), 0); + VERIFY_EVALUATION_COUNT( m3.noalias() -= s1 * (m1.transpose() * m2), 0); VERIFY_EVALUATION_COUNT(( m3.block(r0,r0,r1,r1) += (-m1.block(r0,c0,r1,c1) * (s2*m2.block(r0,c0,r1,c1)).adjoint()).lazy() ), 0); VERIFY_EVALUATION_COUNT(( m3.block(r0,r0,r1,r1) -= (s1 * m1.block(r0,c0,r1,c1) * m2.block(c0,r0,c1,r1)).lazy() ), 0); diff --git a/test/product_symm.cpp b/test/product_symm.cpp index 88bac878b..1300928a2 100644 --- a/test/product_symm.cpp +++ b/test/product_symm.cpp @@ -94,6 +94,11 @@ template void symm(int size = Size, in VERIFY_IS_APPROX(rhs12 = (s1*m2.adjoint()).template selfadjointView() * (s2*rhs3).conjugate(), rhs13 = (s1*m1.adjoint()) * (s2*rhs3).conjugate()); + + m2 = m1.template 
triangularView(); rhs13 = rhs12; + VERIFY_IS_APPROX(rhs12 += (s1 * ((m2.adjoint()).template selfadjointView() * (s2*rhs3).conjugate())).lazy(), + rhs13 += (s1*m1.adjoint()) * (s2*rhs3).conjugate()); + // test matrix * selfadjoint symm_extra::run(m1,m2,rhs2,rhs22,rhs23,s1,s2); diff --git a/test/product_trsm.cpp b/test/product_trsm.cpp index 4f0fd15be..9f372df91 100644 --- a/test/product_trsm.cpp +++ b/test/product_trsm.cpp @@ -25,9 +25,9 @@ #include "main.h" #define VERIFY_TRSM(TRI,XB) { \ - XB.setRandom(); ref = XB; \ - TRI.template solveInPlace(XB); \ - VERIFY_IS_APPROX(TRI.toDense() * XB, ref); \ + (XB).setRandom(); ref = (XB); \ + (TRI).solveInPlace(XB); \ + VERIFY_IS_APPROX((TRI).toDense() * (XB), ref); \ } template void trsm(int size,int cols) diff --git a/test/qr.cpp b/test/qr.cpp index 88a447c4b..f004a36ca 100644 --- a/test/qr.cpp +++ b/test/qr.cpp @@ -37,8 +37,8 @@ template void qr(const MatrixType& m) MatrixType a = MatrixType::Random(rows,cols); HouseholderQR qrOfA(a); - VERIFY_IS_APPROX(a, qrOfA.matrixQ() * qrOfA.matrixR()); - VERIFY_IS_NOT_APPROX(a+MatrixType::Identity(rows, cols), qrOfA.matrixQ() * qrOfA.matrixR()); + VERIFY_IS_APPROX(a, qrOfA.matrixQ() * qrOfA.matrixR().toDense()); + VERIFY_IS_NOT_APPROX(a+MatrixType::Identity(rows, cols), qrOfA.matrixQ() * qrOfA.matrixR().toDense()); SquareMatrixType b = a.adjoint() * a; @@ -59,7 +59,7 @@ template void qr_invertible() { /* this test covers the following files: QR.h */ typedef typename NumTraits::Real RealScalar; - int size = ei_random(10,200); + int size = ei_random(10,50); MatrixType m1(size, size), m2(size, size), m3(size, size); m1 = MatrixType::Random(size,size); @@ -74,7 +74,6 @@ template void qr_invertible() HouseholderQR qr(m1); m3 = MatrixType::Random(size,size); qr.solve(m3, &m2); - //std::cerr << m3 - m1*m2 << "\n\n"; VERIFY_IS_APPROX(m3, m1*m2); } @@ -91,20 +90,18 @@ template void qr_verify_assert() void test_qr() { for(int i = 0; i < 1; i++) { + // FIXME : very weird bug here // CALL_SUBTEST( qr(Matrix2f()) ); -// CALL_SUBTEST( qr(Matrix4d()) ); -// CALL_SUBTEST( qr(MatrixXf(12,8)) ); -// CALL_SUBTEST( qr(MatrixXcd(5,5)) ); -// CALL_SUBTEST( qr(MatrixXcd(7,3)) ); - CALL_SUBTEST( qr(MatrixXf(47,47)) ); + CALL_SUBTEST( qr(Matrix4d()) ); + CALL_SUBTEST( qr(MatrixXf(47,40)) ); + CALL_SUBTEST( qr(MatrixXcd(17,7)) ); } for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST( qr_invertible() ); CALL_SUBTEST( qr_invertible() ); - // TODO fix issue with complex -// CALL_SUBTEST( qr_invertible() ); -// CALL_SUBTEST( qr_invertible() ); + CALL_SUBTEST( qr_invertible() ); + CALL_SUBTEST( qr_invertible() ); } CALL_SUBTEST(qr_verify_assert()); diff --git a/test/submatrices.cpp b/test/submatrices.cpp index fc7fd09ea..a819cadc2 100644 --- a/test/submatrices.cpp +++ b/test/submatrices.cpp @@ -86,7 +86,8 @@ template void submatrices(const MatrixType& m) //check row() and col() VERIFY_IS_APPROX(m1.col(c1).transpose(), m1.transpose().row(c1)); - VERIFY_IS_APPROX(m1.col(c1).dot(square.row(r1)), (square.lazy() * m1.conjugate()).eval()(r1,c1)); + // FIXME perhaps we should re-enable that without the .eval() + VERIFY_IS_APPROX(m1.col(c1).dot(square.row(r1)), (square * m1.conjugate()).eval()(r1,c1)); //check operator(), both constant and non-constant, on row() and col() m1.row(r1) += s1 * m1.row(r2); m1.col(c1) += s1 * m1.col(c2); diff --git a/test/svd.cpp b/test/svd.cpp index 93e5ea017..2ccd94764 100644 --- a/test/svd.cpp +++ b/test/svd.cpp @@ -95,7 +95,6 @@ template void svd_verify_assert() VERIFY_RAISES_ASSERT(svd.matrixU()) 
VERIFY_RAISES_ASSERT(svd.singularValues()) VERIFY_RAISES_ASSERT(svd.matrixV()) - VERIFY_RAISES_ASSERT(svd.sort()) VERIFY_RAISES_ASSERT(svd.computeUnitaryPositive(&tmp,&tmp)) VERIFY_RAISES_ASSERT(svd.computePositiveUnitary(&tmp,&tmp)) VERIFY_RAISES_ASSERT(svd.computeRotationScaling(&tmp,&tmp)) diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3 index f9f364eba..854f0601f 100644 --- a/unsupported/Eigen/AlignedVector3 +++ b/unsupported/Eigen/AlignedVector3 @@ -152,8 +152,19 @@ template class AlignedVector3 { ei_assert(m_coeffs.w()==Scalar(0)); ei_assert(other.m_coeffs.w()==Scalar(0)); + Scalar r = m_coeffs.dot(other.m_coeffs); return m_coeffs.dot(other.m_coeffs); } + + inline void normalize() + { + m_coeffs /= norm(); + } + + inline AlignedVector3 normalized() + { + return AlignedVector3(m_coeffs / norm()); + } inline Scalar sum() const { diff --git a/unsupported/Eigen/MoreVectorization b/unsupported/Eigen/MoreVectorization index e60526a3d..26a01cd29 100644 --- a/unsupported/Eigen/MoreVectorization +++ b/unsupported/Eigen/MoreVectorization @@ -6,7 +6,7 @@ namespace Eigen { /** \ingroup Unsupported_modules - * \defgroup MoreVectorization additional vectorization module + * \defgroup MoreVectorization More vectorization module */ #include "src/MoreVectorization/MathFunctions.h" diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h index cbe35a110..c2b77636c 100644 --- a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h +++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h @@ -108,7 +108,7 @@ void ei_pseudo_inverse(const CMatrix &C, CINVMatrix &CINV) /** \ingroup IterativeSolvers_Module * Constrained conjugate gradient * - * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the contraint \f$ Cx <= f @\$ + * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the contraint \f$ Cx \le f \f$ */ template diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h index 39d2809d4..dd25d7f3d 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h @@ -25,13 +25,9 @@ #ifndef EIGEN_MATRIX_EXPONENTIAL #define EIGEN_MATRIX_EXPONENTIAL -#ifdef _MSC_VER -template Scalar log2(Scalar v) { return std::log(v)/std::log(Scalar(2)); } -#endif - -/** Compute the matrix exponential. +/** \brief Compute the matrix exponential. * - * \param M matrix whose exponential is to be computed. + * \param M matrix whose exponential is to be computed. * \param result pointer to the matrix in which to store the result. * * The matrix exponential of \f$ M \f$ is defined by @@ -58,103 +54,264 @@ template Scalar log2(Scalar v) { return std::log(v)/std::log(S * SIAM J. %Matrix Anal. Applic., 26:1179–1193, * 2005. * - * \note Currently, \p M has to be a matrix of \c double . + * \note \p M has to be a matrix of \c float, \c double, + * \c complex or \c complex . 
*/ template -void ei_matrix_exponential(const MatrixBase &M, typename ei_plain_matrix_type::type* result) -{ - typedef typename ei_traits::Scalar Scalar; - typedef typename NumTraits::Real RealScalar; - typedef typename ei_plain_matrix_type::type PlainMatrixType; +EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase &M, + typename MatrixBase::PlainMatrixType* result); - ei_assert(M.rows() == M.cols()); - EIGEN_STATIC_ASSERT(NumTraits::HasFloatingPoint,NUMERIC_TYPE_MUST_BE_FLOATING_POINT) - PlainMatrixType num, den, U, V; - PlainMatrixType Id = PlainMatrixType::Identity(M.rows(), M.cols()); - typename ei_eval::type Meval = M.eval(); - RealScalar l1norm = Meval.cwise().abs().colwise().sum().maxCoeff(); - int squarings = 0; - - // Choose degree of Pade approximant, depending on norm of M - if (l1norm < 1.495585217958292e-002) { - - // Use (3,3)-Pade +/** \internal \brief Internal helper functions for computing the + * matrix exponential. + */ +namespace MatrixExponentialInternal { + +#ifdef _MSC_VER + template Scalar log2(Scalar v) { return std::log(v)/std::log(Scalar(2)); } +#endif + + /** \internal \brief Compute the (3,3)-Padé approximant to + * the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(M) \f$ around \f$ M = 0 \f$. + * + * \param M Argument of matrix exponential + * \param Id Identity matrix of same size as M + * \param tmp Temporary storage, to be provided by the caller + * \param M2 Temporary storage, to be provided by the caller + * \param U Even-degree terms in numerator of Padé approximant + * \param V Odd-degree terms in numerator of Padé approximant + */ + template + EIGEN_STRONG_INLINE void pade3(const MatrixType &M, const MatrixType& Id, MatrixType& tmp, + MatrixType& M2, MatrixType& U, MatrixType& V) + { + typedef typename ei_traits::Scalar Scalar; const Scalar b[] = {120., 60., 12., 1.}; - PlainMatrixType M2; - M2 = (Meval * Meval).lazy(); - num = b[3]*M2 + b[1]*Id; - U = (Meval * num).lazy(); + M2 = (M * M).lazy(); + tmp = b[3]*M2 + b[1]*Id; + U = (M * tmp).lazy(); V = b[2]*M2 + b[0]*Id; - - } else if (l1norm < 2.539398330063230e-001) { - - // Use (5,5)-Pade + } + + /** \internal \brief Compute the (5,5)-Padé approximant to + * the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(M) \f$ around \f$ M = 0 \f$. + * + * \param M Argument of matrix exponential + * \param Id Identity matrix of same size as M + * \param tmp Temporary storage, to be provided by the caller + * \param M2 Temporary storage, to be provided by the caller + * \param U Even-degree terms in numerator of Padé approximant + * \param V Odd-degree terms in numerator of Padé approximant + */ + template + EIGEN_STRONG_INLINE void pade5(const MatrixType &M, const MatrixType& Id, MatrixType& tmp, + MatrixType& M2, MatrixType& U, MatrixType& V) + { + typedef typename ei_traits::Scalar Scalar; const Scalar b[] = {30240., 15120., 3360., 420., 30., 1.}; - PlainMatrixType M2, M4; - M2 = (Meval * Meval).lazy(); - M4 = (M2 * M2).lazy(); - num = b[5]*M4 + b[3]*M2 + b[1]*Id; - U = (Meval * num).lazy(); + M2 = (M * M).lazy(); + MatrixType M4 = (M2 * M2).lazy(); + tmp = b[5]*M4 + b[3]*M2 + b[1]*Id; + U = (M * tmp).lazy(); V = b[4]*M4 + b[2]*M2 + b[0]*Id; - - } else if (l1norm < 9.504178996162932e-001) { - - // Use (7,7)-Pade + } + + /** \internal \brief Compute the (7,7)-Padé approximant to + * the exponential. 
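The pattern shared by all the padeN helpers is easiest to see in the scalar case: with the (3,3) coefficients b = {120, 60, 12, 1}, U collects the odd powers and V the even powers of the argument, and (V + U)/(V - U) is the Padé approximant of exp around 0. The following plain-C++ sketch (not the Eigen code path) compares that approximant with std::exp:

#include <cmath>
#include <cstdio>

// Scalar illustration of the U/V split used by pade3(): with
// b = {120, 60, 12, 1}, U collects the odd powers and V the even powers of x,
// and (V + U) / (V - U) is the (3,3) Padé approximant of exp(x) around 0.
// Plain-C++ sketch of the idea, not the Eigen implementation.
int main()
{
  const double b[] = {120., 60., 12., 1.};
  for (double x = -0.5; x <= 0.5; x += 0.25) {
    const double x2 = x * x;
    const double U = x * (b[3] * x2 + b[1]);   // odd part:  x^3 + 60 x
    const double V = b[2] * x2 + b[0];         // even part: 12 x^2 + 120
    const double pade = (V + U) / (V - U);
    std::printf("x = % .2f  pade = %.15f  exp = %.15f  err = %.2e\n",
                x, pade, std::exp(x), std::fabs(pade - std::exp(x)));
  }
  return 0;
}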
+ * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(M) \f$ around \f$ M = 0 \f$. + * + * \param M Argument of matrix exponential + * \param Id Identity matrix of same size as M + * \param tmp Temporary storage, to be provided by the caller + * \param M2 Temporary storage, to be provided by the caller + * \param U Even-degree terms in numerator of Padé approximant + * \param V Odd-degree terms in numerator of Padé approximant + */ + template + EIGEN_STRONG_INLINE void pade7(const MatrixType &M, const MatrixType& Id, MatrixType& tmp, + MatrixType& M2, MatrixType& U, MatrixType& V) + { + typedef typename ei_traits::Scalar Scalar; const Scalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.}; - PlainMatrixType M2, M4, M6; - M2 = (Meval * Meval).lazy(); - M4 = (M2 * M2).lazy(); - M6 = (M4 * M2).lazy(); - num = b[7]*M6 + b[5]*M4 + b[3]*M2 + b[1]*Id; - U = (Meval * num).lazy(); + M2 = (M * M).lazy(); + MatrixType M4 = (M2 * M2).lazy(); + MatrixType M6 = (M4 * M2).lazy(); + tmp = b[7]*M6 + b[5]*M4 + b[3]*M2 + b[1]*Id; + U = (M * tmp).lazy(); V = b[6]*M6 + b[4]*M4 + b[2]*M2 + b[0]*Id; - - } else if (l1norm < 2.097847961257068e+000) { - - // Use (9,9)-Pade + } + + /** \internal \brief Compute the (9,9)-Padé approximant to + * the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(M) \f$ around \f$ M = 0 \f$. + * + * \param M Argument of matrix exponential + * \param Id Identity matrix of same size as M + * \param tmp Temporary storage, to be provided by the caller + * \param M2 Temporary storage, to be provided by the caller + * \param U Even-degree terms in numerator of Padé approximant + * \param V Odd-degree terms in numerator of Padé approximant + */ + template + EIGEN_STRONG_INLINE void pade9(const MatrixType &M, const MatrixType& Id, MatrixType& tmp, + MatrixType& M2, MatrixType& U, MatrixType& V) + { + typedef typename ei_traits::Scalar Scalar; const Scalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240., - 2162160., 110880., 3960., 90., 1.}; - PlainMatrixType M2, M4, M6, M8; - M2 = (Meval * Meval).lazy(); - M4 = (M2 * M2).lazy(); - M6 = (M4 * M2).lazy(); - M8 = (M6 * M2).lazy(); - num = b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2 + b[1]*Id; - U = (Meval * num).lazy(); + 2162160., 110880., 3960., 90., 1.}; + M2 = (M * M).lazy(); + MatrixType M4 = (M2 * M2).lazy(); + MatrixType M6 = (M4 * M2).lazy(); + MatrixType M8 = (M6 * M2).lazy(); + tmp = b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2 + b[1]*Id; + U = (M * tmp).lazy(); V = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2 + b[0]*Id; - - } else { - - // Use (13,13)-Pade; scale matrix by power of 2 so that its norm - // is small enough - - const Scalar maxnorm = 5.371920351148152; + } + + /** \internal \brief Compute the (13,13)-Padé approximant to + * the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(M) \f$ around \f$ M = 0 \f$. 
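Spelled out from the pade13 body that follows, the degree-13 numerator is assembled as

\f[ U = M\Bigl( M^6\bigl(b_{13}M^6 + b_{11}M^4 + b_9M^2\bigr) + b_7M^6 + b_5M^4 + b_3M^2 + b_1 I \Bigr), \f]
\f[ V = M^6\bigl(b_{12}M^6 + b_{10}M^4 + b_8M^2\bigr) + b_6M^6 + b_4M^4 + b_2M^2 + b_0 I, \f]

with \f$ \exp(M) \approx (V+U)(V-U)^{-1} \f$ as before, so the whole evaluation needs only six matrix products: \f$ M^2, M^4, M^6 \f$, the two products with \f$ M^6 \f$, and the final product with \f$ M \f$.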
+ * + * \param M Argument of matrix exponential + * \param Id Identity matrix of same size as M + * \param tmp Temporary storage, to be provided by the caller + * \param M2 Temporary storage, to be provided by the caller + * \param U Even-degree terms in numerator of Padé approximant + * \param V Odd-degree terms in numerator of Padé approximant + */ + template + EIGEN_STRONG_INLINE void pade13(const MatrixType &M, const MatrixType& Id, MatrixType& tmp, + MatrixType& M2, MatrixType& U, MatrixType& V) + { + typedef typename ei_traits::Scalar Scalar; const Scalar b[] = {64764752532480000., 32382376266240000., 7771770303897600., - 1187353796428800., 129060195264000., 10559470521600., 670442572800., - 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.}; - - squarings = std::max(0, (int)ceil(log2(l1norm / maxnorm))); - PlainMatrixType A, A2, A4, A6; - A = Meval / pow(Scalar(2), squarings); - A2 = (A * A).lazy(); - A4 = (A2 * A2).lazy(); - A6 = (A4 * A2).lazy(); - num = b[13]*A6 + b[11]*A4 + b[9]*A2; - V = (A6 * num).lazy(); - num = V + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*Id; - U = (A * num).lazy(); - num = b[12]*A6 + b[10]*A4 + b[8]*A2; - V = (A6 * num).lazy() + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*Id; + 1187353796428800., 129060195264000., 10559470521600., 670442572800., + 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.}; + M2 = (M * M).lazy(); + MatrixType M4 = (M2 * M2).lazy(); + MatrixType M6 = (M4 * M2).lazy(); + V = b[13]*M6 + b[11]*M4 + b[9]*M2; + tmp = (M6 * V).lazy(); + tmp += b[7]*M6 + b[5]*M4 + b[3]*M2 + b[1]*Id; + U = (M * tmp).lazy(); + tmp = b[12]*M6 + b[10]*M4 + b[8]*M2; + V = (M6 * tmp).lazy(); + V += b[6]*M6 + b[4]*M4 + b[2]*M2 + b[0]*Id; + } + + /** \internal \brief Helper class for computing Padé + * approximants to the exponential. + */ + template ::Scalar>::Real> + struct computeUV_selector + { + /** \internal \brief Compute Padé approximant to the exponential. + * + * Computes \p U, \p V and \p squarings such that \f$ (V+U)(V-U)^{-1} \f$ + * is a Padé of \f$ \exp(2^{-\mbox{squarings}}M) \f$ + * around \f$ M = 0 \f$. The degree of the Padé + * approximant and the value of squarings are chosen such that + * the approximation error is no more than the round-off error. 
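The squarings parameter implements the classical scaling-and-squaring trick: the argument is divided by 2^squarings so that its norm falls below the accuracy threshold of the chosen Padé degree, and the approximant is then squared that many times, using exp(M) = (exp(M/2^s))^(2^s). A scalar plain-C++ illustration of that step (std::exp stands in for the Padé evaluation; this is not the Eigen code path):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Scalar illustration of the scaling-and-squaring step behind "squarings":
// divide the argument by 2^s so that it falls below the accuracy threshold of
// the chosen Padé degree, then recover exp(x) by squaring s times, using
// exp(x) = (exp(x / 2^s))^(2^s). std::exp stands in for the Padé evaluation.
int main()
{
  const double x = 10.0;                      // too large for a direct low-degree approximant
  const double maxnorm = 5.371920351148152;   // double-precision threshold from the patch
  const int s = std::max(0, (int)std::ceil(std::log(x / maxnorm) / std::log(2.0)));

  double r = std::exp(x / std::pow(2.0, s));  // approximate exp on the scaled argument
  for (int i = 0; i < s; ++i) r *= r;         // undo the scaling by repeated squaring

  std::printf("s = %d  result = %.15e  exp(x) = %.15e\n", s, r, std::exp(x));
  return 0;
}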
+ * + * \param M Argument of matrix exponential + * \param Id Identity matrix of same size as M + * \param tmp1 Temporary storage, to be provided by the caller + * \param tmp2 Temporary storage, to be provided by the caller + * \param U Even-degree terms in numerator of Padé approximant + * \param V Odd-degree terms in numerator of Padé approximant + * \param l1norm L1 norm of M + * \param squarings Pointer to integer containing number of times + * that the result needs to be squared to find the + * matrix exponential + */ + static void run(const MatrixType &M, const MatrixType& Id, MatrixType& tmp1, MatrixType& tmp2, + MatrixType& U, MatrixType& V, float l1norm, int* squarings); + }; + + template + struct computeUV_selector + { + static void run(const MatrixType &M, const MatrixType& Id, MatrixType& tmp1, MatrixType& tmp2, + MatrixType& U, MatrixType& V, float l1norm, int* squarings) + { + *squarings = 0; + if (l1norm < 4.258730016922831e-001) { + pade3(M, Id, tmp1, tmp2, U, V); + } else if (l1norm < 1.880152677804762e+000) { + pade5(M, Id, tmp1, tmp2, U, V); + } else { + const float maxnorm = 3.925724783138660; + *squarings = std::max(0, (int)ceil(log2(l1norm / maxnorm))); + MatrixType A = M / std::pow(typename ei_traits::Scalar(2), *squarings); + pade7(A, Id, tmp1, tmp2, U, V); + } + } + }; + + template + struct computeUV_selector + { + static void run(const MatrixType &M, const MatrixType& Id, MatrixType& tmp1, MatrixType& tmp2, + MatrixType& U, MatrixType& V, float l1norm, int* squarings) + { + *squarings = 0; + if (l1norm < 1.495585217958292e-002) { + pade3(M, Id, tmp1, tmp2, U, V); + } else if (l1norm < 2.539398330063230e-001) { + pade5(M, Id, tmp1, tmp2, U, V); + } else if (l1norm < 9.504178996162932e-001) { + pade7(M, Id, tmp1, tmp2, U, V); + } else if (l1norm < 2.097847961257068e+000) { + pade9(M, Id, tmp1, tmp2, U, V); + } else { + const double maxnorm = 5.371920351148152; + *squarings = std::max(0, (int)ceil(log2(l1norm / maxnorm))); + MatrixType A = M / std::pow(typename ei_traits::Scalar(2), *squarings); + pade13(A, Id, tmp1, tmp2, U, V); + } + } + }; + + /** \internal \brief Compute the matrix exponential. + * + * \param M matrix whose exponential is to be computed. + * \param result pointer to the matrix in which to store the result. 
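For double precision the selector walks through the thresholds below, picking the cheapest Padé degree that already reaches round-off accuracy and falling back to the scaled (13,13) approximant otherwise. A compact sketch of that cascade (chooseDoublePadeDegree is an illustrative helper, not part of the patch):

#include <algorithm>
#include <cmath>

// Sketch of the degree selection performed by the double specialization of
// computeUV_selector: the L1 norm decides which Padé degree already reaches
// round-off accuracy; above the last threshold the argument is scaled down and
// the (13,13) approximant is used. The thresholds are the ones appearing in
// the patch; the helper itself is illustrative.
inline int chooseDoublePadeDegree(double l1norm, int* squarings)
{
  *squarings = 0;
  if (l1norm < 1.495585217958292e-002) return 3;
  if (l1norm < 2.539398330063230e-001) return 5;
  if (l1norm < 9.504178996162932e-001) return 7;
  if (l1norm < 2.097847961257068e+000) return 9;
  const double maxnorm = 5.371920351148152;
  *squarings = std::max(0, (int)std::ceil(std::log(l1norm / maxnorm) / std::log(2.0)));
  return 13;  // degree 13, applied to the argument divided by 2^*squarings
}

The float specialization follows the same pattern with only degrees 3, 5 and 7 and correspondingly lower thresholds.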
+ */ + template + void compute(const MatrixType &M, MatrixType* result) + { + MatrixType num, den, U, V; + MatrixType Id = MatrixType::Identity(M.rows(), M.cols()); + float l1norm = M.cwise().abs().colwise().sum().maxCoeff(); + int squarings; + computeUV_selector::run(M, Id, num, den, U, V, l1norm, &squarings); + num = U + V; // numerator of Pade approximant + den = -U + V; // denominator of Pade approximant + den.partialLu().solve(num, result); + for (int i=0; i +EIGEN_STRONG_INLINE void ei_matrix_exponential(const MatrixBase &M, + typename MatrixBase::PlainMatrixType* result) +{ + ei_assert(M.rows() == M.cols()); + MatrixExponentialInternal::compute(M.eval(), result); } #endif // EIGEN_MATRIX_EXPONENTIAL diff --git a/unsupported/test/alignedvector3.cpp b/unsupported/test/alignedvector3.cpp index cf33dcceb..52d39ce50 100644 --- a/unsupported/test/alignedvector3.cpp +++ b/unsupported/test/alignedvector3.cpp @@ -40,7 +40,7 @@ void alignedvector3() VERIFY_IS_APPROX(f1,r1); VERIFY_IS_APPROX(f4,r4); - + VERIFY_IS_APPROX(f4+f1,r4+r1); VERIFY_IS_APPROX(f4-f1,r4-r1); VERIFY_IS_APPROX(f4+f1-f2,r4+r1-r2); @@ -56,6 +56,10 @@ void alignedvector3() VERIFY_IS_APPROX(f2.dot(f3),r2.dot(r3)); VERIFY_IS_APPROX(f2.cross(f3),r2.cross(r3)); VERIFY_IS_APPROX(f2.norm(),r2.norm()); + + VERIFY_IS_APPROX(f2.normalized(),r2.normalized()); + + VERIFY_IS_APPROX((f2+f1).normalized(),(r2+r1).normalized()); f2.normalize(); r2.normalize(); diff --git a/unsupported/test/matrixExponential.cpp b/unsupported/test/matrixExponential.cpp index 0ec4fae90..9a6c335d8 100644 --- a/unsupported/test/matrixExponential.cpp +++ b/unsupported/test/matrixExponential.cpp @@ -34,26 +34,47 @@ double binom(int n, int k) return res; } -void test2dRotation() +template +void test2dRotation(double tol) { - Matrix2d A, B, C; - double angle; + Matrix A, B, C; + T angle; + A << 0, 1, -1, 0; for (int i=0; i<=20; i++) { angle = pow(10, i / 5. 
- 2); - A << 0, angle, -angle, 0; B << cos(angle), sin(angle), -sin(angle), cos(angle); - ei_matrix_exponential(A, &C); - VERIFY(C.isApprox(B, 1e-14)); + ei_matrix_exponential(angle*A, &C); + VERIFY(C.isApprox(B, tol)); } } -void testPascal() +template +void test2dHyperbolicRotation(double tol) +{ + Matrix,2,2> A, B, C; + std::complex imagUnit(0,1); + T angle, ch, sh; + + for (int i=0; i<=20; i++) + { + angle = (i-10) / 2.0; + ch = std::cosh(angle); + sh = std::sinh(angle); + A << 0, angle*imagUnit, -angle*imagUnit, 0; + B << ch, sh*imagUnit, -sh*imagUnit, ch; + ei_matrix_exponential(A, &C); + VERIFY(C.isApprox(B, tol)); + } +} + +template +void testPascal(double tol) { for (int size=1; size<20; size++) { - MatrixXd A(size,size), B(size,size), C(size,size); + Matrix A(size,size), B(size,size), C(size,size); A.setZero(); for (int i=0; i void randomTest(const MatrixType& m) +template +void randomTest(const MatrixType& m, double tol) { /* this test covers the following files: Inverse.h @@ -80,16 +102,24 @@ template void randomTest(const MatrixType& m) m1 = MatrixType::Random(rows, cols); ei_matrix_exponential(m1, &m2); ei_matrix_exponential(-m1, &m3); - VERIFY(identity.isApprox(m2 * m3, 1e-13)); + VERIFY(identity.isApprox(m2 * m3, tol)); } } void test_matrixExponential() { - CALL_SUBTEST(test2dRotation()); - CALL_SUBTEST(testPascal()); - CALL_SUBTEST(randomTest(Matrix2d())); - CALL_SUBTEST(randomTest(Matrix3d())); - CALL_SUBTEST(randomTest(Matrix4d())); - CALL_SUBTEST(randomTest(MatrixXd(8,8))); + CALL_SUBTEST(test2dRotation(1e-14)); + CALL_SUBTEST(test2dRotation(1e-5)); + CALL_SUBTEST(test2dHyperbolicRotation(1e-14)); + CALL_SUBTEST(test2dHyperbolicRotation(1e-5)); + CALL_SUBTEST(testPascal(1e-5)); + CALL_SUBTEST(testPascal(1e-14)); + CALL_SUBTEST(randomTest(Matrix2d(), 1e-13)); + CALL_SUBTEST(randomTest(Matrix(), 1e-13)); + CALL_SUBTEST(randomTest(Matrix4cd(), 1e-13)); + CALL_SUBTEST(randomTest(MatrixXd(8,8), 1e-13)); + CALL_SUBTEST(randomTest(Matrix2f(), 1e-4)); + CALL_SUBTEST(randomTest(Matrix3cf(), 1e-4)); + CALL_SUBTEST(randomTest(Matrix4f(), 1e-4)); + CALL_SUBTEST(randomTest(MatrixXf(8,8), 1e-4)); }
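The 2-d rotation test relies on the identity exp(angle*A) = [[cos, sin], [-sin, cos]] for A = [[0, 1], [-1, 0]], which follows from A^2 = -I. The check can be reproduced outside the test suite with a plain truncated Taylor series, which is adequate for small 2x2 arguments (this sketch is independent of the Padé/squaring implementation above):

#include <cassert>
#include <cmath>

// Numerical check of the identity behind test2dRotation: for
// A = [[0, angle], [-angle, 0]], exp(A) equals the rotation matrix
// [[cos, sin], [-sin, cos]]. The exponential is evaluated here with a plain
// truncated Taylor series, not with the Padé/squaring code from the patch.
struct Mat2 { double a, b, c, d; };   // row-major [[a, b], [c, d]]

static Mat2 mul(const Mat2& x, const Mat2& y)
{
  return { x.a*y.a + x.b*y.c, x.a*y.b + x.b*y.d,
           x.c*y.a + x.d*y.c, x.c*y.b + x.d*y.d };
}

static Mat2 expTaylor(const Mat2& m, int terms = 30)
{
  Mat2 result = {1, 0, 0, 1};   // identity
  Mat2 term   = {1, 0, 0, 1};   // holds m^k / k!
  for (int k = 1; k <= terms; ++k) {
    term = mul(term, m);
    const double inv = 1.0 / k;  // divide by k to build m^k / k!
    term.a *= inv; term.b *= inv; term.c *= inv; term.d *= inv;
    result.a += term.a; result.b += term.b; result.c += term.c; result.d += term.d;
  }
  return result;
}

int main()
{
  for (int i = 0; i <= 20; ++i) {
    const double angle = 0.1 * i;
    const Mat2 A = {0, angle, -angle, 0};
    const Mat2 E = expTaylor(A);
    const Mat2 B = { std::cos(angle), std::sin(angle),
                    -std::sin(angle), std::cos(angle) };
    assert(std::fabs(E.a - B.a) < 1e-12 && std::fabs(E.b - B.b) < 1e-12 &&
           std::fabs(E.c - B.c) < 1e-12 && std::fabs(E.d - B.d) < 1e-12);
  }
  return 0;
}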