diff --git a/CTestConfig.cmake b/CTestConfig.cmake
index 4c0027824..0557c491a 100644
--- a/CTestConfig.cmake
+++ b/CTestConfig.cmake
@@ -4,14 +4,10 @@
 ## # The following are required to uses Dart and the Cdash dashboard
 ##   ENABLE_TESTING()
 ##   INCLUDE(CTest)
-set(CTEST_PROJECT_NAME "Eigen")
+set(CTEST_PROJECT_NAME "Eigen3.2")
 set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
 
 set(CTEST_DROP_METHOD "http")
 set(CTEST_DROP_SITE "manao.inria.fr")
-set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
+set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen3.2")
 set(CTEST_DROP_SITE_CDASH TRUE)
-set(CTEST_PROJECT_SUBPROJECTS
-Official
-Unsupported
-)
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 4faedd257..f34d26465 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -16,7 +16,10 @@ namespace Eigen {
 
 namespace internal {
-template<typename MatrixType, int UpLo> struct LDLT_Traits;
+  template<typename MatrixType, int UpLo> struct LDLT_Traits;
+
+  // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
+  enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
 }
 
 /** \ingroup Cholesky_Module
@@ -69,7 +72,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
       * The default constructor is useful in cases in which the user intends to
       * perform decompositions via LDLT::compute(const MatrixType&).
       */
-    LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
+    LDLT()
+      : m_matrix(),
+        m_transpositions(),
+        m_sign(internal::ZeroSign),
+        m_isInitialized(false)
+    {}
 
     /** \brief Default Constructor with memory preallocation
       *
@@ -81,6 +89,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
       : m_matrix(size, size),
         m_transpositions(size),
         m_temporary(size),
+        m_sign(internal::ZeroSign),
         m_isInitialized(false)
     {}
 
@@ -93,6 +102,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
       : m_matrix(matrix.rows(), matrix.cols()),
         m_transpositions(matrix.rows()),
         m_temporary(matrix.rows()),
+        m_sign(internal::ZeroSign),
         m_isInitialized(false)
     {
       compute(matrix);
@@ -139,7 +149,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     inline bool isPositive() const
     {
       eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_sign == 1;
+      return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
     }
 
     #ifdef EIGEN2_SUPPORT
@@ -153,7 +163,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     inline bool isNegative(void) const
     {
       eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_sign == -1;
+      return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
    }
 
     /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
@@ -235,7 +245,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     MatrixType m_matrix;
     TranspositionType m_transpositions;
     TmpMatrixType m_temporary;
-    int m_sign;
+    internal::SignMatrix m_sign;
     bool m_isInitialized;
 };
 
@@ -246,7 +256,7 @@ template<int UpLo> struct ldlt_inplace;
 template<> struct ldlt_inplace<Lower>
 {
   template<typename MatrixType, typename TranspositionType, typename Workspace>
-  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
   {
     using std::abs;
     typedef typename MatrixType::Scalar Scalar;
@@ -258,8 +268,9 @@ template<> struct ldlt_inplace<Lower>
     if (size <= 1)
     {
       transpositions.setIdentity();
-      if(sign)
-        *sign = numext::real(mat.coeff(0,0))>0 ? 1:-1;
+      if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
+      else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
+      else sign = ZeroSign;
       return true;
     }
 
@@ -284,7 +295,6 @@ template<> struct ldlt_inplace<Lower>
       if(biggest_in_corner < cutoff)
       {
         for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
-        if(sign) *sign = 0;
         break;
       }
 
@@ -325,15 +335,15 @@ template<> struct ldlt_inplace<Lower>
       }
 
       if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
        A21 /= mat.coeffRef(k,k);
-
-      if(sign)
-      {
-        // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
-        int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
-        if(k == 0)
-          *sign = newSign;
-        else if(*sign != newSign)
-          *sign = 0;
+
+      RealScalar realAkk = numext::real(mat.coeffRef(k,k));
+      if (sign == PositiveSemiDef) {
+        if (realAkk < 0) sign = Indefinite;
+      } else if (sign == NegativeSemiDef) {
+        if (realAkk > 0) sign = Indefinite;
+      } else if (sign == ZeroSign) {
+        if (realAkk > 0) sign = PositiveSemiDef;
+        else if (realAkk < 0) sign = NegativeSemiDef;
      }
    }
 
@@ -399,7 +409,7 @@ template<> struct ldlt_inplace<Upper>
 {
   template<typename MatrixType, typename TranspositionType, typename Workspace>
-  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
   {
     Transpose<MatrixType> matt(mat);
     return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
@@ -445,7 +455,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
   m_isInitialized = false;
   m_temporary.resize(size);
 
-  internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
+  internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
 
   m_isInitialized = true;
   return *this;
@@ -473,7 +483,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
-    m_sign = sigma>=0 ? 1 : -1;
+    m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
     m_isInitialized = true;
   }
 
diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h
index a25e823ab..1a577c2dc 100644
--- a/Eigen/src/Core/EigenBase.h
+++ b/Eigen/src/Core/EigenBase.h
@@ -141,36 +141,6 @@ Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
   return derived();
 }
 
-/** replaces \c *this by \c *this * \a other.
-  *
-  * \returns a reference to \c *this
-  */
-template<typename Derived>
-template<typename OtherDerived>
-inline Derived&
-MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
-{
-  other.derived().applyThisOnTheRight(derived());
-  return derived();
-}
-
-/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
-  */
-template<typename Derived>
-template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
-{
-  other.derived().applyThisOnTheRight(derived());
-}
-
-/** replaces \c *this by \c *this * \a other.
-  */
-template<typename Derived>
-template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
-{
-  other.derived().applyThisOnTheLeft(derived());
-}
-
 } // end namespace Eigen
 
 #endif // EIGEN_EIGENBASE_H
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 61798317e..49b69f873 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -564,6 +564,51 @@ template<typename Derived> class MatrixBase
     {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
 };
 
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** replaces \c *this by \c *this * \a other.
+  *
+  * \returns a reference to \c *this
+  *
+  * Example: \include MatrixBase_applyOnTheRight.cpp
+  * Output: \verbinclude MatrixBase_applyOnTheRight.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived&
+MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
+{
+  other.derived().applyThisOnTheRight(derived());
+  return derived();
+}
+
+/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
+  *
+  * Example: \include MatrixBase_applyOnTheRight.cpp
+  * Output: \verbinclude MatrixBase_applyOnTheRight.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
+{
+  other.derived().applyThisOnTheRight(derived());
+}
+
+/** replaces \c *this by \a other * \c *this.
+  *
+  * Example: \include MatrixBase_applyOnTheLeft.cpp
+  * Output: \verbinclude MatrixBase_applyOnTheLeft.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
+{
+  other.derived().applyThisOnTheLeft(derived());
+}
+
 } // end namespace Eigen
 
 #endif // EIGEN_MATRIXBASE_H
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
index c219e2f53..525620bad 100644
--- a/Eigen/src/Core/StableNorm.h
+++ b/Eigen/src/Core/StableNorm.h
@@ -17,16 +17,29 @@ namespace internal {
 template<typename ExpressionType, typename Scalar>
 inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
 {
-  Scalar max = bl.cwiseAbs().maxCoeff();
-  if (max>scale)
+  using std::max;
+  Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
+
+  if (maxCoeff>scale)
   {
-    ssq = ssq * numext::abs2(scale/max);
-    scale = max;
-    invScale = Scalar(1)/scale;
+    ssq = ssq * numext::abs2(scale/maxCoeff);
+    Scalar tmp = Scalar(1)/maxCoeff;
+    if(tmp > NumTraits<Scalar>::highest())
+    {
+      invScale = NumTraits<Scalar>::highest();
+      scale = Scalar(1)/invScale;
+    }
+    else
+    {
+      scale = maxCoeff;
+      invScale = tmp;
+    }
   }
-  // TODO if the max is much much smaller than the current scale,
+
+  // TODO if the maxCoeff is much much smaller than the current scale,
   // then we can neglect this sub vector
-  ssq += (bl*invScale).squaredNorm();
+  if(scale>Scalar(0)) // if scale==0, then bl is 0
+    ssq += (bl*invScale).squaredNorm();
 }
 
 template<typename Derived>
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index 2f2398bbf..88cf1c30d 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -608,7 +608,7 @@ template<typename T> class aligned_stack_memory_handler
   */
 #ifdef EIGEN_ALLOCA
   // The native alloca() that comes with llvm aligns buffer on 16 bytes even when AVX is enabled.
- #if defined(__arm__) || EIGEN_ALIGN_BYTES > 16 +#if defined(__arm__) || defined(_WIN32) || EIGEN_ALIGN_BYTES > 16 #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast((reinterpret_cast(EIGEN_ALLOCA(SIZE+EIGEN_ALIGN_BYTES)) & ~(size_t(EIGEN_ALIGN_BYTES-1))) + EIGEN_ALIGN_BYTES) #else #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA @@ -761,11 +761,27 @@ public: ::new( p ) T( value ); } +#if (__cplusplus >= 201103L) + template + void construct( U* u, Args&&... args) + { + ::new( static_cast(u) ) U( std::forward( args )... ); + } +#endif + void destroy( pointer p ) { p->~T(); } +#if (__cplusplus >= 201103L) + template + void destroy( U* u ) + { + u->~U(); + } +#endif + void deallocate( pointer p, size_type /*num*/ ) { internal::aligned_free( p ); diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index 419f90148..0a5012cfe 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -530,9 +530,9 @@ public: inline Transform& operator=(const UniformScaling& t); inline Transform& operator*=(const UniformScaling& s) { return scale(s.factor()); } - inline Transform operator*(const UniformScaling& s) const + inline Transform operator*(const UniformScaling& s) const { - Transform res = *this; + Transform res = *this; res.scale(s.factor()); return res; } @@ -699,9 +699,13 @@ template Transform& Transform::operator=(const QMatrix& other) { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) - m_matrix << other.m11(), other.m21(), other.dx(), - other.m12(), other.m22(), other.dy(), - 0, 0, 1; + if (Mode == int(AffineCompact)) + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(); + else + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(), + 0, 0, 1; return *this; } diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h index 4b13f08d4..5c320e2d2 100644 --- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h @@ -66,9 +66,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r } // unordered insertion - for(int k=0; k1) std::sort(indices.data(),indices.data()+nnz); - for(int k=0; k RowMajorMatrix; - typedef SparseMatrix ColMajorMatrix; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); // sort the non zeros: @@ -149,7 +149,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix rhsRow = rhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); @@ -162,7 +162,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix lhsRow = lhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); @@ -175,7 +175,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); res = resRow; @@ -190,7 +190,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); 
internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); res = resCol; @@ -202,7 +202,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol = lhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); @@ -215,7 +215,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix rhsCol = rhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); @@ -228,8 +228,8 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; - typedef SparseMatrix ColMajorMatrix; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; RowMajorMatrix resRow(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); // sort the non zeros: diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h index d0436aa7c..6f615e250 100644 --- a/Eigen/src/SparseCore/SparseBlock.h +++ b/Eigen/src/SparseCore/SparseBlock.h @@ -335,6 +335,14 @@ const Block SparseMatrixBase::inner } +namespace internal { + +template< typename XprType, int BlockRows, int BlockCols, bool InnerPanel, + bool OuterVector = (BlockCols==1 && XprType::IsRowMajor) || (BlockRows==1 && !XprType::IsRowMajor)> +class GenericSparseBlockInnerIteratorImpl; + +} + /** Generic implementation of sparse Block expression. * Real-only. */ @@ -342,11 +350,12 @@ template class BlockImpl : public SparseMatrixBase >, internal::no_assignment_operator { - typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) + + typedef typename internal::remove_all::type _MatrixTypeNested; /** Column or Row constructor */ @@ -354,8 +363,8 @@ public: : m_matrix(xpr), m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), - m_blockRows(xpr.rows()), - m_blockCols(xpr.cols()) + m_blockRows(BlockRows==1 ? 1 : xpr.rows()), + m_blockCols(BlockCols==1 ? 1 : xpr.cols()) {} /** Dynamic-size constructor @@ -394,29 +403,8 @@ public: inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - class InnerIterator : public _MatrixTypeNested::InnerIterator - { - typedef typename _MatrixTypeNested::InnerIterator Base; - const BlockType& m_block; - Index m_end; - public: - - EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer) - : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), - m_block(block), - m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) - { - while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) ) - Base::operator++(); - } - - inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return Base::outer() - (IsRowMajor ? 
m_block.m_startRow.value() : m_block.m_startCol.value()); } - inline Index row() const { return Base::row() - m_block.m_startRow.value(); } - inline Index col() const { return Base::col() - m_block.m_startCol.value(); } - - inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } - }; + typedef internal::GenericSparseBlockInnerIteratorImpl InnerIterator; + class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator { typedef typename _MatrixTypeNested::ReverseInnerIterator Base; @@ -441,7 +429,7 @@ public: inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; } }; protected: - friend class InnerIterator; + friend class internal::GenericSparseBlockInnerIteratorImpl; friend class ReverseInnerIterator; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) @@ -454,6 +442,100 @@ public: }; +namespace internal { + template + class GenericSparseBlockInnerIteratorImpl : public Block::_MatrixTypeNested::InnerIterator + { + typedef Block BlockType; + enum { + IsRowMajor = BlockType::IsRowMajor + }; + typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested; + typedef typename BlockType::Index Index; + typedef typename _MatrixTypeNested::InnerIterator Base; + const BlockType& m_block; + Index m_end; + public: + + EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer) + : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())), + m_block(block), + m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) + { + while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) ) + Base::operator++(); + } + + inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } + inline Index row() const { return Base::row() - m_block.m_startRow.value(); } + inline Index col() const { return Base::col() - m_block.m_startCol.value(); } + + inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } + }; + + // Row vector of a column-major sparse matrix or column of a row-major one. + template + class GenericSparseBlockInnerIteratorImpl + { + typedef Block BlockType; + enum { + IsRowMajor = BlockType::IsRowMajor + }; + typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested; + typedef typename BlockType::Index Index; + typedef typename BlockType::Scalar Scalar; + const BlockType& m_block; + Index m_outerPos; + Index m_innerIndex; + Scalar m_value; + Index m_end; + public: + + EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0) + : + m_block(block), + m_outerPos( (IsRowMajor ? block.m_startCol.value() : block.m_startRow.value()) - 1), // -1 so that operator++ finds the first non-zero entry + m_innerIndex(IsRowMajor ? block.m_startRow.value() : block.m_startCol.value()), + m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value()) + { + EIGEN_UNUSED_VARIABLE(outer); + eigen_assert(outer==0); + + ++(*this); + } + + inline Index index() const { return m_outerPos - (IsRowMajor ? 
m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline Index outer() const { return 0; } + inline Index row() const { return IsRowMajor ? 0 : index(); } + inline Index col() const { return IsRowMajor ? index() : 0; } + + inline Scalar value() const { return m_value; } + + inline GenericSparseBlockInnerIteratorImpl& operator++() + { + // search next non-zero entry + while(m_outerPos::InnerIterator : public _LhsNes inline Scalar value() const { return Base::value() * m_factor; } protected: - int m_outer; + Index m_outer; Scalar m_factor; }; @@ -156,7 +156,7 @@ struct sparse_time_dense_product_impl(std::malloc(m_outerSize * sizeof(Index))); - for (int i = 0; i < m_outerSize; i++) + for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; } @@ -752,8 +752,8 @@ class SparseMatrix else for (Index i=0; i::sumupDuplicates() wi.fill(-1); Index count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers - for(int j=0; j class SparseMatrixBase : public EigenBase } else { - SparseMatrix trans = m; - s << static_cast >&>(trans); + SparseMatrix trans = m; + s << static_cast >&>(trans); } } return s; diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h index 70b6480ef..cf7663070 100644 --- a/Eigen/src/SparseCore/SparseProduct.h +++ b/Eigen/src/SparseCore/SparseProduct.h @@ -16,6 +16,7 @@ template struct SparseSparseProductReturnType { typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::Index Index; enum { LhsRowMajor = internal::traits::Flags & RowMajorBit, RhsRowMajor = internal::traits::Flags & RowMajorBit, @@ -24,11 +25,11 @@ struct SparseSparseProductReturnType }; typedef typename internal::conditional, + SparseMatrix, typename internal::nested::type>::type LhsNested; typedef typename internal::conditional, + SparseMatrix, typename internal::nested::type>::type RhsNested; typedef SparseSparseProduct Type; diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index 70857c7b6..fcc18f5c9 100644 --- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // make sure to call innerSize/outerSize since we fake the storage order. 
Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); - //int size = lhs.outerSize(); + //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer @@ -100,7 +100,7 @@ struct sparse_sparse_product_with_pruning_selector SparseTemporaryType; + typedef SparseMatrix SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res = _res; @@ -126,10 +126,11 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrix; - ColMajorMatrix colLhs(lhs); - ColMajorMatrix colRhs(rhs); - internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); + typedef SparseMatrix ColMajorMatrixLhs; + typedef SparseMatrix ColMajorMatrixRhs; + ColMajorMatrixLhs colLhs(lhs); + ColMajorMatrixRhs colRhs(rhs); + internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); // let's transpose the product to get a column x column product // typedef SparseMatrix SparseTemporaryType; diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h index d068d62df..1ffa7d54e 100644 --- a/Eigen/src/SparseLU/SparseLU_Memory.h +++ b/Eigen/src/SparseLU/SparseLU_Memory.h @@ -70,7 +70,7 @@ Index SparseLUImpl::expand(VectorType& vec, Index& length, Index if(num_expansions == 0 || keep_prev) new_len = length ; // First time allocate requested else - new_len = Index(alpha * length); + new_len = (std::max)(length+1,Index(alpha * length)); VectorType old_vec; // Temporary vector to hold the previous values if (nbElts > 0 ) @@ -107,7 +107,7 @@ Index SparseLUImpl::expand(VectorType& vec, Index& length, Index do { alpha = (alpha + 1)/2; - new_len = Index(alpha * length); + new_len = (std::max)(length+1,Index(alpha * length)); #ifdef EIGEN_EXCEPTIONS try #endif diff --git a/bench/bench_norm.cpp b/bench/bench_norm.cpp index 806db292c..398fef835 100644 --- a/bench/bench_norm.cpp +++ b/bench/bench_norm.cpp @@ -32,25 +32,25 @@ EIGEN_DONT_INLINE typename T::Scalar lapackNorm(T& v) Scalar ssq = 1; for (int i=0;i= ax) { - ssq += internal::abs2(ax/scale); + ssq += numext::abs2(ax/scale); } else { - ssq = Scalar(1) + ssq * internal::abs2(scale/ax); + ssq = Scalar(1) + ssq * numext::abs2(scale/ax); scale = ax; } } - return scale * internal::sqrt(ssq); + return scale * std::sqrt(ssq); } template EIGEN_DONT_INLINE typename T::Scalar twopassNorm(T& v) { typedef typename T::Scalar Scalar; - Scalar s = v.cwise().abs().maxCoeff(); + Scalar s = v.array().abs().maxCoeff(); return s*(v/s).norm(); } @@ -73,16 +73,20 @@ EIGEN_DONT_INLINE typename T::Scalar divacNorm(T& v) v(i) = v(2*i) + v(2*i+1); n = n/2; } - return internal::sqrt(v(0)); + return std::sqrt(v(0)); } +namespace Eigen { +namespace internal { #ifdef EIGEN_VECTORIZE -Packet4f internal::plt(const Packet4f& a, Packet4f& b) { return _mm_cmplt_ps(a,b); } -Packet2d internal::plt(const Packet2d& a, Packet2d& b) { return _mm_cmplt_pd(a,b); } +Packet4f plt(const Packet4f& a, Packet4f& b) { return _mm_cmplt_ps(a,b); } +Packet2d plt(const Packet2d& a, Packet2d& b) { return _mm_cmplt_pd(a,b); } -Packet4f internal::pandnot(const Packet4f& a, Packet4f& b) { return _mm_andnot_ps(a,b); } -Packet2d internal::pandnot(const Packet2d& a, Packet2d& b) { return _mm_andnot_pd(a,b); } +Packet4f pandnot(const Packet4f& a, Packet4f& b) { return _mm_andnot_ps(a,b); } +Packet2d pandnot(const Packet2d& a, Packet2d& b) { return _mm_andnot_pd(a,b); } #endif +} +} template EIGEN_DONT_INLINE typename T::Scalar 
pblueNorm(const T& v) @@ -126,7 +130,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) overfl = rbig*s2m; // overfow boundary for abig eps = std::pow(ibeta, 1-it); - relerr = internal::sqrt(eps); // tolerance for neglecting asml + relerr = std::sqrt(eps); // tolerance for neglecting asml abig = 1.0/eps - 1.0; if (Scalar(nbig)>abig) nmax = abig; // largest safe n else nmax = nbig; @@ -134,13 +138,13 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) typedef typename internal::packet_traits::type Packet; const int ps = internal::packet_traits::size; - Packet pasml = internal::pset1(Scalar(0)); - Packet pamed = internal::pset1(Scalar(0)); - Packet pabig = internal::pset1(Scalar(0)); - Packet ps2m = internal::pset1(s2m); - Packet ps1m = internal::pset1(s1m); - Packet pb2 = internal::pset1(b2); - Packet pb1 = internal::pset1(b1); + Packet pasml = internal::pset1(Scalar(0)); + Packet pamed = internal::pset1(Scalar(0)); + Packet pabig = internal::pset1(Scalar(0)); + Packet ps2m = internal::pset1(s2m); + Packet ps1m = internal::pset1(s1m); + Packet pb2 = internal::pset1(b2); + Packet pb1 = internal::pset1(b1); for(int j=0; j(j)); @@ -170,7 +174,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) Scalar amed = internal::predux(pamed); if(abig > Scalar(0)) { - abig = internal::sqrt(abig); + abig = std::sqrt(abig); if(abig > overfl) { eigen_assert(false && "overflow"); @@ -179,7 +183,7 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) if(amed > Scalar(0)) { abig = abig/s2m; - amed = internal::sqrt(amed); + amed = std::sqrt(amed); } else { @@ -191,24 +195,24 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) { if (amed > Scalar(0)) { - abig = internal::sqrt(amed); - amed = internal::sqrt(asml) / s1m; + abig = std::sqrt(amed); + amed = std::sqrt(asml) / s1m; } else { - return internal::sqrt(asml)/s1m; + return std::sqrt(asml)/s1m; } } else { - return internal::sqrt(amed); + return std::sqrt(amed); } asml = std::min(abig, amed); abig = std::max(abig, amed); if(asml <= abig*relerr) return abig; else - return abig * internal::sqrt(Scalar(1) + internal::abs2(asml/abig)); + return abig * std::sqrt(Scalar(1) + numext::abs2(asml/abig)); #endif } @@ -224,22 +228,22 @@ EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) for (int i=0; i()); - double yd = based * internal::abs(internal::random()); + double yf = basef * std::abs(internal::random()); + double yd = based * std::abs(internal::random()); VectorXf vf = VectorXf::Ones(s) * yf; VectorXd vd = VectorXd::Ones(s) * yd; - std::cout << "reference\t" << internal::sqrt(double(s))*yf << "\t" << internal::sqrt(double(s))*yd << "\n"; + std::cout << "reference\t" << std::sqrt(double(s))*yf << "\t" << std::sqrt(double(s))*yd << "\n"; std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\n"; std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\n"; std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\n"; @@ -255,8 +259,8 @@ void check_accuracy_var(int ef0, int ef1, int ed0, int ed1, int s) VectorXd vd(s); for (int i=0; i()) * std::pow(double(10), internal::random(ef0,ef1)); - vd[i] = internal::abs(internal::random()) * std::pow(double(10), internal::random(ed0,ed1)); + vf[i] = std::abs(internal::random()) * std::pow(double(10), internal::random(ef0,ef1)); + vd[i] = std::abs(internal::random()) * std::pow(double(10), internal::random(ed0,ed1)); } //std::cout << "reference\t" << internal::sqrt(double(s))*yf << "\t" << internal::sqrt(double(s))*yd 
<< "\n"; @@ -321,10 +325,10 @@ int main(int argc, char** argv) VectorXcf vcf = VectorXcf::Random(1024*1024*32) * y; BENCH_PERF(sqsumNorm); BENCH_PERF(blueNorm); -// BENCH_PERF(pblueNorm); -// BENCH_PERF(lapackNorm); -// BENCH_PERF(hypotNorm); -// BENCH_PERF(twopassNorm); + BENCH_PERF(pblueNorm); + BENCH_PERF(lapackNorm); + BENCH_PERF(hypotNorm); + BENCH_PERF(twopassNorm); BENCH_PERF(bl2passNorm); } @@ -336,10 +340,10 @@ int main(int argc, char** argv) VectorXcf vcf = VectorXcf::Random(512) * y; BENCH_PERF(sqsumNorm); BENCH_PERF(blueNorm); -// BENCH_PERF(pblueNorm); -// BENCH_PERF(lapackNorm); -// BENCH_PERF(hypotNorm); -// BENCH_PERF(twopassNorm); + BENCH_PERF(pblueNorm); + BENCH_PERF(lapackNorm); + BENCH_PERF(hypotNorm); + BENCH_PERF(twopassNorm); BENCH_PERF(bl2passNorm); } } diff --git a/blas/CMakeLists.txt b/blas/CMakeLists.txt index c35a2fdbe..a9bc05137 100644 --- a/blas/CMakeLists.txt +++ b/blas/CMakeLists.txt @@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS) if(EIGEN_Fortran_COMPILER_WORKS) enable_language(Fortran OPTIONAL) + if(NOT CMAKE_Fortran_COMPILER) + set(EIGEN_Fortran_COMPILER_WORKS OFF) + endif() endif() add_custom_target(blas) diff --git a/cmake/language_support.cmake b/cmake/language_support.cmake index 2ca303c92..d687b71f6 100644 --- a/cmake/language_support.cmake +++ b/cmake/language_support.cmake @@ -23,7 +23,7 @@ function(workaround_9220 language language_works) #message("DEBUG: language = ${language}") set(text "project(test NONE) - cmake_minimum_required(VERSION 2.6.0) + cmake_minimum_required(VERSION 2.8.0) set (CMAKE_Fortran_FLAGS \"${CMAKE_Fortran_FLAGS}\") set (CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS}\") enable_language(${language} OPTIONAL) diff --git a/doc/AsciiQuickReference.txt b/doc/AsciiQuickReference.txt index d3bfd439c..4c8fe2f47 100644 --- a/doc/AsciiQuickReference.txt +++ b/doc/AsciiQuickReference.txt @@ -163,13 +163,25 @@ x.squaredNorm() // dot(x, x) Note the equivalence is not true for co x.dot(y) // dot(x, y) x.cross(y) // cross(x, y) Requires #include +//// Type conversion +// Eigen // Matlab +A.cast(); // double(A) +A.cast(); // single(A) +A.cast(); // int32(A) +// if the original type equals destination type, no work is done + +// Note that for most operations Eigen requires all operands to have the same type: +MatrixXf F = MatrixXf::Zero(3,3); +A += F; // illegal in Eigen. In Matlab A = A+F is allowed +A += F.cast(); // F converted to double and then added (generally, conversion happens on-the-fly) + // Eigen can map existing memory into Eigen matrices. float array[3]; -Map(array, 3).fill(10); -int data[4] = 1, 2, 3, 4; -Matrix2i mat2x2(data); -MatrixXi mat2x2 = Map(data); -MatrixXi mat2x2 = Map(data, 2, 2); +Vector3f::Map(array).fill(10); // create a temporary Map over array and sets entries to 10 +int data[4] = {1, 2, 3, 4}; +Matrix2i mat2x2(data); // copies data into mat2x2 +Matrix2i::Map(data) = 2*mat2x2; // overwrite elements of data with 2*mat2x2 +MatrixXi::Map(data, 2, 2) += mat2x2; // adds mat2x2 to elements of data (alternative syntax if size is not know at compile time) // Solve Ax = b. Result stored in x. Matlab: x = A \ b. x = A.ldlt().solve(b)); // A sym. p.s.d. 
#include diff --git a/doc/snippets/MatrixBase_applyOnTheLeft.cpp b/doc/snippets/MatrixBase_applyOnTheLeft.cpp new file mode 100644 index 000000000..6398c873a --- /dev/null +++ b/doc/snippets/MatrixBase_applyOnTheLeft.cpp @@ -0,0 +1,7 @@ +Matrix3f A = Matrix3f::Random(3,3), B; +B << 0,1,0, + 0,0,1, + 1,0,0; +cout << "At start, A = " << endl << A << endl; +A.applyOnTheLeft(B); +cout << "After applyOnTheLeft, A = " << endl << A << endl; diff --git a/doc/snippets/MatrixBase_applyOnTheRight.cpp b/doc/snippets/MatrixBase_applyOnTheRight.cpp new file mode 100644 index 000000000..e4b71b2d8 --- /dev/null +++ b/doc/snippets/MatrixBase_applyOnTheRight.cpp @@ -0,0 +1,9 @@ +Matrix3f A = Matrix3f::Random(3,3), B; +B << 0,1,0, + 0,0,1, + 1,0,0; +cout << "At start, A = " << endl << A << endl; +A *= B; +cout << "After A *= B, A = " << endl << A << endl; +A.applyOnTheRight(B); // equivalent to A *= B +cout << "After applyOnTheRight, A = " << endl << A << endl; diff --git a/lapack/CMakeLists.txt b/lapack/CMakeLists.txt index 7e7444326..9883d4c72 100644 --- a/lapack/CMakeLists.txt +++ b/lapack/CMakeLists.txt @@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS) if(EIGEN_Fortran_COMPILER_WORKS) enable_language(Fortran OPTIONAL) + if(NOT CMAKE_Fortran_COMPILER) + set(EIGEN_Fortran_COMPILER_WORKS OFF) + endif() endif() add_custom_target(lapack) diff --git a/test/cholesky.cpp b/test/cholesky.cpp index 378525a83..b980dc572 100644 --- a/test/cholesky.cpp +++ b/test/cholesky.cpp @@ -263,8 +263,8 @@ template void cholesky_bug241(const MatrixType& m) // LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if matrix is diagonal. // This test checks that LDLT reports correctly that matrix is indefinite. -// See http://forum.kde.org/viewtopic.php?f=74&t=106942 -template void cholesky_indefinite(const MatrixType& m) +// See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736 +template void cholesky_definiteness(const MatrixType& m) { eigen_assert(m.rows() == 2 && m.cols() == 2); MatrixType mat; @@ -280,6 +280,24 @@ template void cholesky_indefinite(const MatrixType& m) VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isPositive()); } + { + mat << 0, 0, 0, 0; + LDLT ldlt(mat); + VERIFY(ldlt.isNegative()); + VERIFY(ldlt.isPositive()); + } + { + mat << 0, 0, 0, 1; + LDLT ldlt(mat); + VERIFY(!ldlt.isNegative()); + VERIFY(ldlt.isPositive()); + } + { + mat << -1, 0, 0, 0; + LDLT ldlt(mat); + VERIFY(ldlt.isNegative()); + VERIFY(!ldlt.isPositive()); + } } template void cholesky_verify_assert() @@ -309,7 +327,7 @@ void test_cholesky() CALL_SUBTEST_1( cholesky(Matrix()) ); CALL_SUBTEST_3( cholesky(Matrix2d()) ); CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) ); - CALL_SUBTEST_3( cholesky_indefinite(Matrix2d()) ); + CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) ); CALL_SUBTEST_4( cholesky(Matrix3f()) ); CALL_SUBTEST_5( cholesky(Matrix4d()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE); diff --git a/test/geo_transformations.cpp b/test/geo_transformations.cpp index 35ae67ebe..ee3030b5d 100644 --- a/test/geo_transformations.cpp +++ b/test/geo_transformations.cpp @@ -279,6 +279,13 @@ template void transformations() t1 = Eigen::Scaling(s0,s0,s0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); + t0 = t3; + t0.scale(s0); + t1 = t3 * Eigen::Scaling(s0); + VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); + t0.prescale(s0); + t1 = Eigen::Scaling(s0) * t1; + VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.setIdentity(); t0.prerotate(q1).prescale(v0).pretranslate(v0); diff --git a/test/qr.cpp 
b/test/qr.cpp index a79e0dd34..98738777f 100644 --- a/test/qr.cpp +++ b/test/qr.cpp @@ -54,6 +54,8 @@ template void qr_invertible() { using std::log; using std::abs; + using std::pow; + using std::max; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Scalar Scalar; @@ -65,7 +67,7 @@ template void qr_invertible() if (internal::is_same::value) { // let's build a matrix more stable to inverse - MatrixType a = MatrixType::Random(size,size*2); + MatrixType a = MatrixType::Random(size,size*4); m1 += a * a.adjoint(); } @@ -81,8 +83,11 @@ template void qr_invertible() m3 = qr.householderQ(); // get a unitary m1 = m3 * m1 * m3; qr.compute(m1); - VERIFY_IS_APPROX(absdet, qr.absDeterminant()); VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant()); + // This test is tricky if the determinant becomes too small. + // Since we generate random numbers with magnitude rrange [0,1], the average determinant is 0.5^size + VERIFY_IS_MUCH_SMALLER_THAN( abs(absdet-qr.absDeterminant()), (max)(RealScalar(pow(0.5,size)),(max)(abs(absdet),abs(qr.absDeterminant()))) ); + } template void qr_verify_assert() diff --git a/test/sparse.h b/test/sparse.h index a09c65e5f..e19a76316 100644 --- a/test/sparse.h +++ b/test/sparse.h @@ -154,16 +154,16 @@ initSparse(double density, sparseMat.finalize(); } -template void +template void initSparse(double density, Matrix& refVec, - SparseVector& sparseVec, + SparseVector& sparseVec, std::vector* zeroCoords = 0, std::vector* nonzeroCoords = 0) { sparseVec.reserve(int(refVec.size()*density)); sparseVec.setZero(); - for(int i=0; i(0,1) < density) ? internal::random() : Scalar(0); if (v!=Scalar(0)) @@ -178,10 +178,10 @@ initSparse(double density, } } -template void +template void initSparse(double density, Matrix& refVec, - SparseVector& sparseVec, + SparseVector& sparseVec, std::vector* zeroCoords = 0, std::vector* nonzeroCoords = 0) { diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp index d5ff9f80b..6c620f037 100644 --- a/test/sparse_basic.cpp +++ b/test/sparse_basic.cpp @@ -270,6 +270,14 @@ template void sparse_basic(const SparseMatrixType& re VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.row(0).dot(refM2.row(0))); else VERIFY_IS_APPROX(m1.innerVector(0).dot(refM2.row(0)), refM1.col(0).dot(refM2.row(0))); + + DenseVector rv = DenseVector::Random(m1.cols()); + DenseVector cv = DenseVector::Random(m1.rows()); + Index r = internal::random(0,m1.rows()-2); + Index c = internal::random(0,m1.cols()-1); + VERIFY_IS_APPROX(( m1.template block<1,Dynamic>(r,0,1,m1.cols()).dot(rv)) , refM1.row(r).dot(rv)); + VERIFY_IS_APPROX(m1.row(r).dot(rv), refM1.row(r).dot(rv)); + VERIFY_IS_APPROX(m1.col(c).dot(cv), refM1.col(c).dot(cv)); VERIFY_IS_APPROX(m1.conjugate(), refM1.conjugate()); VERIFY_IS_APPROX(m1.real(), refM1.real()); diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp index 664e33887..a2ea9d5b7 100644 --- a/test/sparse_product.cpp +++ b/test/sparse_product.cpp @@ -13,8 +13,9 @@ template struct test_outer { static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) { - int c = internal::random(0,m2.cols()-1); - int c1 = internal::random(0,m2.cols()-1); + typedef typename SparseMatrixType::Index Index; + Index c = internal::random(0,m2.cols()-1); + Index c1 = internal::random(0,m2.cols()-1); VERIFY_IS_APPROX(m4=m2.col(c)*refMat2.col(c1).transpose(), refMat4=refMat2.col(c)*refMat2.col(c1).transpose()); VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.col(c).transpose(), 
refMat4=refMat2.col(c1)*refMat2.col(c).transpose()); } @@ -22,8 +23,9 @@ template struct test_outer struct test_outer { static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) { - int r = internal::random(0,m2.rows()-1); - int c1 = internal::random(0,m2.cols()-1); + typedef typename SparseMatrixType::Index Index; + Index r = internal::random(0,m2.rows()-1); + Index c1 = internal::random(0,m2.cols()-1); VERIFY_IS_APPROX(m4=m2.row(r).transpose()*refMat2.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat2.col(c1).transpose()); VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.row(r), refMat4=refMat2.col(c1)*refMat2.row(r)); } @@ -37,9 +39,9 @@ template void sparse_product() { typedef typename SparseMatrixType::Index Index; Index n = 100; - const Index rows = internal::random(1,n); - const Index cols = internal::random(1,n); - const Index depth = internal::random(1,n); + const Index rows = internal::random(1,n); + const Index cols = internal::random(1,n); + const Index depth = internal::random(1,n); typedef typename SparseMatrixType::Scalar Scalar; enum { Flags = SparseMatrixType::Flags }; @@ -244,6 +246,7 @@ void test_sparse_product() CALL_SUBTEST_1( (sparse_product >()) ); CALL_SUBTEST_2( (sparse_product, ColMajor > >()) ); CALL_SUBTEST_2( (sparse_product, RowMajor > >()) ); + CALL_SUBTEST_3( (sparse_product >()) ); CALL_SUBTEST_4( (sparse_product_regression_test, Matrix >()) ); } } diff --git a/test/sparse_vector.cpp b/test/sparse_vector.cpp index ec5877b6a..0c9476803 100644 --- a/test/sparse_vector.cpp +++ b/test/sparse_vector.cpp @@ -9,14 +9,14 @@ #include "sparse.h" -template void sparse_vector(int rows, int cols) +template void sparse_vector(int rows, int cols) { double densityMat = (std::max)(8./(rows*cols), 0.01); double densityVec = (std::max)(8./float(rows), 0.1); typedef Matrix DenseMatrix; typedef Matrix DenseVector; - typedef SparseVector SparseVectorType; - typedef SparseMatrix SparseMatrixType; + typedef SparseVector SparseVectorType; + typedef SparseMatrix SparseMatrixType; Scalar eps = 1e-6; SparseMatrixType m1(rows,rows); @@ -101,9 +101,10 @@ template void sparse_vector(int rows, int cols) void test_sparse_vector() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( sparse_vector(8, 8) ); - CALL_SUBTEST_2( sparse_vector >(16, 16) ); - CALL_SUBTEST_1( sparse_vector(299, 535) ); + CALL_SUBTEST_1(( sparse_vector(8, 8) )); + CALL_SUBTEST_2(( sparse_vector, int>(16, 16) )); + CALL_SUBTEST_1(( sparse_vector(299, 535) )); + CALL_SUBTEST_1(( sparse_vector(299, 535) )); } } diff --git a/test/stable_norm.cpp b/test/stable_norm.cpp index c09fc17b7..549f91fbf 100644 --- a/test/stable_norm.cpp +++ b/test/stable_norm.cpp @@ -55,8 +55,16 @@ template void stable_norm(const MatrixType& m) Index rows = m.rows(); Index cols = m.cols(); - Scalar big = internal::random() * ((std::numeric_limits::max)() * RealScalar(1e-4)); - Scalar small = internal::random() * ((std::numeric_limits::min)() * RealScalar(1e4)); + // get a non-zero random factor + Scalar factor = internal::random(); + while(numext::abs2(factor)(); + Scalar big = factor * ((std::numeric_limits::max)() * RealScalar(1e-4)); + + factor = internal::random(); + while(numext::abs2(factor)(); + Scalar small = factor * ((std::numeric_limits::min)() * RealScalar(1e4)); MatrixType vzero = MatrixType::Zero(rows, cols), vrand = MatrixType::Random(rows, cols), @@ -91,7 +99,7 @@ template void stable_norm(const MatrixType& m) VERIFY_IS_APPROX(vsmall.blueNorm(), sqrt(size)*abs(small)); 
VERIFY_IS_APPROX(vsmall.hypotNorm(), sqrt(size)*abs(small)); -// Test compilation of cwise() version + // Test compilation of cwise() version VERIFY_IS_APPROX(vrand.colwise().stableNorm(), vrand.colwise().norm()); VERIFY_IS_APPROX(vrand.colwise().blueNorm(), vrand.colwise().norm()); VERIFY_IS_APPROX(vrand.colwise().hypotNorm(), vrand.colwise().norm()); diff --git a/test/umeyama.cpp b/test/umeyama.cpp index 738d0af70..2e8092434 100644 --- a/test/umeyama.cpp +++ b/test/umeyama.cpp @@ -130,10 +130,11 @@ void run_fixed_size_test(int num_elements) // MUST be positive because in any other case det(cR_t) may become negative for // odd dimensions! - const Scalar c = abs(internal::random()); + // Also if c is to small compared to t.norm(), problem is ill-posed (cf. Bug 744) + const Scalar c = internal::random(0.5, 2.0); FixedMatrix R = randMatrixSpecialUnitary(dim); - FixedVector t = Scalar(50)*FixedVector::Random(dim,1); + FixedVector t = Scalar(32)*FixedVector::Random(dim,1); HomMatrix cR_t = HomMatrix::Identity(dim+1,dim+1); cR_t.block(0,0,dim,dim) = c*R; @@ -149,9 +150,9 @@ void run_fixed_size_test(int num_elements) HomMatrix cR_t_umeyama = umeyama(src_block, dst_block); - const Scalar error = ( cR_t_umeyama*src - dst ).array().square().sum(); + const Scalar error = ( cR_t_umeyama*src - dst ).squaredNorm(); - VERIFY(error < Scalar(10)*std::numeric_limits::epsilon()); + VERIFY(error < Scalar(16)*std::numeric_limits::epsilon()); } void test_umeyama() diff --git a/unsupported/Eigen/OpenGLSupport b/unsupported/Eigen/OpenGLSupport index 4454bf820..c4090ab11 100644 --- a/unsupported/Eigen/OpenGLSupport +++ b/unsupported/Eigen/OpenGLSupport @@ -11,7 +11,12 @@ #define EIGEN_OPENGL_MODULE #include -#include + +#if defined(__APPLE_CC__) + #include +#else + #include +#endif namespace Eigen { diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h index a4516056d..b8f2cba17 100644 --- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -153,7 +153,22 @@ void KroneckerProductSparse::evalTo(Dest& dst) const Bc = m_B.cols(); dst.resize(this->rows(), this->cols()); dst.resizeNonZeros(0); - dst.reserve(m_A.nonZeros() * m_B.nonZeros()); + + // compute number of non-zeros per innervectors of dst + { + VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols()); + for (Index kA=0; kA < m_A.outerSize(); ++kA) + for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA) + nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++; + + VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols()); + for (Index kB=0; kB < m_B.outerSize(); ++kB) + for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB) + nnzB(Dest::IsRowMajor ? itB.row() : itB.col())++; + + Matrix nnzAB = nnzB * nnzA.transpose(); + dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size())); + } for (Index kA=0; kA < m_A.outerSize(); ++kA) { diff --git a/unsupported/Eigen/src/Splines/SplineFwd.h b/unsupported/Eigen/src/Splines/SplineFwd.h index 49db8d35d..9ea23a9a1 100644 --- a/unsupported/Eigen/src/Splines/SplineFwd.h +++ b/unsupported/Eigen/src/Splines/SplineFwd.h @@ -31,6 +31,8 @@ namespace Eigen enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. 
*/ }; + + enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; /** \brief The data type used to store non-zero basis functions. */ typedef Array BasisVectorType; @@ -39,7 +41,7 @@ namespace Eigen typedef Array BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ - typedef Array DerivativeType; + typedef Array DerivativeType; /** \brief The point type the spline is representing. */ typedef Array PointType; @@ -62,12 +64,14 @@ namespace Eigen { enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ }; + + enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; /** \brief The data type used to store the values of the basis function derivatives. */ typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType; /** \brief The data type used to store the spline's derivative values. */ - typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; + typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; }; /** \brief 2D float B-spline with dynamic degree. */ diff --git a/unsupported/test/FFTW.cpp b/unsupported/test/FFTW.cpp index a07bf274b..d3718e2d2 100644 --- a/unsupported/test/FFTW.cpp +++ b/unsupported/test/FFTW.cpp @@ -16,9 +16,6 @@ std::complex RandomCpx() { return std::complex( (T)(rand()/(T)RAND_MAX - . using namespace std; using namespace Eigen; -float norm(float x) {return x*x;} -double norm(double x) {return x*x;} -long double norm(long double x) {return x*x;} template < typename T> complex promote(complex x) { return complex(x.real(),x.imag()); } @@ -40,11 +37,11 @@ complex promote(long double x) { return complex( x); for (size_t k1=0;k1<(size_t)timebuf.size();++k1) { acc += promote( timebuf[k1] ) * exp( complex(0,k1*phinc) ); } - totalpower += norm(acc); + totalpower += numext::abs2(acc); complex x = promote(fftbuf[k0]); complex dif = acc - x; - difpower += norm(dif); - //cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(norm(dif)) << endl; + difpower += numext::abs2(dif); + //cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(numext::abs2(dif)) << endl; } cerr << "rmse:" << sqrt(difpower/totalpower) << endl; return sqrt(difpower/totalpower); @@ -57,8 +54,8 @@ complex promote(long double x) { return complex( x); long double difpower=0; size_t n = (min)( buf1.size(),buf2.size() ); for (size_t k=0;k(0.01,0.5); + int ra = Eigen::internal::random(1,50); + int ca = Eigen::internal::random(1,50); + int rb = Eigen::internal::random(1,50); + int cb = Eigen::internal::random(1,50); + SparseMatrix sA(ra,ca), sB(rb,cb), sC; + SparseMatrix sC2; + MatrixXf dA(ra,ca), dB(rb,cb), dC; + initSparse(density, dA, sA); + initSparse(density, dB, sB); + + sC = kroneckerProduct(sA,sB); + dC = kroneckerProduct(dA,dB); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC = kroneckerProduct(sA.transpose(),sB); + dC = kroneckerProduct(dA.transpose(),dB); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC = kroneckerProduct(sA.transpose(),sB.transpose()); + dC = kroneckerProduct(dA.transpose(),dB.transpose()); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC = kroneckerProduct(sA,sB.transpose()); + dC = 
kroneckerProduct(dA,dB.transpose()); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC2 = kroneckerProduct(sA,sB); + dC = kroneckerProduct(dA,dB); + VERIFY_IS_APPROX(MatrixXf(sC2),dC); + } } diff --git a/unsupported/test/polynomialsolver.cpp b/unsupported/test/polynomialsolver.cpp index 13f92169e..680ff6d31 100644 --- a/unsupported/test/polynomialsolver.cpp +++ b/unsupported/test/polynomialsolver.cpp @@ -106,7 +106,6 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const typedef typename POLYNOMIAL::Scalar Scalar; typedef typename REAL_ROOTS::Scalar Real; - typedef PolynomialSolver PolynomialSolverType; //Test realRoots std::vector< Real > calc_realRoots; @@ -120,7 +119,7 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const bool found = false; for( size_t j=0; j