From e8cdbedefb1913b5a0e2f2b7d38470f081cb8d29 Mon Sep 17 00:00:00 2001
From: Christoph Hertzberg
Date: Thu, 4 Dec 2014 22:48:53 +0100
Subject: [PATCH 1/5] bug #877, bug #572: Introduce a global Index typedef.
 Rename Sparse*::Index to StorageIndex, make Dense*::StorageIndex an alias to
 DenseIndex. Overall this commit gets rid of all Index conversion warnings.

---
 Eigen/src/Cholesky/LDLT.h | 1 +
 Eigen/src/Cholesky/LLT.h | 1 +
 Eigen/src/CholmodSupport/CholmodSupport.h | 24 +-
 Eigen/src/Core/ArrayBase.h | 1 -
 Eigen/src/Core/AssignEvaluator.h | 52 ++---
 Eigen/src/Core/BandMatrix.h | 12 +-
 Eigen/src/Core/Block.h | 14 +-
 Eigen/src/Core/CommaInitializer.h | 2 +-
 Eigen/src/Core/CoreEvaluators.h | 30 +--
 Eigen/src/Core/CoreIterators.h | 18 +-
 Eigen/src/Core/CwiseBinaryOp.h | 8 +-
 Eigen/src/Core/CwiseUnaryOp.h | 4 +-
 Eigen/src/Core/DenseBase.h | 14 +-
 Eigen/src/Core/DenseCoeffsBase.h | 4 -
 Eigen/src/Core/Diagonal.h | 24 +-
 Eigen/src/Core/DiagonalMatrix.h | 8 +-
 Eigen/src/Core/EigenBase.h | 8 +-
 Eigen/src/Core/MapBase.h | 1 -
 Eigen/src/Core/Matrix.h | 2 +-
 Eigen/src/Core/MatrixBase.h | 3 +-
 Eigen/src/Core/PermutationMatrix.h | 16 +-
 Eigen/src/Core/PlainObjectBase.h | 3 -
 Eigen/src/Core/Product.h | 10 +-
 Eigen/src/Core/ProductEvaluators.h | 22 +-
 Eigen/src/Core/ReturnByValue.h | 4 +-
 Eigen/src/Core/SelfAdjointView.h | 4 +-
 Eigen/src/Core/Solve.h | 7 +-
 Eigen/src/Core/StableNorm.h | 1 -
 Eigen/src/Core/Swap.h | 2 +-
 Eigen/src/Core/Transpose.h | 10 +-
 Eigen/src/Core/TriangularMatrix.h | 18 +-
 Eigen/src/Core/util/Macros.h | 22 +-
 Eigen/src/Core/util/XprHelper.h | 17 ++
 Eigen/src/Geometry/Transform.h | 3 +-
 Eigen/src/Householder/HouseholderSequence.h | 8 +-
 .../BasicPreconditioners.h | 3 +-
 Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 1 -
 .../ConjugateGradient.h | 1 -
 .../IterativeLinearSolvers/IncompleteLUT.h | 64 +++---
 .../IterativeSolverBase.h | 6 +-
 Eigen/src/LU/FullPivLU.h | 1 +
 Eigen/src/LU/PartialPivLU.h | 1 +
 Eigen/src/OrderingMethods/Amd.h | 15 +-
 Eigen/src/PaStiXSupport/PaStiXSupport.h | 1 +
 Eigen/src/PardisoSupport/PardisoSupport.h | 2 +-
 Eigen/src/QR/ColPivHouseholderQR.h | 3 +-
 Eigen/src/QR/FullPivHouseholderQR.h | 1 +
 Eigen/src/QR/HouseholderQR.h | 1 +
 Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 20 +-
 Eigen/src/SVD/SVDBase.h | 1 +
 Eigen/src/SparseCholesky/SimplicialCholesky.h | 36 +--
 .../SparseCholesky/SimplicialCholesky_impl.h | 30 +--
 Eigen/src/SparseCore/AmbiVector.h | 92 ++++----
 Eigen/src/SparseCore/CompressedStorage.h | 26 +--
 .../ConservativeSparseSparseProduct.h | 21 +-
 Eigen/src/SparseCore/MappedSparseMatrix.h | 84 +++----
 Eigen/src/SparseCore/SparseAssign.h | 3 -
 Eigen/src/SparseCore/SparseBlock.h | 173 +++++++-------
 Eigen/src/SparseCore/SparseColEtree.h | 10 +-
 Eigen/src/SparseCore/SparseCwiseBinaryOp.h | 36 +--
 Eigen/src/SparseCore/SparseCwiseUnaryOp.h | 4 +-
 Eigen/src/SparseCore/SparseDenseProduct.h | 16 +-
 Eigen/src/SparseCore/SparseDiagonalProduct.h | 16 +-
 Eigen/src/SparseCore/SparseMatrix.h | 188 ++++++++-------
 Eigen/src/SparseCore/SparseMatrixBase.h | 28 ++-
 Eigen/src/SparseCore/SparsePermutation.h | 18 +-
 Eigen/src/SparseCore/SparseSelfAdjointView.h | 75 +++---
 .../SparseSparseProductWithPruning.h | 26 +--
 Eigen/src/SparseCore/SparseTranspose.h | 16 +-
 Eigen/src/SparseCore/SparseTriangularView.h | 22 +-
 Eigen/src/SparseCore/SparseUtil.h | 41 ++--
 Eigen/src/SparseCore/SparseVector.h | 80 +++---
 Eigen/src/SparseCore/SparseView.h | 26 +--
 Eigen/src/SparseCore/TriangularSolver.h | 16 +-
 Eigen/src/SparseLU/SparseLU.h | 55 +++--
.../src/SparseLU/SparseLU_SupernodalMatrix.h | 66 +++--- Eigen/src/SparseQR/SparseQR.h | 46 ++-- Eigen/src/SuperLUSupport/SuperLUSupport.h | 18 +- Eigen/src/UmfPackSupport/UmfPackSupport.h | 10 +- test/bandmatrix.cpp | 3 - test/main.h | 10 +- test/nullary.cpp | 4 +- test/product.h | 1 - test/sparse_basic.cpp | 98 ++++---- test/sparse_permutations.cpp | 8 +- test/sparse_product.cpp | 2 +- test/sparse_solver.h | 6 +- .../Eigen/src/IterativeSolvers/GMRES.h | 2 +- .../Eigen/src/IterativeSolvers/MINRES.h | 2 +- .../KroneckerProduct/KroneckerTensorProduct.h | 41 ++-- .../Eigen/src/SparseExtra/BlockSparseMatrix.h | 216 +++++++++--------- .../src/SparseExtra/DynamicSparseMatrix.h | 30 +-- .../Eigen/src/SparseExtra/RandomSetter.h | 10 +- unsupported/test/matrix_functions.h | 1 - unsupported/test/sparse_extra.cpp | 1 - 95 files changed, 1101 insertions(+), 1111 deletions(-) diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 5acbf4651..e2a6ca2b2 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -60,6 +60,7 @@ template class LDLT typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix TmpMatrixType; typedef Transpositions TranspositionType; diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index 90194e64d..5e0cf6c88 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -60,6 +60,7 @@ template class LLT typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; enum { PacketSize = internal::packet_traits::size, diff --git a/Eigen/src/CholmodSupport/CholmodSupport.h b/Eigen/src/CholmodSupport/CholmodSupport.h index 3eadb83a0..3ce3e99d3 100644 --- a/Eigen/src/CholmodSupport/CholmodSupport.h +++ b/Eigen/src/CholmodSupport/CholmodSupport.h @@ -48,8 +48,8 @@ void cholmod_configure_matrix(CholmodType& mat) /** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. * Note that the data are shared. */ -template -cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) +template +cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_StorageIndex>& mat) { cholmod_sparse res; res.nzmax = mat.nonZeros(); @@ -74,11 +74,11 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) res.dtype = 0; res.stype = -1; - if (internal::is_same<_Index,int>::value) + if (internal::is_same<_StorageIndex,int>::value) { res.itype = CHOLMOD_INT; } - else if (internal::is_same<_Index,UF_long>::value) + else if (internal::is_same<_StorageIndex,UF_long>::value) { res.itype = CHOLMOD_LONG; } @@ -138,12 +138,12 @@ cholmod_dense viewAsCholmod(MatrixBase& mat) /** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. * The data are not copied but shared. 
*/ -template -MappedSparseMatrix viewAsEigen(cholmod_sparse& cm) +template +MappedSparseMatrix viewAsEigen(cholmod_sparse& cm) { - return MappedSparseMatrix - (cm.nrow, cm.ncol, static_cast(cm.p)[cm.ncol], - static_cast(cm.p), static_cast(cm.i),static_cast(cm.x) ); + return MappedSparseMatrix + (cm.nrow, cm.ncol, static_cast(cm.p)[cm.ncol], + static_cast(cm.p), static_cast(cm.i),static_cast(cm.x) ); } enum CholmodMode { @@ -169,7 +169,7 @@ class CholmodBase : public SparseSolverBase typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef MatrixType CholMatrixType; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; public: @@ -195,8 +195,8 @@ class CholmodBase : public SparseSolverBase cholmod_finish(&m_cholmod); } - inline Index cols() const { return m_cholmodFactor->n; } - inline Index rows() const { return m_cholmodFactor->n; } + inline StorageIndex cols() const { return internal::convert_index(m_cholmodFactor->n); } + inline StorageIndex rows() const { return internal::convert_index(m_cholmodFactor->n); } /** \brief Reports whether previous computation was successful. * diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h index d42693d4b..82c12076e 100644 --- a/Eigen/src/Core/ArrayBase.h +++ b/Eigen/src/Core/ArrayBase.h @@ -50,7 +50,6 @@ template class ArrayBase typename NumTraits::Scalar>::Real>::operator*; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h index 4db10e697..506bace69 100644 --- a/Eigen/src/Core/AssignEvaluator.h +++ b/Eigen/src/Core/AssignEvaluator.h @@ -179,20 +179,20 @@ struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling +template struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer) + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { - kernel.assignCoeffByOuterInner(outer, Index); - copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); + kernel.assignCoeffByOuterInner(outer, Index_); + copy_using_evaluator_DefaultTraversal_InnerUnrolling::run(kernel, outer); } }; template struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index) { } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { } }; /*********************** @@ -246,13 +246,13 @@ struct copy_using_evaluator_innervec_CompleteUnrolling EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; -template +template struct copy_using_evaluator_innervec_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer) + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { - kernel.template assignPacketByOuterInner(outer, Index); - enum { NextIndex = Index + packet_traits::size }; + kernel.template assignPacketByOuterInner(outer, Index_); + enum { NextIndex = Index_ + packet_traits::size }; copy_using_evaluator_innervec_InnerUnrolling::run(kernel, outer); } }; @@ -260,7 +260,7 @@ struct copy_using_evaluator_innervec_InnerUnrolling 
template struct copy_using_evaluator_innervec_InnerUnrolling { - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, typename Kernel::Index) { } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { } }; /*************************************************************************** @@ -283,8 +283,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static void run(Kernel &kernel) { - typedef typename Kernel::Index Index; - for(Index outer = 0; outer < kernel.outerSize(); ++outer) { for(Index inner = 0; inner < kernel.innerSize(); ++inner) { kernel.assignCoeffByOuterInner(outer, inner); @@ -306,7 +304,7 @@ struct dense_assignment_loop template struct dense_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; @@ -330,7 +328,7 @@ struct unaligned_dense_assignment_loop { // if IsAligned = true, then do nothing template - EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index, typename Kernel::Index) {} + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {} }; template <> @@ -342,16 +340,16 @@ struct unaligned_dense_assignment_loop #if EIGEN_COMP_MSVC template static EIGEN_DONT_INLINE void run(Kernel &kernel, - typename Kernel::Index start, - typename Kernel::Index end) + Index start, + Index end) #else template EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, - typename Kernel::Index start, - typename Kernel::Index end) + Index start, + Index end) #endif { - for (typename Kernel::Index index = start; index < end; ++index) + for (Index index = start; index < end; ++index) kernel.assignCoeff(index); } }; @@ -361,8 +359,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { - typedef typename Kernel::Index Index; - const Index size = kernel.size(); typedef packet_traits PacketTraits; enum { @@ -386,7 +382,7 @@ struct dense_assignment_loop template struct dense_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; @@ -409,8 +405,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { - typedef typename Kernel::Index Index; - const Index innerSize = kernel.innerSize(); const Index outerSize = kernel.outerSize(); const Index packetSize = packet_traits::size; @@ -433,7 +427,7 @@ struct dense_assignment_loop struct dense_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; @@ -452,7 +446,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { - typedef typename Kernel::Index Index; const Index size = kernel.size(); for(Index i = 0; i < size; ++i) kernel.assignCoeff(i); @@ -478,7 +471,6 @@ struct dense_assignment_loop { EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { - typedef typename Kernel::Index Index; typedef packet_traits PacketTraits; enum { packetSize = PacketTraits::size, @@ -533,7 +525,7 @@ public: typedef DstEvaluatorTypeT DstEvaluatorType; typedef SrcEvaluatorTypeT 
SrcEvaluatorType; typedef typename DstEvaluatorType::Scalar Scalar; - typedef typename DstEvaluatorType::Index Index; + typedef typename DstEvaluatorType::StorageIndex StorageIndex; typedef copy_using_evaluator_traits AssignmentTraits; @@ -731,8 +723,8 @@ EIGEN_DEVICE_FUNC void call_assignment_no_alias(Dst& dst, const Src& src, const && int(Dst::SizeAtCompileTime) != 1 }; - typename Dst::Index dstRows = NeedToTranspose ? src.cols() : src.rows(); - typename Dst::Index dstCols = NeedToTranspose ? src.rows() : src.cols(); + Index dstRows = NeedToTranspose ? src.cols() : src.rows(); + Index dstCols = NeedToTranspose ? src.rows() : src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h index e59ee3da9..d07ea7056 100644 --- a/Eigen/src/Core/BandMatrix.h +++ b/Eigen/src/Core/BandMatrix.h @@ -32,7 +32,7 @@ class BandMatrixBase : public EigenBase }; typedef typename internal::traits::Scalar Scalar; typedef Matrix DenseMatrixType; - typedef typename DenseMatrixType::Index Index; + typedef typename DenseMatrixType::StorageIndex StorageIndex; typedef typename internal::traits::CoefficientsType CoefficientsType; typedef EigenBase Base; @@ -179,7 +179,7 @@ struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; - typedef DenseIndex Index; + typedef DenseIndex StorageIndex; enum { CoeffReadCost = NumTraits::ReadCost, RowsAtCompileTime = _Rows, @@ -201,7 +201,7 @@ class BandMatrix : public BandMatrixBase::Scalar Scalar; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::CoefficientsType CoefficientsType; explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) @@ -241,7 +241,7 @@ struct traits::CoeffReadCost, RowsAtCompileTime = _Rows, @@ -264,7 +264,7 @@ class BandMatrixWrapper : public BandMatrixBase::Scalar Scalar; typedef typename internal::traits::CoefficientsType CoefficientsType; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs) : m_coeffs(coeffs), @@ -312,7 +312,7 @@ template class TridiagonalMatrix : public BandMatrix { typedef BandMatrix Base; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; public: explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h index 9cf9d5432..6ea383695 100644 --- a/Eigen/src/Core/Block.h +++ b/Eigen/src/Core/Block.h @@ -154,7 +154,7 @@ class BlockImpl : public internal::BlockImpl_dense { typedef internal::BlockImpl_dense Impl; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; public: typedef Impl Base; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) @@ -306,13 +306,13 @@ template m_startRow; - const internal::variable_if_dynamic m_startCol; - const internal::variable_if_dynamic m_blockRows; - const internal::variable_if_dynamic m_blockCols; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; }; /** \internal Internal implementation of dense Blocks in the direct access case.*/ diff 
--git a/Eigen/src/Core/CommaInitializer.h b/Eigen/src/Core/CommaInitializer.h index 70cbfeff5..98ebe3bf6 100644 --- a/Eigen/src/Core/CommaInitializer.h +++ b/Eigen/src/Core/CommaInitializer.h @@ -28,7 +28,7 @@ template struct CommaInitializer { typedef typename XprType::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s) diff --git a/Eigen/src/Core/CoreEvaluators.h b/Eigen/src/Core/CoreEvaluators.h index a0dc72c4d..eb35b44cb 100644 --- a/Eigen/src/Core/CoreEvaluators.h +++ b/Eigen/src/Core/CoreEvaluators.h @@ -111,7 +111,7 @@ struct evaluator_base typedef evaluator type; typedef evaluator nestedType; - typedef typename traits::Index Index; + typedef typename traits::StorageIndex StorageIndex; // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices. typedef traits ExpressionTraits; }; @@ -128,7 +128,7 @@ struct evaluator > : evaluator_base { typedef PlainObjectBase PlainObjectType; - typedef typename PlainObjectType::Index Index; + typedef typename PlainObjectType::StorageIndex StorageIndex; typedef typename PlainObjectType::Scalar Scalar; typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; typedef typename PlainObjectType::PacketScalar PacketScalar; @@ -264,7 +264,7 @@ struct unary_evaluator, IndexBased> EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {} - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -343,7 +343,7 @@ struct evaluator > : m_functor(n.functor()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -394,7 +394,7 @@ struct unary_evaluator, IndexBased > m_argImpl(op.nestedExpression()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -469,7 +469,7 @@ struct binary_evaluator, IndexBased, IndexBase m_rhsImpl(xpr.rhs()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -522,7 +522,7 @@ struct unary_evaluator, IndexBased> m_argImpl(op.nestedExpression()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; @@ -563,7 +563,7 @@ struct mapbase_evaluator : evaluator_base { typedef Derived XprType; typedef typename XprType::PointerType PointerType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -760,7 +760,7 @@ struct unary_evaluator, IndexBa m_startCol(block.startCol()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename 
XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -865,7 +865,7 @@ struct evaluator > m_elseImpl(select.elseMatrix()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; inline EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const @@ -898,7 +898,7 @@ struct unary_evaluator > : evaluator_base > { typedef Replicate XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketReturnType PacketReturnType; enum { @@ -981,7 +981,7 @@ struct evaluator > : m_expr(expr) {} - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const @@ -1016,7 +1016,7 @@ struct evaluator_wrapper_base EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} - typedef typename ArgType::Index Index; + typedef typename ArgType::StorageIndex StorageIndex; typedef typename ArgType::Scalar Scalar; typedef typename ArgType::CoeffReturnType CoeffReturnType; typedef typename ArgType::PacketScalar PacketScalar; @@ -1103,7 +1103,7 @@ struct unary_evaluator > : evaluator_base > { typedef Reverse XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -1219,7 +1219,7 @@ struct evaluator > m_index(diagonal.index()) { } - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; diff --git a/Eigen/src/Core/CoreIterators.h b/Eigen/src/Core/CoreIterators.h index 7feebc4e4..141eaa2eb 100644 --- a/Eigen/src/Core/CoreIterators.h +++ b/Eigen/src/Core/CoreIterators.h @@ -36,7 +36,7 @@ protected: typedef internal::inner_iterator_selector::Kind> IteratorType; typedef typename internal::evaluator::type EvaluatorType; typedef typename internal::traits::Scalar Scalar; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; public: /** Construct an iterator over the \a outerId -th row or column of \a xpr */ InnerIterator(const XprType &xpr, const Index &outerId) @@ -50,11 +50,11 @@ public: */ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; } /// \returns the column or row index of the current coefficient. - EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_iter.index(); } /// \returns the row index of the current coefficient. - EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_iter.row(); } /// \returns the column index of the current coefficient. - EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_iter.col(); } /// \returns \c true if the iterator \c *this still references a valid coefficient. 
EIGEN_STRONG_INLINE operator bool() const { return m_iter; } @@ -77,7 +77,7 @@ class inner_iterator_selector protected: typedef typename evaluator::type EvaluatorType; typedef typename traits::Scalar Scalar; - typedef typename traits::Index Index; + typedef typename traits::StorageIndex StorageIndex; enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; public: @@ -93,9 +93,9 @@ public: EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; } - EIGEN_STRONG_INLINE Index index() const { return m_inner; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } @@ -115,7 +115,7 @@ class inner_iterator_selector protected: typedef typename evaluator::InnerIterator Base; typedef typename evaluator::type EvaluatorType; - typedef typename traits::Index Index; + typedef typename traits::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/) diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h index a205c3f10..4d4626279 100644 --- a/Eigen/src/Core/CwiseBinaryOp.h +++ b/Eigen/src/Core/CwiseBinaryOp.h @@ -59,8 +59,8 @@ struct traits > typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, BinaryOp>::ret StorageKind; - typedef typename promote_index_type::Index, - typename traits::Index>::type Index; + typedef typename promote_index_type::StorageIndex, + typename traits::StorageIndex>::type StorageIndex; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename remove_reference::type _LhsNested; @@ -111,7 +111,7 @@ class CwiseBinaryOp : } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index rows() const { + EIGEN_STRONG_INLINE StorageIndex rows() const { // return the fixed size type if available to enable compile time optimizations if (internal::traits::type>::RowsAtCompileTime==Dynamic) return m_rhs.rows(); @@ -119,7 +119,7 @@ class CwiseBinaryOp : return m_lhs.rows(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index cols() const { + EIGEN_STRONG_INLINE StorageIndex cols() const { // return the fixed size type if available to enable compile time optimizations if (internal::traits::type>::ColsAtCompileTime==Dynamic) return m_rhs.cols(); diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h index da1d1992d..5388af216 100644 --- a/Eigen/src/Core/CwiseUnaryOp.h +++ b/Eigen/src/Core/CwiseUnaryOp.h @@ -66,9 +66,9 @@ class CwiseUnaryOp : public CwiseUnaryOpImpl class DenseBase typedef typename internal::traits::StorageKind StorageKind; - /** \brief The type of indices + /** \brief The interface type of indices * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. - * \sa \ref TopicPreprocessorDirectives. + * \sa \ref TopicPreprocessorDirectives, StorageIndex. 
*/ - typedef typename internal::traits::Index Index; + typedef Eigen::Index Index; + + /** + * \brief The type used to store indices + * \details This typedef is relevant for types that store multiple indices such as + * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index + * \sa \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. + */ + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h index a9e4dbaf9..569fed956 100644 --- a/Eigen/src/Core/DenseCoeffsBase.h +++ b/Eigen/src/Core/DenseCoeffsBase.h @@ -35,7 +35,6 @@ class DenseCoeffsBase : public EigenBase { public: typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; @@ -287,7 +286,6 @@ class DenseCoeffsBase : public DenseCoeffsBase Base; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; @@ -450,7 +448,6 @@ class DenseCoeffsBase : public DenseCoeffsBase Base; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; @@ -525,7 +522,6 @@ class DenseCoeffsBase public: typedef DenseCoeffsBase Base; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h index 33b82f90f..18f061179 100644 --- a/Eigen/src/Core/Diagonal.h +++ b/Eigen/src/Core/Diagonal.h @@ -70,28 +70,28 @@ template class Diagonal EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) EIGEN_DEVICE_FUNC - explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {} + explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(internal::convert_index(a_index)) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) EIGEN_DEVICE_FUNC - inline Index rows() const + inline StorageIndex rows() const { - return m_index.value()<0 ? numext::mini(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value())) - : numext::mini(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value())); + return m_index.value()<0 ? 
numext::mini(m_matrix.cols(),m_matrix.rows()+m_index.value()) + : numext::mini(m_matrix.rows(),m_matrix.cols()-m_index.value()); } EIGEN_DEVICE_FUNC - inline Index cols() const { return 1; } + inline StorageIndex cols() const { return 1; } EIGEN_DEVICE_FUNC - inline Index innerStride() const + inline StorageIndex innerStride() const { return m_matrix.outerStride() + 1; } EIGEN_DEVICE_FUNC - inline Index outerStride() const + inline StorageIndex outerStride() const { return 0; } @@ -153,23 +153,23 @@ template class Diagonal } EIGEN_DEVICE_FUNC - inline Index index() const + inline StorageIndex index() const { return m_index.value(); } protected: typename MatrixType::Nested m_matrix; - const internal::variable_if_dynamicindex m_index; + const internal::variable_if_dynamicindex m_index; private: // some compilers may fail to optimize std::max etc in case of compile-time constants... EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } + EIGEN_STRONG_INLINE StorageIndex absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } + EIGEN_STRONG_INLINE StorageIndex rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } + EIGEN_STRONG_INLINE StorageIndex colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } // trigger a compile time error is someone try to call packet template typename MatrixType::PacketReturnType packet(Index) const; template typename MatrixType::PacketReturnType packet(Index,Index) const; diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h index e3dc71336..f37091000 100644 --- a/Eigen/src/Core/DiagonalMatrix.h +++ b/Eigen/src/Core/DiagonalMatrix.h @@ -22,7 +22,7 @@ class DiagonalBase : public EigenBase typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::RealScalar RealScalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; enum { RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, @@ -108,7 +108,7 @@ struct traits > { typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; typedef DiagonalShape StorageKind; - typedef DenseIndex Index; +// typedef DenseIndex Index; enum { Flags = LvalueBit | NoPreferredStorageOrderBit }; @@ -124,7 +124,7 @@ class DiagonalMatrix typedef const DiagonalMatrix& Nested; typedef _Scalar Scalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; #endif protected: @@ -230,7 +230,7 @@ struct traits > { typedef _DiagonalVectorType DiagonalVectorType; typedef typename DiagonalVectorType::Scalar Scalar; - typedef typename DiagonalVectorType::Index Index; + typedef typename DiagonalVectorType::StorageIndex StorageIndex; typedef DiagonalShape StorageKind; typedef typename traits::XprKind XprKind; enum { diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h index 52b66e6dc..c98ca467a 100644 --- a/Eigen/src/Core/EigenBase.h +++ b/Eigen/src/Core/EigenBase.h @@ -28,7 +28,7 @@ template struct EigenBase // typedef typename 
internal::plain_matrix_type::type PlainObject; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; /** \returns a reference to the derived object */ EIGEN_DEVICE_FUNC @@ -46,14 +46,14 @@ template struct EigenBase /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ EIGEN_DEVICE_FUNC - inline Index rows() const { return derived().rows(); } + inline StorageIndex rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ EIGEN_DEVICE_FUNC - inline Index cols() const { return derived().cols(); } + inline StorageIndex cols() const { return derived().cols(); } /** \returns the number of coefficients, which is rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ EIGEN_DEVICE_FUNC - inline Index size() const { return rows() * cols(); } + inline StorageIndex size() const { return rows() * cols(); } /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ template diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h index 3c67edae5..3dafee9d7 100644 --- a/Eigen/src/Core/MapBase.h +++ b/Eigen/src/Core/MapBase.h @@ -37,7 +37,6 @@ template class MapBase }; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h index 0b3d90786..94b1a966e 100644 --- a/Eigen/src/Core/Matrix.h +++ b/Eigen/src/Core/Matrix.h @@ -107,7 +107,7 @@ struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; - typedef DenseIndex Index; + typedef DenseIndex StorageIndex; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = _Rows, diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index 86994cb36..5c00d6a63 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -52,7 +52,8 @@ template class MatrixBase #ifndef EIGEN_PARSED_BY_DOXYGEN typedef MatrixBase StorageBaseType; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef Eigen::Index Index; + typedef Index StorageIndex; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h index 4846f2ae1..886d59a2c 100644 --- a/Eigen/src/Core/PermutationMatrix.h +++ b/Eigen/src/Core/PermutationMatrix.h @@ -67,7 +67,7 @@ class PermutationBase : public EigenBase MaxColsAtCompileTime = Traits::MaxColsAtCompileTime }; typedef typename Traits::StorageIndexType StorageIndexType; - typedef typename Traits::Index Index; + typedef typename Traits::StorageIndex StorageIndex; typedef Matrix DenseMatrixType; typedef PermutationMatrix @@ -277,7 +277,7 @@ struct traits IndicesType; - typedef typename IndicesType::Index Index; + typedef typename IndicesType::StorageIndex StorageIndex; typedef _StorageIndexType StorageIndexType; }; } @@ -294,7 +294,7 @@ class PermutationMatrix : public PermutationBase, _PacketAccess> IndicesType; - typedef typename IndicesType::Index Index; + typedef typename IndicesType::StorageIndex StorageIndex; typedef _StorageIndexType StorageIndexType; }; } @@ -418,7 +418,7 @@ class Map > typedef 
PermutationStorage StorageKind; typedef typename _IndicesType::Scalar Scalar; typedef typename _IndicesType::Scalar StorageIndexType; - typedef typename _IndicesType::Index Index; + typedef typename _IndicesType::StorageIndex StorageIndex; typedef _IndicesType IndicesType; enum { RowsAtCompileTime = _IndicesType::SizeAtCompileTime, @@ -558,7 +558,7 @@ struct permut_matrix_product_retval : public ReturnByValue > { typedef typename remove_all::type MatrixTypeNestedCleaned; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix) : m_permutation(perm), m_matrix(matrix) @@ -650,7 +650,7 @@ class Transpose > MaxColsAtCompileTime = Traits::MaxColsAtCompileTime }; typedef typename Traits::Scalar Scalar; - typedef typename Traits::Index Index; + typedef typename Traits::StorageIndex StorageIndex; #endif Transpose(const PermutationType& p) : m_permutation(p) {} diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h index 06e326a05..65d69f484 100644 --- a/Eigen/src/Core/PlainObjectBase.h +++ b/Eigen/src/Core/PlainObjectBase.h @@ -95,7 +95,6 @@ class PlainObjectBase : public internal::dense_xpr_base::type typedef typename internal::dense_xpr_base::type Base; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; @@ -846,7 +845,6 @@ namespace internal { template struct conservative_resize_like_impl { - typedef typename Derived::Index Index; static void run(DenseBase& _this, Index rows, Index cols) { if (_this.rows() == rows && _this.cols() == cols) return; @@ -912,7 +910,6 @@ struct conservative_resize_like_impl { using conservative_resize_like_impl::run; - typedef typename Derived::Index Index; static void run(DenseBase& _this, Index size) { const Index new_rows = Derived::RowsAtCompileTime==1 ? 
1 : size; diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index cb79543ef..8ff13fbba 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -67,8 +67,8 @@ struct traits > typedef typename product_promote_storage_type::ret>::ret StorageKind; - typedef typename promote_index_type::type Index; + typedef typename promote_index_type::type StorageIndex; enum { RowsAtCompileTime = LhsTraits::RowsAtCompileTime, @@ -120,8 +120,8 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option, && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } - EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); } + EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_lhs.rows(); } + EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; } EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; } @@ -149,7 +149,7 @@ class dense_product_base public: using Base::derived; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; operator const Scalar() const { diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h index 3cebbbd12..b2c9b56ed 100644 --- a/Eigen/src/Core/ProductEvaluators.h +++ b/Eigen/src/Core/ProductEvaluators.h @@ -210,7 +210,6 @@ struct generic_product_impl template EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&) { - typedef typename Dst::Index Index; // FIXME make sure lhs is sequentially stored // FIXME not very good if rhs is real and lhs complex while alpha is real too // FIXME we should probably build an evaluator for dst and rhs @@ -222,7 +221,6 @@ EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, cons // Row major result template EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&) { - typedef typename Dst::Index Index; // FIXME make sure rhs is sequentially stored // FIXME not very good if lhs is real and rhs complex while alpha is real too // FIXME we should probably build an evaluator for dst and lhs @@ -372,7 +370,7 @@ struct product_evaluator, ProductTag, DenseShape, : evaluator_base > { typedef Product XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -524,7 +522,7 @@ struct product_evaluator, LazyCoeffBasedProduc template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); @@ -535,7 +533,7 @@ struct etor_product_packet_impl struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); @@ -546,7 +544,7 @@ struct etor_product_packet_impl struct 
etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); @@ -556,7 +554,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); @@ -566,7 +564,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { eigen_assert(innerDim>0 && "you are using a non initialized matrix"); @@ -579,7 +577,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { eigen_assert(innerDim>0 && "you are using a non initialized matrix"); @@ -668,7 +666,7 @@ template { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename scalar_product_traits::ReturnType Scalar; typedef typename internal::packet_traits::type PacketScalar; public: @@ -733,7 +731,7 @@ struct product_evaluator, ProductTag, DiagonalSha using Base::coeff; using Base::packet_impl; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PacketScalar PacketScalar; typedef Product XprType; @@ -781,7 +779,7 @@ struct product_evaluator, ProductTag, DenseShape, using Base::coeff; using Base::packet_impl; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PacketScalar PacketScalar; typedef Product XprType; diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h index af01a5567..d2b80d872 100644 --- a/Eigen/src/Core/ReturnByValue.h +++ b/Eigen/src/Core/ReturnByValue.h @@ -61,8 +61,8 @@ template class ReturnByValue EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { static_cast(this)->evalTo(dst); } - EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast(this)->rows(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast(this)->cols(); } + EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return static_cast(this)->rows(); } + EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return static_cast(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h index b785e8e1e..2d5760066 100644 --- a/Eigen/src/Core/SelfAdjointView.h +++ b/Eigen/src/Core/SelfAdjointView.h @@ -59,7 +59,7 @@ template class SelfAdjointView /** \brief The type of coefficients in this matrix */ typedef typename internal::traits::Scalar Scalar; - typedef typename 
MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; enum { Mode = internal::traits::Mode, @@ -224,7 +224,7 @@ public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::AssignmentTraits AssignmentTraits; diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h index 3905cd616..5a3a4235e 100644 --- a/Eigen/src/Core/Solve.h +++ b/Eigen/src/Core/Solve.h @@ -48,6 +48,7 @@ struct traits > : traits::StorageKind>::PlainObject> { typedef typename solve_traits::StorageKind>::PlainObject PlainObject; + typedef typename promote_index_type::type StorageIndex; typedef traits BaseTraits; enum { Flags = BaseTraits::Flags & RowMajorBit, @@ -62,15 +63,15 @@ template class Solve : public SolveImpl::StorageKind> { public: - typedef typename RhsType::Index Index; typedef typename internal::traits::PlainObject PlainObject; + typedef typename internal::traits::StorageIndex StorageIndex; Solve(const Decomposition &dec, const RhsType &rhs) : m_dec(dec), m_rhs(rhs) {} - EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); } - EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); } + EIGEN_DEVICE_FUNC StorageIndex rows() const { return m_dec.cols(); } + EIGEN_DEVICE_FUNC StorageIndex cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; } EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; } diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h index 0b7e39827..83a973365 100644 --- a/Eigen/src/Core/StableNorm.h +++ b/Eigen/src/Core/StableNorm.h @@ -55,7 +55,6 @@ inline typename NumTraits::Scalar>::Real blueNorm_impl(const EigenBase& _vec) { typedef typename Derived::RealScalar RealScalar; - typedef typename Derived::Index Index; using std::pow; using std::sqrt; using std::abs; diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h index 55319320a..3d4d8b802 100644 --- a/Eigen/src/Core/Swap.h +++ b/Eigen/src/Core/Swap.h @@ -28,7 +28,7 @@ protected: public: typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::DstXprType DstXprType; typedef swap_assign_op Functor; diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h index a3b95256f..e1316a73d 100644 --- a/Eigen/src/Core/Transpose.h +++ b/Eigen/src/Core/Transpose.h @@ -29,14 +29,10 @@ namespace Eigen { namespace internal { template -struct traits > +struct traits > : public traits { - typedef typename traits::Scalar Scalar; - typedef typename traits::Index Index; typedef typename nested::type MatrixTypeNested; typedef typename remove_reference::type MatrixTypeNestedPlain; - typedef typename traits::StorageKind StorageKind; - typedef typename traits::XprKind XprKind; enum { RowsAtCompileTime = MatrixType::ColsAtCompileTime, ColsAtCompileTime = MatrixType::RowsAtCompileTime, @@ -68,8 +64,8 @@ template class Transpose EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) - EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); } - EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_matrix.cols(); } + EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_matrix.rows(); } /** \returns the nested expression */ EIGEN_DEVICE_FUNC 
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h index cf0255bce..d8135be27 100644 --- a/Eigen/src/Core/TriangularMatrix.h +++ b/Eigen/src/Core/TriangularMatrix.h @@ -45,7 +45,7 @@ template class TriangularBase : public EigenBase }; typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::FullMatrixType DenseMatrixType; typedef DenseMatrixType DenseType; typedef Derived const& Nested; @@ -54,9 +54,9 @@ template class TriangularBase : public EigenBase inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } EIGEN_DEVICE_FUNC - inline Index rows() const { return derived().rows(); } + inline StorageIndex rows() const { return derived().rows(); } EIGEN_DEVICE_FUNC - inline Index cols() const { return derived().cols(); } + inline StorageIndex cols() const { return derived().cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().outerStride(); } EIGEN_DEVICE_FUNC @@ -199,7 +199,7 @@ template class TriangularView public: typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::MatrixTypeNestedCleaned NestedExpression; enum { @@ -222,9 +222,9 @@ template class TriangularView { return Base::operator=(other); } EIGEN_DEVICE_FUNC - inline Index rows() const { return m_matrix.rows(); } + inline StorageIndex rows() const { return m_matrix.rows(); } EIGEN_DEVICE_FUNC - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex cols() const { return m_matrix.cols(); } EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; } @@ -325,7 +325,7 @@ template class TriangularViewImpl<_Mat using Base::derived; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; enum { Mode = _Mode, @@ -688,7 +688,7 @@ public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::AssignmentTraits AssignmentTraits; @@ -831,7 +831,7 @@ struct triangular_assignment_loop template struct triangular_assignment_loop { - typedef typename Kernel::Index Index; + typedef typename Kernel::StorageIndex StorageIndex; typedef typename Kernel::Scalar Scalar; EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index bc26043d7..11b7e2887 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -629,7 +629,7 @@ namespace Eigen { typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. 
*/ \ typedef typename Eigen::internal::nested::type Nested; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ + typedef typename Eigen::internal::traits::StorageIndex StorageIndex; \ enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ Flags = Eigen::internal::traits::Flags, \ @@ -639,23 +639,13 @@ namespace Eigen { #define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ - typedef typename Eigen::internal::traits::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex. */ \ - typedef typename Eigen::NumTraits::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex, T were corresponding to RealScalar. */ \ + EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ typedef typename Base::PacketScalar PacketScalar; \ - typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ - typedef typename Eigen::internal::nested::type Nested; \ - typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ - enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ - ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ - MaxRowsAtCompileTime = Eigen::internal::traits::MaxRowsAtCompileTime, \ - MaxColsAtCompileTime = Eigen::internal::traits::MaxColsAtCompileTime, \ - Flags = Eigen::internal::traits::Flags, \ - SizeAtCompileTime = Base::SizeAtCompileTime, \ - MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ - IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ + typedef Eigen::Index Index; \ + enum { MaxRowsAtCompileTime = Eigen::internal::traits::MaxRowsAtCompileTime, \ + MaxColsAtCompileTime = Eigen::internal::traits::MaxColsAtCompileTime}; \ using Base::derived; \ - using Base::const_cast_derived; + using Base::const_cast_derived; #define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b) #define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b) diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h index 09866ad8d..299e5cbc2 100644 --- a/Eigen/src/Core/util/XprHelper.h +++ b/Eigen/src/Core/util/XprHelper.h @@ -26,8 +26,25 @@ namespace Eigen { typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex; +/** + * \brief The Index type as used for the API. + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. + * \sa \ref TopicPreprocessorDirectives, StorageIndex. + */ + +typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index; + namespace internal { +template +EIGEN_DEVICE_FUNC +inline IndexDest convert_index(const IndexSrc& idx) { + // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away: + eigen_internal_assert(idx <= NumTraits::highest() && "Index value to big for target type"); + return IndexDest(idx); +} + + //classes inheriting no_assignment_operator don't generate a default operator=. 
class no_assignment_operator { diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index 7ebde6803..276e94c58 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -66,7 +66,7 @@ template struct traits > { typedef _Scalar Scalar; - typedef DenseIndex Index; + typedef DenseIndex StorageIndex; typedef Dense StorageKind; enum { Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1, @@ -202,6 +202,7 @@ public: }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; + typedef DenseIndex StorageIndex; typedef DenseIndex Index; /** type of the matrix used to represent the transformation */ typedef typename internal::make_proper_matrix_type::type MatrixType; diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h index 4ded2995f..bf2bb59ab 100644 --- a/Eigen/src/Householder/HouseholderSequence.h +++ b/Eigen/src/Householder/HouseholderSequence.h @@ -60,7 +60,7 @@ template struct traits > { typedef typename VectorsType::Scalar Scalar; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; typedef typename VectorsType::StorageKind StorageKind; enum { RowsAtCompileTime = Side==OnTheLeft ? traits::RowsAtCompileTime @@ -87,7 +87,7 @@ struct hseq_side_dependent_impl { typedef Block EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; @@ -100,7 +100,7 @@ struct hseq_side_dependent_impl { typedef Transpose > EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; @@ -131,7 +131,7 @@ template class HouseholderS MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime }; typedef typename internal::traits::Scalar Scalar; - typedef typename VectorsType::Index Index; + typedef typename VectorsType::StorageIndex StorageIndex; typedef HouseholderSequence< typename internal::conditional::IsComplex, diff --git a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h index 3991afa8f..a09f81225 100644 --- a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +++ b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h @@ -34,9 +34,8 @@ class DiagonalPreconditioner { typedef _Scalar Scalar; typedef Matrix Vector; - typedef typename Vector::Index Index; - public: + typedef typename Vector::StorageIndex StorageIndex; // this typedef is only to export the scalar type and compile-time dimensions to solve_retval typedef Matrix MatrixType; diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h index 224fe913f..5f55efbe9 100644 --- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -159,7 +159,6 @@ class BiCGSTAB : public IterativeSolverBase(row.size()); /* length of the vector */ Index first, last ; ncut--; /* to fit the zero-based indices */ @@ -105,7 +105,7 @@ class IncompleteLUT : public SparseSolverBase > typedef Matrix Vector; typedef SparseMatrix FactorType; typedef SparseMatrix PermutType; - typedef typename 
FactorType::Index Index; + typedef typename FactorType::StorageIndex StorageIndex; public: typedef Matrix MatrixType; @@ -124,9 +124,9 @@ class IncompleteLUT : public SparseSolverBase > compute(mat); } - Index rows() const { return m_lu.rows(); } + StorageIndex rows() const { return m_lu.rows(); } - Index cols() const { return m_lu.cols(); } + StorageIndex cols() const { return m_lu.cols(); } /** \brief Reports whether previous computation was successful. * @@ -189,8 +189,8 @@ protected: bool m_analysisIsOk; bool m_factorizationIsOk; ComputationInfo m_info; - PermutationMatrix m_P; // Fill-reducing permutation - PermutationMatrix m_Pinv; // Inverse permutation + PermutationMatrix m_P; // Fill-reducing permutation + PermutationMatrix m_Pinv; // Inverse permutation }; /** @@ -218,14 +218,14 @@ template void IncompleteLUT::analyzePattern(const _MatrixType& amat) { // Compute the Fill-reducing permutation - SparseMatrix mat1 = amat; - SparseMatrix mat2 = amat.transpose(); + SparseMatrix mat1 = amat; + SparseMatrix mat2 = amat.transpose(); // Symmetrize the pattern // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... - SparseMatrix AtA = mat2 + mat1; + SparseMatrix AtA = mat2 + mat1; AtA.prune(keep_diag()); - internal::minimum_degree_ordering(AtA, m_P); // Then compute the AMD ordering... + internal::minimum_degree_ordering(AtA, m_P); // Then compute the AMD ordering... m_Pinv = m_P.inverse(); // ... and the inverse permutation @@ -241,7 +241,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) using std::abs; eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); - Index n = amat.cols(); // Size of the matrix + StorageIndex n = amat.cols(); // Size of the matrix m_lu.resize(n,n); // Declare Working vectors and variables Vector u(n) ; // real values of the row -- maximum size is n -- @@ -250,7 +250,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) // Apply the fill-reducing permutation eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); - SparseMatrix mat; + SparseMatrix mat; mat = amat.twistedBy(m_Pinv); // Initialization @@ -259,21 +259,21 @@ void IncompleteLUT::factorize(const _MatrixType& amat) u.fill(0); // number of largest elements to keep in each row: - Index fill_in = static_cast (amat.nonZeros()*m_fillfactor)/n+1; + StorageIndex fill_in = static_cast (amat.nonZeros()*m_fillfactor)/n+1; if (fill_in > n) fill_in = n; // number of largest nonzero elements to keep in the L and the U part of the current row: - Index nnzL = fill_in/2; - Index nnzU = nnzL; + StorageIndex nnzL = fill_in/2; + StorageIndex nnzU = nnzL; m_lu.reserve(n * (nnzL + nnzU + 1)); // global loop over the rows of the sparse matrix - for (Index ii = 0; ii < n; ii++) + for (StorageIndex ii = 0; ii < n; ii++) { // 1 - copy the lower and the upper part of the row i of mat in the working vector u - Index sizeu = 1; // number of nonzero elements in the upper part of the current row - Index sizel = 0; // number of nonzero elements in the lower part of the current row + StorageIndex sizeu = 1; // number of nonzero elements in the upper part of the current row + StorageIndex sizel = 0; // number of nonzero elements in the lower part of the current row ju(ii) = ii; u(ii) = 0; jr(ii) = ii; @@ -282,7 +282,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) typename FactorType::InnerIterator j_it(mat, ii); // Iterate 
through the current row ii for (; j_it; ++j_it) { - Index k = j_it.index(); + StorageIndex k = j_it.index(); if (k < ii) { // copy the lower part @@ -298,7 +298,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) else { // copy the upper part - Index jpos = ii + sizeu; + StorageIndex jpos = ii + sizeu; ju(jpos) = k; u(jpos) = j_it.value(); jr(k) = jpos; @@ -317,19 +317,19 @@ void IncompleteLUT::factorize(const _MatrixType& amat) rownorm = sqrt(rownorm); // 3 - eliminate the previous nonzero rows - Index jj = 0; - Index len = 0; + StorageIndex jj = 0; + StorageIndex len = 0; while (jj < sizel) { // In order to eliminate in the correct order, // we must select first the smallest column index among ju(jj:sizel) - Index k; - Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment + StorageIndex k; + StorageIndex minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment k += jj; if (minrow != ju(jj)) { // swap the two locations - Index j = ju(jj); + StorageIndex j = ju(jj); swap(ju(jj), ju(k)); jr(minrow) = jj; jr(j) = k; swap(u(jj), u(k)); @@ -355,11 +355,11 @@ void IncompleteLUT::factorize(const _MatrixType& amat) for (; ki_it; ++ki_it) { Scalar prod = fact * ki_it.value(); - Index j = ki_it.index(); - Index jpos = jr(j); + StorageIndex j = ki_it.index(); + StorageIndex jpos = jr(j); if (jpos == -1) // fill-in element { - Index newpos; + StorageIndex newpos; if (j >= ii) // dealing with the upper part { newpos = ii + sizeu; @@ -388,7 +388,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) } // end of the elimination on the row ii // reset the upper part of the pointer jr to zero - for(Index k = 0; k ::factorize(const _MatrixType& amat) // store the largest m_fill elements of the L part m_lu.startVec(ii); - for(Index k = 0; k < len; k++) + for(StorageIndex k = 0; k < len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); // store the diagonal element @@ -413,7 +413,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) // sort the U-part of the row // apply the dropping rule first len = 0; - for(Index k = 1; k < sizeu; k++) + for(StorageIndex k = 1; k < sizeu; k++) { if(abs(u(ii+k)) > m_droptol * rownorm ) { @@ -429,7 +429,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) internal::QuickSplit(uu, juu, len); // store the largest elements of the U part - for(Index k = ii + 1; k < ii + len; k++) + for(StorageIndex k = ii + 1; k < ii + len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); } diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h index f33c868bb..cc99e00f9 100644 --- a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -28,7 +28,7 @@ public: typedef typename internal::traits::MatrixType MatrixType; typedef typename internal::traits::Preconditioner Preconditioner; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; public: @@ -115,9 +115,9 @@ public: } /** \internal */ - Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; } + StorageIndex rows() const { return mp_matrix ? mp_matrix->rows() : 0; } /** \internal */ - Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; } + StorageIndex cols() const { return mp_matrix ? 
mp_matrix->cols() : 0; } /** \returns the tolerance threshold used by the stopping criteria */ RealScalar tolerance() const { return m_tolerance; } diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h index 96f2cebee..eb4520004 100644 --- a/Eigen/src/LU/FullPivLU.h +++ b/Eigen/src/LU/FullPivLU.h @@ -67,6 +67,7 @@ template class FullPivLU typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename internal::plain_row_type::type IntRowVectorType; typedef typename internal::plain_col_type::type IntColVectorType; typedef PermutationMatrix PermutationQType; diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index d04e4191b..7e2c8b471 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -73,6 +73,7 @@ template class PartialPivLU typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::StorageKind StorageKind; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef PermutationMatrix PermutationType; typedef Transpositions TranspositionType; typedef typename MatrixType::PlainObject PlainObject; diff --git a/Eigen/src/OrderingMethods/Amd.h b/Eigen/src/OrderingMethods/Amd.h index ce7c0bbf3..50022d1ca 100644 --- a/Eigen/src/OrderingMethods/Amd.h +++ b/Eigen/src/OrderingMethods/Amd.h @@ -42,7 +42,7 @@ template inline void amd_mark(const T0* w, const T1& j /* clear w */ template -static int cs_wclear (Index mark, Index lemax, Index *w, Index n) +static Index cs_wclear (Index mark, Index lemax, Index *w, Index n) { Index k; if(mark < 2 || (mark + lemax < 0)) @@ -59,7 +59,7 @@ static int cs_wclear (Index mark, Index lemax, Index *w, Index n) template Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack) { - int i, p, top = 0; + Index i, p, top = 0; if(!head || !next || !post || !stack) return (-1); /* check inputs */ stack[0] = j; /* place j on the stack */ while (top >= 0) /* while (stack is not empty) */ @@ -92,11 +92,12 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation { using std::sqrt; - int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, - k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, - ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t; - unsigned int h; + Index d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, + k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, + ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t; + std::size_t h; + Index n = C.cols(); dense = std::max (16, Index(10 * sqrt(double(n)))); /* find dense threshold */ dense = std::min (n-2, dense); @@ -330,7 +331,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation h %= n; /* finalize hash of i */ next[i] = hhead[h]; /* place i in hash bucket */ hhead[h] = i; - last[i] = h; /* save hash of i in last[i] */ + last[i] = Index(h); /* save hash of i in last[i] */ } } /* scan2 is done */ degree[k] = dk; /* finalize |Lk| */ diff --git a/Eigen/src/PaStiXSupport/PaStiXSupport.h b/Eigen/src/PaStiXSupport/PaStiXSupport.h index a96c27695..27acf4128 100644 --- a/Eigen/src/PaStiXSupport/PaStiXSupport.h +++ b/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -139,6 +139,7 @@ class PastixBase : public SparseSolverBase typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename 
MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix Vector; typedef SparseMatrix ColSpMatrix; diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h index 054af6635..7c75dcb7f 100644 --- a/Eigen/src/PardisoSupport/PardisoSupport.h +++ b/Eigen/src/PardisoSupport/PardisoSupport.h @@ -110,7 +110,7 @@ class PardisoImpl : public SparseSolveBase typedef typename Traits::MatrixType MatrixType; typedef typename Traits::Scalar Scalar; typedef typename Traits::RealScalar RealScalar; - typedef typename Traits::Index Index; + typedef typename Traits::StorageIndex StorageIndex; typedef SparseMatrix SparseMatrixType; typedef Matrix VectorType; typedef Matrix IntRowVectorType; diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h index de77e8411..6fade3755 100644 --- a/Eigen/src/QR/ColPivHouseholderQR.h +++ b/Eigen/src/QR/ColPivHouseholderQR.h @@ -58,6 +58,7 @@ template class ColPivHouseholderQR typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix MatrixQType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef PermutationMatrix PermutationType; @@ -69,7 +70,7 @@ template class ColPivHouseholderQR private: - typedef typename PermutationType::Index PermIndexType; + typedef typename PermutationType::StorageIndex PermIndexType; public: diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index 5712d175c..90ab25b2b 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -67,6 +67,7 @@ template class FullPivHouseholderQR typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef internal::FullPivHouseholderQRMatrixQReturnType MatrixQReturnType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef Matrix class HouseholderQR typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix MatrixQType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef typename internal::plain_row_type::type RowVectorType; diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h index 44f6a1acb..5fd18b787 100644 --- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h @@ -63,9 +63,9 @@ class SPQR : public SparseSolverBase > public: typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; - typedef UF_long Index ; - typedef SparseMatrix MatrixType; - typedef Map > PermutationType; + typedef UF_long StorageIndex ; + typedef SparseMatrix MatrixType; + typedef Map > PermutationType; public: SPQR() : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits::epsilon()) @@ -150,7 +150,7 @@ class SPQR : public SparseSolverBase > { eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); if(!m_isRUpToDate) { - m_R = viewAsEigen(*m_cR); + m_R = viewAsEigen(*m_cR); m_isRUpToDate = true; } return m_R; @@ -204,11 +204,11 @@ class SPQR : public 
SparseSolverBase > RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format mutable MatrixType m_R; // The sparse matrix R in Eigen format - mutable Index *m_E; // The permutation applied to columns + mutable StorageIndex *m_E; // The permutation applied to columns mutable cholmod_sparse *m_H; //The householder vectors - mutable Index *m_HPinv; // The row permutation of H + mutable StorageIndex *m_HPinv; // The row permutation of H mutable cholmod_dense *m_HTau; // The Householder coefficients - mutable Index m_rank; // The rank of the matrix + mutable StorageIndex m_rank; // The rank of the matrix mutable cholmod_common m_cc; // Workspace and parameters template friend struct SPQR_QProduct; }; @@ -217,12 +217,12 @@ template struct SPQR_QProduct : ReturnByValue > { typedef typename SPQRType::Scalar Scalar; - typedef typename SPQRType::Index Index; + typedef typename SPQRType::StorageIndex StorageIndex; //Define the constructor to get reference to argument types SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {} - inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); } - inline Index cols() const { return m_other.cols(); } + inline StorageIndex rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); } + inline StorageIndex cols() const { return m_other.cols(); } // Assign to a vector template void evalTo(ResType& res) const diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h index 27b732b80..0bc2ede28 100644 --- a/Eigen/src/SVD/SVDBase.h +++ b/Eigen/src/SVD/SVDBase.h @@ -53,6 +53,7 @@ public: typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h index 918a34e13..b148d6b1f 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -44,8 +44,8 @@ class SimplicialCholeskyBase : public SparseSolverBase enum { UpLo = internal::traits::UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; public: @@ -70,8 +70,8 @@ class SimplicialCholeskyBase : public SparseSolverBase Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } - inline Index cols() const { return m_matrix.cols(); } - inline Index rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } /** \brief Reports whether previous computation was successful. 
* @@ -216,16 +216,16 @@ class SimplicialCholeskyBase : public SparseSolverBase VectorType m_diag; // the diagonal coefficients (LDLT mode) VectorXi m_parent; // elimination tree VectorXi m_nonZerosPerCol; - PermutationMatrix m_P; // the permutation - PermutationMatrix m_Pinv; // the inverse permutation + PermutationMatrix m_P; // the permutation + PermutationMatrix m_Pinv; // the inverse permutation RealScalar m_shiftOffset; RealScalar m_shiftScale; }; -template > class SimplicialLLT; -template > class SimplicialLDLT; -template > class SimplicialCholesky; +template > class SimplicialLLT; +template > class SimplicialLDLT; +template > class SimplicialCholesky; namespace internal { @@ -235,8 +235,8 @@ template struct traits CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef TriangularView MatrixL; typedef TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } @@ -249,8 +249,8 @@ template struct traits CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef TriangularView MatrixL; typedef TriangularView MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } @@ -293,7 +293,7 @@ public: typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; @@ -382,8 +382,8 @@ public: typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; typedef typename Traits::MatrixL MatrixL; @@ -464,8 +464,8 @@ public: typedef SimplicialCholeskyBase Base; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix CholMatrixType; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix CholMatrixType; typedef Matrix VectorType; typedef internal::traits Traits; typedef internal::traits > LDLTTraits; diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h index 7aaf702be..302323ab4 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h @@ -57,7 +57,7 @@ void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrix ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); - for(Index k = 0; k < size; ++k) + for(StorageIndex k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ @@ -82,7 +82,7 @@ void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrix } /* construct Lp index array from m_nonZerosPerCol column counts */ - Index* Lp = m_matrix.outerIndexPtr(); + StorageIndex* Lp = m_matrix.outerIndexPtr(); Lp[0] = 0; for(Index k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 
0 : 1); @@ -104,35 +104,35 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(ap.rows()==ap.cols()); - const Index size = ap.rows(); + const StorageIndex size = ap.rows(); eigen_assert(m_parent.size()==size); eigen_assert(m_nonZerosPerCol.size()==size); - const Index* Lp = m_matrix.outerIndexPtr(); - Index* Li = m_matrix.innerIndexPtr(); + const StorageIndex* Lp = m_matrix.outerIndexPtr(); + StorageIndex* Li = m_matrix.innerIndexPtr(); Scalar* Lx = m_matrix.valuePtr(); ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0); - ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); + ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0); + ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); bool ok = true; m_diag.resize(DoLDLT ? size : 0); - for(Index k = 0; k < size; ++k) + for(StorageIndex k = 0; k < size; ++k) { // compute nonzero pattern of kth row of L, in topological order y[k] = 0.0; // Y(0:k) is now all zero - Index top = size; // stack for pattern is empty + StorageIndex top = size; // stack for pattern is empty tags[k] = k; // mark node k as visited m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L for(typename MatrixType::InnerIterator it(ap,k); it; ++it) { - Index i = it.index(); + StorageIndex i = it.index(); if(i <= k) { y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ - Index len; + StorageIndex len; for(len = 0; tags[i] != k; i = m_parent[i]) { pattern[len++] = i; /* L(k,i) is nonzero */ @@ -149,7 +149,7 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& y[k] = 0.0; for(; top < size; ++top) { - Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ + StorageIndex i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; @@ -160,8 +160,8 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& else yi = l_ki = yi / Lx[Lp[i]]; - Index p2 = Lp[i] + m_nonZerosPerCol[i]; - Index p; + StorageIndex p2 = Lp[i] + m_nonZerosPerCol[i]; + StorageIndex p; for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) y[Li[p]] -= numext::conj(Lx[p]) * yi; d -= numext::real(l_ki * numext::conj(yi)); @@ -180,7 +180,7 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& } else { - Index p = Lp[k] + m_nonZerosPerCol[k]++; + StorageIndex p = Lp[k] + m_nonZerosPerCol[k]++; Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ if(d <= RealScalar(0)) { ok = false; /* failure, matrix is not positive definite */ diff --git a/Eigen/src/SparseCore/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h index 5c9c3101e..759fc08ff 100644 --- a/Eigen/src/SparseCore/AmbiVector.h +++ b/Eigen/src/SparseCore/AmbiVector.h @@ -19,12 +19,12 @@ namespace internal { * * See BasicSparseLLT and SparseProduct for usage examples. 
*/ -template +template class AmbiVector { public: typedef _Scalar Scalar; - typedef _Index Index; + typedef _StorageIndex StorageIndex; typedef typename NumTraits::Real RealScalar; explicit AmbiVector(Index size) @@ -36,10 +36,10 @@ class AmbiVector void init(double estimatedDensity); void init(int mode); - Index nonZeros() const; + StorageIndex nonZeros() const; /** Specifies a sub-vector to work on */ - void setBounds(Index start, Index end) { m_start = start; m_end = end; } + void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); } void setZero(); @@ -55,12 +55,16 @@ class AmbiVector { if (m_allocatedSize < size) reallocate(size); - m_size = size; + m_size = convert_index(size); } - Index size() const { return m_size; } + StorageIndex size() const { return m_size; } protected: + StorageIndex convert_index(Index idx) + { + return internal::convert_index(idx); + } void reallocate(Index size) { @@ -70,15 +74,15 @@ class AmbiVector if (size<1000) { Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar); - m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); + m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl)); m_buffer = new Scalar[allocSize]; } else { - m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl); + m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl)); m_buffer = new Scalar[size]; } - m_size = size; + m_size = convert_index(size); m_start = 0; m_end = m_size; } @@ -86,7 +90,7 @@ class AmbiVector void reallocateSparse() { Index copyElements = m_allocatedElements; - m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size); + m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size); Index allocSize = m_allocatedElements * sizeof(ListEl); allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0); Scalar* newBuffer = new Scalar[allocSize]; @@ -99,30 +103,30 @@ class AmbiVector // element type of the linked list struct ListEl { - Index next; - Index index; + StorageIndex next; + StorageIndex index; Scalar value; }; // used to store data in both mode Scalar* m_buffer; Scalar m_zero; - Index m_size; - Index m_start; - Index m_end; - Index m_allocatedSize; - Index m_allocatedElements; - Index m_mode; + StorageIndex m_size; + StorageIndex m_start; + StorageIndex m_end; + StorageIndex m_allocatedSize; + StorageIndex m_allocatedElements; + StorageIndex m_mode; // linked list mode - Index m_llStart; - Index m_llCurrent; - Index m_llSize; + StorageIndex m_llStart; + StorageIndex m_llCurrent; + StorageIndex m_llSize; }; /** \returns the number of non zeros in the current sub vector */ -template -_Index AmbiVector<_Scalar,_Index>::nonZeros() const +template +_StorageIndex AmbiVector<_Scalar,_StorageIndex>::nonZeros() const { if (m_mode==IsSparse) return m_llSize; @@ -130,8 +134,8 @@ _Index AmbiVector<_Scalar,_Index>::nonZeros() const return m_end - m_start; } -template -void AmbiVector<_Scalar,_Index>::init(double estimatedDensity) +template +void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity) { if (estimatedDensity>0.1) init(IsDense); @@ -139,8 +143,8 @@ void AmbiVector<_Scalar,_Index>::init(double estimatedDensity) init(IsSparse); } -template -void AmbiVector<_Scalar,_Index>::init(int mode) +template +void AmbiVector<_Scalar,_StorageIndex>::init(int mode) { m_mode = mode; if (m_mode==IsSparse) @@ -155,15 +159,15 @@ void AmbiVector<_Scalar,_Index>::init(int mode) * * Don't worry, this function is extremely cheap. 
*/ -template -void AmbiVector<_Scalar,_Index>::restart() +template +void AmbiVector<_Scalar,_StorageIndex>::restart() { m_llCurrent = m_llStart; } /** Set all coefficients of current subvector to zero */ -template -void AmbiVector<_Scalar,_Index>::setZero() +template +void AmbiVector<_Scalar,_StorageIndex>::setZero() { if (m_mode==IsDense) { @@ -178,8 +182,8 @@ void AmbiVector<_Scalar,_Index>::setZero() } } -template -_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) +template +_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i) { if (m_mode==IsDense) return m_buffer[i]; @@ -195,7 +199,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) m_llCurrent = 0; ++m_llSize; llElements[0].value = Scalar(0); - llElements[0].index = i; + llElements[0].index = convert_index(i); llElements[0].next = -1; return llElements[0].value; } @@ -204,7 +208,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) // this is going to be the new first element of the list ListEl& el = llElements[m_llSize]; el.value = Scalar(0); - el.index = i; + el.index = convert_index(i); el.next = m_llStart; m_llStart = m_llSize; ++m_llSize; @@ -213,7 +217,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) } else { - Index nextel = llElements[m_llCurrent].next; + StorageIndex nextel = llElements[m_llCurrent].next; eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); while (nextel >= 0 && llElements[nextel].index<=i) { @@ -237,7 +241,7 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) // let's insert a new coefficient ListEl& el = llElements[m_llSize]; el.value = Scalar(0); - el.index = i; + el.index = convert_index(i); el.next = llElements[m_llCurrent].next; llElements[m_llCurrent].next = m_llSize; ++m_llSize; @@ -247,8 +251,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i) } } -template -_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i) +template +_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i) { if (m_mode==IsDense) return m_buffer[i]; @@ -275,8 +279,8 @@ _Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i) } /** Iterator over the nonzero coefficients */ -template -class AmbiVector<_Scalar,_Index>::Iterator +template +class AmbiVector<_Scalar,_StorageIndex>::Iterator { public: typedef _Scalar Scalar; @@ -320,7 +324,7 @@ class AmbiVector<_Scalar,_Index>::Iterator } } - Index index() const { return m_cachedIndex; } + StorageIndex index() const { return m_cachedIndex; } Scalar value() const { return m_cachedValue; } operator bool() const { return m_cachedIndex>=0; } @@ -359,9 +363,9 @@ class AmbiVector<_Scalar,_Index>::Iterator protected: const AmbiVector& m_vector; // the target vector - Index m_currentEl; // the current element in sparse/linked-list mode + StorageIndex m_currentEl; // the current element in sparse/linked-list mode RealScalar m_epsilon; // epsilon used to prune zero coefficients - Index m_cachedIndex; // current coordinate + StorageIndex m_cachedIndex; // current coordinate Scalar m_cachedValue; // current value bool m_isDense; // mode of the vector }; diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h index 2741f8292..f98b42760 100644 --- a/Eigen/src/SparseCore/CompressedStorage.h +++ b/Eigen/src/SparseCore/CompressedStorage.h @@ -18,13 +18,13 @@ namespace internal { * Stores a sparse set of values as a list of values and a list of indices. 
* */ -template +template class CompressedStorage { public: typedef _Scalar Scalar; - typedef _Index Index; + typedef _StorageIndex StorageIndex; protected: @@ -92,10 +92,10 @@ class CompressedStorage void append(const Scalar& v, Index i) { - Index id = static_cast(m_size); + Index id = m_size; resize(m_size+1, 1); m_values[id] = v; - m_indices[id] = i; + m_indices[id] = internal::convert_index(i); } inline size_t size() const { return m_size; } @@ -105,17 +105,17 @@ class CompressedStorage inline Scalar& value(size_t i) { return m_values[i]; } inline const Scalar& value(size_t i) const { return m_values[i]; } - inline Index& index(size_t i) { return m_indices[i]; } - inline const Index& index(size_t i) const { return m_indices[i]; } + inline StorageIndex& index(size_t i) { return m_indices[i]; } + inline const StorageIndex& index(size_t i) const { return m_indices[i]; } /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ - inline Index searchLowerIndex(Index key) const + inline StorageIndex searchLowerIndex(Index key) const { return searchLowerIndex(0, m_size, key); } /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ - inline Index searchLowerIndex(size_t start, size_t end, Index key) const + inline StorageIndex searchLowerIndex(size_t start, size_t end, Index key) const { while(end>start) { @@ -125,7 +125,7 @@ class CompressedStorage else end = mid; } - return static_cast(start); + return static_cast(start); } /** \returns the stored value at index \a key @@ -167,7 +167,7 @@ class CompressedStorage { m_allocatedSize = 2*(m_size+1); internal::scoped_array newValues(m_allocatedSize); - internal::scoped_array newIndices(m_allocatedSize); + internal::scoped_array newIndices(m_allocatedSize); // copy first chunk internal::smart_copy(m_values, m_values +id, newValues.ptr()); @@ -188,7 +188,7 @@ class CompressedStorage internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1); } m_size++; - m_indices[id] = key; + m_indices[id] = convert_index(key); m_values[id] = defaultValue; } return m_values[id]; @@ -216,7 +216,7 @@ class CompressedStorage { eigen_internal_assert(size!=m_allocatedSize); internal::scoped_array newValues(size); - internal::scoped_array newIndices(size); + internal::scoped_array newIndices(size); size_t copySize = (std::min)(size, m_size); internal::smart_copy(m_values, m_values+copySize, newValues.ptr()); internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr()); @@ -227,7 +227,7 @@ class CompressedStorage protected: Scalar* m_values; - Index* m_indices; + StorageIndex* m_indices; size_t m_size; size_t m_allocatedSize; diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h index a30522ff7..244f1b50e 100644 --- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h @@ -18,7 +18,6 @@ template static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false) { typedef typename remove_all::type::Scalar Scalar; - typedef typename remove_all::type::Index Index; // make sure to call innerSize/outerSize since we fake the storage order. 
Index rows = lhs.innerSize(); @@ -137,8 +136,8 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; - typedef SparseMatrix ColMajorMatrixAux; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrixAux; typedef typename sparse_eval::type ColMajorMatrix; // If the result is tall and thin (in the extreme case a column vector) @@ -167,7 +166,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix rhsRow = rhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); @@ -180,7 +179,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix lhsRow = lhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); @@ -193,7 +192,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); res = resRow; @@ -208,7 +207,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); res = resCol; @@ -220,7 +219,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol = lhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); @@ -233,7 +232,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix rhsCol = rhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); @@ -246,8 +245,8 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; - typedef SparseMatrix ColMajorMatrix; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; RowMajorMatrix resRow(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); // sort the non zeros: diff --git a/Eigen/src/SparseCore/MappedSparseMatrix.h b/Eigen/src/SparseCore/MappedSparseMatrix.h index 2852c669a..5e4580329 100644 --- a/Eigen/src/SparseCore/MappedSparseMatrix.h +++ b/Eigen/src/SparseCore/MappedSparseMatrix.h @@ -22,14 +22,14 @@ namespace Eigen { * */ namespace internal { -template -struct traits > : traits > +template +struct traits > : traits > {}; } -template +template class MappedSparseMatrix - : public SparseMatrixBase > + : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix) @@ -37,19 +37,19 @@ class MappedSparseMatrix protected: - Index m_outerSize; - Index m_innerSize; - Index m_nnz; - Index* m_outerIndex; - Index* m_innerIndices; + StorageIndex m_outerSize; + StorageIndex m_innerSize; + StorageIndex m_nnz; + StorageIndex* m_outerIndex; + StorageIndex* m_innerIndices; Scalar* m_values; public: - inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } - inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } - inline Index innerSize() const { return m_innerSize; } - inline Index outerSize() const { return m_outerSize; } + inline StorageIndex rows() const { return IsRowMajor ? 
m_outerSize : m_innerSize; } + inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline StorageIndex innerSize() const { return m_innerSize; } + inline StorageIndex outerSize() const { return m_outerSize; } bool isCompressed() const { return true; } @@ -58,11 +58,11 @@ class MappedSparseMatrix inline const Scalar* valuePtr() const { return m_values; } inline Scalar* valuePtr() { return m_values; } - inline const Index* innerIndexPtr() const { return m_innerIndices; } - inline Index* innerIndexPtr() { return m_innerIndices; } + inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; } + inline StorageIndex* innerIndexPtr() { return m_innerIndices; } - inline const Index* outerIndexPtr() const { return m_outerIndex; } - inline Index* outerIndexPtr() { return m_outerIndex; } + inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } + inline StorageIndex* outerIndexPtr() { return m_outerIndex; } //---------------------------------------- inline Scalar coeff(Index row, Index col) const @@ -79,7 +79,7 @@ class MappedSparseMatrix // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) - const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); + const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); const Index id = r-&m_innerIndices[0]; return ((*r==inner) && (id=start && "you probably called coeffRef on a non finalized matrix"); eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient"); - Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); + StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); const Index id = r-&m_innerIndices[0]; eigen_assert((*r==inner) && (id -class MappedSparseMatrix::InnerIterator +template +class MappedSparseMatrix::InnerIterator { public: InnerIterator(const MappedSparseMatrix& mat, Index outer) : m_matrix(mat), - m_outer(outer), + m_outer(convert_index(outer)), m_id(mat.outerIndexPtr()[outer]), m_start(m_id), m_end(mat.outerIndexPtr()[outer+1]) @@ -131,22 +131,22 @@ class MappedSparseMatrix::InnerIterator inline Scalar value() const { return m_matrix.valuePtr()[m_id]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id]); } - inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_matrix.innerIndexPtr()[m_id]; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? 
index() : m_outer; } inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } protected: const MappedSparseMatrix& m_matrix; - const Index m_outer; - Index m_id; - const Index m_start; - const Index m_end; + const StorageIndex m_outer; + StorageIndex m_id; + const StorageIndex m_start; + const StorageIndex m_end; }; -template -class MappedSparseMatrix::ReverseInnerIterator +template +class MappedSparseMatrix::ReverseInnerIterator { public: ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer) @@ -162,18 +162,18 @@ class MappedSparseMatrix::ReverseInnerIterator inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_matrix.valuePtr()[m_id-1]); } - inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_matrix.innerIndexPtr()[m_id-1]; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); } protected: const MappedSparseMatrix& m_matrix; - const Index m_outer; - Index m_id; - const Index m_start; - const Index m_end; + const StorageIndex m_outer; + StorageIndex m_id; + const StorageIndex m_start; + const StorageIndex m_end; }; namespace internal { diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h index 97c079d3f..469c2b188 100644 --- a/Eigen/src/SparseCore/SparseAssign.h +++ b/Eigen/src/SparseCore/SparseAssign.h @@ -71,7 +71,6 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); - typedef typename DstXprType::Index Index; typedef typename DstXprType::Scalar Scalar; typedef typename internal::evaluator::type DstEvaluatorType; typedef typename internal::evaluator::type SrcEvaluatorType; @@ -144,7 +143,6 @@ struct Assignment static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); - typedef typename SrcXprType::Index Index; typename internal::evaluator::type srcEval(src); typename internal::evaluator::type dstEval(dst); @@ -161,7 +159,6 @@ struct Assignment &) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); - typedef typename SrcXprType::Index Index; dst.setZero(); typename internal::evaluator::type srcEval(src); diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h index 9e4da2057..8db4bbb75 100644 --- a/Eigen/src/SparseCore/SparseBlock.h +++ b/Eigen/src/SparseCore/SparseBlock.h @@ -27,39 +27,39 @@ public: EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) inline BlockImpl(const XprType& xpr, Index i) - : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) {} inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) - : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) + : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) {} - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? 
m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } - Index nonZeros() const + StorageIndex nonZeros() const { typedef typename internal::evaluator::type EvaluatorType; EvaluatorType matEval(m_matrix); - Index nnz = 0; + StorageIndex nnz = 0; Index end = m_outerStart + m_outerSize.value(); - for(int j=m_outerStart; j m_outerSize; + StorageIndex m_outerStart; + const internal::variable_if_dynamic m_outerSize; public: EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) @@ -82,15 +82,16 @@ public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) protected: + typedef typename Base::IndexVector IndexVector; enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; public: inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index i) - : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize) {} inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) - : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) + : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) {} template @@ -102,14 +103,14 @@ public: // and/or it is not at the end of the nonzeros of the underlying matrix. // 1 - eval to a temporary to avoid transposition and/or aliasing issues - SparseMatrix tmp(other); + SparseMatrix tmp(other); // 2 - let's check whether there is enough allocated memory - Index nnz = tmp.nonZeros(); - Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block - Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block - Index block_size = end - start; // available room in the current block - Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; + StorageIndex nnz = tmp.nonZeros(); + StorageIndex start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block + StorageIndex end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block + StorageIndex block_size = end - start; // available room in the current block + StorageIndex tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; Index free_size = m_matrix.isCompressed() ? Index(matrix.data().allocatedSize()) + block_size @@ -151,7 +152,7 @@ public: matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros(); // update outer index pointers - Index p = start; + StorageIndex p = start; for(Index k=0; k >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); + return Map(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } const Scalar& lastCoeff() const @@ -207,32 +208,32 @@ public: return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } - EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? 
m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - Index startRow() const { return IsRowMajor ? m_outerStart : 0; } - Index startCol() const { return IsRowMajor ? 0 : m_outerStart; } - Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + StorageIndex startRow() const { return IsRowMajor ? m_outerStart : 0; } + StorageIndex startCol() const { return IsRowMajor ? 0 : m_outerStart; } + StorageIndex blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + StorageIndex blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } protected: typename SparseMatrixType::Nested m_matrix; - Index m_outerStart; - const internal::variable_if_dynamic m_outerSize; + StorageIndex m_outerStart; + const internal::variable_if_dynamic m_outerSize; }; } // namespace internal -template -class BlockImpl,BlockRows,BlockCols,true,Sparse> - : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> { public: - typedef _Index Index; - typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; + typedef _StorageIndex StorageIndex; + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; typedef internal::sparse_matrix_block_impl Base; inline BlockImpl(SparseMatrixType& xpr, Index i) : Base(xpr, i) @@ -245,13 +246,13 @@ public: using Base::operator=; }; -template -class BlockImpl,BlockRows,BlockCols,true,Sparse> - : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public internal::sparse_matrix_block_impl,BlockRows,BlockCols> { public: - typedef _Index Index; - typedef const SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; + typedef _StorageIndex StorageIndex; + typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType; typedef internal::sparse_matrix_block_impl Base; inline BlockImpl(SparseMatrixType& xpr, Index i) : Base(xpr, i) @@ -333,8 +334,8 @@ public: */ inline BlockImpl(const XprType& xpr, Index i) : m_matrix(xpr), - m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), - m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0), + m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0), m_blockRows(BlockRows==1 ? 1 : xpr.rows()), m_blockCols(BlockCols==1 ? 
1 : xpr.cols()) {} @@ -342,11 +343,11 @@ public: /** Dynamic-size constructor */ inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) - : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) + : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols)) {} - inline Index rows() const { return m_blockRows.value(); } - inline Index cols() const { return m_blockCols.value(); } + inline StorageIndex rows() const { return m_blockRows.value(); } + inline StorageIndex cols() const { return m_blockCols.value(); } inline Scalar& coeffRef(Index row, Index col) { @@ -374,10 +375,10 @@ public: } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - Index startRow() const { return m_startRow.value(); } - Index startCol() const { return m_startCol.value(); } - Index blockRows() const { return m_blockRows.value(); } - Index blockCols() const { return m_blockCols.value(); } + StorageIndex startRow() const { return m_startRow.value(); } + StorageIndex startCol() const { return m_startCol.value(); } + StorageIndex blockRows() const { return m_blockRows.value(); } + StorageIndex blockCols() const { return m_blockCols.value(); } protected: friend class internal::GenericSparseBlockInnerIteratorImpl; @@ -386,10 +387,10 @@ public: EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) typename XprType::Nested m_matrix; - const internal::variable_if_dynamic m_startRow; - const internal::variable_if_dynamic m_startCol; - const internal::variable_if_dynamic m_blockRows; - const internal::variable_if_dynamic m_blockCols; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; }; @@ -402,7 +403,7 @@ namespace internal { IsRowMajor = BlockType::IsRowMajor }; typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested; - typedef typename BlockType::Index Index; + typedef typename BlockType::StorageIndex StorageIndex; typedef typename _MatrixTypeNested::InnerIterator Base; const BlockType& m_block; Index m_end; @@ -417,10 +418,10 @@ namespace internal { Base::operator++(); } - inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } - inline Index row() const { return Base::row() - m_block.m_startRow.value(); } - inline Index col() const { return Base::col() - m_block.m_startCol.value(); } + inline StorageIndex index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline StorageIndex outer() const { return Base::outer() - (IsRowMajor ? 
m_block.m_startRow.value() : m_block.m_startCol.value()); } + inline StorageIndex row() const { return Base::row() - m_block.m_startRow.value(); } + inline StorageIndex col() const { return Base::col() - m_block.m_startCol.value(); } inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } }; @@ -434,13 +435,13 @@ namespace internal { IsRowMajor = BlockType::IsRowMajor }; typedef typename BlockType::_MatrixTypeNested _MatrixTypeNested; - typedef typename BlockType::Index Index; + typedef typename BlockType::StorageIndex StorageIndex; typedef typename BlockType::Scalar Scalar; const BlockType& m_block; - Index m_outerPos; - Index m_innerIndex; + StorageIndex m_outerPos; + StorageIndex m_innerIndex; Scalar m_value; - Index m_end; + StorageIndex m_end; public: explicit EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0) @@ -456,10 +457,10 @@ namespace internal { ++(*this); } - inline Index index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline Index outer() const { return 0; } - inline Index row() const { return IsRowMajor ? 0 : index(); } - inline Index col() const { return IsRowMajor ? index() : 0; } + inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline StorageIndex outer() const { return 0; } + inline StorageIndex row() const { return IsRowMajor ? 0 : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : 0; } inline Scalar value() const { return m_value; } @@ -491,7 +492,7 @@ struct unary_evaluator, IteratorBa class OuterVectorInnerIterator; public: typedef Block XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; class ReverseInnerIterator; @@ -538,10 +539,10 @@ public: EvalIterator::operator++(); } - inline Index index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); } - inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); } - inline Index row() const { return EvalIterator::row() - m_block.startRow(); } - inline Index col() const { return EvalIterator::col() - m_block.startCol(); } + inline StorageIndex index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); } + inline StorageIndex outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); } + inline StorageIndex row() const { return EvalIterator::row() - m_block.startRow(); } + inline StorageIndex col() const { return EvalIterator::col() - m_block.startCol(); } inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; } }; @@ -550,10 +551,10 @@ template class unary_evaluator, IteratorBased>::OuterVectorInnerIterator { const unary_evaluator& m_eval; - Index m_outerPos; - Index m_innerIndex; + StorageIndex m_outerPos; + StorageIndex m_innerIndex; Scalar m_value; - Index m_end; + StorageIndex m_end; public: EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer) @@ -568,10 +569,10 @@ public: ++(*this); } - inline Index index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); } - inline Index outer() const { return 0; } - inline Index row() const { return IsRowMajor ? 
0 : index(); } - inline Index col() const { return IsRowMajor ? index() : 0; } + inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); } + inline StorageIndex outer() const { return 0; } + inline StorageIndex row() const { return IsRowMajor ? 0 : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : 0; } inline Scalar value() const { return m_value; } diff --git a/Eigen/src/SparseCore/SparseColEtree.h b/Eigen/src/SparseCore/SparseColEtree.h index f8745f461..88c799068 100644 --- a/Eigen/src/SparseCore/SparseColEtree.h +++ b/Eigen/src/SparseCore/SparseColEtree.h @@ -58,10 +58,10 @@ Index etree_find (Index i, IndexVector& pp) * \param perm The permutation to apply to the column of \b mat */ template -int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0) +int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0) { - typedef typename MatrixType::Index Index; - Index nc = mat.cols(); // Number of columns + typedef typename MatrixType::StorageIndex Index; + Index nc = mat.cols(); // Number of columns Index m = mat.rows(); Index diagSize = (std::min)(nc,m); IndexVector root(nc); // root of subtree of etree @@ -70,7 +70,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl pp.setZero(); // Initialize disjoint sets parent.resize(mat.cols()); //Compute first nonzero column in each row - Index row,col; + Index row,col; firstRowElt.resize(m); firstRowElt.setConstant(nc); firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); @@ -89,7 +89,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl except use (firstRowElt[r],c) in place of an edge (r,c) of A. Thus each row clique in A'*A is replaced by a star centered at its first vertex, which has the same fill. */ - Index rset, cset, rroot; + Index rset, cset, rroot; for (col = 0; col < nc; col++) { found_diag = col>=m; diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h index 94ca9b1a4..afb09ad91 100644 --- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h @@ -56,7 +56,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; public: @@ -97,9 +97,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_value; } - EIGEN_STRONG_INLINE Index index() const { return m_id; } - EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } - EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; } + EIGEN_STRONG_INLINE StorageIndex row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return Lhs::IsRowMajor ? 
index() : m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } @@ -108,7 +108,7 @@ public: RhsIterator m_rhsIter; const BinaryOp& m_functor; Scalar m_value; - Index m_id; + StorageIndex m_id; }; @@ -145,7 +145,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; public: @@ -177,9 +177,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } - EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } @@ -223,7 +223,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; public: @@ -241,9 +241,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } - EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_rhsIter.row(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_rhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } @@ -288,7 +288,7 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; public: @@ -307,9 +307,9 @@ public: { return m_functor(m_lhsIter.value(), m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } - EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } @@ -317,7 +317,7 @@ public: LhsIterator m_lhsIter; const RhsEvaluator &m_rhsEval; const BinaryOp& m_functor; - const Index m_outer; + const StorageIndex m_outer; }; diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h index 32b7bc949..63d8f329c 100644 --- a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h @@ -47,7 +47,7 @@ class unary_evaluator, IteratorBased>::InnerIterat typedef typename unary_evaluator, IteratorBased>::EvalIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, 
typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) {} @@ -122,7 +122,7 @@ class unary_evaluator, IteratorBased>::InnerItera typedef typename unary_evaluator, IteratorBased>::EvalIterator Base; public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) {} diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h index 5aea11425..f6e6fab29 100644 --- a/Eigen/src/SparseCore/SparseDenseProduct.h +++ b/Eigen/src/SparseCore/SparseDenseProduct.h @@ -29,7 +29,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -62,7 +62,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { @@ -86,7 +86,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -106,7 +106,7 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -193,7 +193,7 @@ protected: typedef typename evaluator::type RhsEval; typedef typename evaluator::InnerIterator LhsIterator; typedef typename ProdXprType::Scalar Scalar; - typedef typename ProdXprType::Index Index; + typedef typename ProdXprType::StorageIndex StorageIndex; public: enum { @@ -211,9 +211,9 @@ public: m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits::StorageKind() )) {} - EIGEN_STRONG_INLINE Index outer() const { return m_outer; } - EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } - EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; } + EIGEN_STRONG_INLINE StorageIndex outer() const { return m_outer; } + EIGEN_STRONG_INLINE StorageIndex row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } + EIGEN_STRONG_INLINE StorageIndex col() const { return NeedToTranspose ? 
LhsIterator::index() : m_outer; } EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; } EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); } diff --git a/Eigen/src/SparseCore/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h index be935e9f3..19a79edad 100644 --- a/Eigen/src/SparseCore/SparseDiagonalProduct.h +++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h @@ -66,7 +66,7 @@ struct sparse_diagonal_product_evaluator::InnerIterator SparseXprInnerIterator; typedef typename SparseXprType::Scalar Scalar; - typedef typename SparseXprType::Index Index; + typedef typename SparseXprType::StorageIndex StorageIndex; public: class InnerIterator : public SparseXprInnerIterator @@ -96,7 +96,7 @@ template struct sparse_diagonal_product_evaluator { typedef typename SparseXprType::Scalar Scalar; - typedef typename SparseXprType::Index Index; + typedef typename SparseXprType::StorageIndex StorageIndex; typedef CwiseBinaryOp, const typename SparseXprType::ConstInnerVectorReturnType, @@ -111,14 +111,14 @@ struct sparse_diagonal_product_evaluator(outer)) {} inline Scalar value() const { return m_cwiseIter.value(); } - inline Index index() const { return m_cwiseIter.index(); } - inline Index outer() const { return m_outer; } - inline Index col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; } - inline Index row() const { return SparseXprType::IsRowMajor ? m_outer : m_cwiseIter.index(); } + inline StorageIndex index() const { return convert_index(m_cwiseIter.index()); } + inline StorageIndex outer() const { return m_outer; } + inline StorageIndex col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; } + inline StorageIndex row() const { return SparseXprType::IsRowMajor ? 
m_outer : m_cwiseIter.index(); } EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_cwiseIter; return *this; } @@ -127,7 +127,7 @@ struct sparse_diagonal_product_evaluator struct traits > { typedef _Scalar Scalar; - typedef _Index Index; + typedef _Index StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -65,7 +65,7 @@ struct traits, DiagIndex> > typedef _Scalar Scalar; typedef Dense StorageKind; - typedef _Index Index; + typedef _Index StorageIndex; typedef MatrixXpr XprKind; enum { @@ -103,23 +103,24 @@ class SparseMatrix using Base::IsRowMajor; - typedef internal::CompressedStorage Storage; + typedef internal::CompressedStorage Storage; enum { Options = _Options }; + typedef typename Base::IndexVector IndexVector; + typedef typename Base::ScalarVector ScalarVector; protected: - typedef SparseMatrix TransposedSparseMatrix; - Index m_outerSize; - Index m_innerSize; - Index* m_outerIndex; - Index* m_innerNonZeros; // optional, if null then the data is compressed + StorageIndex m_outerSize; + StorageIndex m_innerSize; + StorageIndex* m_outerIndex; + StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed Storage m_data; - Eigen::Map > innerNonZeros() { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } - const Eigen::Map > innerNonZeros() const { return Eigen::Map >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + Eigen::Map innerNonZeros() { return Eigen::Map(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } + const Eigen::Map innerNonZeros() const { return Eigen::Map(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); } public: @@ -127,14 +128,14 @@ class SparseMatrix inline bool isCompressed() const { return m_innerNonZeros==0; } /** \returns the number of rows of the matrix */ - inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + inline StorageIndex rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } /** \returns the number of columns of the matrix */ - inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */ - inline Index innerSize() const { return m_innerSize; } + inline StorageIndex innerSize() const { return m_innerSize; } /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */ - inline Index outerSize() const { return m_outerSize; } + inline StorageIndex outerSize() const { return m_outerSize; } /** \returns a const pointer to the array of values. * This function is aimed at interoperability with other libraries. @@ -148,29 +149,29 @@ class SparseMatrix /** \returns a const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ - inline const Index* innerIndexPtr() const { return &m_data.index(0); } + inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); } /** \returns a non-const pointer to the array of inner indices. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), outerIndexPtr() */ - inline Index* innerIndexPtr() { return &m_data.index(0); } + inline StorageIndex* innerIndexPtr() { return &m_data.index(0); } /** \returns a const pointer to the array of the starting positions of the inner vectors. 
* This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ - inline const Index* outerIndexPtr() const { return m_outerIndex; } + inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } /** \returns a non-const pointer to the array of the starting positions of the inner vectors. * This function is aimed at interoperability with other libraries. * \sa valuePtr(), innerIndexPtr() */ - inline Index* outerIndexPtr() { return m_outerIndex; } + inline StorageIndex* outerIndexPtr() { return m_outerIndex; } /** \returns a const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ - inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; } + inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; } /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. * This function is aimed at interoperability with other libraries. * \warning it returns the null pointer 0 in compressed mode */ - inline Index* innerNonZeroPtr() { return m_innerNonZeros; } + inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; } /** \internal */ inline Storage& data() { return m_data; } @@ -234,7 +235,7 @@ class SparseMatrix if(isCompressed()) { - reserve(Matrix::Constant(outerSize(), 2)); + reserve(IndexVector::Constant(outerSize(), 2)); } return insertUncompressed(row,col); } @@ -248,17 +249,17 @@ class SparseMatrix inline void setZero() { m_data.clear(); - memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); if(m_innerNonZeros) - memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index)); + memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); } /** \returns the number of non zero coefficients */ - inline Index nonZeros() const + inline StorageIndex nonZeros() const { if(m_innerNonZeros) return innerNonZeros().sum(); - return static_cast(m_data.size()); + return convert_index(Index(m_data.size())); } /** Preallocates \a reserveSize non zeros. @@ -302,13 +303,13 @@ class SparseMatrix { std::size_t totalReserveSize = 0; // turn the matrix into non-compressed mode - m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(Index))); + m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(StorageIndex))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); // temporarily use m_innerSizes to hold the new starting points. 
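For context, a minimal caller-side sketch of the per-inner-vector reserve() path touched in this hunk, assuming the post-patch API with a column-major SparseMatrix whose StorageIndex is int; the matrix name, sizes and fill pattern below are illustrative only, not part of the patch:

    // Minimal sketch (assumption: post-patch reserve()/insert() API, int StorageIndex).
    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double, Eigen::ColMajor, int> A(1000, 1000);
      // One reserve size per inner vector (per column here); kept internally as StorageIndex.
      Eigen::VectorXi reserveSizes = Eigen::VectorXi::Constant(A.outerSize(), 6);
      A.reserve(reserveSizes);        // switches to uncompressed mode and preallocates room
      for (int j = 0; j < A.outerSize(); ++j)
        A.insert(j, j) = 1.0;         // cheap inserts: space was already reserved
      A.makeCompressed();             // back to compressed (CSC) storage
      return 0;
    }
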
- Index* newOuterIndex = m_innerNonZeros; + StorageIndex* newOuterIndex = m_innerNonZeros; - Index count = 0; + StorageIndex count = 0; for(Index j=0; j=0; --j) { - Index innerNNZ = previousOuterIndex - m_outerIndex[j]; + StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); @@ -335,15 +336,15 @@ class SparseMatrix } else { - Index* newOuterIndex = static_cast(std::malloc((m_outerSize+1)*sizeof(Index))); + StorageIndex* newOuterIndex = static_cast(std::malloc((m_outerSize+1)*sizeof(StorageIndex))); if (!newOuterIndex) internal::throw_std_bad_alloc(); - Index count = 0; + StorageIndex count = 0; for(Index j=0; j(reserveSizes[j], alreadyReserved); + StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j]; + StorageIndex toReserve = std::max(reserveSizes[j], alreadyReserved); count += toReserve + m_innerNonZeros[j]; } newOuterIndex[m_outerSize] = count; @@ -354,7 +355,7 @@ class SparseMatrix Index offset = newOuterIndex[j] - m_outerIndex[j]; if(offset>0) { - Index innerNNZ = m_innerNonZeros[j]; + StorageIndex innerNNZ = m_innerNonZeros[j]; for(Index i=innerNNZ-1; i>=0; --i) { m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); @@ -425,7 +426,7 @@ class SparseMatrix { if(isCompressed()) { - Index size = static_cast(m_data.size()); + StorageIndex size = internal::convert_index(Index(m_data.size())); Index i = m_outerSize; // find the last filled column while (i>=0 && m_outerIndex[i]==0) @@ -490,7 +491,7 @@ class SparseMatrix { if(m_innerNonZeros != 0) return; - m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(Index))); + m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(StorageIndex))); for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; @@ -517,7 +518,7 @@ class SparseMatrix // TODO also implement a unit test makeCompressed(); - Index k = 0; + StorageIndex k = 0; for(Index j=0; jcols() : rows - this->rows(); Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); - Index newInnerSize = IsRowMajor ? cols : rows; + StorageIndex newInnerSize = convert_index(IsRowMajor ? 
cols : rows); // Deals with inner non zeros if (m_innerNonZeros) { // Resize m_innerNonZeros - Index *newInnerNonZeros = static_cast(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index))); + StorageIndex *newInnerNonZeros = static_cast(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex))); if (!newInnerNonZeros) internal::throw_std_bad_alloc(); m_innerNonZeros = newInnerNonZeros; @@ -566,7 +567,7 @@ class SparseMatrix else if (innerChange < 0) { // Inner size decreased: allocate a new m_innerNonZeros - m_innerNonZeros = static_cast(std::malloc((m_outerSize+outerChange+1) * sizeof(Index))); + m_innerNonZeros = static_cast(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); for(Index i = 0; i < m_outerSize; i++) m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; @@ -577,8 +578,8 @@ class SparseMatrix { for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) { - Index &n = m_innerNonZeros[i]; - Index start = m_outerIndex[i]; + StorageIndex &n = m_innerNonZeros[i]; + StorageIndex start = m_outerIndex[i]; while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; } } @@ -589,12 +590,12 @@ class SparseMatrix if (outerChange == 0) return; - Index *newOuterIndex = static_cast(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index))); + StorageIndex *newOuterIndex = static_cast(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex))); if (!newOuterIndex) internal::throw_std_bad_alloc(); m_outerIndex = newOuterIndex; if (outerChange > 0) { - Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; + StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; for(Index i=m_outerSize; i(std::malloc((outerSize + 1) * sizeof(Index))); + m_outerIndex = static_cast(std::malloc((outerSize + 1) * sizeof(StorageIndex))); if (!m_outerIndex) internal::throw_std_bad_alloc(); m_outerSize = outerSize; @@ -622,7 +623,7 @@ class SparseMatrix std::free(m_innerNonZeros); m_innerNonZeros = 0; } - memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); + memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); } /** \internal @@ -715,9 +716,9 @@ class SparseMatrix { eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); this->m_data.resize(rows()); - Eigen::Map >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); - Eigen::Map >(&this->m_data.value(0), rows()).setOnes(); - Eigen::Map >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); + Eigen::Map(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); + Eigen::Map(&this->m_data.value(0), rows()).setOnes(); + Eigen::Map(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); } inline SparseMatrix& operator=(const SparseMatrix& other) { @@ -808,9 +809,7 @@ protected: template void initAssignment(const Other& other) { - eigen_assert( other.rows() == typename Other::Index(Index(other.rows())) - && other.cols() == typename Other::Index(Index(other.cols())) ); - resize(Index(other.rows()), Index(other.cols())); + resize(other.rows(), other.cols()); if(m_innerNonZeros) { std::free(m_innerNonZeros); @@ -826,15 +825,15 @@ protected: * A vector object that is equal to 0 everywhere but v at the position i */ class SingletonVector { - Index m_index; - Index m_value; + StorageIndex m_index; + StorageIndex m_value; public: - typedef Index value_type; + typedef StorageIndex value_type; SingletonVector(Index i, Index v) - : m_index(i), m_value(v) + : 
m_index(convert_index(i)), m_value(convert_index(v)) {} - Index operator[](Index i) const { return i==m_index ? m_value : 0; } + StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; } }; /** \internal @@ -853,14 +852,14 @@ public: eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; - m_data.index(p) = inner; + m_data.index(p) = convert_index(inner); return (m_data.value(p) = 0); } private: static void check_template_parameters() { - EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } @@ -880,7 +879,7 @@ class SparseMatrix::InnerIterator { public: InnerIterator(const SparseMatrix& mat, Index outer) - : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]) + : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(convert_index(outer)), m_id(mat.m_outerIndex[outer]) { if(mat.isCompressed()) m_end = mat.m_outerIndex[outer+1]; @@ -893,19 +892,19 @@ class SparseMatrix::InnerIterator inline const Scalar& value() const { return m_values[m_id]; } inline Scalar& valueRef() { return const_cast(m_values[m_id]); } - inline Index index() const { return m_indices[m_id]; } - inline Index outer() const { return m_outer; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_indices[m_id]; } + inline StorageIndex outer() const { return m_outer; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } inline operator bool() const { return (m_id < m_end); } protected: const Scalar* m_values; - const Index* m_indices; - const Index m_outer; - Index m_id; - Index m_end; + const StorageIndex* m_indices; + const StorageIndex m_outer; + StorageIndex m_id; + StorageIndex m_end; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix A; @@ -931,19 +930,19 @@ class SparseMatrix::ReverseInnerIterator inline const Scalar& value() const { return m_values[m_id-1]; } inline Scalar& valueRef() { return const_cast(m_values[m_id-1]); } - inline Index index() const { return m_indices[m_id-1]; } - inline Index outer() const { return m_outer; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + inline StorageIndex index() const { return m_indices[m_id-1]; } + inline StorageIndex outer() const { return m_outer; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? 
index() : m_outer; } inline operator bool() const { return (m_id > m_start); } protected: const Scalar* m_values; - const Index* m_indices; - const Index m_outer; - Index m_id; - const Index m_start; + const StorageIndex* m_indices; + const StorageIndex m_outer; + StorageIndex m_id; + const StorageIndex m_start; }; namespace internal { @@ -954,13 +953,13 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa EIGEN_UNUSED_VARIABLE(Options); enum { IsRowMajor = SparseMatrixType::IsRowMajor }; typedef typename SparseMatrixType::Scalar Scalar; - typedef typename SparseMatrixType::Index Index; - SparseMatrix trMat(mat.rows(),mat.cols()); + typedef typename SparseMatrixType::StorageIndex StorageIndex; + SparseMatrix trMat(mat.rows(),mat.cols()); if(begin!=end) { // pass 1: count the nnz per inner-vector - Matrix wi(trMat.outerSize()); + typename SparseMatrixType::IndexVector wi(trMat.outerSize()); wi.setZero(); for(InputIterator it(begin); it!=end; ++it) { @@ -1034,13 +1033,13 @@ void SparseMatrix::sumupDuplicates() { eigen_assert(!isCompressed()); // TODO, in practice we should be able to use m_innerNonZeros for that task - Matrix wi(innerSize()); + IndexVector wi(innerSize()); wi.fill(-1); - Index count = 0; + StorageIndex count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers for(Index j=0; j& SparseMatrix > (dest.m_outerIndex,dest.outerSize()).setZero(); + Eigen::Map (dest.m_outerIndex,dest.outerSize()).setZero(); // pass 1 // FIXME the above copy could be merged with that pass @@ -1098,8 +1097,8 @@ EIGEN_DONT_INLINE SparseMatrix& SparseMatrix positions(dest.outerSize()); + StorageIndex count = 0; + IndexVector positions(dest.outerSize()); for (Index j=0; j& SparseMatrix::Scalar& Sparse { eigen_assert(!isCompressed()); - const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? col : row; + const StorageIndex outer = convert_index(IsRowMajor ? row : col); + const StorageIndex inner = convert_index(IsRowMajor ? 
col : row); Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; - Index innerNNZ = m_innerNonZeros[outer]; + StorageIndex innerNNZ = m_innerNonZeros[outer]; if(innerNNZ>=room) { // this inner vector is full, we need to reallocate the whole buffer :( - reserve(SingletonVector(outer,std::max(2,innerNNZ))); + reserve(SingletonVector(outer,std::max(2,innerNNZ))); } Index startId = m_outerIndex[outer]; @@ -1180,7 +1179,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse // we start a new inner vector while (previousOuter>=0 && m_outerIndex[previousOuter]==0) { - m_outerIndex[previousOuter] = static_cast(m_data.size()); + m_outerIndex[previousOuter] = convert_index(m_data.size()); --previousOuter; } m_outerIndex[outer+1] = m_outerIndex[outer]; @@ -1280,7 +1279,6 @@ struct evaluator > : evaluator_base > { typedef _Scalar Scalar; - typedef _Index Index; typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType; typedef typename SparseMatrixType::InnerIterator InnerIterator; typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator; diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h index 04baabe4f..c55a6a930 100644 --- a/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/Eigen/src/SparseCore/SparseMatrixBase.h @@ -30,13 +30,15 @@ template class SparseMatrixBase : public EigenBase typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::Index Index; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits::type >::type PacketReturnType; typedef SparseMatrixBase StorageBaseType; typedef EigenBase Base; + typedef Matrix IndexVector; + typedef Matrix ScalarVector; template Derived& operator=(const EigenBase &other); @@ -99,7 +101,7 @@ template class SparseMatrixBase : public EigenBase typedef typename internal::add_const >::type ConstTransposeReturnType; // FIXME storage order do not match evaluator storage order - typedef SparseMatrix PlainObject; + typedef SparseMatrix PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is the "real scalar" type; if the \a Scalar type is already real numbers @@ -142,15 +144,15 @@ template class SparseMatrixBase : public EigenBase #undef EIGEN_CURRENT_STORAGE_BASE_CLASS /** \returns the number of rows. \sa cols() */ - inline Index rows() const { return derived().rows(); } + inline StorageIndex rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows() */ - inline Index cols() const { return derived().cols(); } + inline StorageIndex cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(). */ - inline Index size() const { return rows() * cols(); } + inline StorageIndex size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ - inline Index nonZeros() const { return derived().nonZeros(); } + inline StorageIndex nonZeros() const { return derived().nonZeros(); } /** \returns true if either the number of rows or the number of columns is equal to 1. 
* In other words, this function returns * \code rows()==1 || cols()==1 \endcode @@ -158,10 +160,10 @@ template class SparseMatrixBase : public EigenBase inline bool isVector() const { return rows()==1 || cols()==1; } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ - Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } + StorageIndex outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ - Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } + StorageIndex innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } @@ -227,8 +229,8 @@ template class SparseMatrixBase : public EigenBase } else { - SparseMatrix trans = m; - s << static_cast >&>(trans); + SparseMatrix trans = m; + s << static_cast >&>(trans); } } return s; @@ -288,7 +290,7 @@ template class SparseMatrixBase : public EigenBase { return Product(lhs.derived(), rhs.derived()); } /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ - SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const + SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct(derived(), perm); } @@ -352,6 +354,10 @@ template class SparseMatrixBase : public EigenBase protected: bool m_isRValue; + + static inline StorageIndex convert_index(const Index idx) { + return internal::convert_index(idx); + } }; } // end namespace Eigen diff --git a/Eigen/src/SparseCore/SparsePermutation.h b/Eigen/src/SparseCore/SparsePermutation.h index 21411f232..80e5c5fef 100644 --- a/Eigen/src/SparseCore/SparsePermutation.h +++ b/Eigen/src/SparseCore/SparsePermutation.h @@ -21,15 +21,15 @@ struct traits::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; - typedef typename MatrixTypeNestedCleaned::Index Index; + typedef typename MatrixTypeNestedCleaned::StorageIndex StorageIndex; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor, MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight }; typedef typename internal::conditional, - SparseMatrix >::type ReturnType; + SparseMatrix, + SparseMatrix >::type ReturnType; }; template @@ -38,7 +38,7 @@ struct permut_sparsematrix_product_retval { typedef typename remove_all::type MatrixTypeNestedCleaned; typedef typename MatrixTypeNestedCleaned::Scalar Scalar; - typedef typename MatrixTypeNestedCleaned::Index Index; + typedef typename MatrixTypeNestedCleaned::StorageIndex StorageIndex; enum { SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? 
RowMajor : ColMajor, @@ -56,8 +56,8 @@ struct permut_sparsematrix_product_retval { if(MoveOuter) { - SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); - Matrix sizes(m_matrix.outerSize()); + SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); + Matrix sizes(m_matrix.outerSize()); for(Index j=0; j tmp(m_matrix.rows(), m_matrix.cols()); - Matrix sizes(tmp.outerSize()); + SparseMatrix tmp(m_matrix.rows(), m_matrix.cols()); + Matrix sizes(tmp.outerSize()); sizes.setZero(); - PermutationMatrix perm; + PermutationMatrix perm; if((Side==OnTheLeft) ^ Transposed) perm = m_permutation; else diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h index 5da7d2bef..e13f98144 100644 --- a/Eigen/src/SparseCore/SparseSelfAdjointView.h +++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h @@ -33,10 +33,10 @@ struct traits > : traits { }; template -void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm = 0); template -void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm = 0); +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm = 0); } @@ -48,8 +48,8 @@ template class SparseSelfAdjointView enum { Mode = _Mode }; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; - typedef Matrix VectorI; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; @@ -58,8 +58,8 @@ template class SparseSelfAdjointView eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); } - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } /** \internal \returns a reference to the nested matrix */ const _MatrixTypeNested& matrix() const { return m_matrix; } @@ -117,22 +117,22 @@ template class SparseSelfAdjointView SparseSelfAdjointView& rankUpdate(const SparseMatrixBase& u, const Scalar& alpha = Scalar(1)); /** \internal triggered by sparse_matrix = SparseSelfadjointView; */ - template void evalTo(SparseMatrix& _dest) const + template void evalTo(SparseMatrix& _dest) const { internal::permute_symm_to_fullsymm(m_matrix, _dest); } - template void evalTo(DynamicSparseMatrix& _dest) const + template void evalTo(DynamicSparseMatrix& _dest) const { // TODO directly evaluate into _dest; - SparseMatrix tmp(_dest.rows(),_dest.cols()); + SparseMatrix tmp(_dest.rows(),_dest.cols()); internal::permute_symm_to_fullsymm(m_matrix, tmp); _dest = tmp; } /** \returns an expression of P H P^-1 */ // TODO implement twists in a more evaluator friendly fashion - SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix& perm) const + SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix& perm) const { return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm); } @@ -215,7 +215,6 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons typedef typename evaluator::type LhsEval; typedef typename 
evaluator::InnerIterator LhsIterator; - typedef typename SparseLhsType::Index Index; typedef typename SparseLhsType::Scalar LhsScalar; enum { @@ -302,7 +301,7 @@ struct generic_product_impl @@ -353,12 +352,12 @@ protected: namespace internal { template -void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) +void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm) { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::Scalar Scalar; - typedef SparseMatrix Dest; - typedef Matrix VectorI; + typedef SparseMatrix Dest; + typedef Matrix VectorI; Dest& dest(_dest.derived()); enum { @@ -401,16 +400,16 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix(it.index()); Index r = it.row(); Index c = it.col(); - Index jp = perm ? perm[j] : j; - Index ip = perm ? perm[i] : i; + StorageIndex jp = perm ? perm[j] : j; + StorageIndex ip = perm ? perm[i] : i; if(Mode==(Upper|Lower)) { @@ -440,12 +439,12 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix -void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::Index* perm) +void permute_symm_to_symm(const MatrixType& mat, SparseMatrix& _dest, const typename MatrixType::StorageIndex* perm) { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::Scalar Scalar; - SparseMatrix& dest(_dest.derived()); - typedef Matrix VectorI; + SparseMatrix& dest(_dest.derived()); + typedef Matrix VectorI; enum { SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor, StorageOrderMatch = int(SrcOrder) == int(DstOrder), @@ -453,20 +452,20 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrixj)) continue; - Index ip = perm ? perm[i] : i; + StorageIndex ip = perm ? perm[i] : i; count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; } } @@ -477,17 +476,17 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrixj)) continue; - Index jp = perm ? perm[j] : j; - Index ip = perm? perm[i] : i; + StorageIndex jp = perm ? perm[j] : j; + StorageIndex ip = perm? perm[i] : i; Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++; dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? 
(std::max)(ip,jp) : (std::min)(ip,jp); @@ -519,11 +518,11 @@ class SparseSymmetricPermutationProduct { public: typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; protected: - typedef PermutationMatrix Perm; + typedef PermutationMatrix Perm; public: - typedef Matrix VectorI; + typedef Matrix VectorI; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all::type _MatrixTypeNested; @@ -531,8 +530,8 @@ class SparseSymmetricPermutationProduct : m_matrix(mat), m_perm(perm) {} - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } template void evalTo(SparseMatrix& _dest) const diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index f291f8cef..1384fbbff 100644 --- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -22,16 +22,16 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res); typedef typename remove_all::type::Scalar Scalar; - typedef typename remove_all::type::Index Index; + typedef typename remove_all::type::StorageIndex StorageIndex; // make sure to call innerSize/outerSize since we fake the storage order. - Index rows = lhs.innerSize(); - Index cols = rhs.outerSize(); + StorageIndex rows = lhs.innerSize(); + StorageIndex cols = rhs.outerSize(); //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer - AmbiVector tempVector(rows); + AmbiVector tempVector(rows); // estimate the number of non zero entries // given a rhs column containing Y non zeros, we assume that the respective Y columns @@ -39,7 +39,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // the product of a rhs column with the lhs is X+Y where X is the average number of non zero // per column of the lhs. 
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) - Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); + StorageIndex estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); // mimics a resizeByInnerOuter: if(ResultType::IsRowMajor) @@ -70,7 +70,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r } } res.startVec(j); - for (typename AmbiVector::Iterator it(tempVector,tolerance); it; ++it) + for (typename AmbiVector::Iterator it(tempVector,tolerance); it; ++it) res.insertBackByOuterInner(j,it.index()) = it.value(); } res.finalize(); @@ -103,7 +103,7 @@ struct sparse_sparse_product_with_pruning_selector SparseTemporaryType; + typedef SparseMatrix SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res = _res; @@ -129,8 +129,8 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrixLhs; - typedef SparseMatrix ColMajorMatrixRhs; + typedef SparseMatrix ColMajorMatrixLhs; + typedef SparseMatrix ColMajorMatrixRhs; ColMajorMatrixLhs colLhs(lhs); ColMajorMatrixRhs colRhs(rhs); internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); @@ -149,7 +149,7 @@ struct sparse_sparse_product_with_pruning_selector RowMajorMatrixLhs; + typedef SparseMatrix RowMajorMatrixLhs; RowMajorMatrixLhs rowLhs(lhs); sparse_sparse_product_with_pruning_selector(rowLhs,rhs,res,tolerance); } @@ -161,7 +161,7 @@ struct sparse_sparse_product_with_pruning_selector RowMajorMatrixRhs; + typedef SparseMatrix RowMajorMatrixRhs; RowMajorMatrixRhs rowRhs(rhs); sparse_sparse_product_with_pruning_selector(lhs,rowRhs,res,tolerance); } @@ -173,7 +173,7 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrixRhs; + typedef SparseMatrix ColMajorMatrixRhs; ColMajorMatrixRhs colRhs(rhs); internal::sparse_sparse_product_with_pruning_impl(lhs, colRhs, res, tolerance); } @@ -185,7 +185,7 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrixLhs; + typedef SparseMatrix ColMajorMatrixLhs; ColMajorMatrixLhs colLhs(lhs); internal::sparse_sparse_product_with_pruning_impl(colLhs, rhs, res, tolerance); } diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h index c3d2d1a16..c74af46b3 100644 --- a/Eigen/src/SparseCore/SparseTranspose.h +++ b/Eigen/src/SparseCore/SparseTranspose.h @@ -20,7 +20,7 @@ template class TransposeImpl protected: typedef SparseMatrixBase > Base; public: - inline typename MatrixType::Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); } + inline typename MatrixType::StorageIndex nonZeros() const { return Base::derived().nestedExpression().nonZeros(); } }; namespace internal { @@ -33,28 +33,28 @@ struct unary_evaluator, IteratorBased> typedef typename evaluator::ReverseInnerIterator EvalReverseIterator; public: typedef Transpose XprType; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; class InnerIterator : public EvalIterator { public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : EvalIterator(unaryOp.m_argImpl,outer) {} - Index row() const { return EvalIterator::col(); } - Index col() const { return EvalIterator::row(); } + StorageIndex row() const { return EvalIterator::col(); } + StorageIndex col() const { return EvalIterator::row(); } }; class ReverseInnerIterator : public 
EvalReverseIterator { public: - EIGEN_STRONG_INLINE ReverseInnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer) + EIGEN_STRONG_INLINE ReverseInnerIterator(const unary_evaluator& unaryOp, Index outer) : EvalReverseIterator(unaryOp.m_argImpl,outer) {} - Index row() const { return EvalReverseIterator::col(); } - Index col() const { return EvalReverseIterator::row(); } + StorageIndex row() const { return EvalReverseIterator::col(); } + StorageIndex col() const { return EvalReverseIterator::row(); } }; enum { diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h index b044d6778..15bdbacb5 100644 --- a/Eigen/src/SparseCore/SparseTriangularView.h +++ b/Eigen/src/SparseCore/SparseTriangularView.h @@ -64,7 +64,7 @@ template class TriangularViewImpl::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator { typedef typename MatrixTypeNestedCleaned::InnerIterator Base; - typedef typename TriangularViewType::Index Index; + typedef typename TriangularViewType::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE InnerIterator(const TriangularViewImpl& view, Index outer) @@ -102,9 +102,9 @@ class TriangularViewImpl::InnerIterator : public MatrixT return *this; } - inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } - inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); } - inline Index index() const + inline StorageIndex row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } + inline StorageIndex col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); } + inline StorageIndex index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); @@ -134,7 +134,7 @@ template class TriangularViewImpl::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator { typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base; - typedef typename TriangularViewImpl::Index Index; + typedef typename TriangularViewImpl::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const TriangularViewType& view, Index outer) @@ -150,8 +150,8 @@ class TriangularViewImpl::ReverseInnerIterator : public EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } - inline Index row() const { return Base::row(); } - inline Index col() const { return Base::col(); } + inline StorageIndex row() const { return Base::row(); } + inline StorageIndex col() const { return Base::col(); } EIGEN_STRONG_INLINE operator bool() const { @@ -175,7 +175,7 @@ struct unary_evaluator, IteratorBased> protected: typedef typename XprType::Scalar Scalar; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator EvalIterator; enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit)) @@ -246,9 +246,9 @@ public: } } -// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } -// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); } - inline Index index() const +// inline StorageIndex row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } +// inline StorageIndex col() const { return (ArgType::Flags&RowMajorBit ? 
this->index() : Base::outer()); } + inline StorageIndex index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h index 8de227b88..5714150c2 100644 --- a/Eigen/src/SparseCore/SparseUtil.h +++ b/Eigen/src/SparseCore/SparseUtil.h @@ -43,20 +43,22 @@ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) +// TODO this is mostly the same as EIGEN_GENERIC_PUBLIC_INTERFACE #define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \ typedef BaseClass Base; \ typedef typename Eigen::internal::traits::Scalar Scalar; \ typedef typename Eigen::NumTraits::Real RealScalar; \ typedef typename Eigen::internal::nested::type Nested; \ typedef typename Eigen::internal::traits::StorageKind StorageKind; \ - typedef typename Eigen::internal::traits::Index Index; \ + typedef typename Eigen::internal::traits::StorageIndex StorageIndex; \ enum { RowsAtCompileTime = Eigen::internal::traits::RowsAtCompileTime, \ ColsAtCompileTime = Eigen::internal::traits::ColsAtCompileTime, \ Flags = Eigen::internal::traits::Flags, \ SizeAtCompileTime = Base::SizeAtCompileTime, \ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \ using Base::derived; \ - using Base::const_cast_derived; + using Base::const_cast_derived; \ + using Base::convert_index; #define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) @@ -67,10 +69,10 @@ const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern; const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern; template class SparseMatrixBase; -template class SparseMatrix; -template class DynamicSparseMatrix; -template class SparseVector; -template class MappedSparseMatrix; +template class SparseMatrix; +template class DynamicSparseMatrix; +template class SparseVector; +template class MappedSparseMatrix; template class SparseSelfAdjointView; template class SparseDiagonalProduct; @@ -99,24 +101,25 @@ template struct eval template struct sparse_eval { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; public: - typedef SparseVector<_Scalar, RowMajor, _Index> type; + typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type; }; template struct sparse_eval { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; public: - typedef SparseVector<_Scalar, ColMajor, _Index> type; + typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type; }; +// TODO this seems almost identical to plain_matrix_type template struct sparse_eval { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; enum { _Options = ((traits::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; public: - typedef SparseMatrix<_Scalar, _Options, _Index> type; + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type; }; template struct sparse_eval { @@ -128,10 +131,10 @@ template struct sparse_eval { template struct plain_matrix_type { typedef typename traits::Scalar _Scalar; - typedef typename traits::Index _Index; + typedef typename traits::StorageIndex _StorageIndex; enum { _Options = ((evaluator::Flags&RowMajorBit)==RowMajorBit) ? 
RowMajor : ColMajor }; public: - typedef SparseMatrix<_Scalar, _Options, _Index> type; + typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type; }; template @@ -162,26 +165,26 @@ template<> struct glue_shapes { typedef SparseTria * * \sa SparseMatrix::setFromTriplets() */ -template::Index > +template::StorageIndex > class Triplet { public: Triplet() : m_row(0), m_col(0), m_value(0) {} - Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0)) + Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0)) : m_row(i), m_col(j), m_value(v) {} /** \returns the row index of the element */ - const Index& row() const { return m_row; } + const StorageIndex& row() const { return m_row; } /** \returns the column index of the element */ - const Index& col() const { return m_col; } + const StorageIndex& col() const { return m_col; } /** \returns the value of the element */ const Scalar& value() const { return m_value; } protected: - Index m_row, m_col; + StorageIndex m_row, m_col; Scalar m_value; }; diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h index 8b696a476..fd70cf2bc 100644 --- a/Eigen/src/SparseCore/SparseVector.h +++ b/Eigen/src/SparseCore/SparseVector.h @@ -26,11 +26,11 @@ namespace Eigen { */ namespace internal { -template -struct traits > +template +struct traits > { typedef _Scalar Scalar; - typedef _Index Index; + typedef _StorageIndex StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -61,9 +61,9 @@ struct sparse_vector_assign_selector; } -template +template class SparseVector - : public SparseMatrixBase > + : public SparseMatrixBase > { typedef SparseMatrixBase SparseBase; @@ -72,23 +72,23 @@ class SparseVector EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) - typedef internal::CompressedStorage Storage; + typedef internal::CompressedStorage Storage; enum { IsColVector = internal::traits::IsColVector }; enum { Options = _Options }; - EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } - EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } - EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } - EIGEN_STRONG_INLINE Index outerSize() const { return 1; } + EIGEN_STRONG_INLINE StorageIndex rows() const { return IsColVector ? m_size : 1; } + EIGEN_STRONG_INLINE StorageIndex cols() const { return IsColVector ? 
1 : m_size; } + EIGEN_STRONG_INLINE StorageIndex innerSize() const { return m_size; } + EIGEN_STRONG_INLINE StorageIndex outerSize() const { return 1; } EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); } EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); } - EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); } - EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); } + EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return &m_data.index(0); } + EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return &m_data.index(0); } /** \internal */ inline Storage& data() { return m_data; } @@ -132,7 +132,7 @@ class SparseVector inline void setZero() { m_data.clear(); } /** \returns the number of non zero coefficients */ - inline Index nonZeros() const { return static_cast(m_data.size()); } + inline StorageIndex nonZeros() const { return static_cast(m_data.size()); } inline void startVec(Index outer) { @@ -188,7 +188,7 @@ class SparseVector m_data.value(p+1) = m_data.value(p); --p; } - m_data.index(p+1) = i; + m_data.index(p+1) = convert_index(i); m_data.value(p+1) = 0; return m_data.value(p+1); } @@ -207,13 +207,13 @@ class SparseVector void resize(Index rows, Index cols) { - eigen_assert(rows==1 || cols==1); + eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1"); resize(IsColVector ? rows : cols); } void resize(Index newSize) { - m_size = newSize; + m_size = convert_index(newSize); m_data.clear(); } @@ -348,27 +348,27 @@ protected: static void check_template_parameters() { - EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT(NumTraits::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } Storage m_data; - Index m_size; + StorageIndex m_size; }; -template -class SparseVector::InnerIterator +template +class SparseVector::InnerIterator { public: explicit InnerIterator(const SparseVector& vec, Index outer=0) - : m_data(vec.m_data), m_id(0), m_end(static_cast(m_data.size())) + : m_data(vec.m_data), m_id(0), m_end(convert_index(m_data.size())) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } - explicit InnerIterator(const internal::CompressedStorage& data) - : m_data(data), m_id(0), m_end(static_cast(m_data.size())) + explicit InnerIterator(const internal::CompressedStorage& data) + : m_data(data), m_id(0), m_end(convert_index(m_data.size())) {} inline InnerIterator& operator++() { m_id++; return *this; } @@ -376,16 +376,16 @@ class SparseVector::InnerIterator inline Scalar value() const { return m_data.value(m_id); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id)); } - inline Index index() const { return m_data.index(m_id); } - inline Index row() const { return IsColVector ? index() : 0; } - inline Index col() const { return IsColVector ? 0 : index(); } + inline StorageIndex index() const { return m_data.index(m_id); } + inline StorageIndex row() const { return IsColVector ? index() : 0; } + inline StorageIndex col() const { return IsColVector ? 
0 : index(); } inline operator bool() const { return (m_id < m_end); } protected: - const internal::CompressedStorage& m_data; - Index m_id; - const Index m_end; + const internal::CompressedStorage& m_data; + StorageIndex m_id; + const StorageIndex m_end; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix A; @@ -393,19 +393,19 @@ class SparseVector::InnerIterator template InnerIterator(const SparseMatrixBase&,Index outer=0); }; -template -class SparseVector::ReverseInnerIterator +template +class SparseVector::ReverseInnerIterator { public: explicit ReverseInnerIterator(const SparseVector& vec, Index outer=0) - : m_data(vec.m_data), m_id(static_cast(m_data.size())), m_start(0) + : m_data(vec.m_data), m_id(convert_index(m_data.size())), m_start(0) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } - explicit ReverseInnerIterator(const internal::CompressedStorage& data) - : m_data(data), m_id(static_cast(m_data.size())), m_start(0) + explicit ReverseInnerIterator(const internal::CompressedStorage& data) + : m_data(data), m_id(convert_index(m_data.size())), m_start(0) {} inline ReverseInnerIterator& operator--() { m_id--; return *this; } @@ -413,15 +413,15 @@ class SparseVector::ReverseInnerIterator inline Scalar value() const { return m_data.value(m_id-1); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id-1)); } - inline Index index() const { return m_data.index(m_id-1); } - inline Index row() const { return IsColVector ? index() : 0; } - inline Index col() const { return IsColVector ? 0 : index(); } + inline StorageIndex index() const { return m_data.index(m_id-1); } + inline StorageIndex row() const { return IsColVector ? index() : 0; } + inline StorageIndex col() const { return IsColVector ? 
0 : index(); } inline operator bool() const { return (m_id > m_start); } protected: - const internal::CompressedStorage& m_data; - Index m_id; + const internal::CompressedStorage& m_data; + StorageIndex m_id; const Index m_start; }; @@ -465,7 +465,7 @@ struct sparse_vector_assign_selector { eigen_internal_assert(src.outerSize()==src.size()); typedef typename internal::evaluator::type SrcEvaluatorType; SrcEvaluatorType srcEval(src); - for(typename Dest::Index i=0; i struct traits > : traits { - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Sparse StorageKind; enum { Flags = int(traits::Flags) & (RowMajorBit) @@ -40,11 +40,11 @@ public: RealScalar m_epsilon = NumTraits::dummy_precision()) : m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {} - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } - inline Index innerSize() const { return m_matrix.innerSize(); } - inline Index outerSize() const { return m_matrix.outerSize(); } + inline StorageIndex innerSize() const { return m_matrix.innerSize(); } + inline StorageIndex outerSize() const { return m_matrix.outerSize(); } /** \returns the nested expression */ const typename internal::remove_all::type& @@ -126,7 +126,7 @@ struct unary_evaluator, IndexBased> typedef SparseView XprType; protected: enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; public: @@ -134,7 +134,7 @@ struct unary_evaluator, IndexBased> { public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer) : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize()) { incrementToNonZero(); @@ -153,17 +153,17 @@ struct unary_evaluator, IndexBased> : m_sve.m_argImpl.coeff(m_inner, m_outer); } - EIGEN_STRONG_INLINE Index index() const { return m_inner; } - inline Index row() const { return IsRowMajor ? m_outer : index(); } - inline Index col() const { return IsRowMajor ? index() : m_outer; } + EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } + inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } + inline StorageIndex col() const { return IsRowMajor ? 
index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const unary_evaluator &m_sve; - Index m_inner; - const Index m_outer; - const Index m_end; + StorageIndex m_inner; + const StorageIndex m_outer; + const StorageIndex m_end; private: void incrementToNonZero() diff --git a/Eigen/src/SparseCore/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h index 98062e9c6..ccfbdc762 100644 --- a/Eigen/src/SparseCore/TriangularSolver.h +++ b/Eigen/src/SparseCore/TriangularSolver.h @@ -28,7 +28,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -66,7 +66,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -106,7 +106,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -142,7 +142,7 @@ template struct sparse_solve_triangular_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Lhs::Index Index; + typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::type LhsEval; typedef typename evaluator::InnerIterator LhsIterator; static void run(const Lhs& lhs, Rhs& other) @@ -212,12 +212,12 @@ template struct sparse_solve_triangular_sparse_selector { typedef typename Rhs::Scalar Scalar; - typedef typename promote_index_type::Index, - typename traits::Index>::type Index; + typedef typename promote_index_type::StorageIndex, + typename traits::StorageIndex>::type StorageIndex; static void run(const Lhs& lhs, Rhs& other) { const bool IsLower = (UpLo==Lower); - AmbiVector tempVector(other.rows()*2); + AmbiVector tempVector(other.rows()*2); tempVector.setBounds(0,other.rows()); Rhs res(other.rows(), other.cols()); @@ -273,7 +273,7 @@ struct sparse_solve_triangular_sparse_selector Index count = 0; // FIXME compute a reference value to filter zeros - for (typename AmbiVector::Iterator it(tempVector/*,1e-12*/); it; ++it) + for (typename AmbiVector::Iterator it(tempVector/*,1e-12*/); it; ++it) { ++ count; // std::cerr << "fill " << it.index() << ", " << col << "\n"; diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h index d72d7f150..0c48fef3e 100644 --- a/Eigen/src/SparseLU/SparseLU.h +++ b/Eigen/src/SparseLU/SparseLU.h @@ -14,7 +14,7 @@ namespace Eigen { -template > class SparseLU; +template > class SparseLU; template struct SparseLUMatrixLReturnType; template struct SparseLUMatrixUReturnType; @@ -70,7 +70,7 @@ template struct SparseLUMatrixURetu * \sa \ref OrderingMethods_Module */ template -class SparseLU : public SparseSolverBase >, public internal::SparseLUImpl +class SparseLU : public SparseSolverBase >, public internal::SparseLUImpl { protected: typedef SparseSolverBase > APIBase; @@ -82,13 +82,13 @@ class SparseLU : public SparseSolverBase >, typedef _OrderingType OrderingType; 
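For context, a minimal solve sketch against the SparseLU interface whose typedefs are renamed above, assuming a matrix whose StorageIndex is int; the 3x3 system, the coefficient values, and the explicit COLAMDOrdering choice are illustrative, not taken from the patch. The point it shows is that the ordering's index type has to agree with the matrix's StorageIndex, which is exactly the kind of mismatch this renaming makes visible:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SpMat; // StorageIndex = int
      const int n = 3;
      SpMat A(n, n);
      A.insert(0, 0) = 4.0;  A.insert(1, 1) = 3.0;  A.insert(2, 2) = 2.0;  A.insert(0, 2) = 1.0;
      A.makeCompressed();

      // The ordering's index type must match the matrix StorageIndex (int here).
      Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > lu;
      lu.analyzePattern(A);  // symbolic step: fill-reducing column ordering, elimination tree
      lu.factorize(A);       // numeric step
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
      Eigen::VectorXd x = lu.solve(b);
      std::cout << x.transpose() << std::endl;
      return 0;
    }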
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix NCMatrix; - typedef internal::MappedSuperNodalMatrix SCMatrix; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix NCMatrix; + typedef internal::MappedSuperNodalMatrix SCMatrix; typedef Matrix ScalarVector; - typedef Matrix IndexVector; - typedef PermutationMatrix PermutationType; - typedef internal::SparseLUImpl Base; + typedef Matrix IndexVector; + typedef PermutationMatrix PermutationType; + typedef internal::SparseLUImpl Base; public: SparseLU():m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1) @@ -122,8 +122,8 @@ class SparseLU : public SparseSolverBase >, factorize(matrix); } - inline Index rows() const { return m_mat.rows(); } - inline Index cols() const { return m_mat.cols(); } + inline StorageIndex rows() const { return m_mat.rows(); } + inline StorageIndex cols() const { return m_mat.cols(); } /** Indicate that the pattern of the input matrix is symmetric */ void isSymmetric(bool sym) { @@ -146,9 +146,9 @@ class SparseLU : public SparseSolverBase >, * y = b; matrixU().solveInPlace(y); * \endcode */ - SparseLUMatrixUReturnType > matrixU() const + SparseLUMatrixUReturnType > matrixU() const { - return SparseLUMatrixUReturnType >(m_Lstore, m_Ustore); + return SparseLUMatrixUReturnType >(m_Lstore, m_Ustore); } /** @@ -324,7 +324,7 @@ class SparseLU : public SparseSolverBase >, std::string m_lastError; NCMatrix m_mat; // The input (permuted ) matrix SCMatrix m_Lstore; // The lower triangular matrix (supernodal) - MappedSparseMatrix m_Ustore; // The upper triangular matrix + MappedSparseMatrix m_Ustore; // The upper triangular matrix PermutationType m_perm_c; // Column permutation PermutationType m_perm_r ; // Row permutation IndexVector m_etree; // Column elimination tree @@ -334,10 +334,10 @@ class SparseLU : public SparseSolverBase >, // SparseLU options bool m_symmetricmode; // values for performance - internal::perfvalues m_perfv; + internal::perfvalues m_perfv; RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot - Index m_nnzL, m_nnzU; // Nonzeros in L and U factors - Index m_detPermR; // Determinant of the coefficient matrix + StorageIndex m_nnzL, m_nnzU; // Nonzeros in L and U factors + StorageIndex m_detPermR; // Determinant of the coefficient matrix private: // Disable copy constructor SparseLU (const SparseLU& ); @@ -375,7 +375,7 @@ void SparseLU::analyzePattern(const MatrixType& mat) { m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used. // Then, permute only the column pointers - ei_declare_aligned_stack_constructed_variable(Index,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast(mat.outerIndexPtr()):0); + ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast(mat.outerIndexPtr()):0); // If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed. 
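A small sketch of the compressed/uncompressed distinction the comment above refers to (the matrix size and inserted values are arbitrary): after insert() the matrix keeps per-column non-zero counts on the side, so its outer-index array cannot be handed to analyzePattern() as-is and the copy mentioned above is made; calling makeCompressed() first avoids it:

    Eigen::SparseMatrix<double, Eigen::ColMajor, int> A(4, 4);
    A.reserve(Eigen::VectorXi::Constant(4, 2)); // room for 2 entries per column
    A.insert(0, 0) = 1.0;
    A.insert(2, 1) = 2.0;
    // Here A.isCompressed() == false: inner non-zero counts are stored separately.
    A.makeCompressed();
    // Now A.isCompressed() == true: plain compressed column storage,
    // and outerIndexPtr() can be reused directly by the factorization.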
if(!mat.isCompressed()) @@ -640,7 +640,7 @@ void SparseLU::factorize(const MatrixType& matrix) // Create supernode matrix L m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup); // Create the column major upper sparse matrix U; - new (&m_Ustore) MappedSparseMatrix ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() ); + new (&m_Ustore) MappedSparseMatrix ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() ); m_info = Success; m_factorizationIsOk = true; @@ -649,12 +649,12 @@ void SparseLU::factorize(const MatrixType& matrix) template struct SparseLUMatrixLReturnType : internal::no_assignment_operator { - typedef typename MappedSupernodalType::Index Index; + typedef typename MappedSupernodalType::StorageIndex StorageIndex; typedef typename MappedSupernodalType::Scalar Scalar; explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL) { } - Index rows() { return m_mapL.rows(); } - Index cols() { return m_mapL.cols(); } + StorageIndex rows() { return m_mapL.rows(); } + StorageIndex cols() { return m_mapL.cols(); } template void solveInPlace( MatrixBase &X) const { @@ -666,21 +666,18 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator template struct SparseLUMatrixUReturnType : internal::no_assignment_operator { - typedef typename MatrixLType::Index Index; + typedef typename MatrixLType::StorageIndex StorageIndex; typedef typename MatrixLType::Scalar Scalar; explicit SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU) : m_mapL(mapL),m_mapU(mapU) { } - Index rows() { return m_mapL.rows(); } - Index cols() { return m_mapL.cols(); } + StorageIndex rows() { return m_mapL.rows(); } + StorageIndex cols() { return m_mapL.cols(); } template void solveInPlace(MatrixBase &X) const { - /* Explicit type conversion as the Index type of MatrixBase may be wider than Index */ - eigen_assert(X.rows() <= NumTraits::highest()); - eigen_assert(X.cols() <= NumTraits::highest()); - Index nrhs = Index(X.cols()); - Index n = Index(X.rows()); + Index nrhs = X.cols(); + Index n = X.rows(); // Backward solve with U for (Index k = m_mapL.nsuper(); k >= 0; k--) { diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h index e8ee35a94..098763765 100644 --- a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h @@ -29,20 +29,20 @@ namespace internal { * SuperInnerIterator to iterate through all supernodes * Function for triangular solve */ -template +template class MappedSuperNodalMatrix { public: typedef _Scalar Scalar; - typedef _Index Index; - typedef Matrix IndexVector; + typedef _StorageIndex StorageIndex; + typedef Matrix IndexVector; typedef Matrix ScalarVector; public: MappedSuperNodalMatrix() { } - MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + MappedSuperNodalMatrix(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col); @@ -58,7 +58,7 @@ class MappedSuperNodalMatrix * FIXME This class will be modified such that it can be use in the course * of the factorization. 
*/ - void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + void setInfos(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { m_row = m; @@ -75,12 +75,12 @@ class MappedSuperNodalMatrix /** * Number of rows */ - Index rows() { return m_row; } + StorageIndex rows() { return m_row; } /** * Number of columns */ - Index cols() { return m_col; } + StorageIndex cols() { return m_col; } /** * Return the array of nonzero values packed by column @@ -96,12 +96,12 @@ class MappedSuperNodalMatrix /** * Return the pointers to the beginning of each column in \ref valuePtr() */ - Index* colIndexPtr() + StorageIndex* colIndexPtr() { return m_nzval_colptr; } - const Index* colIndexPtr() const + const StorageIndex* colIndexPtr() const { return m_nzval_colptr; } @@ -109,9 +109,9 @@ class MappedSuperNodalMatrix /** * Return the array of compressed row indices of all supernodes */ - Index* rowIndex() { return m_rowind; } + StorageIndex* rowIndex() { return m_rowind; } - const Index* rowIndex() const + const StorageIndex* rowIndex() const { return m_rowind; } @@ -119,9 +119,9 @@ class MappedSuperNodalMatrix /** * Return the location in \em rowvaluePtr() which starts each column */ - Index* rowIndexPtr() { return m_rowind_colptr; } + StorageIndex* rowIndexPtr() { return m_rowind_colptr; } - const Index* rowIndexPtr() const + const StorageIndex* rowIndexPtr() const { return m_rowind_colptr; } @@ -129,18 +129,18 @@ class MappedSuperNodalMatrix /** * Return the array of column-to-supernode mapping */ - Index* colToSup() { return m_col_to_sup; } + StorageIndex* colToSup() { return m_col_to_sup; } - const Index* colToSup() const + const StorageIndex* colToSup() const { return m_col_to_sup; } /** * Return the array of supernode-to-column mapping */ - Index* supToCol() { return m_sup_to_col; } + StorageIndex* supToCol() { return m_sup_to_col; } - const Index* supToCol() const + const StorageIndex* supToCol() const { return m_sup_to_col; } @@ -148,7 +148,7 @@ class MappedSuperNodalMatrix /** * Return the number of supernodes */ - Index nsuper() const + StorageIndex nsuper() const { return m_nsuper; } @@ -161,15 +161,15 @@ class MappedSuperNodalMatrix protected: - Index m_row; // Number of rows - Index m_col; // Number of columns - Index m_nsuper; // Number of supernodes + StorageIndex m_row; // Number of rows + StorageIndex m_col; // Number of columns + StorageIndex m_nsuper; // Number of supernodes Scalar* m_nzval; //array of nonzero values packed by column - Index* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j - Index* m_rowind; // Array of compressed row indices of rectangular supernodes - Index* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j - Index* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs - Index* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode + StorageIndex* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j + StorageIndex* m_rowind; // Array of compressed row indices of rectangular supernodes + StorageIndex* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j + StorageIndex* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs + StorageIndex* m_sup_to_col; //sup_to_col[s] points to 
the starting column of the s-th supernode private : }; @@ -182,9 +182,9 @@ template class MappedSuperNodalMatrix::InnerIterator { public: - InnerIterator(const MappedSuperNodalMatrix& mat, Index outer) + InnerIterator(const MappedSuperNodalMatrix& mat, Eigen::Index outer) : m_matrix(mat), - m_outer(outer), + m_outer(convert_index(outer)), m_supno(mat.colToSup()[outer]), m_idval(mat.colIndexPtr()[outer]), m_startidval(m_idval), @@ -229,14 +229,14 @@ class MappedSuperNodalMatrix::InnerIterator * \brief Solve with the supernode triangular matrix * */ -template +template template -void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) const +void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) const { /* Explicit type conversion as the Index type of MatrixBase may be wider than Index */ - eigen_assert(X.rows() <= NumTraits::highest()); - eigen_assert(X.cols() <= NumTraits::highest()); - Index n = Index(X.rows()); +// eigen_assert(X.rows() <= NumTraits::highest()); +// eigen_assert(X.cols() <= NumTraits::highest()); + Index n = int(X.rows()); Index nrhs = Index(X.cols()); const Scalar * Lval = valuePtr(); // Nonzero values Matrix work(n, nrhs); // working vector diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h index 133211488..58bfc1cb4 100644 --- a/Eigen/src/SparseQR/SparseQR.h +++ b/Eigen/src/SparseQR/SparseQR.h @@ -21,7 +21,7 @@ namespace internal { template struct traits > { typedef typename SparseQRType::MatrixType ReturnType; - typedef typename ReturnType::Index Index; + typedef typename ReturnType::StorageIndex StorageIndex; typedef typename ReturnType::StorageKind StorageKind; }; template struct traits > @@ -73,11 +73,11 @@ class SparseQR : public SparseSolverBase > typedef _OrderingType OrderingType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef SparseMatrix QRMatrixType; - typedef Matrix IndexVector; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef SparseMatrix QRMatrixType; + typedef Matrix IndexVector; typedef Matrix ScalarVector; - typedef PermutationMatrix PermutationType; + typedef PermutationMatrix PermutationType; public: SparseQR () : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { } @@ -109,11 +109,11 @@ class SparseQR : public SparseSolverBase > /** \returns the number of rows of the represented matrix. */ - inline Index rows() const { return m_pmat.rows(); } + inline StorageIndex rows() const { return m_pmat.rows(); } /** \returns the number of columns of the represented matrix. */ - inline Index cols() const { return m_pmat.cols();} + inline StorageIndex cols() const { return m_pmat.cols();} /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization. 
*/ @@ -123,7 +123,7 @@ class SparseQR : public SparseSolverBase > * * \sa setPivotThreshold() */ - Index rank() const + StorageIndex rank() const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); return m_nonzeropivots; @@ -179,7 +179,7 @@ class SparseQR : public SparseSolverBase > b = y; // Solve with the triangular matrix R - y.resize((std::max)(cols(),Index(y.rows())),y.cols()); + y.resize((std::max)(cols(),y.rows()),y.cols()); y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView().solve(b.topRows(rank)); y.bottomRows(y.rows()-rank).setZero(); @@ -260,7 +260,7 @@ class SparseQR : public SparseSolverBase > PermutationType m_outputPerm_c; // The final column permutation RealScalar m_threshold; // Threshold to determine null Householder reflections bool m_useDefaultThreshold; // Use default threshold - Index m_nonzeropivots; // Number of non zero pivots found + StorageIndex m_nonzeropivots; // Number of non zero pivots found IndexVector m_etree; // Column elimination tree IndexVector m_firstRowElt; // First element in each row bool m_isQSorted; // whether Q is sorted or not @@ -289,9 +289,9 @@ void SparseQR::analyzePattern(const MatrixType& mat) // Compute the column fill reducing ordering OrderingType ord; ord(matCpy, m_perm_c); - Index n = mat.cols(); - Index m = mat.rows(); - Index diagSize = (std::min)(m,n); + StorageIndex n = mat.cols(); + StorageIndex m = mat.rows(); + StorageIndex diagSize = (std::min)(m,n); if (!m_perm_c.size()) { @@ -354,7 +354,7 @@ void SparseQR::factorize(const MatrixType& mat) // otherwise directly use the input matrix // IndexVector originalOuterIndicesCpy; - const Index *originalOuterIndices = mat.outerIndexPtr(); + const StorageIndex *originalOuterIndices = mat.outerIndexPtr(); if(MatrixType::IsRowMajor) { originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1); @@ -385,11 +385,11 @@ void SparseQR::factorize(const MatrixType& mat) // Initialize the numerical permutation m_pivotperm.setIdentity(n); - Index nonzeroCol = 0; // Record the number of valid pivots + StorageIndex nonzeroCol = 0; // Record the number of valid pivots m_Q.startVec(0); // Left looking rank-revealing QR factorization: compute a column of R and Q at a time - for (Index col = 0; col < n; ++col) + for (StorageIndex col = 0; col < n; ++col) { mark.setConstant(-1); m_R.startVec(col); @@ -405,12 +405,12 @@ void SparseQR::factorize(const MatrixType& mat) // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. 
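A hedged sketch of the SparseQR interface whose index types are changed above, written as a free-standing helper (leastSquares is a made-up name and the threshold value is arbitrary); as with SparseLU, the ordering's index type is chosen to match the matrix StorageIndex:

    #include <Eigen/Sparse>

    typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SpMat; // StorageIndex = int

    Eigen::VectorXd leastSquares(const SpMat& A, const Eigen::VectorXd& b)
    {
      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr;
      qr.setPivotThreshold(1e-8); // Householder reflections below this are treated as null
      qr.compute(A);              // analyzePattern() followed by factorize()
      return qr.solve(b);         // least-squares solution; qr.rank() reports the non-zero pivots
    }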
for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) { - Index curIdx = nonzeroCol; + StorageIndex curIdx = nonzeroCol; if(itp) curIdx = itp.row(); if(curIdx == nonzeroCol) found_diag = true; // Get the nonzeros indexes of the current column of R - Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here + StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here if (st < 0 ) { m_lastError = "Empty row found during numerical factorization"; @@ -467,7 +467,7 @@ void SparseQR::factorize(const MatrixType& mat) { for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) { - Index iQ = itq.row(); + StorageIndex iQ = itq.row(); if (mark(iQ) != col) { Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, @@ -578,7 +578,7 @@ struct SparseQR_QProduct : ReturnByValue struct SparseQRMatrixQReturnType : public EigenBase > { - typedef typename SparseQRType::Index Index; + typedef typename SparseQRType::StorageIndex StorageIndex; typedef typename SparseQRType::Scalar Scalar; typedef Matrix DenseMatrix; explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {} @@ -647,8 +647,8 @@ struct SparseQRMatrixQReturnType : public EigenBase(m_qr); } - inline Index rows() const { return m_qr.rows(); } - inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } + inline StorageIndex rows() const { return m_qr.rows(); } + inline StorageIndex cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } // To use for operations with the transpose of Q SparseQRMatrixQTransposeReturnType transpose() const { diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h index ef73587a7..f00bc3976 100644 --- a/Eigen/src/SuperLUSupport/SuperLUSupport.h +++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -156,10 +156,10 @@ struct SluMatrix : SuperMatrix res.setScalarType(); res.Mtype = SLU_GE; - res.nrow = mat.rows(); - res.ncol = mat.cols(); + res.nrow = internal::convert_index(mat.rows()); + res.ncol = internal::convert_index(mat.cols()); - res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride(); + res.storage.lda = internal::convert_index(MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride()); res.storage.values = (void*)(mat.data()); return res; } @@ -298,7 +298,7 @@ class SuperLUBase : public SparseSolverBase typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix Vector; typedef Matrix IntRowVectorType; typedef Matrix IntColVectorType; @@ -313,8 +313,8 @@ class SuperLUBase : public SparseSolverBase clearFactors(); } - inline Index rows() const { return m_matrix.rows(); } - inline Index cols() const { return m_matrix.cols(); } + inline StorageIndex rows() const { return m_matrix.rows(); } + inline StorageIndex cols() const { return m_matrix.cols(); } /** \returns a reference to the Super LU option object to configure the Super LU algorithms. 
*/ inline superlu_options_t& options() { return m_sluOptions; } @@ -457,7 +457,7 @@ class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> > typedef _MatrixType MatrixType; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::IntRowVectorType IntRowVectorType; typedef typename Base::IntColVectorType IntColVectorType; typedef typename Base::LUMatrixType LUMatrixType; @@ -616,8 +616,8 @@ void SuperLU::_solve_impl(const MatrixBase &b, MatrixBase { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); - const int size = m_matrix.rows(); - const int rhsCols = b.cols(); + const StorageIndex size = m_matrix.rows(); + const Index rhsCols = b.cols(); eigen_assert(size==b.rows()); m_sluOptions.Trans = NOTRANS; diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h index b8b216d5e..982aa2fca 100644 --- a/Eigen/src/UmfPackSupport/UmfPackSupport.h +++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -141,7 +141,7 @@ class UmfPackLU : public SparseSolverBase > typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix Vector; typedef Matrix IntRowVectorType; typedef Matrix IntColVectorType; @@ -164,8 +164,8 @@ class UmfPackLU : public SparseSolverBase > if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); } - inline Index rows() const { return m_copyMatrix.rows(); } - inline Index cols() const { return m_copyMatrix.cols(); } + inline StorageIndex rows() const { return m_copyMatrix.rows(); } + inline StorageIndex cols() const { return m_copyMatrix.cols(); } /** \brief Reports whether previous computation was successful. 
* @@ -279,7 +279,7 @@ class UmfPackLU : public SparseSolverBase > void grapInput_impl(const InputMatrixType& mat, internal::true_type) { m_copyMatrix.resize(mat.rows(), mat.cols()); - if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() ) + if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::StorageIndex)!=sizeof(int) || !mat.isCompressed() ) { // non supported input -> copy m_copyMatrix = mat; @@ -397,7 +397,7 @@ template template bool UmfPackLU::_solve_impl(const MatrixBase &b, MatrixBase &x) const { - const int rhsCols = b.cols(); + Index rhsCols = b.cols(); eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet"); eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet"); eigen_assert(b.derived().data() != x.derived().data() && " Umfpack does not support inplace solve"); diff --git a/test/bandmatrix.cpp b/test/bandmatrix.cpp index 5e4e8e07b..f8c38f7c3 100644 --- a/test/bandmatrix.cpp +++ b/test/bandmatrix.cpp @@ -11,7 +11,6 @@ template void bandmatrix(const MatrixType& _m) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef Matrix DenseMatrixType; @@ -62,8 +61,6 @@ using Eigen::internal::BandMatrix; void test_bandmatrix() { - typedef BandMatrix::Index Index; - for(int i = 0; i < 10*g_repeat ; i++) { Index rows = internal::random(1,10); Index cols = internal::random(1,10); diff --git a/test/main.h b/test/main.h index b3fa68476..a1ccd28b5 100644 --- a/test/main.h +++ b/test/main.h @@ -373,11 +373,10 @@ bool test_is_equal(const T& actual, const U& expected) */ // Forward declaration to avoid ICC warning template -void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typename MatrixType::Index rows, typename MatrixType::Index cols, MatrixType& m); +void createRandomPIMatrixOfRank(Index desired_rank, Index rows, Index cols, MatrixType& m); template -void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typename MatrixType::Index rows, typename MatrixType::Index cols, MatrixType& m) +void createRandomPIMatrixOfRank(Index desired_rank, Index rows, Index cols, MatrixType& m) { - typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime }; @@ -414,11 +413,10 @@ void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typenam // Forward declaration to avoid ICC warning template -void randomPermutationVector(PermutationVectorType& v, typename PermutationVectorType::Index size); +void randomPermutationVector(PermutationVectorType& v, Index size); template -void randomPermutationVector(PermutationVectorType& v, typename PermutationVectorType::Index size) +void randomPermutationVector(PermutationVectorType& v, Index size) { - typedef typename PermutationVectorType::Index Index; typedef typename PermutationVectorType::Scalar Scalar; v.resize(size); for(Index i = 0; i < size; ++i) v(i) = Scalar(i); diff --git a/test/nullary.cpp b/test/nullary.cpp index 5408d88b2..a19c674e4 100644 --- a/test/nullary.cpp +++ b/test/nullary.cpp @@ -35,8 +35,8 @@ bool equalsIdentity(const MatrixType& A) template void testVectorType(const VectorType& base) { - typedef typename internal::traits::Index Index; - typedef typename internal::traits::Scalar 
Scalar; + typedef typename VectorType::Index Index; + typedef typename VectorType::Scalar Scalar; const Index size = base.size(); diff --git a/test/product.h b/test/product.h index 0b3abe402..672d0cee9 100644 --- a/test/product.h +++ b/test/product.h @@ -22,7 +22,6 @@ template void product(const MatrixType& m) /* this test covers the following files: Identity.h Product.h */ - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Matrix RowVectorType; typedef Matrix ColVectorType; diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp index 097959c84..a26fd5dcd 100644 --- a/test/sparse_basic.cpp +++ b/test/sparse_basic.cpp @@ -13,11 +13,11 @@ template void sparse_basic(const SparseMatrixType& ref) { - typedef typename SparseMatrixType::Index Index; - typedef Matrix Vector2; + typedef typename SparseMatrixType::StorageIndex StorageIndex; + typedef Matrix Vector2; - const Index rows = ref.rows(); - const Index cols = ref.cols(); + const StorageIndex rows = ref.rows(); + const StorageIndex cols = ref.cols(); const Index inner = ref.innerSize(); const Index outer = ref.outerSize(); @@ -56,27 +56,27 @@ template void sparse_basic(const SparseMatrixType& re VERIFY_IS_APPROX(m, refMat); // test InnerIterators and Block expressions - for (int t=0; t<10; ++t) + for (Index t=0; t<10; ++t) { - int j = internal::random(0,cols-1); - int i = internal::random(0,rows-1); - int w = internal::random(1,cols-j-1); - int h = internal::random(1,rows-i-1); + Index j = internal::random(0,cols-1); + Index i = internal::random(0,rows-1); + Index w = internal::random(1,cols-j-1); + Index h = internal::random(1,rows-i-1); VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w)); - for(int c=0; c void sparse_basic(const SparseMatrixType& re } } - for(int c=0; c void sparse_basic(const SparseMatrixType& re SparseMatrixType m2(rows,cols); VectorXi r(VectorXi::Constant(m2.outerSize(), ((mode%2)==0) ? 
int(m2.innerSize()) : std::max(1,int(m2.innerSize())/8))); m2.reserve(r); - for (int k=0; k(0,rows-1); Index j = internal::random(0,cols-1); @@ -390,7 +390,7 @@ template void sparse_basic(const SparseMatrixType& re // test setFromTriplets { - typedef Triplet TripletType; + typedef Triplet TripletType; std::vector triplets; Index ntriplets = rows*cols; triplets.reserve(ntriplets); @@ -398,8 +398,8 @@ template void sparse_basic(const SparseMatrixType& re refMat.setZero(); for(Index i=0;i(0,rows-1); - Index c = internal::random(0,cols-1); + StorageIndex r = internal::random(0,rows-1); + StorageIndex c = internal::random(0,cols-1); Scalar v = internal::random(); triplets.push_back(TripletType(r,c,v)); refMat(r,c) += v; @@ -482,17 +482,17 @@ template void sparse_basic(const SparseMatrixType& re // test conservative resize { - std::vector< std::pair > inc; + std::vector< std::pair > inc; if(rows > 3 && cols > 2) - inc.push_back(std::pair(-3,-2)); - inc.push_back(std::pair(0,0)); - inc.push_back(std::pair(3,2)); - inc.push_back(std::pair(3,0)); - inc.push_back(std::pair(0,3)); + inc.push_back(std::pair(-3,-2)); + inc.push_back(std::pair(0,0)); + inc.push_back(std::pair(3,2)); + inc.push_back(std::pair(3,0)); + inc.push_back(std::pair(0,3)); for(size_t i = 0; i< inc.size(); i++) { - Index incRows = inc[i].first; - Index incCols = inc[i].second; + StorageIndex incRows = inc[i].first; + StorageIndex incCols = inc[i].second; SparseMatrixType m1(rows, cols); DenseMatrix refMat1 = DenseMatrix::Zero(rows, cols); initSparse(density, refMat1, m1); @@ -527,28 +527,28 @@ template void sparse_basic(const SparseMatrixType& re template -void big_sparse_triplet(typename SparseMatrixType::Index rows, typename SparseMatrixType::Index cols, double density) { -typedef typename SparseMatrixType::Index Index; -typedef typename SparseMatrixType::Scalar Scalar; -typedef Triplet TripletType; -std::vector triplets; -double nelements = density * rows*cols; -VERIFY(nelements>=0 && nelements < NumTraits::highest()); -Index ntriplets = Index(nelements); -triplets.reserve(ntriplets); -Scalar sum = Scalar(0); -for(Index i=0;i(0,rows-1); - Index c = internal::random(0,cols-1); - Scalar v = internal::random(); - triplets.push_back(TripletType(r,c,v)); - sum += v; -} -SparseMatrixType m(rows,cols); -m.setFromTriplets(triplets.begin(), triplets.end()); -VERIFY(m.nonZeros() <= ntriplets); -VERIFY_IS_APPROX(sum, m.sum()); +void big_sparse_triplet(Index rows, Index cols, double density) { + typedef typename SparseMatrixType::StorageIndex StorageIndex; + typedef typename SparseMatrixType::Scalar Scalar; + typedef Triplet TripletType; + std::vector triplets; + double nelements = density * rows*cols; + VERIFY(nelements>=0 && nelements < NumTraits::highest()); + Index ntriplets = Index(nelements); + triplets.reserve(ntriplets); + Scalar sum = Scalar(0); + for(Index i=0;i(0,rows-1); + Index c = internal::random(0,cols-1); + Scalar v = internal::random(); + triplets.push_back(TripletType(r,c,v)); + sum += v; + } + SparseMatrixType m(rows,cols); + m.setFromTriplets(triplets.begin(), triplets.end()); + VERIFY(m.nonZeros() <= ntriplets); + VERIFY_IS_APPROX(sum, m.sum()); } diff --git a/test/sparse_permutations.cpp b/test/sparse_permutations.cpp index e4ce1d679..dec586776 100644 --- a/test/sparse_permutations.cpp +++ b/test/sparse_permutations.cpp @@ -11,15 +11,13 @@ template void sparse_permutations(const SparseMatrixType& ref) { - typedef typename SparseMatrixType::Index Index; - const Index rows = ref.rows(); const Index cols = ref.cols(); 
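The setFromTriplets test above corresponds to the following minimal standalone sketch, using the Triplet with an explicit StorageIndex as changed earlier in this patch (buildFromTriplets is a made-up name; sizes and values are arbitrary):

    #include <Eigen/SparseCore>
    #include <vector>

    typedef Eigen::Triplet<double, int> T; // (row, col, value) with StorageIndex = int

    Eigen::SparseMatrix<double, Eigen::ColMajor, int> buildFromTriplets()
    {
      std::vector<T> triplets;
      triplets.push_back(T(0, 0, 1.0));
      triplets.push_back(T(1, 2, -2.5));
      triplets.push_back(T(1, 2,  0.5)); // duplicate entries are summed: (1,2) ends up as -2.0
      Eigen::SparseMatrix<double, Eigen::ColMajor, int> m(2, 3);
      m.setFromTriplets(triplets.begin(), triplets.end());
      return m;
    }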
typedef typename SparseMatrixType::Scalar Scalar; - typedef typename SparseMatrixType::Index Index; - typedef SparseMatrix OtherSparseMatrixType; + typedef typename SparseMatrixType::StorageIndex StorageIndex; + typedef SparseMatrix OtherSparseMatrixType; typedef Matrix DenseMatrix; - typedef Matrix VectorI; + typedef Matrix VectorI; double density = (std::max)(8./(rows*cols), 0.01); diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp index 366e27274..b3f653d0e 100644 --- a/test/sparse_product.cpp +++ b/test/sparse_product.cpp @@ -11,7 +11,7 @@ template void sparse_product() { - typedef typename SparseMatrixType::Index Index; + typedef typename SparseMatrixType::StorageIndex Index; Index n = 100; const Index rows = internal::random(1,n); const Index cols = internal::random(1,n); diff --git a/test/sparse_solver.h b/test/sparse_solver.h index ee350d561..afda26b93 100644 --- a/test/sparse_solver.h +++ b/test/sparse_solver.h @@ -15,7 +15,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, { typedef typename Solver::MatrixType Mat; typedef typename Mat::Scalar Scalar; - typedef typename Mat::Index Index; + typedef typename Mat::StorageIndex StorageIndex; DenseRhs refX = dA.lu().solve(db); { @@ -60,7 +60,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, x.setZero(); // test with Map - MappedSparseMatrix Am(A.rows(), A.cols(), A.nonZeros(), const_cast(A.outerIndexPtr()), const_cast(A.innerIndexPtr()), const_cast(A.valuePtr())); + MappedSparseMatrix Am(A.rows(), A.cols(), A.nonZeros(), const_cast(A.outerIndexPtr()), const_cast(A.innerIndexPtr()), const_cast(A.valuePtr())); solver.compute(Am); if (solver.info() != Success) { @@ -95,7 +95,7 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, // test uncompressed inputs { Mat A2 = A; - A2.reserve((ArrayXf::Random(A.outerSize())+2).template cast().eval()); + A2.reserve((ArrayXf::Random(A.outerSize())+2).template cast().eval()); solver.compute(A2); Rhs x = solver.solve(b); VERIFY(x.isApprox(refX,test_precision())); diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h index cd15ce0bf..60f5f662c 100644 --- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -284,7 +284,7 @@ public: using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h index aaf42c78a..eccdce24c 100644 --- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -220,7 +220,7 @@ namespace Eigen { using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h index 446fcac16..d0b51970d 100644 --- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +++ 
b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -31,7 +31,7 @@ class KroneckerProductBase : public ReturnByValue protected: typedef typename Traits::Lhs Lhs; typedef typename Traits::Rhs Rhs; - typedef typename Traits::Index Index; + typedef typename Traits::StorageIndex StorageIndex; public: /*! \brief Constructor. */ @@ -39,8 +39,8 @@ class KroneckerProductBase : public ReturnByValue : m_A(A), m_B(B) {} - inline Index rows() const { return m_A.rows() * m_B.rows(); } - inline Index cols() const { return m_A.cols() * m_B.cols(); } + inline StorageIndex rows() const { return m_A.rows() * m_B.rows(); } + inline StorageIndex cols() const { return m_A.cols() * m_B.cols(); } /*! * This overrides ReturnByValue::coeff because this function is @@ -48,8 +48,8 @@ class KroneckerProductBase : public ReturnByValue */ Scalar coeff(Index row, Index col) const { - return m_A.coeff(typename Lhs::Index(row / m_B.rows()), typename Lhs::Index(col / m_B.cols())) * - m_B.coeff(typename Rhs::Index(row % m_B.rows()), typename Rhs::Index(col % m_B.cols())); + return m_A.coeff(row / m_B.rows(), col / m_B.cols()) * + m_B.coeff(row % m_B.rows(), col % m_B.cols()); } /*! @@ -59,7 +59,7 @@ class KroneckerProductBase : public ReturnByValue Scalar coeff(Index i) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); - return m_A.coeff(typename Lhs::Index(i / m_A.size())) * m_B.coeff(typename Rhs::Index(i % m_A.size())); + return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size()); } protected: @@ -134,7 +134,6 @@ template template void KroneckerProduct::evalTo(Dest& dst) const { - typedef typename Base::Index Index; const int BlockRows = Rhs::RowsAtCompileTime, BlockCols = Rhs::ColsAtCompileTime; const Index Br = m_B.rows(), @@ -148,12 +147,8 @@ template template void KroneckerProductSparse::evalTo(Dest& dst) const { - typedef typename Dest::Index DestIndex; - const typename Rhs::Index Br = m_B.rows(), - Bc = m_B.cols(); - eigen_assert(this->rows() <= NumTraits::highest()); - eigen_assert(this->cols() <= NumTraits::highest()); - dst.resize(DestIndex(this->rows()), DestIndex(this->cols())); + Index Br = m_B.rows(), Bc = m_B.cols(); + dst.resize(this->rows(), this->cols()); dst.resizeNonZeros(0); // 1 - evaluate the operands if needed: @@ -170,13 +165,14 @@ void KroneckerProductSparse::evalTo(Dest& dst) const // compute number of non-zeros per innervectors of dst { + // TODO VectorXi is not necessarily big enough! VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols()); - for (typename Lhs::Index kA=0; kA < m_A.outerSize(); ++kA) + for (Index kA=0; kA < m_A.outerSize(); ++kA) for (LhsInnerIterator itA(lhs1,kA); itA; ++itA) nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++; VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols()); - for (typename Rhs::Index kB=0; kB < m_B.outerSize(); ++kB) + for (Index kB=0; kB < m_B.outerSize(); ++kB) for (RhsInnerIterator itB(rhs1,kB); itB; ++itB) nnzB(Dest::IsRowMajor ? 
itB.row() : itB.col())++; @@ -184,17 +180,16 @@ void KroneckerProductSparse::evalTo(Dest& dst) const dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size())); } - for (typename Lhs::Index kA=0; kA < m_A.outerSize(); ++kA) + for (Index kA=0; kA < m_A.outerSize(); ++kA) { - for (typename Rhs::Index kB=0; kB < m_B.outerSize(); ++kB) + for (Index kB=0; kB < m_B.outerSize(); ++kB) { for (LhsInnerIterator itA(lhs1,kA); itA; ++itA) { for (RhsInnerIterator itB(rhs1,kB); itB; ++itB) { - const DestIndex - i = DestIndex(itA.row() * Br + itB.row()), - j = DestIndex(itA.col() * Bc + itB.col()); + Index i = itA.row() * Br + itB.row(), + j = itA.col() * Bc + itB.col(); dst.insert(i,j) = itA.value() * itB.value(); } } @@ -210,7 +205,7 @@ struct traits > typedef typename remove_all<_Lhs>::type Lhs; typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; - typedef typename promote_index_type::type Index; + typedef typename promote_index_type::type StorageIndex; enum { Rows = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, @@ -230,7 +225,7 @@ struct traits > typedef typename remove_all<_Rhs>::type Rhs; typedef typename scalar_product_traits::ReturnType Scalar; typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, scalar_product_op >::ret StorageKind; - typedef typename promote_index_type::type Index; + typedef typename promote_index_type::type StorageIndex; enum { LhsFlags = Lhs::Flags, @@ -249,7 +244,7 @@ struct traits > CoeffReadCost = Dynamic }; - typedef SparseMatrix ReturnType; + typedef SparseMatrix ReturnType; }; } // end namespace internal diff --git a/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h index 6d845961e..d92fd0ef1 100644 --- a/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h +++ b/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h @@ -51,7 +51,7 @@ namespace Eigen { * Dynamic : block size known at runtime * a numeric number : fixed-size block known at compile time */ -template class BlockSparseMatrix; +template class BlockSparseMatrix; template class BlockSparseMatrixView; @@ -280,14 +280,14 @@ class BlockSparseTimeDenseProduct BlockSparseTimeDenseProduct& operator=(const BlockSparseTimeDenseProduct&); }; -template -class BlockSparseMatrix : public SparseMatrixBase > +template +class BlockSparseMatrix : public SparseMatrixBase > { public: typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef _Index Index; - typedef typename internal::nested >::type Nested; + typedef _StorageIndex StorageIndex; + typedef typename internal::nested >::type Nested; enum { Options = _Options, @@ -303,7 +303,7 @@ class BlockSparseMatrix : public SparseMatrixBase BlockScalar; typedef Matrix BlockRealScalar; typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType; - typedef BlockSparseMatrix PlainObject; + typedef BlockSparseMatrix PlainObject; public: // Default constructor BlockSparseMatrix() @@ -412,17 +412,17 @@ class BlockSparseMatrix : public SparseMatrixBase nzblocksFlag(m_innerBSize,false); // Record the existing blocks blockPattern.startVec(bj); - for(Index j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j) + for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j) { typename MatrixType::InnerIterator it_spmat(spmat, j); for(; it_spmat; ++it_spmat) { - Index bi = innerToBlock(it_spmat.index()); // Index of the current 
nonzero block + StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block if(!nzblocksFlag[bi]) { // Save the index of this nonzero block @@ -439,21 +439,21 @@ class BlockSparseMatrix : public SparseMatrixBase m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks - Index idxVal;// Get the right position in the array of values for this element + StorageIndex idxVal;// Get the right position in the array of values for this element if(m_blockSize == Dynamic) { // Offset from all blocks before ... @@ -503,8 +503,8 @@ class BlockSparseMatrix : public SparseMatrixBasecol() : it->row(); - Index inner = IsColMajor ? it->row() : it->col(); + StorageIndex outer = IsColMajor ? it->col() : it->row(); + StorageIndex inner = IsColMajor ? it->row() : it->col(); m_indices[block_id(outer)] = inner; - Index block_size = it->value().rows()*it->value().cols(); - Index nz_marker = blockPtr(block_id[outer]); + StorageIndex block_size = it->value().rows()*it->value().cols(); + StorageIndex nz_marker = blockPtr(block_id[outer]); memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar)); if(m_blockSize == Dynamic) { @@ -735,7 +735,7 @@ class BlockSparseMatrix : public SparseMatrixBase coeffRef(Index brow, Index bcol) + Ref coeffRef(StorageIndex brow, StorageIndex bcol) { eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS"); - Index rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol); - Index csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); - Index inner = IsColMajor ? brow : bcol; - Index outer = IsColMajor ? bcol : brow; - Index offset = m_outerIndex[outer]; + StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol); + StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); + StorageIndex inner = IsColMajor ? brow : bcol; + StorageIndex outer = IsColMajor ? bcol : brow; + StorageIndex offset = m_outerIndex[outer]; while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++; if(m_indices[offset] == inner) @@ -829,16 +829,16 @@ class BlockSparseMatrix : public SparseMatrixBase coeff(Index brow, Index bcol) const + Map coeff(StorageIndex brow, StorageIndex bcol) const { eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS"); - Index rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol); - Index csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); - Index inner = IsColMajor ? brow : bcol; - Index outer = IsColMajor ? bcol : brow; - Index offset = m_outerIndex[outer]; + StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol); + StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); + StorageIndex inner = IsColMajor ? brow : bcol; + StorageIndex outer = IsColMajor ? 
bcol : brow; + StorageIndex offset = m_outerIndex[outer]; while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++; if(m_indices[offset] == inner) { @@ -857,23 +857,23 @@ class BlockSparseMatrix : public SparseMatrixBase(m_values);} // inline Scalar *valuePtr(){ return m_values; } - inline Index *innerIndexPtr() {return m_indices; } - inline const Index *innerIndexPtr() const {return m_indices; } - inline Index *outerIndexPtr() {return m_outerIndex; } - inline const Index* outerIndexPtr() const {return m_outerIndex; } + inline StorageIndex *innerIndexPtr() {return m_indices; } + inline const StorageIndex *innerIndexPtr() const {return m_indices; } + inline StorageIndex *outerIndexPtr() {return m_outerIndex; } + inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; } /** \brief for compatibility purposes with the SparseMatrix class */ inline bool isCompressed() const {return true;} /** * \returns the starting index of the bi row block */ - inline Index blockRowsIndex(Index bi) const + inline StorageIndex blockRowsIndex(StorageIndex bi) const { return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi); } @@ -881,26 +881,26 @@ class BlockSparseMatrix : public SparseMatrixBase in the array of values */ - Index blockPtr(Index id) const + StorageIndex blockPtr(Index id) const { if(m_blockSize == Dynamic) return m_blockPtr[id]; else return id * m_blockSize * m_blockSize; @@ -955,21 +955,21 @@ class BlockSparseMatrix : public SparseMatrixBase insert(Index brow, Index bcol); - Index m_innerBSize; // Number of block rows - Index m_outerBSize; // Number of block columns - Index *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1) - Index *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1) - Index m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize) - Index m_nonzeros; // Total nonzeros elements + StorageIndex m_innerBSize; // Number of block rows + StorageIndex m_outerBSize; // Number of block columns + StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1) + StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1) + StorageIndex m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize) + StorageIndex m_nonzeros; // Total nonzeros elements Scalar *m_values; //Values stored block column after block column (size m_nonzeros) - Index *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks - Index *m_indices; //Inner block indices, size m_nonzerosblocks ... OK - Index *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... OK - Index m_blockSize; // Size of a block for fixed-size blocks, otherwise -1 + StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks + StorageIndex *m_indices; //Inner block indices, size m_nonzerosblocks ... OK + StorageIndex *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... 
OK + StorageIndex m_blockSize; // Size of a block for fixed-size blocks, otherwise -1 }; -template -class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::BlockInnerIterator +template +class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator { public: @@ -977,7 +977,7 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::BlockIn Flags = _Options }; - BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer) + BlockInnerIterator(const BlockSparseMatrix& mat, const StorageIndex outer) : m_mat(mat),m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1]) @@ -997,27 +997,27 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::BlockIn rows(),cols()); } // Block inner index - inline Index index() const {return m_mat.m_indices[m_id]; } - inline Index outer() const { return m_outer; } + inline StorageIndex index() const {return m_mat.m_indices[m_id]; } + inline StorageIndex outer() const { return m_outer; } // block row index - inline Index row() const {return index(); } + inline StorageIndex row() const {return index(); } // block column index - inline Index col() const {return outer(); } + inline StorageIndex col() const {return outer(); } // FIXME Number of rows in the current block - inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; } + inline StorageIndex rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; } // Number of columns in the current block ... - inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;} + inline StorageIndex cols() const { return (m_mat.m_blockSize==Dynamic) ? 
(m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;} inline operator bool() const { return (m_id < m_end); } protected: - const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, Index>& m_mat; - const Index m_outer; - Index m_id; - Index m_end; + const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat; + const StorageIndex m_outer; + StorageIndex m_id; + StorageIndex m_end; }; -template -class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::InnerIterator +template +class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator { public: InnerIterator(const BlockSparseMatrix& mat, Index outer) @@ -1055,23 +1055,23 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _Index>::InnerIt { return itb.valueRef().coeff(m_id - m_start, m_offset); } - inline Index index() const { return m_id; } - inline Index outer() const {return m_outer; } - inline Index col() const {return outer(); } - inline Index row() const { return index();} + inline StorageIndex index() const { return m_id; } + inline StorageIndex outer() const {return m_outer; } + inline StorageIndex col() const {return outer(); } + inline StorageIndex row() const { return index();} inline operator bool() const { return itb; } protected: const BlockSparseMatrix& m_mat; - const Index m_outer; - const Index m_outerB; + const StorageIndex m_outer; + const StorageIndex m_outerB; BlockInnerIterator itb; // Iterator through the blocks - const Index m_offset; // Position of this column in the block - Index m_start; // starting inner index of this block - Index m_id; // current inner index in the block - Index m_end; // starting inner index of the next block + const StorageIndex m_offset; // Position of this column in the block + StorageIndex m_start; // starting inner index of this block + StorageIndex m_id; // current inner index in the block + StorageIndex m_end; // starting inner index of the next block }; } // end namespace Eigen diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h index 976f9f270..bedb1dec5 100644 --- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h +++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h @@ -37,7 +37,7 @@ template struct traits > { typedef _Scalar Scalar; - typedef _Index Index; + typedef _Index StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -70,21 +70,21 @@ template protected: - typedef DynamicSparseMatrix TransposedSparseMatrix; + typedef DynamicSparseMatrix TransposedSparseMatrix; - Index m_innerSize; - std::vector > m_data; + StorageIndex m_innerSize; + std::vector > m_data; public: - inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; } - inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); } - inline Index innerSize() const { return m_innerSize; } - inline Index outerSize() const { return static_cast(m_data.size()); } - inline Index innerNonZeros(Index j) const { return m_data[j].size(); } + inline StorageIndex rows() const { return IsRowMajor ? outerSize() : m_innerSize; } + inline StorageIndex cols() const { return IsRowMajor ? 
m_innerSize : outerSize(); } + inline StorageIndex innerSize() const { return m_innerSize; } + inline StorageIndex outerSize() const { return convert_index(m_data.size()); } + inline StorageIndex innerNonZeros(Index j) const { return m_data[j].size(); } - std::vector >& _data() { return m_data; } - const std::vector >& _data() const { return m_data; } + std::vector >& _data() { return m_data; } + const std::vector >& _data() const { return m_data; } /** \returns the coefficient value at given position \a row, \a col * This operation involes a log(rho*outer_size) binary search. @@ -117,11 +117,11 @@ template } /** \returns the number of non zero coefficients */ - Index nonZeros() const + StorageIndex nonZeros() const { - Index res = 0; + StorageIndex res = 0; for (Index j=0; j(m_data[j].size()); + res += convert_index(m_data[j].size()); return res; } @@ -197,7 +197,7 @@ template void resize(Index rows, Index cols) { const Index outerSize = IsRowMajor ? rows : cols; - m_innerSize = IsRowMajor ? cols : rows; + m_innerSize = convert_index(IsRowMajor ? cols : rows); setZero(); if (Index(m_data.size()) != outerSize) { diff --git a/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/unsupported/Eigen/src/SparseExtra/RandomSetter.h index dee1708e7..807ba9d94 100644 --- a/unsupported/Eigen/src/SparseExtra/RandomSetter.h +++ b/unsupported/Eigen/src/SparseExtra/RandomSetter.h @@ -154,7 +154,7 @@ template> OuterPacketBits; // index of the packet/map - const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet + const StorageIndex outer = internal::convert_index(SetterRowMajor ? row : col); + const StorageIndex inner = internal::convert_index(SetterRowMajor ? col : row); + const StorageIndex outerMajor = outer >> OuterPacketBits; // index of the packet/map + const StorageIndex outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet const KeyType key = (KeyType(outerMinor)< { static void run(MatrixType& m, MatrixType& T, const MatrixType& U) { - typedef typename MatrixType::Index Index; const Index size = m.cols(); for (Index i=0; i < size; ++i) { diff --git a/unsupported/test/sparse_extra.cpp b/unsupported/test/sparse_extra.cpp index 1ee791b0f..a010ceb93 100644 --- a/unsupported/test/sparse_extra.cpp +++ b/unsupported/test/sparse_extra.cpp @@ -49,7 +49,6 @@ bool test_random_setter(DynamicSparseMatrix& sm, const DenseType& ref, const template void sparse_extra(const SparseMatrixType& ref) { - typedef typename SparseMatrixType::Index Index; const Index rows = ref.rows(); const Index cols = ref.cols(); typedef typename SparseMatrixType::Scalar Scalar; From fc202bab398ed9b78ef8452f8e4ef35e233965f6 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Fri, 13 Feb 2015 18:57:41 +0100 Subject: [PATCH 2/5] Index refactoring: StorageIndex must be used for storage only (and locally when it makes sense). In all other cases use the global Index type.
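As a minimal sketch of this convention (the SpMat typedef and the countLowerTriangle helper below are hypothetical examples, not code introduced by this series): Eigen::Index is used for sizes, loop counters and everything returned by the public API, while StorageIndex only describes how the indices are physically stored inside the matrix.

    #include <Eigen/SparseCore>

    // Hypothetical example type: a sparse matrix whose indices are stored as 32-bit ints.
    typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SpMat;

    Eigen::Index countLowerTriangle(const SpMat& mat)
    {
      Eigen::Index count = 0;                              // Index for counters and arithmetic
      for (Eigen::Index j = 0; j < mat.outerSize(); ++j)   // sizes are reported as Index
        for (SpMat::InnerIterator it(mat, j); it; ++it)
          if (it.row() >= j)                               // iterator row()/col() also return Index
            ++count;
      // StorageIndex (here: int) only appears where the indices actually live in memory.
      const SpMat::StorageIndex* inner = mat.innerIndexPtr();
      (void)inner;
      return count;
    }

Keeping StorageIndex confined to storage lets a sparse matrix keep compact 32-bit index arrays while all size computations use the wider Eigen::Index, which is what removes the Index conversion warnings targeted by this series.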
--- Eigen/src/Core/BooleanRedux.h | 2 +- Eigen/src/Core/CoreIterators.h | 15 +-- Eigen/src/Core/CwiseBinaryOp.h | 4 +- Eigen/src/Core/DenseBase.h | 15 +-- Eigen/src/Core/Diagonal.h | 24 ++-- Eigen/src/Core/DiagonalMatrix.h | 1 - Eigen/src/Core/EigenBase.h | 19 ++- Eigen/src/Core/Inverse.h | 2 +- Eigen/src/Core/MatrixBase.h | 3 +- Eigen/src/Core/PermutationMatrix.h | 64 +++++----- Eigen/src/Core/Product.h | 5 +- Eigen/src/Core/ProductEvaluators.h | 10 -- Eigen/src/Core/ReturnByValue.h | 4 +- Eigen/src/Core/SelfAdjointView.h | 2 - Eigen/src/Core/Solve.h | 4 +- Eigen/src/Core/Swap.h | 1 - Eigen/src/Core/Transpose.h | 4 +- Eigen/src/Core/TriangularMatrix.h | 12 +- Eigen/src/Geometry/Transform.h | 7 +- Eigen/src/Householder/HouseholderSequence.h | 3 - .../IterativeLinearSolvers/IncompleteLUT.h | 74 ++++++------ Eigen/src/LU/FullPivLU.h | 6 +- Eigen/src/LU/PartialPivLU.h | 2 +- Eigen/src/QR/ColPivHouseholderQR.h | 2 +- Eigen/src/QR/FullPivHouseholderQR.h | 4 +- Eigen/src/QR/HouseholderQR.h | 2 +- Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 6 +- Eigen/src/SVD/SVDBase.h | 1 - Eigen/src/SparseCholesky/SimplicialCholesky.h | 8 +- .../SparseCholesky/SimplicialCholesky_impl.h | 20 ++-- Eigen/src/SparseCore/AmbiVector.h | 4 +- Eigen/src/SparseCore/CompressedStorage.h | 53 +++++---- Eigen/src/SparseCore/SparseBlock.h | 112 +++++++++--------- Eigen/src/SparseCore/SparseColEtree.h | 8 +- Eigen/src/SparseCore/SparseCwiseBinaryOp.h | 32 +++-- Eigen/src/SparseCore/SparseDenseProduct.h | 11 +- Eigen/src/SparseCore/SparseDiagonalProduct.h | 16 ++- Eigen/src/SparseCore/SparseMatrix.h | 24 ++-- Eigen/src/SparseCore/SparseMatrixBase.h | 12 +- Eigen/src/SparseCore/SparseSelfAdjointView.h | 8 +- .../SparseSparseProductWithPruning.h | 6 +- Eigen/src/SparseCore/SparseTranspose.h | 11 +- Eigen/src/SparseCore/SparseTriangularView.h | 19 ++- Eigen/src/SparseCore/SparseVector.h | 40 +++---- Eigen/src/SparseCore/SparseView.h | 20 ++-- Eigen/src/SparseLU/SparseLU.h | 26 ++-- Eigen/src/SparseLU/SparseLUImpl.h | 8 +- Eigen/src/SparseLU/SparseLU_Memory.h | 15 ++- Eigen/src/SparseLU/SparseLU_Structs.h | 3 +- .../src/SparseLU/SparseLU_SupernodalMatrix.h | 18 +-- Eigen/src/SparseLU/SparseLU_Utils.h | 8 +- Eigen/src/SparseLU/SparseLU_column_bmod.h | 5 +- Eigen/src/SparseLU/SparseLU_column_dfs.h | 16 +-- Eigen/src/SparseLU/SparseLU_copy_to_ucol.h | 5 +- Eigen/src/SparseLU/SparseLU_gemm_kernel.h | 2 +- .../src/SparseLU/SparseLU_heap_relax_snode.h | 6 +- Eigen/src/SparseLU/SparseLU_kernel_bmod.h | 11 +- Eigen/src/SparseLU/SparseLU_panel_bmod.h | 4 +- Eigen/src/SparseLU/SparseLU_panel_dfs.h | 14 +-- Eigen/src/SparseLU/SparseLU_pivotL.h | 6 +- Eigen/src/SparseLU/SparseLU_pruneL.h | 5 +- Eigen/src/SparseLU/SparseLU_relax_snode.h | 4 +- Eigen/src/SparseQR/SparseQR.h | 26 ++-- Eigen/src/SuperLUSupport/SuperLUSupport.h | 6 +- Eigen/src/UmfPackSupport/UmfPackSupport.h | 4 +- test/nullary.cpp | 3 - test/sparse_basic.cpp | 4 +- test/sparse_product.cpp | 6 +- .../Eigen/src/IterativeSolvers/GMRES.h | 1 - .../Eigen/src/IterativeSolvers/MINRES.h | 1 - .../KroneckerProduct/KroneckerTensorProduct.h | 5 +- .../Eigen/src/LevenbergMarquardt/LMcovar.h | 1 - .../Eigen/src/LevenbergMarquardt/LMpar.h | 2 +- .../Eigen/src/LevenbergMarquardt/LMqrsolv.h | 2 - .../LevenbergMarquardt/LevenbergMarquardt.h | 3 +- .../Eigen/src/SparseExtra/BlockSparseMatrix.h | 98 +++++++-------- .../src/SparseExtra/DynamicSparseMatrix.h | 50 ++++---- .../Eigen/src/SparseExtra/RandomSetter.h | 8 +- 78 files changed, 514 insertions(+), 564 deletions(-) diff --git 
a/Eigen/src/Core/BooleanRedux.h b/Eigen/src/Core/BooleanRedux.h index dac1887e0..31fbb9214 100644 --- a/Eigen/src/Core/BooleanRedux.h +++ b/Eigen/src/Core/BooleanRedux.h @@ -130,7 +130,7 @@ inline bool DenseBase::any() const * \sa all(), any() */ template -inline typename DenseBase::Index DenseBase::count() const +inline Eigen::Index DenseBase::count() const { return derived().template cast().template cast().sum(); } diff --git a/Eigen/src/Core/CoreIterators.h b/Eigen/src/Core/CoreIterators.h index 141eaa2eb..c76bdf68e 100644 --- a/Eigen/src/Core/CoreIterators.h +++ b/Eigen/src/Core/CoreIterators.h @@ -36,7 +36,6 @@ protected: typedef internal::inner_iterator_selector::Kind> IteratorType; typedef typename internal::evaluator::type EvaluatorType; typedef typename internal::traits::Scalar Scalar; - typedef typename internal::traits::StorageIndex StorageIndex; public: /** Construct an iterator over the \a outerId -th row or column of \a xpr */ InnerIterator(const XprType &xpr, const Index &outerId) @@ -50,11 +49,11 @@ public: */ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; } /// \returns the column or row index of the current coefficient. - EIGEN_STRONG_INLINE StorageIndex index() const { return m_iter.index(); } + EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } /// \returns the row index of the current coefficient. - EIGEN_STRONG_INLINE StorageIndex row() const { return m_iter.row(); } + EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } /// \returns the column index of the current coefficient. - EIGEN_STRONG_INLINE StorageIndex col() const { return m_iter.col(); } + EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } /// \returns \c true if the iterator \c *this still references a valid coefficient. EIGEN_STRONG_INLINE operator bool() const { return m_iter; } @@ -77,7 +76,6 @@ class inner_iterator_selector protected: typedef typename evaluator::type EvaluatorType; typedef typename traits::Scalar Scalar; - typedef typename traits::StorageIndex StorageIndex; enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; public: @@ -93,9 +91,9 @@ public: EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; } - EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } - inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } - inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } + EIGEN_STRONG_INLINE Index index() const { return m_inner; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? 
index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } @@ -115,7 +113,6 @@ class inner_iterator_selector protected: typedef typename evaluator::InnerIterator Base; typedef typename evaluator::type EvaluatorType; - typedef typename traits::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/) diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h index 4d4626279..0c9d9fbf2 100644 --- a/Eigen/src/Core/CwiseBinaryOp.h +++ b/Eigen/src/Core/CwiseBinaryOp.h @@ -111,7 +111,7 @@ class CwiseBinaryOp : } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE StorageIndex rows() const { + EIGEN_STRONG_INLINE Index rows() const { // return the fixed size type if available to enable compile time optimizations if (internal::traits::type>::RowsAtCompileTime==Dynamic) return m_rhs.rows(); @@ -119,7 +119,7 @@ class CwiseBinaryOp : return m_lhs.rows(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE StorageIndex cols() const { + EIGEN_STRONG_INLINE Index cols() const { // return the fixed size type if available to enable compile time optimizations if (internal::traits::type>::ColsAtCompileTime==Dynamic) return m_rhs.cols(); diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h index 628fc61fa..322daad8f 100644 --- a/Eigen/src/Core/DenseBase.h +++ b/Eigen/src/Core/DenseBase.h @@ -58,16 +58,10 @@ template class DenseBase typedef typename internal::traits::StorageKind StorageKind; - /** \brief The interface type of indices - * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. - * \sa \ref TopicPreprocessorDirectives, StorageIndex. - */ - typedef Eigen::Index Index; - /** - * \brief The type used to store indices - * \details This typedef is relevant for types that store multiple indices such as - * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index + * \brief The type used to store indices + * \details This typedef is relevant for types that store multiple indices such as + * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index * \sa \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. */ typedef typename internal::traits::StorageIndex StorageIndex; @@ -76,7 +70,8 @@ template class DenseBase typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; - typedef DenseCoeffsBase Base; + typedef internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real> Base; using Base::derived; using Base::const_cast_derived; using Base::rows; diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h index 18f061179..2446a18d4 100644 --- a/Eigen/src/Core/Diagonal.h +++ b/Eigen/src/Core/Diagonal.h @@ -70,28 +70,28 @@ template class Diagonal EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) EIGEN_DEVICE_FUNC - explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(internal::convert_index(a_index)) {} + explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) EIGEN_DEVICE_FUNC - inline StorageIndex rows() const + inline Index rows() const { - return m_index.value()<0 ? numext::mini(m_matrix.cols(),m_matrix.rows()+m_index.value()) - : numext::mini(m_matrix.rows(),m_matrix.cols()-m_index.value()); + return m_index.value()<0 ? 
numext::mini(m_matrix.cols(),m_matrix.rows()+m_index.value()) + : numext::mini(m_matrix.rows(),m_matrix.cols()-m_index.value()); } EIGEN_DEVICE_FUNC - inline StorageIndex cols() const { return 1; } + inline Index cols() const { return 1; } EIGEN_DEVICE_FUNC - inline StorageIndex innerStride() const + inline Index innerStride() const { return m_matrix.outerStride() + 1; } EIGEN_DEVICE_FUNC - inline StorageIndex outerStride() const + inline Index outerStride() const { return 0; } @@ -153,23 +153,23 @@ template class Diagonal } EIGEN_DEVICE_FUNC - inline StorageIndex index() const + inline Index index() const { return m_index.value(); } protected: typename MatrixType::Nested m_matrix; - const internal::variable_if_dynamicindex m_index; + const internal::variable_if_dynamicindex m_index; private: // some compilers may fail to optimize std::max etc in case of compile-time constants... EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE StorageIndex absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } + EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE StorageIndex rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } + EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC - EIGEN_STRONG_INLINE StorageIndex colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } + EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } // trigger a compile time error is someone try to call packet template typename MatrixType::PacketReturnType packet(Index) const; template typename MatrixType::PacketReturnType packet(Index,Index) const; diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h index 56beaf3bc..5a9e3abd4 100644 --- a/Eigen/src/Core/DiagonalMatrix.h +++ b/Eigen/src/Core/DiagonalMatrix.h @@ -108,7 +108,6 @@ struct traits > { typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; typedef DiagonalShape StorageKind; -// typedef DenseIndex Index; enum { Flags = LvalueBit | NoPreferredStorageOrderBit }; diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h index c98ca467a..79dabda37 100644 --- a/Eigen/src/Core/EigenBase.h +++ b/Eigen/src/Core/EigenBase.h @@ -13,7 +13,9 @@ namespace Eigen { -/** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). +/** \class EigenBase + * + * Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). * * In other words, an EigenBase object is an object that can be copied into a MatrixBase. * @@ -26,9 +28,16 @@ namespace Eigen { template struct EigenBase { // typedef typename internal::plain_matrix_type::type PlainObject; + + /** \brief The interface type of indices + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. + * \deprecated Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead. + * \sa StorageIndex, \ref TopicPreprocessorDirectives. + */ + typedef Eigen::Index Index; + // FIXME is it needed? typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::StorageIndex StorageIndex; /** \returns a reference to the derived object */ EIGEN_DEVICE_FUNC @@ -46,14 +55,14 @@ template struct EigenBase /** \returns the number of rows. 
\sa cols(), RowsAtCompileTime */ EIGEN_DEVICE_FUNC - inline StorageIndex rows() const { return derived().rows(); } + inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ EIGEN_DEVICE_FUNC - inline StorageIndex cols() const { return derived().cols(); } + inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ EIGEN_DEVICE_FUNC - inline StorageIndex size() const { return rows() * cols(); } + inline Index size() const { return rows() * cols(); } /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ template diff --git a/Eigen/src/Core/Inverse.h b/Eigen/src/Core/Inverse.h index f3b0dff87..f3fa82a01 100644 --- a/Eigen/src/Core/Inverse.h +++ b/Eigen/src/Core/Inverse.h @@ -45,7 +45,7 @@ template class Inverse : public InverseImpl::StorageKind> { public: - typedef typename XprType::Index Index; + typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::PlainObject PlainObject; typedef typename internal::nested::type XprTypeNested; typedef typename internal::remove_all::type XprTypeNestedCleaned; diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index 5c00d6a63..ed28b4d07 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -52,8 +52,7 @@ template class MatrixBase #ifndef EIGEN_PARSED_BY_DOXYGEN typedef MatrixBase StorageBaseType; typedef typename internal::traits::StorageKind StorageKind; - typedef Eigen::Index Index; - typedef Index StorageIndex; + typedef typename internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h index 886d59a2c..1da27c06c 100644 --- a/Eigen/src/Core/PermutationMatrix.h +++ b/Eigen/src/Core/PermutationMatrix.h @@ -66,11 +66,10 @@ class PermutationBase : public EigenBase MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = Traits::MaxColsAtCompileTime }; - typedef typename Traits::StorageIndexType StorageIndexType; typedef typename Traits::StorageIndex StorageIndex; - typedef Matrix + typedef Matrix DenseMatrixType; - typedef PermutationMatrix + typedef PermutationMatrix PlainPermutationType; using Base::derived; typedef Transpose TransposeReturnType; @@ -148,7 +147,7 @@ class PermutationBase : public EigenBase /** Sets *this to be the identity permutation matrix */ void setIdentity() { - for(StorageIndexType i = 0; i < size(); ++i) + for(Index i = 0; i < size(); ++i) indices().coeffRef(i) = i; } @@ -174,8 +173,8 @@ class PermutationBase : public EigenBase eigen_assert(i>=0 && j>=0 && i * * \param SizeAtCompileTime the number of rows/cols, or Dynamic * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. - * \param StorageIndexType the integer type of the indices + * \param StorageIndex the integer type of the indices * * This class represents a permutation matrix, internally stored as a vector of integers. 
* @@ -271,19 +270,18 @@ class PermutationBase : public EigenBase */ namespace internal { -template -struct traits > - : traits > +template +struct traits > + : traits > { typedef PermutationStorage StorageKind; - typedef Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType; - typedef typename IndicesType::StorageIndex StorageIndex; - typedef _StorageIndexType StorageIndexType; + typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType; + typedef _StorageIndex StorageIndex; }; } -template -class PermutationMatrix : public PermutationBase > +template +class PermutationMatrix : public PermutationBase > { typedef PermutationBase Base; typedef internal::traits Traits; @@ -293,7 +291,6 @@ class PermutationMatrix : public PermutationBase::highest()); + eigen_internal_assert(size <= NumTraits::highest()); } /** Copy constructor. */ @@ -376,9 +373,9 @@ class PermutationMatrix : public PermutationBase >& other) : m_indices(other.nestedPermutation().size()) { - eigen_internal_assert(m_indices.size() <= NumTraits::highest()); - StorageIndexType end = StorageIndexType(m_indices.size()); - for (StorageIndexType i=0; i::highest()); + StorageIndex end = StorageIndex(m_indices.size()); + for (StorageIndex i=0; i @@ -396,20 +393,19 @@ class PermutationMatrix : public PermutationBase -struct traits,_PacketAccess> > - : traits > +template +struct traits,_PacketAccess> > + : traits > { typedef PermutationStorage StorageKind; - typedef Map, _PacketAccess> IndicesType; - typedef typename IndicesType::StorageIndex StorageIndex; - typedef _StorageIndexType StorageIndexType; + typedef Map, _PacketAccess> IndicesType; + typedef _StorageIndex StorageIndex; }; } -template -class Map,_PacketAccess> - : public PermutationBase,_PacketAccess> > +template +class Map,_PacketAccess> + : public PermutationBase,_PacketAccess> > { typedef PermutationBase Base; typedef internal::traits Traits; @@ -417,15 +413,14 @@ class Map > { typedef PermutationStorage StorageKind; typedef typename _IndicesType::Scalar Scalar; - typedef typename _IndicesType::Scalar StorageIndexType; - typedef typename _IndicesType::StorageIndex StorageIndex; + typedef typename _IndicesType::Scalar StorageIndex; typedef _IndicesType IndicesType; enum { RowsAtCompileTime = _IndicesType::SizeAtCompileTime, diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index 8ff13fbba..74b895792 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -120,8 +120,8 @@ class Product : public ProductImpl<_Lhs,_Rhs,Option, && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } - EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_lhs.rows(); } - EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_rhs.cols(); } + EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); } + EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; } EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; } @@ -149,7 +149,6 @@ class dense_product_base public: using Base::derived; typedef typename Base::Scalar Scalar; - typedef typename Base::StorageIndex StorageIndex; operator const Scalar() const { diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h index 7f9d135f7..d84e7776b 100644 --- a/Eigen/src/Core/ProductEvaluators.h +++ b/Eigen/src/Core/ProductEvaluators.h @@ -373,7 +373,6 @@ struct product_evaluator, 
ProductTag, DenseShape, : evaluator_base > { typedef Product XprType; - typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; @@ -525,7 +524,6 @@ struct product_evaluator, LazyCoeffBasedProduc template struct etor_product_packet_impl { - typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); @@ -536,7 +534,6 @@ struct etor_product_packet_impl struct etor_product_packet_impl { - typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); @@ -547,7 +544,6 @@ struct etor_product_packet_impl struct etor_product_packet_impl { - typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); @@ -557,7 +553,6 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); @@ -567,7 +562,6 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { eigen_assert(innerDim>0 && "you are using a non initialized matrix"); @@ -580,7 +574,6 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - typedef typename Lhs::StorageIndex StorageIndex; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { eigen_assert(innerDim>0 && "you are using a non initialized matrix"); @@ -669,7 +662,6 @@ template { - typedef typename MatrixType::StorageIndex StorageIndex; typedef typename scalar_product_traits::ReturnType Scalar; typedef typename internal::packet_traits::type PacketScalar; public: @@ -734,7 +726,6 @@ struct product_evaluator, ProductTag, DiagonalSha using Base::coeff; using Base::packet_impl; typedef typename Base::Scalar Scalar; - typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PacketScalar PacketScalar; typedef Product XprType; @@ -782,7 +773,6 @@ struct product_evaluator, ProductTag, DenseShape, using Base::coeff; using Base::packet_impl; typedef typename Base::Scalar Scalar; - typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PacketScalar PacketScalar; typedef Product XprType; diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h index d2b80d872..af01a5567 100644 --- a/Eigen/src/Core/ReturnByValue.h +++ b/Eigen/src/Core/ReturnByValue.h @@ -61,8 +61,8 @@ template class ReturnByValue EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { static_cast(this)->evalTo(dst); } - EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return static_cast(this)->rows(); } - EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return 
static_cast(this)->cols(); } + EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast(this)->rows(); } + EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h index 2d5760066..a05746ad2 100644 --- a/Eigen/src/Core/SelfAdjointView.h +++ b/Eigen/src/Core/SelfAdjointView.h @@ -58,7 +58,6 @@ template class SelfAdjointView /** \brief The type of coefficients in this matrix */ typedef typename internal::traits::Scalar Scalar; - typedef typename MatrixType::StorageIndex StorageIndex; enum { @@ -224,7 +223,6 @@ public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; - typedef typename Base::StorageIndex StorageIndex; typedef typename Base::AssignmentTraits AssignmentTraits; diff --git a/Eigen/src/Core/Solve.h b/Eigen/src/Core/Solve.h index 5a3a4235e..47446b49e 100644 --- a/Eigen/src/Core/Solve.h +++ b/Eigen/src/Core/Solve.h @@ -70,8 +70,8 @@ public: : m_dec(dec), m_rhs(rhs) {} - EIGEN_DEVICE_FUNC StorageIndex rows() const { return m_dec.cols(); } - EIGEN_DEVICE_FUNC StorageIndex cols() const { return m_rhs.cols(); } + EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); } + EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; } EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; } diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h index 3d4d8b802..dcb42821f 100644 --- a/Eigen/src/Core/Swap.h +++ b/Eigen/src/Core/Swap.h @@ -28,7 +28,6 @@ protected: public: typedef typename Base::Scalar Scalar; - typedef typename Base::StorageIndex StorageIndex; typedef typename Base::DstXprType DstXprType; typedef swap_assign_op Functor; diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h index 5d60ba149..7e41769a3 100644 --- a/Eigen/src/Core/Transpose.h +++ b/Eigen/src/Core/Transpose.h @@ -64,8 +64,8 @@ template class Transpose EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) - EIGEN_DEVICE_FUNC inline StorageIndex rows() const { return m_matrix.cols(); } - EIGEN_DEVICE_FUNC inline StorageIndex cols() const { return m_matrix.rows(); } + EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); } + EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); } /** \returns the nested expression */ EIGEN_DEVICE_FUNC diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h index d8135be27..fd53ae4cb 100644 --- a/Eigen/src/Core/TriangularMatrix.h +++ b/Eigen/src/Core/TriangularMatrix.h @@ -54,9 +54,9 @@ template class TriangularBase : public EigenBase inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } EIGEN_DEVICE_FUNC - inline StorageIndex rows() const { return derived().rows(); } + inline Index rows() const { return derived().rows(); } EIGEN_DEVICE_FUNC - inline StorageIndex cols() const { return derived().cols(); } + inline Index cols() const { return derived().cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().outerStride(); } EIGEN_DEVICE_FUNC @@ -199,7 +199,6 @@ template class TriangularView public: typedef typename internal::traits::StorageKind StorageKind; - typedef typename 
internal::traits::StorageIndex StorageIndex; typedef typename internal::traits::MatrixTypeNestedCleaned NestedExpression; enum { @@ -222,9 +221,9 @@ template class TriangularView { return Base::operator=(other); } EIGEN_DEVICE_FUNC - inline StorageIndex rows() const { return m_matrix.rows(); } + inline Index rows() const { return m_matrix.rows(); } EIGEN_DEVICE_FUNC - inline StorageIndex cols() const { return m_matrix.cols(); } + inline Index cols() const { return m_matrix.cols(); } EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; } @@ -325,7 +324,6 @@ template class TriangularViewImpl<_Mat using Base::derived; typedef typename internal::traits::StorageKind StorageKind; - typedef typename internal::traits::StorageIndex StorageIndex; enum { Mode = _Mode, @@ -688,7 +686,6 @@ public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; - typedef typename Base::StorageIndex StorageIndex; typedef typename Base::AssignmentTraits AssignmentTraits; @@ -831,7 +828,6 @@ struct triangular_assignment_loop template struct triangular_assignment_loop { - typedef typename Kernel::StorageIndex StorageIndex; typedef typename Kernel::Scalar Scalar; EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index 54842b5ff..34d337c90 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -66,7 +66,7 @@ template struct traits > { typedef _Scalar Scalar; - typedef DenseIndex StorageIndex; + typedef Eigen::Index StorageIndex; typedef Dense StorageKind; enum { Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1, @@ -204,8 +204,9 @@ public: }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; - typedef DenseIndex StorageIndex; - typedef DenseIndex Index; + typedef Eigen::Index StorageIndex; + /** \deprecated */ + typedef Eigen::Index Index; /** type of the matrix used to represent the transformation */ typedef typename internal::make_proper_matrix_type::type MatrixType; /** constified MatrixType */ diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h index bf2bb59ab..74cd0a472 100644 --- a/Eigen/src/Householder/HouseholderSequence.h +++ b/Eigen/src/Householder/HouseholderSequence.h @@ -87,7 +87,6 @@ struct hseq_side_dependent_impl { typedef Block EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - typedef typename VectorsType::StorageIndex StorageIndex; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; @@ -100,7 +99,6 @@ struct hseq_side_dependent_impl { typedef Transpose > EssentialVectorType; typedef HouseholderSequence HouseholderSequenceType; - typedef typename VectorsType::StorageIndex StorageIndex; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; @@ -131,7 +129,6 @@ template class HouseholderS MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime }; typedef typename internal::traits::Scalar Scalar; - typedef typename VectorsType::StorageIndex StorageIndex; typedef HouseholderSequence< typename internal::conditional::IsComplex, diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h index c413e9e1a..6d63d45e4 100644 --- a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +++ 
b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -25,14 +25,14 @@ namespace internal { * \param ind The array of index for the elements in @p row * \param ncut The number of largest elements to keep **/ -template +template Index QuickSplit(VectorV &row, VectorI &ind, Index ncut) { typedef typename VectorV::RealScalar RealScalar; using std::swap; using std::abs; Index mid; - Index n = convert_index(row.size()); /* length of the vector */ + Index n = row.size(); /* length of the vector */ Index first, last ; ncut--; /* to fit the zero-based indices */ @@ -124,9 +124,9 @@ class IncompleteLUT : public SparseSolverBase > compute(mat); } - StorageIndex rows() const { return m_lu.rows(); } + Index rows() const { return m_lu.rows(); } - StorageIndex cols() const { return m_lu.cols(); } + Index cols() const { return m_lu.cols(); } /** \brief Reports whether previous computation was successful. * @@ -239,9 +239,10 @@ void IncompleteLUT::factorize(const _MatrixType& amat) using std::sqrt; using std::swap; using std::abs; + using internal::convert_index; eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); - StorageIndex n = amat.cols(); // Size of the matrix + Index n = amat.cols(); // Size of the matrix m_lu.resize(n,n); // Declare Working vectors and variables Vector u(n) ; // real values of the row -- maximum size is n -- @@ -259,36 +260,36 @@ void IncompleteLUT::factorize(const _MatrixType& amat) u.fill(0); // number of largest elements to keep in each row: - StorageIndex fill_in = static_cast (amat.nonZeros()*m_fillfactor)/n+1; + Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1; if (fill_in > n) fill_in = n; // number of largest nonzero elements to keep in the L and the U part of the current row: - StorageIndex nnzL = fill_in/2; - StorageIndex nnzU = nnzL; + Index nnzL = fill_in/2; + Index nnzU = nnzL; m_lu.reserve(n * (nnzL + nnzU + 1)); // global loop over the rows of the sparse matrix - for (StorageIndex ii = 0; ii < n; ii++) + for (Index ii = 0; ii < n; ii++) { // 1 - copy the lower and the upper part of the row i of mat in the working vector u - StorageIndex sizeu = 1; // number of nonzero elements in the upper part of the current row - StorageIndex sizel = 0; // number of nonzero elements in the lower part of the current row - ju(ii) = ii; + Index sizeu = 1; // number of nonzero elements in the upper part of the current row + Index sizel = 0; // number of nonzero elements in the lower part of the current row + ju(ii) = convert_index(ii); u(ii) = 0; - jr(ii) = ii; + jr(ii) = convert_index(ii); RealScalar rownorm = 0; typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii for (; j_it; ++j_it) { - StorageIndex k = j_it.index(); + Index k = j_it.index(); if (k < ii) { // copy the lower part - ju(sizel) = k; + ju(sizel) = convert_index(k); u(sizel) = j_it.value(); - jr(k) = sizel; + jr(k) = convert_index(sizel); ++sizel; } else if (k == ii) @@ -298,10 +299,10 @@ void IncompleteLUT::factorize(const _MatrixType& amat) else { // copy the upper part - StorageIndex jpos = ii + sizeu; - ju(jpos) = k; + Index jpos = ii + sizeu; + ju(jpos) = convert_index(k); u(jpos) = j_it.value(); - jr(k) = jpos; + jr(k) = convert_index(jpos); ++sizeu; } rownorm += numext::abs2(j_it.value()); @@ -317,21 +318,22 @@ void IncompleteLUT::factorize(const _MatrixType& amat) rownorm = sqrt(rownorm); // 3 - eliminate the previous nonzero rows - StorageIndex jj = 0; - StorageIndex len = 0; + Index jj = 0; + Index len = 0; while (jj < 
sizel) { // In order to eliminate in the correct order, // we must select first the smallest column index among ju(jj:sizel) - StorageIndex k; - StorageIndex minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment + Index k; + Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment k += jj; if (minrow != ju(jj)) { // swap the two locations - StorageIndex j = ju(jj); + Index j = ju(jj); swap(ju(jj), ju(k)); - jr(minrow) = jj; jr(j) = k; + jr(minrow) = convert_index(jj); + jr(j) = convert_index(k); swap(u(jj), u(k)); } // Reset this location @@ -355,11 +357,11 @@ void IncompleteLUT::factorize(const _MatrixType& amat) for (; ki_it; ++ki_it) { Scalar prod = fact * ki_it.value(); - StorageIndex j = ki_it.index(); - StorageIndex jpos = jr(j); + Index j = ki_it.index(); + Index jpos = jr(j); if (jpos == -1) // fill-in element { - StorageIndex newpos; + Index newpos; if (j >= ii) // dealing with the upper part { newpos = ii + sizeu; @@ -372,23 +374,23 @@ void IncompleteLUT::factorize(const _MatrixType& amat) sizel++; eigen_internal_assert(sizel<=ii); } - ju(newpos) = j; + ju(newpos) = convert_index(j); u(newpos) = -prod; - jr(j) = newpos; + jr(j) = convert_index(newpos); } else u(jpos) -= prod; } // store the pivot element - u(len) = fact; - ju(len) = minrow; + u(len) = fact; + ju(len) = convert_index(minrow); ++len; jj++; } // end of the elimination on the row ii // reset the upper part of the pointer jr to zero - for(StorageIndex k = 0; k ::factorize(const _MatrixType& amat) // store the largest m_fill elements of the L part m_lu.startVec(ii); - for(StorageIndex k = 0; k < len; k++) + for(Index k = 0; k < len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); // store the diagonal element @@ -413,7 +415,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) // sort the U-part of the row // apply the dropping rule first len = 0; - for(StorageIndex k = 1; k < sizeu; k++) + for(Index k = 1; k < sizeu; k++) { if(abs(u(ii+k)) > m_droptol * rownorm ) { @@ -429,7 +431,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) internal::QuickSplit(uu, juu, len); // store the largest elements of the U part - for(StorageIndex k = ii + 1; k < ii + len; k++) + for(Index k = ii + 1; k < ii + len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); } diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h index eb4520004..49c8b183d 100644 --- a/Eigen/src/LU/FullPivLU.h +++ b/Eigen/src/LU/FullPivLU.h @@ -66,10 +66,10 @@ template class FullPivLU typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename internal::traits::StorageKind StorageKind; - typedef typename MatrixType::Index Index; + // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; - typedef typename internal::plain_row_type::type IntRowVectorType; - typedef typename internal::plain_col_type::type IntColVectorType; + typedef typename internal::plain_row_type::type IntRowVectorType; + typedef typename internal::plain_col_type::type IntColVectorType; typedef PermutationMatrix PermutationQType; typedef PermutationMatrix PermutationPType; typedef typename MatrixType::PlainObject PlainObject; diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index 7e2c8b471..43c2a716e 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -72,7 +72,7 @@ template class PartialPivLU typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; 
typedef typename internal::traits::StorageKind StorageKind; - typedef typename MatrixType::Index Index; + // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; typedef PermutationMatrix PermutationType; typedef Transpositions TranspositionType; diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h index 0dee139ce..c500529da 100644 --- a/Eigen/src/QR/ColPivHouseholderQR.h +++ b/Eigen/src/QR/ColPivHouseholderQR.h @@ -57,7 +57,7 @@ template class ColPivHouseholderQR }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix MatrixQType; typedef typename internal::plain_diag_type::type HCoeffsType; diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index 90ab25b2b..a7a0d9138 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -66,11 +66,11 @@ template class FullPivHouseholderQR }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; typedef internal::FullPivHouseholderQRMatrixQReturnType MatrixQReturnType; typedef typename internal::plain_diag_type::type HCoeffsType; - typedef Matrix IntDiagSizeVectorType; typedef PermutationMatrix PermutationType; diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h index 5156a6e4b..8f25cf728 100644 --- a/Eigen/src/QR/HouseholderQR.h +++ b/Eigen/src/QR/HouseholderQR.h @@ -53,7 +53,7 @@ template class HouseholderQR }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix MatrixQType; typedef typename internal::plain_diag_type::type HCoeffsType; diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h index 58efa6024..4ad22f8b4 100644 --- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h @@ -236,7 +236,7 @@ class SPQR : public SparseSolverBase > mutable cholmod_sparse *m_H; //The householder vectors mutable StorageIndex *m_HPinv; // The row permutation of H mutable cholmod_dense *m_HTau; // The Householder coefficients - mutable StorageIndex m_rank; // The rank of the matrix + mutable Index m_rank; // The rank of the matrix mutable cholmod_common m_cc; // Workspace and parameters bool m_useDefaultThreshold; // Use default threshold template friend struct SPQR_QProduct; @@ -250,8 +250,8 @@ struct SPQR_QProduct : ReturnByValue > //Define the constructor to get reference to argument types SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {} - inline StorageIndex rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); } - inline StorageIndex cols() const { return m_other.cols(); } + inline Index rows() const { return m_transpose ? 
m_spqr.rows() : m_spqr.cols(); } + inline Index cols() const { return m_other.cols(); } // Assign to a vector template void evalTo(ResType& res) const diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h index 0bc2ede28..95d378da9 100644 --- a/Eigen/src/SVD/SVDBase.h +++ b/Eigen/src/SVD/SVDBase.h @@ -52,7 +52,6 @@ public: typedef typename internal::traits::MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; typedef typename MatrixType::StorageIndex StorageIndex; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h index c47077e50..2580151de 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -92,8 +92,8 @@ class SimplicialCholeskyBase : public SparseSolverBase Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } - inline StorageIndex cols() const { return m_matrix.cols(); } - inline StorageIndex rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } /** \brief Reports whether previous computation was successful. * @@ -108,12 +108,12 @@ class SimplicialCholeskyBase : public SparseSolverBase /** \returns the permutation P * \sa permutationPinv() */ - const PermutationMatrix& permutationP() const + const PermutationMatrix& permutationP() const { return m_P; } /** \returns the inverse P^-1 of the permutation P * \sa permutationP() */ - const PermutationMatrix& permutationPinv() const + const PermutationMatrix& permutationPinv() const { return m_Pinv; } /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization. diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h index a93189df2..9e2e878e0 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h @@ -57,7 +57,7 @@ void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrix ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); - for(StorageIndex k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ @@ -104,7 +104,7 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(ap.rows()==ap.cols()); - const StorageIndex size = ap.rows(); + const Index size = ap.rows(); eigen_assert(m_parent.size()==size); eigen_assert(m_nonZerosPerCol.size()==size); @@ -119,20 +119,20 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& bool ok = true; m_diag.resize(DoLDLT ? 
size : 0); - for(StorageIndex k = 0; k < size; ++k) + for(Index k = 0; k < size; ++k) { // compute nonzero pattern of kth row of L, in topological order y[k] = 0.0; // Y(0:k) is now all zero - StorageIndex top = size; // stack for pattern is empty + Index top = size; // stack for pattern is empty tags[k] = k; // mark node k as visited m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { - StorageIndex i = it.index(); + Index i = it.index(); if(i <= k) { y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ - StorageIndex len; + Index len; for(len = 0; tags[i] != k; i = m_parent[i]) { pattern[len++] = i; /* L(k,i) is nonzero */ @@ -149,7 +149,7 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& y[k] = 0.0; for(; top < size; ++top) { - StorageIndex i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ + Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; @@ -160,8 +160,8 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& else yi = l_ki = yi / Lx[Lp[i]]; - StorageIndex p2 = Lp[i] + m_nonZerosPerCol[i]; - StorageIndex p; + Index p2 = Lp[i] + m_nonZerosPerCol[i]; + Index p; for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) y[Li[p]] -= numext::conj(Lx[p]) * yi; d -= numext::real(l_ki * numext::conj(yi)); @@ -180,7 +180,7 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& } else { - StorageIndex p = Lp[k] + m_nonZerosPerCol[k]++; + Index p = Lp[k] + m_nonZerosPerCol[k]++; Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ if(d <= RealScalar(0)) { ok = false; /* failure, matrix is not positive definite */ diff --git a/Eigen/src/SparseCore/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h index 8bde5d58e..1233e164e 100644 --- a/Eigen/src/SparseCore/AmbiVector.h +++ b/Eigen/src/SparseCore/AmbiVector.h @@ -36,7 +36,7 @@ class AmbiVector void init(double estimatedDensity); void init(int mode); - StorageIndex nonZeros() const; + Index nonZeros() const; /** Specifies a sub-vector to work on */ void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); } @@ -126,7 +126,7 @@ class AmbiVector /** \returns the number of non zeros in the current sub vector */ template -_StorageIndex AmbiVector<_Scalar,_StorageIndex>::nonZeros() const +Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const { if (m_mode==IsSparse) return m_llSize; diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h index 2d4f2bcf8..bba8a104b 100644 --- a/Eigen/src/SparseCore/CompressedStorage.h +++ b/Eigen/src/SparseCore/CompressedStorage.h @@ -36,7 +36,7 @@ class CompressedStorage : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) {} - explicit CompressedStorage(size_t size) + explicit CompressedStorage(Index size) : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) { resize(size); @@ -70,9 +70,9 @@ class CompressedStorage delete[] m_indices; } - void reserve(size_t size) + void reserve(Index size) { - size_t newAllocatedSize = m_size + size; + Index newAllocatedSize = m_size + size; if (newAllocatedSize > m_allocatedSize) reallocate(newAllocatedSize); } @@ -83,13 +83,14 @@ class CompressedStorage reallocate(m_size); } - void resize(size_t size, double reserveSizeFactor = 0) + void resize(Index size, double reserveSizeFactor = 0) { if (m_allocatedSize(i); } - inline size_t size() const { return 
m_size; } - inline size_t allocatedSize() const { return m_allocatedSize; } + inline Index size() const { return m_size; } + inline Index allocatedSize() const { return m_allocatedSize; } inline void clear() { m_size = 0; } - inline Scalar& value(size_t i) { return m_values[i]; } - inline const Scalar& value(size_t i) const { return m_values[i]; } + inline Scalar& value(Index i) { return m_values[i]; } + inline const Scalar& value(Index i) const { return m_values[i]; } - inline StorageIndex& index(size_t i) { return m_indices[i]; } - inline const StorageIndex& index(size_t i) const { return m_indices[i]; } + inline StorageIndex& index(Index i) { return m_indices[i]; } + inline const StorageIndex& index(Index i) const { return m_indices[i]; } /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ - inline StorageIndex searchLowerIndex(Index key) const + inline StorageIndex searchLowerIndex(StorageIndex key) const { return searchLowerIndex(0, m_size, key); } /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ - inline StorageIndex searchLowerIndex(size_t start, size_t end, Index key) const + inline Index searchLowerIndex(Index start, Index end, StorageIndex key) const { while(end>start) { - size_t mid = (end+start)>>1; + Index mid = (end+start)>>1; if (m_indices[mid](start); + return start; } /** \returns the stored value at index \a key @@ -138,12 +139,12 @@ class CompressedStorage return m_values[m_size-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) - const size_t id = searchLowerIndex(0,m_size-1,key); + const Index id = searchLowerIndex(0,m_size-1,key); return ((id=end) return defaultValue; @@ -151,7 +152,7 @@ class CompressedStorage return m_values[end-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) - const size_t id = searchLowerIndex(start,end-1,key); + const Index id = searchLowerIndex(start,end-1,key); return ((id=m_size || m_indices[id]!=key) { if (m_allocatedSize::dummy_precision()) { - size_t k = 0; - size_t n = size(); - for (size_t i=0; i newValues(size); internal::scoped_array newIndices(size); - size_t copySize = (std::min)(size, m_size); + Index copySize = (std::min)(size, m_size); internal::smart_copy(m_values, m_values+copySize, newValues.ptr()); internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr()); std::swap(m_values,newValues.ptr()); @@ -228,8 +229,8 @@ class CompressedStorage protected: Scalar* m_values; StorageIndex* m_indices; - size_t m_size; - size_t m_allocatedSize; + Index m_size; + Index m_allocatedSize; }; diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h index dfdf4314a..5256bf950 100644 --- a/Eigen/src/SparseCore/SparseBlock.h +++ b/Eigen/src/SparseCore/SparseBlock.h @@ -34,14 +34,14 @@ public: : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols)) {} - EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } - StorageIndex nonZeros() const + Index nonZeros() const { typedef typename internal::evaluator::type EvaluatorType; EvaluatorType matEval(m_matrix); - StorageIndex nnz = 0; + Index nnz = 0; Index end = m_outerStart + m_outerSize.value(); for(Index j=m_outerStart; j m_outerSize; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; public: EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) @@ -106,11 +106,11 @@ public: SparseMatrix tmp(other); // 2 - let's check whether there is enough allocated memory - StorageIndex nnz = tmp.nonZeros(); - StorageIndex start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block - StorageIndex end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block - StorageIndex block_size = end - start; // available room in the current block - StorageIndex tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; + Index nnz = tmp.nonZeros(); + Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block + Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block + Index block_size = end - start; // available room in the current block + Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end; Index free_size = m_matrix.isCompressed() ? Index(matrix.data().allocatedSize()) + block_size @@ -192,7 +192,7 @@ public: inline StorageIndex* innerNonZeroPtr() { return isCompressed() ? 0 : m_matrix.const_cast_derived().innerNonZeroPtr(); } - StorageIndex nonZeros() const + Index nonZeros() const { if(m_matrix.isCompressed()) return ( (m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) @@ -215,20 +215,20 @@ public: return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; } - EIGEN_STRONG_INLINE StorageIndex rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - EIGEN_STRONG_INLINE StorageIndex cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - StorageIndex startRow() const { return IsRowMajor ? m_outerStart : 0; } - StorageIndex startCol() const { return IsRowMajor ? 0 : m_outerStart; } - StorageIndex blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } - StorageIndex blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + Index startRow() const { return IsRowMajor ? m_outerStart : 0; } + Index startCol() const { return IsRowMajor ? 0 : m_outerStart; } + Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + Index blockCols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } protected: typename SparseMatrixType::Nested m_matrix; - StorageIndex m_outerStart; - const internal::variable_if_dynamic m_outerSize; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; }; @@ -353,8 +353,8 @@ public: : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols)) {} - inline StorageIndex rows() const { return m_blockRows.value(); } - inline StorageIndex cols() const { return m_blockCols.value(); } + inline Index rows() const { return m_blockRows.value(); } + inline Index cols() const { return m_blockCols.value(); } inline Scalar& coeffRef(Index row, Index col) { @@ -382,10 +382,10 @@ public: } inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; } - StorageIndex startRow() const { return m_startRow.value(); } - StorageIndex startCol() const { return m_startCol.value(); } - StorageIndex blockRows() const { return m_blockRows.value(); } - StorageIndex blockCols() const { return m_blockCols.value(); } + Index startRow() const { return m_startRow.value(); } + Index startCol() const { return m_startCol.value(); } + Index blockRows() const { return m_blockRows.value(); } + Index blockCols() const { return m_blockCols.value(); } protected: friend class internal::GenericSparseBlockInnerIteratorImpl; @@ -394,10 +394,10 @@ public: EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) typename XprType::Nested m_matrix; - const internal::variable_if_dynamic m_startRow; - const internal::variable_if_dynamic m_startCol; - const internal::variable_if_dynamic m_blockRows; - const internal::variable_if_dynamic m_blockCols; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; }; @@ -425,10 +425,10 @@ namespace internal { Base::operator++(); } - inline StorageIndex index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline StorageIndex outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } - inline StorageIndex row() const { return Base::row() - m_block.m_startRow.value(); } - inline StorageIndex col() const { return Base::col() - m_block.m_startCol.value(); } + inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); } + inline Index row() const { return Base::row() - m_block.m_startRow.value(); } + inline Index col() const { return Base::col() - m_block.m_startCol.value(); } inline operator bool() const { return Base::operator bool() && Base::index() < m_end; } }; @@ -445,10 +445,10 @@ namespace internal { typedef typename BlockType::StorageIndex StorageIndex; typedef typename BlockType::Scalar Scalar; const BlockType& m_block; - StorageIndex m_outerPos; - StorageIndex m_innerIndex; + Index m_outerPos; + Index m_innerIndex; Scalar m_value; - StorageIndex m_end; + Index m_end; public: explicit EIGEN_STRONG_INLINE GenericSparseBlockInnerIteratorImpl(const BlockType& block, Index outer = 0) @@ -464,10 +464,10 @@ namespace internal { ++(*this); } - inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? 
m_block.m_startCol.value() : m_block.m_startRow.value()); } - inline StorageIndex outer() const { return 0; } - inline StorageIndex row() const { return IsRowMajor ? 0 : index(); } - inline StorageIndex col() const { return IsRowMajor ? index() : 0; } + inline Index index() const { return m_outerPos - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); } + inline Index outer() const { return 0; } + inline Index row() const { return IsRowMajor ? 0 : index(); } + inline Index col() const { return IsRowMajor ? index() : 0; } inline Scalar value() const { return m_value; } @@ -546,10 +546,10 @@ public: EvalIterator::operator++(); } - inline StorageIndex index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); } - inline StorageIndex outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); } - inline StorageIndex row() const { return EvalIterator::row() - m_block.startRow(); } - inline StorageIndex col() const { return EvalIterator::col() - m_block.startCol(); } + inline Index index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); } + inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); } + inline Index row() const { return EvalIterator::row() - m_block.startRow(); } + inline Index col() const { return EvalIterator::col() - m_block.startCol(); } inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; } }; @@ -558,10 +558,10 @@ template class unary_evaluator, IteratorBased>::OuterVectorInnerIterator { const unary_evaluator& m_eval; - StorageIndex m_outerPos; - StorageIndex m_innerIndex; + Index m_outerPos; + Index m_innerIndex; Scalar m_value; - StorageIndex m_end; + Index m_end; public: EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer) @@ -576,10 +576,10 @@ public: ++(*this); } - inline StorageIndex index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); } - inline StorageIndex outer() const { return 0; } - inline StorageIndex row() const { return IsRowMajor ? 0 : index(); } - inline StorageIndex col() const { return IsRowMajor ? index() : 0; } + inline Index index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); } + inline Index outer() const { return 0; } + inline Index row() const { return IsRowMajor ? 0 : index(); } + inline Index col() const { return IsRowMajor ? 
index() : 0; } inline Scalar value() const { return m_value; } diff --git a/Eigen/src/SparseCore/SparseColEtree.h b/Eigen/src/SparseCore/SparseColEtree.h index 88c799068..28fb2d175 100644 --- a/Eigen/src/SparseCore/SparseColEtree.h +++ b/Eigen/src/SparseCore/SparseColEtree.h @@ -60,7 +60,7 @@ Index etree_find (Index i, IndexVector& pp) template int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0) { - typedef typename MatrixType::StorageIndex Index; + typedef typename MatrixType::StorageIndex StorageIndex; Index nc = mat.cols(); // Number of columns Index m = mat.rows(); Index diagSize = (std::min)(nc,m); @@ -70,7 +70,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl pp.setZero(); // Initialize disjoint sets parent.resize(mat.cols()); //Compute first nonzero column in each row - Index row,col; + StorageIndex row,col; firstRowElt.resize(m); firstRowElt.setConstant(nc); firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); @@ -127,7 +127,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl * Depth-first search from vertex n. No recursion. * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France. */ -template +template void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum) { Index current = n, first, next; @@ -174,7 +174,7 @@ void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector * \param parent Input tree * \param post postordered tree */ -template +template void treePostorder(Index n, IndexVector& parent, IndexVector& post) { IndexVector first_kid, next_kid; // Linked list of children diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h index afb09ad91..3b4e9df59 100644 --- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h @@ -56,7 +56,6 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::StorageIndex StorageIndex; public: @@ -97,9 +96,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_value; } - EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; } - EIGEN_STRONG_INLINE StorageIndex row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } - EIGEN_STRONG_INLINE StorageIndex col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_id; } + EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); } + EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? 
index() : m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } @@ -108,7 +107,7 @@ public: RhsIterator m_rhsIter; const BinaryOp& m_functor; Scalar m_value; - StorageIndex m_id; + Index m_id; }; @@ -145,7 +144,6 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::StorageIndex StorageIndex; public: @@ -177,9 +175,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } - EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } @@ -223,7 +221,6 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::StorageIndex StorageIndex; enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; public: @@ -241,9 +238,9 @@ public: EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } - EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); } - EIGEN_STRONG_INLINE StorageIndex row() const { return m_rhsIter.row(); } - EIGEN_STRONG_INLINE StorageIndex col() const { return m_rhsIter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } @@ -288,7 +285,6 @@ public: class InnerIterator { typedef typename traits::Scalar Scalar; - typedef typename XprType::StorageIndex StorageIndex; enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; public: @@ -307,9 +303,9 @@ public: { return m_functor(m_lhsIter.value(), m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } - EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); } - EIGEN_STRONG_INLINE StorageIndex row() const { return m_lhsIter.row(); } - EIGEN_STRONG_INLINE StorageIndex col() const { return m_lhsIter.col(); } + EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); } EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } @@ -317,7 +313,7 @@ public: LhsIterator m_lhsIter; const RhsEvaluator &m_rhsEval; const BinaryOp& m_functor; - const StorageIndex m_outer; + const Index m_outer; }; diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h index f6e6fab29..edb9d5998 100644 --- a/Eigen/src/SparseCore/SparseDenseProduct.h +++ b/Eigen/src/SparseCore/SparseDenseProduct.h @@ -29,7 +29,6 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const 
DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -62,7 +61,6 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { @@ -86,7 +84,6 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -106,7 +103,6 @@ struct sparse_time_dense_product_impl::type Lhs; typedef typename internal::remove_all::type Rhs; typedef typename internal::remove_all::type Res; - typedef typename Lhs::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { @@ -193,7 +189,6 @@ protected: typedef typename evaluator::type RhsEval; typedef typename evaluator::InnerIterator LhsIterator; typedef typename ProdXprType::Scalar Scalar; - typedef typename ProdXprType::StorageIndex StorageIndex; public: enum { @@ -211,9 +206,9 @@ public: m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits::StorageKind() )) {} - EIGEN_STRONG_INLINE StorageIndex outer() const { return m_outer; } - EIGEN_STRONG_INLINE StorageIndex row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } - EIGEN_STRONG_INLINE StorageIndex col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; } + EIGEN_STRONG_INLINE Index outer() const { return m_outer; } + EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); } + EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; } EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; } EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); } diff --git a/Eigen/src/SparseCore/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h index 19a79edad..b7598c885 100644 --- a/Eigen/src/SparseCore/SparseDiagonalProduct.h +++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h @@ -66,7 +66,6 @@ struct sparse_diagonal_product_evaluator::InnerIterator SparseXprInnerIterator; typedef typename SparseXprType::Scalar Scalar; - typedef typename SparseXprType::StorageIndex StorageIndex; public: class InnerIterator : public SparseXprInnerIterator @@ -96,7 +95,6 @@ template struct sparse_diagonal_product_evaluator { typedef typename SparseXprType::Scalar Scalar; - typedef typename SparseXprType::StorageIndex StorageIndex; typedef CwiseBinaryOp, const typename SparseXprType::ConstInnerVectorReturnType, @@ -111,14 +109,14 @@ struct sparse_diagonal_product_evaluator(outer)) + m_outer(outer) {} - inline Scalar value() const { return m_cwiseIter.value(); } - inline StorageIndex index() const { return convert_index(m_cwiseIter.index()); } - inline StorageIndex outer() const { return m_outer; } - inline StorageIndex col() const { return SparseXprType::IsRowMajor ? 
m_cwiseIter.index() : m_outer; } - inline StorageIndex row() const { return SparseXprType::IsRowMajor ? m_outer : m_cwiseIter.index(); } + inline Scalar value() const { return m_cwiseIter.value(); } + inline Index index() const { return m_cwiseIter.index(); } + inline Index outer() const { return m_outer; } + inline Index col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; } + inline Index row() const { return SparseXprType::IsRowMajor ? m_outer : m_cwiseIter.index(); } EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_cwiseIter; return *this; } @@ -127,7 +125,7 @@ struct sparse_diagonal_product_evaluator TransposedSparseMatrix; - StorageIndex m_outerSize; - StorageIndex m_innerSize; + Index m_outerSize; + Index m_innerSize; StorageIndex* m_outerIndex; StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed Storage m_data; @@ -129,14 +129,14 @@ class SparseMatrix public: /** \returns the number of rows of the matrix */ - inline StorageIndex rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } /** \returns the number of columns of the matrix */ - inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } /** \returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. row major) */ - inline StorageIndex innerSize() const { return m_innerSize; } + inline Index innerSize() const { return m_innerSize; } /** \returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */ - inline StorageIndex outerSize() const { return m_outerSize; } + inline Index outerSize() const { return m_outerSize; } /** \returns a const pointer to the array of values. * This function is aimed at interoperability with other libraries. @@ -253,7 +253,7 @@ class SparseMatrix } /** \returns the number of non zero coefficients */ - inline StorageIndex nonZeros() const + inline Index nonZeros() const { if(m_innerNonZeros) return innerNonZeros().sum(); @@ -299,7 +299,7 @@ class SparseMatrix { if(isCompressed()) { - std::size_t totalReserveSize = 0; + Index totalReserveSize = 0; // turn the matrix into non-compressed mode m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(StorageIndex))); if (!m_innerNonZeros) internal::throw_std_bad_alloc(); @@ -424,7 +424,7 @@ class SparseMatrix { if(isCompressed()) { - StorageIndex size = internal::convert_index(Index(m_data.size())); + StorageIndex size = internal::convert_index(m_data.size()); Index i = m_outerSize; // find the last filled column while (i>=0 && m_outerIndex[i]==0) @@ -605,8 +605,8 @@ class SparseMatrix */ void resize(Index rows, Index cols) { - const StorageIndex outerSize = convert_index(IsRowMajor ? rows : cols); - m_innerSize = convert_index(IsRowMajor ? cols : rows); + const Index outerSize = IsRowMajor ? rows : cols; + m_innerSize = IsRowMajor ? cols : rows; m_data.clear(); if (m_outerSize != outerSize || m_outerSize==0) { @@ -1069,7 +1069,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse { eigen_assert(!isCompressed()); - const StorageIndex outer = convert_index(IsRowMajor ? row : col); + const Index outer = IsRowMajor ? row : col; const StorageIndex inner = convert_index(IsRowMajor ? 
col : row); Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h index c55a6a930..9039ebcec 100644 --- a/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/Eigen/src/SparseCore/SparseMatrixBase.h @@ -144,15 +144,15 @@ template class SparseMatrixBase : public EigenBase #undef EIGEN_CURRENT_STORAGE_BASE_CLASS /** \returns the number of rows. \sa cols() */ - inline StorageIndex rows() const { return derived().rows(); } + inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows() */ - inline StorageIndex cols() const { return derived().cols(); } + inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is \a rows()*cols(). * \sa rows(), cols(). */ - inline StorageIndex size() const { return rows() * cols(); } + inline Index size() const { return rows() * cols(); } /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ - inline StorageIndex nonZeros() const { return derived().nonZeros(); } + inline Index nonZeros() const { return derived().nonZeros(); } /** \returns true if either the number of rows or the number of columns is equal to 1. * In other words, this function returns * \code rows()==1 || cols()==1 \endcode @@ -160,10 +160,10 @@ template class SparseMatrixBase : public EigenBase inline bool isVector() const { return rows()==1 || cols()==1; } /** \returns the size of the storage major dimension, * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ - StorageIndex outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } + Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } /** \returns the size of the inner dimension according to the storage order, * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ - StorageIndex innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } + Index innerSize() const { return (int(Flags)&RowMajorBit) ? 
this->cols() : this->rows(); } bool isRValue() const { return m_isRValue; } Derived& markAsRValue() { m_isRValue = true; return derived(); } diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h index e13f98144..05be8e57c 100644 --- a/Eigen/src/SparseCore/SparseSelfAdjointView.h +++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h @@ -58,8 +58,8 @@ template class SparseSelfAdjointView eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices"); } - inline StorageIndex rows() const { return m_matrix.rows(); } - inline StorageIndex cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } /** \internal \returns a reference to the nested matrix */ const _MatrixTypeNested& matrix() const { return m_matrix; } @@ -530,8 +530,8 @@ class SparseSymmetricPermutationProduct : m_matrix(mat), m_perm(perm) {} - inline StorageIndex rows() const { return m_matrix.rows(); } - inline StorageIndex cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } template void evalTo(SparseMatrix& _dest) const diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index 1384fbbff..3db01bf2d 100644 --- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -25,8 +25,8 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r typedef typename remove_all::type::StorageIndex StorageIndex; // make sure to call innerSize/outerSize since we fake the storage order. - StorageIndex rows = lhs.innerSize(); - StorageIndex cols = rhs.outerSize(); + Index rows = lhs.innerSize(); + Index cols = rhs.outerSize(); //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); @@ -39,7 +39,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // the product of a rhs column with the lhs is X+Y where X is the average number of non zero // per column of the lhs. 
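The SparseMatrix, SparseMatrixBase, and product-with-pruning hunks in this part of the patch all follow the same pattern: sizes, counts, and loop indices become the global signed Eigen::Index, while the third template parameter of SparseMatrix remains the StorageIndex that is actually stored in the index arrays. A minimal usage sketch of that split is below; it is illustrative only (the sizes and values are made up and the snippet is not part of the patch), but it uses only the public API touched by these hunks: nonZeros() and outerSize() return Eigen::Index, the iterator's row()/col()/index() do too, and (A*B).pruned() requests the pruning-based sparse product.

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  // StorageIndex (3rd template parameter) is what gets stored in the index arrays;
  // sizes and counts below are reported as the global signed Eigen::Index.
  typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SpMat;

  SpMat A(3, 3), B(3, 3);
  A.insert(0, 0) = 1.0;  A.insert(1, 2) = 2.0;  A.insert(2, 1) = 3.0;
  B.insert(0, 0) = 4.0;  B.insert(2, 0) = 5.0;  B.insert(1, 1) = 6.0;
  A.makeCompressed();
  B.makeCompressed();

  // Sparse * sparse with pruning: numerical zeros are dropped from the result.
  SpMat C = (A * B).pruned();

  std::cout << "nnz(C) = " << C.nonZeros() << "\n";   // an Eigen::Index

  // Canonical iteration: outerSize() and the iterator's row()/col()/index()
  // are all Eigen::Index after this patch, independent of StorageIndex.
  for (Eigen::Index j = 0; j < C.outerSize(); ++j)
    for (SpMat::InnerIterator it(C, j); it; ++it)
      std::cout << "C(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";

  return 0;
}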
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs) - StorageIndex estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); + Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros(); // mimics a resizeByInnerOuter: if(ResultType::IsRowMajor) diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h index 84413c374..45d9c6700 100644 --- a/Eigen/src/SparseCore/SparseTranspose.h +++ b/Eigen/src/SparseCore/SparseTranspose.h @@ -48,7 +48,7 @@ template class TransposeImpl protected: typedef internal::SparseTransposeImpl Base; public: - inline typename MatrixType::StorageIndex nonZeros() const { return Base::derived().nestedExpression().nonZeros(); } + inline Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); } }; namespace internal { @@ -61,7 +61,6 @@ struct unary_evaluator, IteratorBased> typedef typename evaluator::ReverseInnerIterator EvalReverseIterator; public: typedef Transpose XprType; - typedef typename XprType::StorageIndex StorageIndex; class InnerIterator : public EvalIterator { @@ -70,8 +69,8 @@ struct unary_evaluator, IteratorBased> : EvalIterator(unaryOp.m_argImpl,outer) {} - StorageIndex row() const { return EvalIterator::col(); } - StorageIndex col() const { return EvalIterator::row(); } + Index row() const { return EvalIterator::col(); } + Index col() const { return EvalIterator::row(); } }; class ReverseInnerIterator : public EvalReverseIterator @@ -81,8 +80,8 @@ struct unary_evaluator, IteratorBased> : EvalReverseIterator(unaryOp.m_argImpl,outer) {} - StorageIndex row() const { return EvalReverseIterator::col(); } - StorageIndex col() const { return EvalReverseIterator::row(); } + Index row() const { return EvalReverseIterator::col(); } + Index col() const { return EvalReverseIterator::row(); } }; enum { diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h index 15bdbacb5..b5fbcbdde 100644 --- a/Eigen/src/SparseCore/SparseTriangularView.h +++ b/Eigen/src/SparseCore/SparseTriangularView.h @@ -64,7 +64,6 @@ template class TriangularViewImpl::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator { typedef typename MatrixTypeNestedCleaned::InnerIterator Base; - typedef typename TriangularViewType::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE InnerIterator(const TriangularViewImpl& view, Index outer) @@ -102,9 +101,9 @@ class TriangularViewImpl::InnerIterator : public MatrixT return *this; } - inline StorageIndex row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } - inline StorageIndex col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); } - inline StorageIndex index() const + inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } + inline Index col() const { return (MatrixType::Flags&RowMajorBit ? 
this->index() : Base::outer()); } + inline Index index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); @@ -134,7 +133,6 @@ template class TriangularViewImpl::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator { typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base; - typedef typename TriangularViewImpl::StorageIndex StorageIndex; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const TriangularViewType& view, Index outer) @@ -150,8 +148,8 @@ class TriangularViewImpl::ReverseInnerIterator : public EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } - inline StorageIndex row() const { return Base::row(); } - inline StorageIndex col() const { return Base::col(); } + inline Index row() const { return Base::row(); } + inline Index col() const { return Base::col(); } EIGEN_STRONG_INLINE operator bool() const { @@ -175,7 +173,6 @@ struct unary_evaluator, IteratorBased> protected: typedef typename XprType::Scalar Scalar; - typedef typename XprType::StorageIndex StorageIndex; typedef typename evaluator::InnerIterator EvalIterator; enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit)) @@ -246,9 +243,9 @@ public: } } -// inline StorageIndex row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } -// inline StorageIndex col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); } - inline StorageIndex index() const +// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } +// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); } + inline Index index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); else return Base::index(); diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h index fd70cf2bc..b1cc4df77 100644 --- a/Eigen/src/SparseCore/SparseVector.h +++ b/Eigen/src/SparseCore/SparseVector.h @@ -79,10 +79,10 @@ class SparseVector Options = _Options }; - EIGEN_STRONG_INLINE StorageIndex rows() const { return IsColVector ? m_size : 1; } - EIGEN_STRONG_INLINE StorageIndex cols() const { return IsColVector ? 1 : m_size; } - EIGEN_STRONG_INLINE StorageIndex innerSize() const { return m_size; } - EIGEN_STRONG_INLINE StorageIndex outerSize() const { return 1; } + EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } + EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 
1 : m_size; } + EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } + EIGEN_STRONG_INLINE Index outerSize() const { return 1; } EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); } EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); } @@ -132,7 +132,7 @@ class SparseVector inline void setZero() { m_data.clear(); } /** \returns the number of non zero coefficients */ - inline StorageIndex nonZeros() const { return static_cast(m_data.size()); } + inline Index nonZeros() const { return m_data.size(); } inline void startVec(Index outer) { @@ -213,7 +213,7 @@ class SparseVector void resize(Index newSize) { - m_size = convert_index(newSize); + m_size = newSize; m_data.clear(); } @@ -353,7 +353,7 @@ protected: } Storage m_data; - StorageIndex m_size; + Index m_size; }; template @@ -361,14 +361,14 @@ class SparseVector::InnerIterator { public: explicit InnerIterator(const SparseVector& vec, Index outer=0) - : m_data(vec.m_data), m_id(0), m_end(convert_index(m_data.size())) + : m_data(vec.m_data), m_id(0), m_end(m_data.size()) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } explicit InnerIterator(const internal::CompressedStorage& data) - : m_data(data), m_id(0), m_end(convert_index(m_data.size())) + : m_data(data), m_id(0), m_end(m_data.size()) {} inline InnerIterator& operator++() { m_id++; return *this; } @@ -376,16 +376,16 @@ class SparseVector::InnerIterator inline Scalar value() const { return m_data.value(m_id); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id)); } - inline StorageIndex index() const { return m_data.index(m_id); } - inline StorageIndex row() const { return IsColVector ? index() : 0; } - inline StorageIndex col() const { return IsColVector ? 0 : index(); } + inline Index index() const { return m_data.index(m_id); } + inline Index row() const { return IsColVector ? index() : 0; } + inline Index col() const { return IsColVector ? 0 : index(); } inline operator bool() const { return (m_id < m_end); } protected: const internal::CompressedStorage& m_data; - StorageIndex m_id; - const StorageIndex m_end; + Index m_id; + const Index m_end; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix A; @@ -398,14 +398,14 @@ class SparseVector::ReverseInnerIterator { public: explicit ReverseInnerIterator(const SparseVector& vec, Index outer=0) - : m_data(vec.m_data), m_id(convert_index(m_data.size())), m_start(0) + : m_data(vec.m_data), m_id(m_data.size()), m_start(0) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } explicit ReverseInnerIterator(const internal::CompressedStorage& data) - : m_data(data), m_id(convert_index(m_data.size())), m_start(0) + : m_data(data), m_id(m_data.size()), m_start(0) {} inline ReverseInnerIterator& operator--() { m_id--; return *this; } @@ -413,15 +413,15 @@ class SparseVector::ReverseInnerIterator inline Scalar value() const { return m_data.value(m_id-1); } inline Scalar& valueRef() { return const_cast(m_data.value(m_id-1)); } - inline StorageIndex index() const { return m_data.index(m_id-1); } - inline StorageIndex row() const { return IsColVector ? index() : 0; } - inline StorageIndex col() const { return IsColVector ? 0 : index(); } + inline Index index() const { return m_data.index(m_id-1); } + inline Index row() const { return IsColVector ? index() : 0; } + inline Index col() const { return IsColVector ? 
0 : index(); } inline operator bool() const { return (m_id > m_start); } protected: const internal::CompressedStorage& m_data; - StorageIndex m_id; + Index m_id; const Index m_start; }; diff --git a/Eigen/src/SparseCore/SparseView.h b/Eigen/src/SparseCore/SparseView.h index e26016c16..d6042d970 100644 --- a/Eigen/src/SparseCore/SparseView.h +++ b/Eigen/src/SparseCore/SparseView.h @@ -40,11 +40,11 @@ public: RealScalar m_epsilon = NumTraits::dummy_precision()) : m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {} - inline StorageIndex rows() const { return m_matrix.rows(); } - inline StorageIndex cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } - inline StorageIndex innerSize() const { return m_matrix.innerSize(); } - inline StorageIndex outerSize() const { return m_matrix.outerSize(); } + inline Index innerSize() const { return m_matrix.innerSize(); } + inline Index outerSize() const { return m_matrix.outerSize(); } /** \returns the nested expression */ const typename internal::remove_all::type& @@ -153,17 +153,17 @@ struct unary_evaluator, IndexBased> : m_sve.m_argImpl.coeff(m_inner, m_outer); } - EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } - inline StorageIndex row() const { return IsRowMajor ? m_outer : index(); } - inline StorageIndex col() const { return IsRowMajor ? index() : m_outer; } + EIGEN_STRONG_INLINE Index index() const { return m_inner; } + inline Index row() const { return IsRowMajor ? m_outer : index(); } + inline Index col() const { return IsRowMajor ? index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const unary_evaluator &m_sve; - StorageIndex m_inner; - const StorageIndex m_outer; - const StorageIndex m_end; + Index m_inner; + const Index m_outer; + const Index m_end; private: void incrementToNonZero() diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h index 380ba25c0..f60e4ac9d 100644 --- a/Eigen/src/SparseLU/SparseLU.h +++ b/Eigen/src/SparseLU/SparseLU.h @@ -122,8 +122,8 @@ class SparseLU : public SparseSolverBase >, factorize(matrix); } - inline StorageIndex rows() const { return m_mat.rows(); } - inline StorageIndex cols() const { return m_mat.cols(); } + inline Index rows() const { return m_mat.rows(); } + inline Index cols() const { return m_mat.cols(); } /** Indicate that the pattern of the input matrix is symmetric */ void isSymmetric(bool sym) { @@ -334,10 +334,10 @@ class SparseLU : public SparseSolverBase >, // SparseLU options bool m_symmetricmode; // values for performance - internal::perfvalues m_perfv; + internal::perfvalues m_perfv; RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot - StorageIndex m_nnzL, m_nnzU; // Nonzeros in L and U factors - StorageIndex m_detPermR; // Determinant of the coefficient matrix + Index m_nnzL, m_nnzU; // Nonzeros in L and U factors + Index m_detPermR; // Determinant of the coefficient matrix private: // Disable copy constructor SparseLU (const SparseLU& ); @@ -449,7 +449,7 @@ void SparseLU::factorize(const MatrixType& matrix) eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices"); - typedef typename IndexVector::Scalar Index; + typedef typename IndexVector::Scalar StorageIndex; m_isInitialized = true; @@ -461,11 +461,11 @@ void 
SparseLU::factorize(const MatrixType& matrix) { m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. //Then, permute only the column pointers - const Index * outerIndexPtr; + const StorageIndex * outerIndexPtr; if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr(); else { - Index* outerIndexPtr_t = new Index[matrix.cols()+1]; + StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1]; for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i]; outerIndexPtr = outerIndexPtr_t; } @@ -649,12 +649,11 @@ void SparseLU::factorize(const MatrixType& matrix) template struct SparseLUMatrixLReturnType : internal::no_assignment_operator { - typedef typename MappedSupernodalType::StorageIndex StorageIndex; typedef typename MappedSupernodalType::Scalar Scalar; explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL) { } - StorageIndex rows() { return m_mapL.rows(); } - StorageIndex cols() { return m_mapL.cols(); } + Index rows() { return m_mapL.rows(); } + Index cols() { return m_mapL.cols(); } template void solveInPlace( MatrixBase &X) const { @@ -666,13 +665,12 @@ struct SparseLUMatrixLReturnType : internal::no_assignment_operator template struct SparseLUMatrixUReturnType : internal::no_assignment_operator { - typedef typename MatrixLType::StorageIndex StorageIndex; typedef typename MatrixLType::Scalar Scalar; explicit SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU) : m_mapL(mapL),m_mapU(mapU) { } - StorageIndex rows() { return m_mapL.rows(); } - StorageIndex cols() { return m_mapL.cols(); } + Index rows() { return m_mapL.rows(); } + Index cols() { return m_mapL.cols(); } template void solveInPlace(MatrixBase &X) const { diff --git a/Eigen/src/SparseLU/SparseLUImpl.h b/Eigen/src/SparseLU/SparseLUImpl.h index 14d70897d..e735fd5c8 100644 --- a/Eigen/src/SparseLU/SparseLUImpl.h +++ b/Eigen/src/SparseLU/SparseLUImpl.h @@ -16,17 +16,17 @@ namespace internal { * \class SparseLUImpl * Base class for sparseLU */ -template +template class SparseLUImpl { public: typedef Matrix ScalarVector; - typedef Matrix IndexVector; + typedef Matrix IndexVector; typedef typename ScalarVector::RealScalar RealScalar; typedef Ref > BlockScalarVector; - typedef Ref > BlockIndexVector; + typedef Ref > BlockIndexVector; typedef LU_GlobalLU_t GlobalLU_t; - typedef SparseMatrix MatrixType; + typedef SparseMatrix MatrixType; protected: template diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h index 1ffa7d54e..1cf8bebc7 100644 --- a/Eigen/src/SparseLU/SparseLU_Memory.h +++ b/Eigen/src/SparseLU/SparseLU_Memory.h @@ -36,13 +36,12 @@ namespace internal { enum { LUNoMarker = 3 }; enum {emptyIdxLU = -1}; -template inline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b) { return (std::max)(m, (t+b)*w); } -template< typename Scalar, typename Index> +template< typename Scalar> inline Index LUTempSpace(Index&m, Index& w) { return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar); @@ -59,9 +58,9 @@ inline Index LUTempSpace(Index&m, Index& w) * \param keep_prev 1: use length and do not expand the vector; 0: compute new_len and expand * \param[in,out] num_expansions Number of times the memory has been expanded */ -template +template template -Index SparseLUImpl::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions) +Index SparseLUImpl::expand(VectorType& vec, Index& length, Index 
nbElts, Index keep_prev, Index& num_expansions) { float alpha = 1.5; // Ratio of the memory increase @@ -148,8 +147,8 @@ Index SparseLUImpl::expand(VectorType& vec, Index& length, Index * \return an estimated size of the required memory if lwork = -1; otherwise, return the size of actually allocated memory when allocation failed, and 0 on success * \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation */ -template -Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu) +template +Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu) { Index& num_expansions = glu.num_expansions; //No memory expansions so far num_expansions = 0; @@ -205,9 +204,9 @@ Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lw * \param num_expansions Number of expansions * \return 0 on success, > 0 size of the memory allocated so far */ -template +template template -Index SparseLUImpl::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions) +Index SparseLUImpl::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions) { Index failed_size; if (memtype == USUB) diff --git a/Eigen/src/SparseLU/SparseLU_Structs.h b/Eigen/src/SparseLU/SparseLU_Structs.h index 24d6bf179..cf5ec449b 100644 --- a/Eigen/src/SparseLU/SparseLU_Structs.h +++ b/Eigen/src/SparseLU/SparseLU_Structs.h @@ -75,7 +75,7 @@ typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType; template struct LU_GlobalLU_t { - typedef typename IndexVector::Scalar Index; + typedef typename IndexVector::Scalar StorageIndex; IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping) ScalarVector lusup; // nonzero values of L ordered by columns @@ -93,7 +93,6 @@ struct LU_GlobalLU_t { }; // Values to set for performance -template struct perfvalues { Index panel_size; // a panel consists of at most consecutive columns Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns) diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h index 098763765..f7ffc2d9c 100644 --- a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h @@ -42,7 +42,7 @@ class MappedSuperNodalMatrix { } - MappedSuperNodalMatrix(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col); @@ -58,7 +58,7 @@ class MappedSuperNodalMatrix * FIXME This class will be modified such that it can be use in the course * of the factorization. 
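The SparseLU hunks in this patch keep the solver's public rows(), cols(), and nonzero counts as Eigen::Index while its internal IndexVector, GlobalLU_t, and supernodal storage follow the matrix's StorageIndex. A minimal usage sketch (illustrative data only, not part of the patch; COLAMDOrdering is the usual fill-reducing ordering whose index type matches the matrix's StorageIndex):

#include <Eigen/Sparse>
#include <Eigen/SparseLU>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SpMat;

  SpMat A(3, 3);
  A.insert(0, 0) = 4.0;
  A.insert(0, 2) = 1.0;
  A.insert(1, 1) = 3.0;
  A.insert(2, 2) = 2.0;
  A.makeCompressed();              // SparseLU works on compressed column storage

  Eigen::VectorXd b(3);
  b << 5.0, 3.0, 4.0;

  // The ordering's index type matches the matrix's StorageIndex (int here);
  // rows()/cols() on the solver are plain Eigen::Index after this patch.
  Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > lu;
  lu.analyzePattern(A);            // symbolic step: ordering and elimination tree
  lu.factorize(A);                 // numeric step: supernodal factorization
  if (lu.info() != Eigen::Success) { std::cerr << "factorization failed\n"; return 1; }

  Eigen::VectorXd x = lu.solve(b); // expected solution: (0.75, 1, 2)
  std::cout << x.transpose() << "\n";
  return 0;
}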
*/ - void setInfos(StorageIndex m, StorageIndex n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, + void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col ) { m_row = m; @@ -75,12 +75,12 @@ class MappedSuperNodalMatrix /** * Number of rows */ - StorageIndex rows() { return m_row; } + Index rows() { return m_row; } /** * Number of columns */ - StorageIndex cols() { return m_col; } + Index cols() { return m_col; } /** * Return the array of nonzero values packed by column @@ -148,7 +148,7 @@ class MappedSuperNodalMatrix /** * Return the number of supernodes */ - StorageIndex nsuper() const + Index nsuper() const { return m_nsuper; } @@ -161,9 +161,9 @@ class MappedSuperNodalMatrix protected: - StorageIndex m_row; // Number of rows - StorageIndex m_col; // Number of columns - StorageIndex m_nsuper; // Number of supernodes + Index m_row; // Number of rows + Index m_col; // Number of columns + Index m_nsuper; // Number of supernodes Scalar* m_nzval; //array of nonzero values packed by column StorageIndex* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j StorageIndex* m_rowind; // Array of compressed row indices of rectangular supernodes @@ -184,7 +184,7 @@ class MappedSuperNodalMatrix::InnerIterator public: InnerIterator(const MappedSuperNodalMatrix& mat, Eigen::Index outer) : m_matrix(mat), - m_outer(convert_index(outer)), + m_outer(outer), m_supno(mat.colToSup()[outer]), m_idval(mat.colIndexPtr()[outer]), m_startidval(m_idval), diff --git a/Eigen/src/SparseLU/SparseLU_Utils.h b/Eigen/src/SparseLU/SparseLU_Utils.h index 15352ac33..b48157d9f 100644 --- a/Eigen/src/SparseLU/SparseLU_Utils.h +++ b/Eigen/src/SparseLU/SparseLU_Utils.h @@ -17,8 +17,8 @@ namespace internal { /** * \brief Count Nonzero elements in the factors */ -template -void SparseLUImpl::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu) +template +void SparseLUImpl::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu) { nnzL = 0; nnzU = (glu.xusub)(n); @@ -48,8 +48,8 @@ void SparseLUImpl::countnz(const Index n, Index& nnzL, Index& nnzU * and applies permutation to the remaining subscripts * */ -template -void SparseLUImpl::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu) +template +void SparseLUImpl::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu) { Index fsupc, i, j, k, jstart; diff --git a/Eigen/src/SparseLU/SparseLU_column_bmod.h b/Eigen/src/SparseLU/SparseLU_column_bmod.h index f24bd87d3..bda01dcb3 100644 --- a/Eigen/src/SparseLU/SparseLU_column_bmod.h +++ b/Eigen/src/SparseLU/SparseLU_column_bmod.h @@ -49,8 +49,9 @@ namespace internal { * > 0 - number of bytes allocated when run out of space * */ -template -Index SparseLUImpl::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu) +template +Index SparseLUImpl::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, + BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu) { Index jsupno, k, ksub, krep, ksupno; Index lptr, nrow, isub, irow, nextlu, new_next, ufirst; diff --git a/Eigen/src/SparseLU/SparseLU_column_dfs.h b/Eigen/src/SparseLU/SparseLU_column_dfs.h index 4c04b0e44..17c9e6adb 100644 --- a/Eigen/src/SparseLU/SparseLU_column_dfs.h +++ 
b/Eigen/src/SparseLU/SparseLU_column_dfs.h @@ -30,7 +30,7 @@ #ifndef SPARSELU_COLUMN_DFS_H #define SPARSELU_COLUMN_DFS_H -template class SparseLUImpl; +template class SparseLUImpl; namespace Eigen { namespace internal { @@ -39,8 +39,8 @@ template struct column_dfs_traits : no_assignment_operator { typedef typename ScalarVector::Scalar Scalar; - typedef typename IndexVector::Scalar Index; - column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl::GlobalLU_t& glu, SparseLUImpl& luImpl) + typedef typename IndexVector::Scalar StorageIndex; + column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl::GlobalLU_t& glu, SparseLUImpl& luImpl) : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl) {} bool update_segrep(Index /*krep*/, Index /*jj*/) @@ -57,8 +57,8 @@ struct column_dfs_traits : no_assignment_operator Index m_jcol; Index& m_jsuper_ref; - typename SparseLUImpl::GlobalLU_t& m_glu; - SparseLUImpl& m_luImpl; + typename SparseLUImpl::GlobalLU_t& m_glu; + SparseLUImpl& m_luImpl; }; @@ -89,8 +89,10 @@ struct column_dfs_traits : no_assignment_operator * > 0 number of bytes allocated when run out of space * */ -template -Index SparseLUImpl::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) +template +Index SparseLUImpl::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, + BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, + IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) { Index jsuper = glu.supno(jcol); diff --git a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h index 170610d9f..bf237951d 100644 --- a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +++ b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h @@ -46,8 +46,9 @@ namespace internal { * > 0 - number of bytes allocated when run out of space * */ -template -Index SparseLUImpl::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu) +template +Index SparseLUImpl::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, + BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu) { Index ksub, krep, ksupno; diff --git a/Eigen/src/SparseLU/SparseLU_gemm_kernel.h b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h index 9e4e3e72b..7420b4d17 100644 --- a/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +++ b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h @@ -21,7 +21,7 @@ namespace internal { * - lda and ldc must be multiples of the respective packet size * - C must have the same alignment as A */ -template +template EIGEN_DONT_INLINE void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc) { diff --git a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h index 7a4e4305a..4092f842f 100644 --- a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +++ b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h @@ -42,8 +42,8 @@ namespace internal { * \param descendants Number of descendants of each node in the etree * \param relax_end last column in a supernode */ -template -void SparseLUImpl::heap_relax_snode (const Index n, IndexVector& 
et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) +template +void SparseLUImpl::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) { // The etree may not be postordered, but its heap ordered @@ -75,7 +75,7 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVector& e } // Identify the relaxed supernodes by postorder traversal of the etree Index snode_start; // beginning of a snode - Index k; + StorageIndex k; Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree Index nsuper_et = 0; // Number of relaxed snodes in the original etree Index l; diff --git a/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h index cad149ded..9513f8369 100644 --- a/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +++ b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h @@ -30,13 +30,13 @@ namespace internal { */ template struct LU_kernel_bmod { - template + template static EIGEN_DONT_INLINE void run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros); }; template -template +template EIGEN_DONT_INLINE void LU_kernel_bmod::run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros) { @@ -91,21 +91,22 @@ EIGEN_DONT_INLINE void LU_kernel_bmod::run(const Index seg template <> struct LU_kernel_bmod<1> { - template + template static EIGEN_DONT_INLINE void run(const Index /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros); }; -template +template EIGEN_DONT_INLINE void LU_kernel_bmod<1>::run(const Index /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr, const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros) { typedef typename ScalarVector::Scalar Scalar; + typedef typename IndexVector::Scalar StorageIndex; Scalar f = dense(lsub(lptr + no_zeros)); luptr += lda * no_zeros + no_zeros + 1; const Scalar* a(lusup.data() + luptr); - const /*typename IndexVector::Scalar*/Index* irow(lsub.data()+lptr + no_zeros + 1); + const StorageIndex* irow(lsub.data()+lptr + no_zeros + 1); Index i = 0; for (; i+1 < nrow; i+=2) { diff --git a/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/Eigen/src/SparseLU/SparseLU_panel_bmod.h index da0e0fc3c..bd3cf87b9 100644 --- a/Eigen/src/SparseLU/SparseLU_panel_bmod.h +++ b/Eigen/src/SparseLU/SparseLU_panel_bmod.h @@ -52,8 +52,8 @@ namespace internal { * * */ -template -void SparseLUImpl::panel_bmod(const Index m, const Index w, const Index jcol, +template +void SparseLUImpl::panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu) { diff --git a/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/Eigen/src/SparseLU/SparseLU_panel_dfs.h index dc0054efd..f4a908ee5 100644 --- a/Eigen/src/SparseLU/SparseLU_panel_dfs.h +++ b/Eigen/src/SparseLU/SparseLU_panel_dfs.h @@ -37,8 +37,8 @@ namespace internal { template struct panel_dfs_traits { - typedef typename IndexVector::Scalar Index; - panel_dfs_traits(Index jcol, Index* marker) + typedef typename 
IndexVector::Scalar StorageIndex; + panel_dfs_traits(Index jcol, StorageIndex* marker) : m_jcol(jcol), m_marker(marker) {} bool update_segrep(Index krep, Index jj) @@ -53,13 +53,13 @@ struct panel_dfs_traits void mem_expand(IndexVector& /*glu.lsub*/, Index /*nextl*/, Index /*chmark*/) {} enum { ExpandMem = false }; Index m_jcol; - Index* m_marker; + StorageIndex* m_marker; }; -template +template template -void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& perm_r, +void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& perm_r, Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, Ref repfnz_col, IndexVector& xprune, Ref marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu, @@ -215,8 +215,8 @@ void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& perm_r, * */ -template -void SparseLUImpl::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) +template +void SparseLUImpl::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu) { Index nextl_col; // Next available position in panel_lsub[*,jj] diff --git a/Eigen/src/SparseLU/SparseLU_pivotL.h b/Eigen/src/SparseLU/SparseLU_pivotL.h index 457789c78..01f5ba4e9 100644 --- a/Eigen/src/SparseLU/SparseLU_pivotL.h +++ b/Eigen/src/SparseLU/SparseLU_pivotL.h @@ -56,8 +56,8 @@ namespace internal { * \return 0 if success, i > 0 if U(i,i) is exactly zero * */ -template -Index SparseLUImpl::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu) +template +Index SparseLUImpl::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu) { Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol @@ -67,7 +67,7 @@ Index SparseLUImpl::pivotL(const Index jcol, const RealScalar& dia Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode - Index* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode + StorageIndex* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode // Determine the largest abs numerical value for partial pivoting Index diagind = iperm_c(jcol); // diagonal index diff --git a/Eigen/src/SparseLU/SparseLU_pruneL.h b/Eigen/src/SparseLU/SparseLU_pruneL.h index 66460d168..13133fcc2 100644 --- a/Eigen/src/SparseLU/SparseLU_pruneL.h +++ b/Eigen/src/SparseLU/SparseLU_pruneL.h @@ -49,8 +49,9 @@ namespace internal { * \param glu Global LU data * */ -template -void SparseLUImpl::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu) +template +void SparseLUImpl::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, + const IndexVector& segrep, 
BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu) { // For each supernode-rep irep in U(*,j] Index jsupno = glu.supno(jcol); diff --git a/Eigen/src/SparseLU/SparseLU_relax_snode.h b/Eigen/src/SparseLU/SparseLU_relax_snode.h index 58ec32e27..21c182d56 100644 --- a/Eigen/src/SparseLU/SparseLU_relax_snode.h +++ b/Eigen/src/SparseLU/SparseLU_relax_snode.h @@ -43,8 +43,8 @@ namespace internal { * \param descendants Number of descendants of each node in the etree * \param relax_end last column in a supernode */ -template -void SparseLUImpl::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) +template +void SparseLUImpl::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end) { // compute the number of descendants of each node in the etree diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h index 58bfc1cb4..920b884e5 100644 --- a/Eigen/src/SparseQR/SparseQR.h +++ b/Eigen/src/SparseQR/SparseQR.h @@ -109,11 +109,11 @@ class SparseQR : public SparseSolverBase > /** \returns the number of rows of the represented matrix. */ - inline StorageIndex rows() const { return m_pmat.rows(); } + inline Index rows() const { return m_pmat.rows(); } /** \returns the number of columns of the represented matrix. */ - inline StorageIndex cols() const { return m_pmat.cols();} + inline Index cols() const { return m_pmat.cols();} /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization. */ @@ -123,7 +123,7 @@ class SparseQR : public SparseSolverBase > * * \sa setPivotThreshold() */ - StorageIndex rank() const + Index rank() const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); return m_nonzeropivots; @@ -260,7 +260,7 @@ class SparseQR : public SparseSolverBase > PermutationType m_outputPerm_c; // The final column permutation RealScalar m_threshold; // Threshold to determine null Householder reflections bool m_useDefaultThreshold; // Use default threshold - StorageIndex m_nonzeropivots; // Number of non zero pivots found + Index m_nonzeropivots; // Number of non zero pivots found IndexVector m_etree; // Column elimination tree IndexVector m_firstRowElt; // First element in each row bool m_isQSorted; // whether Q is sorted or not @@ -289,9 +289,9 @@ void SparseQR::analyzePattern(const MatrixType& mat) // Compute the column fill reducing ordering OrderingType ord; ord(matCpy, m_perm_c); - StorageIndex n = mat.cols(); - StorageIndex m = mat.rows(); - StorageIndex diagSize = (std::min)(m,n); + Index n = mat.cols(); + Index m = mat.rows(); + Index diagSize = (std::min)(m,n); if (!m_perm_c.size()) { @@ -327,9 +327,9 @@ void SparseQR::factorize(const MatrixType& mat) using std::abs; eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); - Index m = mat.rows(); - Index n = mat.cols(); - Index diagSize = (std::min)(m,n); + StorageIndex m = mat.rows(); + StorageIndex n = mat.cols(); + StorageIndex diagSize = (std::min)(m,n); IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q @@ -578,7 +578,6 @@ struct SparseQR_QProduct : ReturnByValue struct SparseQRMatrixQReturnType : public EigenBase > { - typedef typename SparseQRType::StorageIndex 
StorageIndex; typedef typename SparseQRType::Scalar Scalar; typedef Matrix DenseMatrix; explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {} @@ -647,8 +645,8 @@ struct SparseQRMatrixQReturnType : public EigenBase(m_qr); } - inline StorageIndex rows() const { return m_qr.rows(); } - inline StorageIndex cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } + inline Index rows() const { return m_qr.rows(); } + inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } // To use for operations with the transpose of Q SparseQRMatrixQTransposeReturnType transpose() const { diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h index 3a9b5fd74..8779eb74c 100644 --- a/Eigen/src/SuperLUSupport/SuperLUSupport.h +++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -313,8 +313,8 @@ class SuperLUBase : public SparseSolverBase clearFactors(); } - inline StorageIndex rows() const { return m_matrix.rows(); } - inline StorageIndex cols() const { return m_matrix.cols(); } + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } /** \returns a reference to the Super LU option object to configure the Super LU algorithms. */ inline superlu_options_t& options() { return m_sluOptions; } @@ -616,7 +616,7 @@ void SuperLU::_solve_impl(const MatrixBase &b, MatrixBase { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); - const StorageIndex size = m_matrix.rows(); + const Index size = m_matrix.rows(); const Index rhsCols = b.cols(); eigen_assert(size==b.rows()); diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h index 47e8b6304..dcbd4ab71 100644 --- a/Eigen/src/UmfPackSupport/UmfPackSupport.h +++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -164,8 +164,8 @@ class UmfPackLU : public SparseSolverBase > if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); } - inline StorageIndex rows() const { return m_copyMatrix.rows(); } - inline StorageIndex cols() const { return m_copyMatrix.cols(); } + inline Index rows() const { return m_copyMatrix.rows(); } + inline Index cols() const { return m_copyMatrix.cols(); } /** \brief Reports whether previous computation was successful. 
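For illustration, here is a minimal sketch of the accessor convention applied above to SparseQR, SuperLUBase and UmfPackLU: dimensions reported through the public API use the global Index type, while StorageIndex is reserved for values that are physically stored in the sparse structure. "SolverSketch" is an invented name, not an Eigen class.

    #include <Eigen/SparseCore>

    // Illustrative skeleton only -- not part of this patch.
    template<typename MatrixType_>
    class SolverSketch
    {
    public:
      // StorageIndex: the integer type stored in the inner/outer index arrays.
      typedef typename MatrixType_::StorageIndex StorageIndex;

      // Sizes exposed through the public API are reported as Eigen::Index:
      Eigen::Index rows() const { return m_matrix.rows(); }
      Eigen::Index cols() const { return m_matrix.cols(); }

    private:
      MatrixType_ m_matrix;
    };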
* diff --git a/test/nullary.cpp b/test/nullary.cpp index 8344855df..2c148e205 100644 --- a/test/nullary.cpp +++ b/test/nullary.cpp @@ -12,7 +12,6 @@ template bool equalsIdentity(const MatrixType& A) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Scalar zero = static_cast(0); @@ -35,7 +34,6 @@ bool equalsIdentity(const MatrixType& A) template void testVectorType(const VectorType& base) { - typedef typename VectorType::Index Index; typedef typename VectorType::Scalar Scalar; const Index size = base.size(); @@ -104,7 +102,6 @@ void testVectorType(const VectorType& base) template void testMatrixType(const MatrixType& m) { - typedef typename MatrixType::Index Index; const Index rows = m.rows(); const Index cols = m.cols(); diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp index 8fd759c93..b06956974 100644 --- a/test/sparse_basic.cpp +++ b/test/sparse_basic.cpp @@ -16,8 +16,8 @@ template void sparse_basic(const SparseMatrixType& re typedef typename SparseMatrixType::StorageIndex StorageIndex; typedef Matrix Vector2; - const StorageIndex rows = ref.rows(); - const StorageIndex cols = ref.cols(); + const Index rows = ref.rows(); + const Index cols = ref.cols(); const Index inner = ref.innerSize(); const Index outer = ref.outerSize(); diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp index b3f653d0e..480a660fc 100644 --- a/test/sparse_product.cpp +++ b/test/sparse_product.cpp @@ -11,7 +11,7 @@ template void sparse_product() { - typedef typename SparseMatrixType::StorageIndex Index; + typedef typename SparseMatrixType::StorageIndex StorageIndex; Index n = 100; const Index rows = internal::random(1,n); const Index cols = internal::random(1,n); @@ -23,8 +23,8 @@ template void sparse_product() typedef Matrix DenseMatrix; typedef Matrix DenseVector; typedef Matrix RowDenseVector; - typedef SparseVector ColSpVector; - typedef SparseVector RowSpVector; + typedef SparseVector ColSpVector; + typedef SparseVector RowSpVector; Scalar s1 = internal::random(); Scalar s2 = internal::random(); diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h index 6e847e110..873f2bf2a 100644 --- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -271,7 +271,6 @@ public: using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h index 2845b9cfd..ea8b73d38 100644 --- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -207,7 +207,6 @@ namespace Eigen { using Base::_solve_impl; typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h index d0b51970d..4406437cc 100644 --- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h +++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -31,7 +31,6 @@ class KroneckerProductBase : public ReturnByValue protected: typedef typename 
Traits::Lhs Lhs; typedef typename Traits::Rhs Rhs; - typedef typename Traits::StorageIndex StorageIndex; public: /*! \brief Constructor. */ @@ -39,8 +38,8 @@ class KroneckerProductBase : public ReturnByValue : m_A(A), m_B(B) {} - inline StorageIndex rows() const { return m_A.rows() * m_B.rows(); } - inline StorageIndex cols() const { return m_A.cols() * m_B.cols(); } + inline Index rows() const { return m_A.rows() * m_B.rows(); } + inline Index cols() const { return m_A.cols() * m_B.cols(); } /*! * This overrides ReturnByValue::coeff because this function is diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h index 32d3ad518..b75bea25f 100644 --- a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h +++ b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h @@ -23,7 +23,6 @@ void covar( Scalar tol = std::sqrt(NumTraits::epsilon()) ) { using std::abs; - typedef DenseIndex Index; /* Local variables */ Index i, j, k, l, ii, jj; bool sing; diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h index 9532042d9..731862341 100644 --- a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h +++ b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h @@ -30,7 +30,7 @@ namespace internal { using std::abs; typedef typename QRSolver::MatrixType MatrixType; typedef typename QRSolver::Scalar Scalar; - typedef typename QRSolver::Index Index; + typedef typename QRSolver::StorageIndex StorageIndex; /* Local variables */ Index j; diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h index db3a0ef2c..ae9d793b1 100644 --- a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h +++ b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h @@ -28,8 +28,6 @@ void lmqrsolv( Matrix &x, Matrix &sdiag) { - typedef typename Matrix::Index Index; - /* Local variables */ Index i, j, k; Scalar temp; diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h index 7cebe4e06..9eca33d04 100644 --- a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h +++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h @@ -115,8 +115,7 @@ class LevenbergMarquardt : internal::no_assignment_operator typedef typename FunctorType::JacobianType JacobianType; typedef typename JacobianType::Scalar Scalar; typedef typename JacobianType::RealScalar RealScalar; - typedef typename JacobianType::Index Index; - typedef typename QRSolver::Index PermIndex; + typedef typename QRSolver::StorageIndex PermIndex; typedef Matrix FVectorType; typedef PermutationMatrix PermutationType; public: diff --git a/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h index d92fd0ef1..8a7e0e57f 100644 --- a/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h +++ b/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h @@ -535,7 +535,7 @@ class BlockSparseMatrix : public SparseMatrixBase coeffRef(StorageIndex brow, StorageIndex bcol) + Ref coeffRef(Index brow, Index bcol) { eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS"); @@ -829,7 +829,7 @@ class BlockSparseMatrix : public SparseMatrixBase coeff(StorageIndex brow, StorageIndex bcol) const + Map coeff(Index brow, Index bcol) const { eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); 
eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS"); @@ -857,9 +857,9 @@ class BlockSparseMatrix : public SparseMatrixBase(m_values);} // inline Scalar *valuePtr(){ return m_values; } @@ -873,7 +873,7 @@ class BlockSparseMatrix : public SparseMatrixBase in the array of values */ - StorageIndex blockPtr(Index id) const + Index blockPtr(Index id) const { if(m_blockSize == Dynamic) return m_blockPtr[id]; else return id * m_blockSize * m_blockSize; @@ -955,17 +955,17 @@ class BlockSparseMatrix : public SparseMatrixBase insert(Index brow, Index bcol); - StorageIndex m_innerBSize; // Number of block rows - StorageIndex m_outerBSize; // Number of block columns + Index m_innerBSize; // Number of block rows + Index m_outerBSize; // Number of block columns StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1) StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1) - StorageIndex m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize) - StorageIndex m_nonzeros; // Total nonzeros elements + Index m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize) + Index m_nonzeros; // Total nonzeros elements Scalar *m_values; //Values stored block column after block column (size m_nonzeros) StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks StorageIndex *m_indices; //Inner block indices, size m_nonzerosblocks ... OK StorageIndex *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... OK - StorageIndex m_blockSize; // Size of a block for fixed-size blocks, otherwise -1 + Index m_blockSize; // Size of a block for fixed-size blocks, otherwise -1 }; template @@ -977,7 +977,7 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>:: Flags = _Options }; - BlockInnerIterator(const BlockSparseMatrix& mat, const StorageIndex outer) + BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer) : m_mat(mat),m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1]) @@ -997,23 +997,23 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>:: rows(),cols()); } // Block inner index - inline StorageIndex index() const {return m_mat.m_indices[m_id]; } - inline StorageIndex outer() const { return m_outer; } + inline Index index() const {return m_mat.m_indices[m_id]; } + inline Index outer() const { return m_outer; } // block row index - inline StorageIndex row() const {return index(); } + inline Index row() const {return index(); } // block column index - inline StorageIndex col() const {return outer(); } + inline Index col() const {return outer(); } // FIXME Number of rows in the current block - inline StorageIndex rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; } + inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; } // Number of columns in the current block ... - inline StorageIndex cols() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;} + inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? 
(m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;} inline operator bool() const { return (m_id < m_end); } protected: const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat; - const StorageIndex m_outer; - StorageIndex m_id; - StorageIndex m_end; + const Index m_outer; + Index m_id; + Index m_end; }; template @@ -1055,23 +1055,23 @@ class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>:: { return itb.valueRef().coeff(m_id - m_start, m_offset); } - inline StorageIndex index() const { return m_id; } - inline StorageIndex outer() const {return m_outer; } - inline StorageIndex col() const {return outer(); } - inline StorageIndex row() const { return index();} + inline Index index() const { return m_id; } + inline Index outer() const {return m_outer; } + inline Index col() const {return outer(); } + inline Index row() const { return index();} inline operator bool() const { return itb; } protected: const BlockSparseMatrix& m_mat; - const StorageIndex m_outer; - const StorageIndex m_outerB; + const Index m_outer; + const Index m_outerB; BlockInnerIterator itb; // Iterator through the blocks - const StorageIndex m_offset; // Position of this column in the block - StorageIndex m_start; // starting inner index of this block - StorageIndex m_id; // current inner index in the block - StorageIndex m_end; // starting inner index of the next block + const Index m_offset; // Position of this column in the block + Index m_start; // starting inner index of this block + Index m_id; // current inner index in the block + Index m_end; // starting inner index of the next block }; } // end namespace Eigen diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h index bedb1dec5..e1284c782 100644 --- a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h +++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h @@ -33,11 +33,11 @@ namespace Eigen { */ namespace internal { -template -struct traits > +template +struct traits > { typedef _Scalar Scalar; - typedef _Index StorageIndex; + typedef _StorageIndex StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { @@ -52,9 +52,9 @@ struct traits > }; } -template +template class DynamicSparseMatrix - : public SparseMatrixBase > + : public SparseMatrixBase > { public: EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix) @@ -72,16 +72,16 @@ template typedef DynamicSparseMatrix TransposedSparseMatrix; - StorageIndex m_innerSize; + Index m_innerSize; std::vector > m_data; public: - inline StorageIndex rows() const { return IsRowMajor ? outerSize() : m_innerSize; } - inline StorageIndex cols() const { return IsRowMajor ? m_innerSize : outerSize(); } - inline StorageIndex innerSize() const { return m_innerSize; } - inline StorageIndex outerSize() const { return convert_index(m_data.size()); } - inline StorageIndex innerNonZeros(Index j) const { return m_data[j].size(); } + inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; } + inline Index cols() const { return IsRowMajor ? 
m_innerSize : outerSize(); } + inline Index innerSize() const { return m_innerSize; } + inline Index outerSize() const { return convert_index(m_data.size()); } + inline Index innerNonZeros(Index j) const { return m_data[j].size(); } std::vector >& _data() { return m_data; } const std::vector >& _data() const { return m_data; } @@ -117,11 +117,11 @@ template } /** \returns the number of non zero coefficients */ - StorageIndex nonZeros() const + Index nonZeros() const { - StorageIndex res = 0; + Index res = 0; for (Index j=0; j # endif }; -template -class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator +template +class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator { - typedef typename SparseVector::InnerIterator Base; + typedef typename SparseVector::InnerIterator Base; public: InnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) @@ -337,10 +337,10 @@ class DynamicSparseMatrix::InnerIterator : public Sparse const Index m_outer; }; -template -class DynamicSparseMatrix::ReverseInnerIterator : public SparseVector::ReverseInnerIterator +template +class DynamicSparseMatrix::ReverseInnerIterator : public SparseVector::ReverseInnerIterator { - typedef typename SparseVector::ReverseInnerIterator Base; + typedef typename SparseVector::ReverseInnerIterator Base; public: ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer) : Base(mat.m_data[outer]), m_outer(outer) @@ -356,13 +356,13 @@ class DynamicSparseMatrix::ReverseInnerIterator : public namespace internal { -template -struct evaluator > - : evaluator_base > +template +struct evaluator > + : evaluator_base > { typedef _Scalar Scalar; - typedef _Index Index; - typedef DynamicSparseMatrix<_Scalar,_Options,_Index> SparseMatrixType; + typedef _StorageIndex Index; + typedef DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType; typedef typename SparseMatrixType::InnerIterator InnerIterator; typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator; diff --git a/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/unsupported/Eigen/src/SparseExtra/RandomSetter.h index 807ba9d94..0b71ed3ad 100644 --- a/unsupported/Eigen/src/SparseExtra/RandomSetter.h +++ b/unsupported/Eigen/src/SparseExtra/RandomSetter.h @@ -292,10 +292,10 @@ class RandomSetter /** \returns a reference to the coefficient at given coordinates \a row, \a col */ Scalar& operator() (Index row, Index col) { - const StorageIndex outer = internal::convert_index(SetterRowMajor ? row : col); - const StorageIndex inner = internal::convert_index(SetterRowMajor ? col : row); - const StorageIndex outerMajor = outer >> OuterPacketBits; // index of the packet/map - const StorageIndex outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet + const Index outer = SetterRowMajor ? row : col; + const Index inner = SetterRowMajor ? 
col : row; + const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map + const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet const KeyType key = (KeyType(outerMinor)< Date: Mon, 16 Feb 2015 13:19:05 +0100 Subject: [PATCH 3/5] Fix many long to int conversion warnings: - fix usage of Index (API) versus StorageIndex (when multiple indexes are stored) - use StorageIndex(val) when the input has already been check - use internal::convert_index(val) when val is potentially unsafe (directly comes from user input) --- Eigen/src/Core/CwiseUnaryOp.h | 4 +- Eigen/src/Core/PermutationMatrix.h | 3 +- Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 12 +- .../ConjugateGradient.h | 10 +- .../IterativeSolverBase.h | 16 +- Eigen/src/OrderingMethods/Amd.h | 74 ++-- Eigen/src/OrderingMethods/Eigen_Colamd.h | 390 +++++++++--------- Eigen/src/OrderingMethods/Ordering.h | 24 +- Eigen/src/PaStiXSupport/PaStiXSupport.h | 6 +- Eigen/src/SparseCholesky/SimplicialCholesky.h | 4 +- .../SparseCholesky/SimplicialCholesky_impl.h | 22 +- Eigen/src/SparseCore/CompressedStorage.h | 8 +- Eigen/src/SparseCore/SparseBlock.h | 4 +- Eigen/src/SparseCore/SparseColEtree.h | 36 +- Eigen/src/SparseCore/SparseMatrix.h | 10 +- Eigen/src/SparseCore/SparsePermutation.h | 2 +- Eigen/src/SparseCore/SparseSelfAdjointView.h | 2 +- Eigen/src/SparseCore/SparseSolverBase.h | 12 +- Eigen/src/SparseCore/SparseVector.h | 4 +- Eigen/src/SparseLU/SparseLU.h | 4 +- Eigen/src/SparseLU/SparseLUImpl.h | 2 +- .../src/SparseLU/SparseLU_SupernodalMatrix.h | 6 +- Eigen/src/SparseLU/SparseLU_Utils.h | 2 +- Eigen/src/SparseLU/SparseLU_column_bmod.h | 2 +- Eigen/src/SparseLU/SparseLU_column_dfs.h | 22 +- Eigen/src/SparseLU/SparseLU_copy_to_ucol.h | 2 +- .../src/SparseLU/SparseLU_heap_relax_snode.h | 15 +- Eigen/src/SparseLU/SparseLU_panel_dfs.h | 32 +- Eigen/src/SparseLU/SparseLU_pivotL.h | 4 +- Eigen/src/SparseLU/SparseLU_pruneL.h | 2 +- Eigen/src/SparseLU/SparseLU_relax_snode.h | 8 +- Eigen/src/SparseQR/SparseQR.h | 10 +- Eigen/src/UmfPackSupport/UmfPackSupport.h | 5 +- test/sparse_basic.cpp | 4 +- test/spqr_support.cpp | 2 +- .../Eigen/src/IterativeSolvers/GMRES.h | 24 +- .../Eigen/src/IterativeSolvers/MINRES.h | 6 +- 37 files changed, 397 insertions(+), 398 deletions(-) diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h index 5388af216..da1d1992d 100644 --- a/Eigen/src/Core/CwiseUnaryOp.h +++ b/Eigen/src/Core/CwiseUnaryOp.h @@ -66,9 +66,9 @@ class CwiseUnaryOp : public CwiseUnaryOpImpl /** Sets *this to be the identity permutation matrix */ void setIdentity() { - for(Index i = 0; i < size(); ++i) + StorageIndex n = StorageIndex(size()); + for(StorageIndex i = 0; i < n; ++i) indices().coeffRef(i) = i; } diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h index a715c7285..e67f09184 100644 --- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -27,7 +27,7 @@ namespace internal { */ template bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, - const Preconditioner& precond, int& iters, + const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; @@ -36,9 +36,9 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, typedef typename Dest::Scalar Scalar; typedef Matrix VectorType; RealScalar tol = tol_error; - int maxIters = iters; + Index maxIters = iters; - int n = mat.cols(); + Index n = mat.cols(); VectorType r = rhs 
- mat * x; VectorType r0 = r; @@ -61,8 +61,8 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, RealScalar tol2 = tol*tol; RealScalar eps2 = NumTraits::epsilon()*NumTraits::epsilon(); - int i = 0; - int restarts = 0; + Index i = 0; + Index restarts = 0; while ( r.squaredNorm()/rhs_sqnorm > tol2 && i EIGEN_DONT_INLINE void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, - const Preconditioner& precond, int& iters, + const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; @@ -36,9 +36,9 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, typedef Matrix VectorType; RealScalar tol = tol_error; - int maxIters = iters; + Index maxIters = iters; - int n = mat.cols(); + Index n = mat.cols(); VectorType residual = rhs - mat * x; //initial residual @@ -64,7 +64,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, VectorType z(n), tmp(n); RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM - int i = 0; + Index i = 0; while(i < maxIters) { tmp.noalias() = mat * p; // the bottleneck of the algorithm @@ -190,7 +190,7 @@ public: m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; - for(int j=0; j tb(size); Eigen::Matrix tx(size); - for(int k=0; k mp_matrix; Preconditioner m_preconditioner; - int m_maxIterations; + Index m_maxIterations; RealScalar m_tolerance; mutable RealScalar m_error; - mutable int m_iterations; + mutable Index m_iterations; mutable ComputationInfo m_info; mutable bool m_analysisIsOk, m_factorizationIsOk; }; diff --git a/Eigen/src/OrderingMethods/Amd.h b/Eigen/src/OrderingMethods/Amd.h index 50022d1ca..3d2981f0c 100644 --- a/Eigen/src/OrderingMethods/Amd.h +++ b/Eigen/src/OrderingMethods/Amd.h @@ -41,10 +41,10 @@ template inline bool amd_marked(const T0* w, const T1& template inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); } /* clear w */ -template -static Index cs_wclear (Index mark, Index lemax, Index *w, Index n) +template +static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n) { - Index k; + StorageIndex k; if(mark < 2 || (mark + lemax < 0)) { for(k = 0; k < n; k++) @@ -56,10 +56,10 @@ static Index cs_wclear (Index mark, Index lemax, Index *w, Index n) } /* depth-first search and postorder of a tree rooted at node j */ -template -Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack) +template +StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack) { - Index i, p, top = 0; + StorageIndex i, p, top = 0; if(!head || !next || !post || !stack) return (-1); /* check inputs */ stack[0] = j; /* place j on the stack */ while (top >= 0) /* while (stack is not empty) */ @@ -87,41 +87,39 @@ Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Ind * \returns the permutation P reducing the fill-in of the input matrix \a C * The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional. 
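The commit message of patch 3 above distinguishes three idioms for handling index types; the sketch below puts them side by side. "conversion_idioms" and "user_value" are invented names used only for illustration.

    #include <Eigen/SparseCore>

    // Sketch only -- not part of this patch.
    template<typename SparseMatrixType>
    void conversion_idioms(const SparseMatrixType& mat, Eigen::Index user_value)
    {
      typedef typename SparseMatrixType::StorageIndex StorageIndex;

      // 1. Sizes and counters that stay at the API level remain Eigen::Index:
      Eigen::Index n = mat.cols();

      // 2. A value already known to fit in StorageIndex (here bounded by the
      //    matrix size) is narrowed with a plain functional cast:
      StorageIndex ncols = StorageIndex(mat.cols());

      // 3. A value coming directly from user input goes through
      //    internal::convert_index, the checked narrowing helper:
      StorageIndex checked = Eigen::internal::convert_index<StorageIndex>(user_value);

      EIGEN_UNUSED_VARIABLE(n);
      EIGEN_UNUSED_VARIABLE(ncols);
      EIGEN_UNUSED_VARIABLE(checked);
    }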
* On exit the values of C are destroyed */ -template -void minimum_degree_ordering(SparseMatrix& C, PermutationMatrix& perm) +template +void minimum_degree_ordering(SparseMatrix& C, PermutationMatrix& perm) { using std::sqrt; - Index d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, - k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, - ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t; + StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1, + k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi, + ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h; - std::size_t h; - - Index n = C.cols(); - dense = std::max (16, Index(10 * sqrt(double(n)))); /* find dense threshold */ - dense = std::min (n-2, dense); + StorageIndex n = StorageIndex(C.cols()); + dense = std::max (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */ + dense = (std::min)(n-2, dense); - Index cnz = C.nonZeros(); + StorageIndex cnz = StorageIndex(C.nonZeros()); perm.resize(n+1); t = cnz + cnz/5 + 2*n; /* add elbow room to C */ C.resizeNonZeros(t); // get workspace - ei_declare_aligned_stack_constructed_variable(Index,W,8*(n+1),0); - Index* len = W; - Index* nv = W + (n+1); - Index* next = W + 2*(n+1); - Index* head = W + 3*(n+1); - Index* elen = W + 4*(n+1); - Index* degree = W + 5*(n+1); - Index* w = W + 6*(n+1); - Index* hhead = W + 7*(n+1); - Index* last = perm.indices().data(); /* use P as workspace for last */ + ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0); + StorageIndex* len = W; + StorageIndex* nv = W + (n+1); + StorageIndex* next = W + 2*(n+1); + StorageIndex* head = W + 3*(n+1); + StorageIndex* elen = W + 4*(n+1); + StorageIndex* degree = W + 5*(n+1); + StorageIndex* w = W + 6*(n+1); + StorageIndex* hhead = W + 7*(n+1); + StorageIndex* last = perm.indices().data(); /* use P as workspace for last */ /* --- Initialize quotient graph ---------------------------------------- */ - Index* Cp = C.outerIndexPtr(); - Index* Ci = C.innerIndexPtr(); + StorageIndex* Cp = C.outerIndexPtr(); + StorageIndex* Ci = C.innerIndexPtr(); for(k = 0; k < n; k++) len[k] = Cp[k+1] - Cp[k]; len[n] = 0; @@ -138,7 +136,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation elen[i] = 0; // Ek of node i is empty degree[i] = len[i]; // degree of node i } - mark = internal::cs_wclear(0, 0, w, n); /* clear w */ + mark = internal::cs_wclear(0, 0, w, n); /* clear w */ elen[n] = -2; /* n is a dead element */ Cp[n] = -1; /* n is a root of assembly tree */ w[n] = 0; /* n is a dead element */ @@ -253,7 +251,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation elen[k] = -2; /* k is now an element */ /* --- Find set differences ----------------------------------------- */ - mark = internal::cs_wclear(mark, lemax, w, n); /* clear w if necessary */ + mark = internal::cs_wclear(mark, lemax, w, n); /* clear w if necessary */ for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */ { i = Ci[pk]; @@ -323,7 +321,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } else { - degree[i] = std::min (degree[i], d); /* update degree(i) */ + degree[i] = std::min (degree[i], d); /* update degree(i) */ Ci[pn] = Ci[p3]; /* move first node to end */ Ci[p3] = Ci[p1]; /* move 1st el. 
to end of Ei */ Ci[p1] = k; /* add k as 1st element in of Ei */ @@ -331,12 +329,12 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation h %= n; /* finalize hash of i */ next[i] = hhead[h]; /* place i in hash bucket */ hhead[h] = i; - last[i] = Index(h); /* save hash of i in last[i] */ + last[i] = h; /* save hash of i in last[i] */ } } /* scan2 is done */ degree[k] = dk; /* finalize |Lk| */ - lemax = std::max(lemax, dk); - mark = internal::cs_wclear(mark+lemax, lemax, w, n); /* clear w */ + lemax = std::max(lemax, dk); + mark = internal::cs_wclear(mark+lemax, lemax, w, n); /* clear w */ /* --- Supernode detection ------------------------------------------ */ for(pk = pk1; pk < pk2; pk++) @@ -384,12 +382,12 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */ nv[i] = nvi; /* restore nv[i] */ d = degree[i] + dk - nvi; /* compute external degree(i) */ - d = std::min (d, n - nel - nvi); + d = std::min (d, n - nel - nvi); if(head[d] != -1) last[head[d]] = i; next[i] = head[d]; /* put i back in degree list */ last[i] = -1; head[d] = i; - mindeg = std::min (mindeg, d); /* find new minimum degree */ + mindeg = std::min (mindeg, d); /* find new minimum degree */ degree[i] = d; Ci[p++] = i; /* place i in Lk */ } @@ -422,7 +420,7 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */ { - if(Cp[i] == -1) k = internal::cs_tdfs(i, k, head, next, perm.indices().data(), w); + if(Cp[i] == -1) k = internal::cs_tdfs(i, k, head, next, perm.indices().data(), w); } perm.indices().conservativeResize(n); diff --git a/Eigen/src/OrderingMethods/Eigen_Colamd.h b/Eigen/src/OrderingMethods/Eigen_Colamd.h index 44548f660..6238676e5 100644 --- a/Eigen/src/OrderingMethods/Eigen_Colamd.h +++ b/Eigen/src/OrderingMethods/Eigen_Colamd.h @@ -135,54 +135,54 @@ namespace internal { /* ========================================================================== */ // == Row and Column structures == -template +template struct colamd_col { - Index start ; /* index for A of first row in this column, or DEAD */ + IndexType start ; /* index for A of first row in this column, or DEAD */ /* if column is dead */ - Index length ; /* number of rows in this column */ + IndexType length ; /* number of rows in this column */ union { - Index thickness ; /* number of original columns represented by this */ + IndexType thickness ; /* number of original columns represented by this */ /* col, if the column is alive */ - Index parent ; /* parent in parent tree super-column structure, if */ + IndexType parent ; /* parent in parent tree super-column structure, if */ /* the column is dead */ } shared1 ; union { - Index score ; /* the score used to maintain heap, if col is alive */ - Index order ; /* pivot ordering of this column, if col is dead */ + IndexType score ; /* the score used to maintain heap, if col is alive */ + IndexType order ; /* pivot ordering of this column, if col is dead */ } shared2 ; union { - Index headhash ; /* head of a hash bucket, if col is at the head of */ + IndexType headhash ; /* head of a hash bucket, if col is at the head of */ /* a degree list */ - Index hash ; /* hash value, if col is not in a degree list */ - Index prev ; /* previous column in degree list, if col is in a */ + IndexType hash ; /* hash value, if col is not in a degree list */ + IndexType prev ; /* previous column in degree list, if col is in a */ /* degree list (but not at the head of a degree list) */ } shared3 
; union { - Index degree_next ; /* next column, if col is in a degree list */ - Index hash_next ; /* next column, if col is in a hash list */ + IndexType degree_next ; /* next column, if col is in a degree list */ + IndexType hash_next ; /* next column, if col is in a hash list */ } shared4 ; }; -template +template struct Colamd_Row { - Index start ; /* index for A of first col in this row */ - Index length ; /* number of principal columns in this row */ + IndexType start ; /* index for A of first col in this row */ + IndexType length ; /* number of principal columns in this row */ union { - Index degree ; /* number of principal & non-principal columns in row */ - Index p ; /* used as a row pointer in init_rows_cols () */ + IndexType degree ; /* number of principal & non-principal columns in row */ + IndexType p ; /* used as a row pointer in init_rows_cols () */ } shared1 ; union { - Index mark ; /* for computing set differences and marking dead rows*/ - Index first_column ;/* first column in row (used in garbage collection) */ + IndexType mark ; /* for computing set differences and marking dead rows*/ + IndexType first_column ;/* first column in row (used in garbage collection) */ } shared2 ; }; @@ -202,38 +202,38 @@ struct Colamd_Row This macro is not needed when using symamd. - Explicit typecast to Index added Sept. 23, 2002, COLAMD version 2.2, to avoid + Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid gcc -pedantic warning messages. */ -template -inline Index colamd_c(Index n_col) -{ return Index( ((n_col) + 1) * sizeof (colamd_col) / sizeof (Index) ) ; } +template +inline IndexType colamd_c(IndexType n_col) +{ return IndexType( ((n_col) + 1) * sizeof (colamd_col) / sizeof (IndexType) ) ; } -template -inline Index colamd_r(Index n_row) -{ return Index(((n_row) + 1) * sizeof (Colamd_Row) / sizeof (Index)); } +template +inline IndexType colamd_r(IndexType n_row) +{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row) / sizeof (IndexType)); } // Prototypes of non-user callable routines -template -static Index init_rows_cols (Index n_row, Index n_col, Colamd_Row Row [], colamd_col col [], Index A [], Index p [], Index stats[COLAMD_STATS] ); +template +static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row Row [], colamd_col col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] ); -template -static void init_scoring (Index n_row, Index n_col, Colamd_Row Row [], colamd_col Col [], Index A [], Index head [], double knobs[COLAMD_KNOBS], Index *p_n_row2, Index *p_n_col2, Index *p_max_deg); +template +static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row Row [], colamd_col Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg); -template -static Index find_ordering (Index n_row, Index n_col, Index Alen, Colamd_Row Row [], colamd_col Col [], Index A [], Index head [], Index n_col2, Index max_deg, Index pfree); +template +static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row Row [], colamd_col Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree); -template -static void order_children (Index n_col, colamd_col Col [], Index p []); +template +static void order_children (IndexType n_col, colamd_col Col [], IndexType p []); -template -static void detect_super_cols (colamd_col Col [], Index A [], Index head [], Index row_start, Index row_length ) ; 
+template +static void detect_super_cols (colamd_col Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ; -template -static Index garbage_collection (Index n_row, Index n_col, Colamd_Row Row [], colamd_col Col [], Index A [], Index *pfree) ; +template +static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row Row [], colamd_col Col [], IndexType A [], IndexType *pfree) ; -template -static inline Index clear_mark (Index n_row, Colamd_Row Row [] ) ; +template +static inline IndexType clear_mark (IndexType n_row, Colamd_Row Row [] ) ; /* === No debugging ========================================================= */ @@ -260,8 +260,8 @@ static inline Index clear_mark (Index n_row, Colamd_Row Row [] ) ; * \param n_col number of columns in A * \return recommended value of Alen for use by colamd */ -template -inline Index colamd_recommended ( Index nnz, Index n_row, Index n_col) +template +inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col) { if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0) return (-1); @@ -325,22 +325,22 @@ static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS]) * \param knobs parameter settings for colamd * \param stats colamd output statistics and error codes */ -template -static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, double knobs[COLAMD_KNOBS], Index stats[COLAMD_STATS]) +template +static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS]) { /* === Local variables ================================================== */ - Index i ; /* loop index */ - Index nnz ; /* nonzeros in A */ - Index Row_size ; /* size of Row [], in integers */ - Index Col_size ; /* size of Col [], in integers */ - Index need ; /* minimum required length of A */ - Colamd_Row *Row ; /* pointer into A of Row [0..n_row] array */ - colamd_col *Col ; /* pointer into A of Col [0..n_col] array */ - Index n_col2 ; /* number of non-dense, non-empty columns */ - Index n_row2 ; /* number of non-dense, non-empty rows */ - Index ngarbage ; /* number of garbage collections performed */ - Index max_deg ; /* maximum row degree */ + IndexType i ; /* loop index */ + IndexType nnz ; /* nonzeros in A */ + IndexType Row_size ; /* size of Row [], in integers */ + IndexType Col_size ; /* size of Col [], in integers */ + IndexType need ; /* minimum required length of A */ + Colamd_Row *Row ; /* pointer into A of Row [0..n_row] array */ + colamd_col *Col ; /* pointer into A of Col [0..n_col] array */ + IndexType n_col2 ; /* number of non-dense, non-empty columns */ + IndexType n_row2 ; /* number of non-dense, non-empty rows */ + IndexType ngarbage ; /* number of garbage collections performed */ + IndexType max_deg ; /* maximum row degree */ double default_knobs [COLAMD_KNOBS] ; /* default knobs array */ @@ -431,8 +431,8 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou } Alen -= Col_size + Row_size ; - Col = (colamd_col *) &A [Alen] ; - Row = (Colamd_Row *) &A [Alen + Col_size] ; + Col = (colamd_col *) &A [Alen] ; + Row = (Colamd_Row *) &A [Alen + Col_size] ; /* === Construct the row and column data structures ===================== */ @@ -485,29 +485,29 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou column form of the matrix. Returns false if the matrix is invalid, true otherwise. Not user-callable. 
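The colamd routines stay templated on their integer type, but the parameter is renamed from Index to IndexType; inside namespace Eigen a template parameter spelled Index would shadow the new global Eigen::Index typedef. A simplified toy sketch of that shadowing concern follows; "colamd_like" is an invented name and the typedef shown is only a stand-in for what patch 1 actually introduces.

    #include <cstddef>

    // Toy illustration only -- not Eigen code.
    namespace Eigen {
      typedef std::ptrdiff_t Index;          // simplified stand-in for the global typedef

      namespace internal {
        template<typename IndexType>         // was "template<typename Index>" before this patch
        IndexType colamd_like(IndexType n)
        {
          Index total = 0;                   // unambiguously the wide Eigen::Index
          for(IndexType k = 0; k < n; ++k)   // loop variables stay in the caller's integer type
            total += k;
          return IndexType(total);           // narrowing is now explicit and easy to audit
        }
      }
    }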
*/ -template -static Index init_rows_cols /* returns true if OK, or false otherwise */ +template +static IndexType init_rows_cols /* returns true if OK, or false otherwise */ ( /* === Parameters ======================================================= */ - Index n_row, /* number of rows of A */ - Index n_col, /* number of columns of A */ - Colamd_Row Row [], /* of size n_row+1 */ - colamd_col Col [], /* of size n_col+1 */ - Index A [], /* row indices of A, of size Alen */ - Index p [], /* pointers to columns in A, of size n_col+1 */ - Index stats [COLAMD_STATS] /* colamd statistics */ + IndexType n_row, /* number of rows of A */ + IndexType n_col, /* number of columns of A */ + Colamd_Row Row [], /* of size n_row+1 */ + colamd_col Col [], /* of size n_col+1 */ + IndexType A [], /* row indices of A, of size Alen */ + IndexType p [], /* pointers to columns in A, of size n_col+1 */ + IndexType stats [COLAMD_STATS] /* colamd statistics */ ) { /* === Local variables ================================================== */ - Index col ; /* a column index */ - Index row ; /* a row index */ - Index *cp ; /* a column pointer */ - Index *cp_end ; /* a pointer to the end of a column */ - Index *rp ; /* a row pointer */ - Index *rp_end ; /* a pointer to the end of a row */ - Index last_row ; /* previous row */ + IndexType col ; /* a column index */ + IndexType row ; /* a row index */ + IndexType *cp ; /* a column pointer */ + IndexType *cp_end ; /* a pointer to the end of a column */ + IndexType *rp ; /* a row pointer */ + IndexType *rp_end ; /* a pointer to the end of a row */ + IndexType last_row ; /* previous row */ /* === Initialize columns, and check column pointers ==================== */ @@ -701,40 +701,40 @@ static Index init_rows_cols /* returns true if OK, or false otherwise */ Kills dense or empty columns and rows, calculates an initial score for each column, and places all columns in the degree lists. Not user-callable. 
*/ -template +template static void init_scoring ( /* === Parameters ======================================================= */ - Index n_row, /* number of rows of A */ - Index n_col, /* number of columns of A */ - Colamd_Row Row [], /* of size n_row+1 */ - colamd_col Col [], /* of size n_col+1 */ - Index A [], /* column form and row form of A */ - Index head [], /* of size n_col+1 */ + IndexType n_row, /* number of rows of A */ + IndexType n_col, /* number of columns of A */ + Colamd_Row Row [], /* of size n_row+1 */ + colamd_col Col [], /* of size n_col+1 */ + IndexType A [], /* column form and row form of A */ + IndexType head [], /* of size n_col+1 */ double knobs [COLAMD_KNOBS],/* parameters */ - Index *p_n_row2, /* number of non-dense, non-empty rows */ - Index *p_n_col2, /* number of non-dense, non-empty columns */ - Index *p_max_deg /* maximum row degree */ + IndexType *p_n_row2, /* number of non-dense, non-empty rows */ + IndexType *p_n_col2, /* number of non-dense, non-empty columns */ + IndexType *p_max_deg /* maximum row degree */ ) { /* === Local variables ================================================== */ - Index c ; /* a column index */ - Index r, row ; /* a row index */ - Index *cp ; /* a column pointer */ - Index deg ; /* degree of a row or column */ - Index *cp_end ; /* a pointer to the end of a column */ - Index *new_cp ; /* new column pointer */ - Index col_length ; /* length of pruned column */ - Index score ; /* current column score */ - Index n_col2 ; /* number of non-dense, non-empty columns */ - Index n_row2 ; /* number of non-dense, non-empty rows */ - Index dense_row_count ; /* remove rows with more entries than this */ - Index dense_col_count ; /* remove cols with more entries than this */ - Index min_score ; /* smallest column score */ - Index max_deg ; /* maximum row degree */ - Index next_col ; /* Used to add to degree list.*/ + IndexType c ; /* a column index */ + IndexType r, row ; /* a row index */ + IndexType *cp ; /* a column pointer */ + IndexType deg ; /* degree of a row or column */ + IndexType *cp_end ; /* a pointer to the end of a column */ + IndexType *new_cp ; /* new column pointer */ + IndexType col_length ; /* length of pruned column */ + IndexType score ; /* current column score */ + IndexType n_col2 ; /* number of non-dense, non-empty columns */ + IndexType n_row2 ; /* number of non-dense, non-empty rows */ + IndexType dense_row_count ; /* remove rows with more entries than this */ + IndexType dense_col_count ; /* remove cols with more entries than this */ + IndexType min_score ; /* smallest column score */ + IndexType max_deg ; /* maximum row degree */ + IndexType next_col ; /* Used to add to degree list.*/ /* === Extract knobs ==================================================== */ @@ -845,7 +845,7 @@ static void init_scoring score = COLAMD_MIN (score, n_col) ; } /* determine pruned column length */ - col_length = (Index) (new_cp - &A [Col [c].start]) ; + col_length = (IndexType) (new_cp - &A [Col [c].start]) ; if (col_length == 0) { /* a newly-made null column (all rows in this col are "dense" */ @@ -938,56 +938,56 @@ static void init_scoring (no supercolumns on input). Uses a minimum approximate column minimum degree ordering method. Not user-callable. 
*/ -template -static Index find_ordering /* return the number of garbage collections */ +template +static IndexType find_ordering /* return the number of garbage collections */ ( /* === Parameters ======================================================= */ - Index n_row, /* number of rows of A */ - Index n_col, /* number of columns of A */ - Index Alen, /* size of A, 2*nnz + n_col or larger */ - Colamd_Row Row [], /* of size n_row+1 */ - colamd_col Col [], /* of size n_col+1 */ - Index A [], /* column form and row form of A */ - Index head [], /* of size n_col+1 */ - Index n_col2, /* Remaining columns to order */ - Index max_deg, /* Maximum row degree */ - Index pfree /* index of first free slot (2*nnz on entry) */ + IndexType n_row, /* number of rows of A */ + IndexType n_col, /* number of columns of A */ + IndexType Alen, /* size of A, 2*nnz + n_col or larger */ + Colamd_Row Row [], /* of size n_row+1 */ + colamd_col Col [], /* of size n_col+1 */ + IndexType A [], /* column form and row form of A */ + IndexType head [], /* of size n_col+1 */ + IndexType n_col2, /* Remaining columns to order */ + IndexType max_deg, /* Maximum row degree */ + IndexType pfree /* index of first free slot (2*nnz on entry) */ ) { /* === Local variables ================================================== */ - Index k ; /* current pivot ordering step */ - Index pivot_col ; /* current pivot column */ - Index *cp ; /* a column pointer */ - Index *rp ; /* a row pointer */ - Index pivot_row ; /* current pivot row */ - Index *new_cp ; /* modified column pointer */ - Index *new_rp ; /* modified row pointer */ - Index pivot_row_start ; /* pointer to start of pivot row */ - Index pivot_row_degree ; /* number of columns in pivot row */ - Index pivot_row_length ; /* number of supercolumns in pivot row */ - Index pivot_col_score ; /* score of pivot column */ - Index needed_memory ; /* free space needed for pivot row */ - Index *cp_end ; /* pointer to the end of a column */ - Index *rp_end ; /* pointer to the end of a row */ - Index row ; /* a row index */ - Index col ; /* a column index */ - Index max_score ; /* maximum possible score */ - Index cur_score ; /* score of current column */ + IndexType k ; /* current pivot ordering step */ + IndexType pivot_col ; /* current pivot column */ + IndexType *cp ; /* a column pointer */ + IndexType *rp ; /* a row pointer */ + IndexType pivot_row ; /* current pivot row */ + IndexType *new_cp ; /* modified column pointer */ + IndexType *new_rp ; /* modified row pointer */ + IndexType pivot_row_start ; /* pointer to start of pivot row */ + IndexType pivot_row_degree ; /* number of columns in pivot row */ + IndexType pivot_row_length ; /* number of supercolumns in pivot row */ + IndexType pivot_col_score ; /* score of pivot column */ + IndexType needed_memory ; /* free space needed for pivot row */ + IndexType *cp_end ; /* pointer to the end of a column */ + IndexType *rp_end ; /* pointer to the end of a row */ + IndexType row ; /* a row index */ + IndexType col ; /* a column index */ + IndexType max_score ; /* maximum possible score */ + IndexType cur_score ; /* score of current column */ unsigned int hash ; /* hash value for supernode detection */ - Index head_column ; /* head of hash bucket */ - Index first_col ; /* first column in hash bucket */ - Index tag_mark ; /* marker value for mark array */ - Index row_mark ; /* Row [row].shared2.mark */ - Index set_difference ; /* set difference size of row with pivot row */ - Index min_score ; /* smallest column score */ - Index col_thickness 
; /* "thickness" (no. of columns in a supercol) */ - Index max_mark ; /* maximum value of tag_mark */ - Index pivot_col_thickness ; /* number of columns represented by pivot col */ - Index prev_col ; /* Used by Dlist operations. */ - Index next_col ; /* Used by Dlist operations. */ - Index ngarbage ; /* number of garbage collections performed */ + IndexType head_column ; /* head of hash bucket */ + IndexType first_col ; /* first column in hash bucket */ + IndexType tag_mark ; /* marker value for mark array */ + IndexType row_mark ; /* Row [row].shared2.mark */ + IndexType set_difference ; /* set difference size of row with pivot row */ + IndexType min_score ; /* smallest column score */ + IndexType col_thickness ; /* "thickness" (no. of columns in a supercol) */ + IndexType max_mark ; /* maximum value of tag_mark */ + IndexType pivot_col_thickness ; /* number of columns represented by pivot col */ + IndexType prev_col ; /* Used by Dlist operations. */ + IndexType next_col ; /* Used by Dlist operations. */ + IndexType ngarbage ; /* number of garbage collections performed */ /* === Initialization and clear mark ==================================== */ @@ -1277,7 +1277,7 @@ static Index find_ordering /* return the number of garbage collections */ } /* recompute the column's length */ - Col [col].length = (Index) (new_cp - &A [Col [col].start]) ; + Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ; /* === Further mass elimination ================================= */ @@ -1325,7 +1325,7 @@ static Index find_ordering /* return the number of garbage collections */ Col [col].shared4.hash_next = first_col ; /* save hash function in Col [col].shared3.hash */ - Col [col].shared3.hash = (Index) hash ; + Col [col].shared3.hash = (IndexType) hash ; COLAMD_ASSERT (COL_IS_ALIVE (col)) ; } } @@ -1420,7 +1420,7 @@ static Index find_ordering /* return the number of garbage collections */ /* update pivot row length to reflect any cols that were killed */ /* during super-col detection and mass elimination */ Row [pivot_row].start = pivot_row_start ; - Row [pivot_row].length = (Index) (new_rp - &A[pivot_row_start]) ; + Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ; Row [pivot_row].shared1.degree = pivot_row_degree ; Row [pivot_row].shared2.mark = 0 ; /* pivot row is no longer dead */ @@ -1449,22 +1449,22 @@ static Index find_ordering /* return the number of garbage collections */ taken by this routine is O (n_col), that is, linear in the number of columns. Not user-callable. */ -template +template static inline void order_children ( /* === Parameters ======================================================= */ - Index n_col, /* number of columns of A */ - colamd_col Col [], /* of size n_col+1 */ - Index p [] /* p [0 ... n_col-1] is the column permutation*/ + IndexType n_col, /* number of columns of A */ + colamd_col Col [], /* of size n_col+1 */ + IndexType p [] /* p [0 ... 
n_col-1] is the column permutation*/ ) { /* === Local variables ================================================== */ - Index i ; /* loop counter for all columns */ - Index c ; /* column index */ - Index parent ; /* index of column's parent */ - Index order ; /* column's order */ + IndexType i ; /* loop counter for all columns */ + IndexType c ; /* column index */ + IndexType parent ; /* index of column's parent */ + IndexType order ; /* column's order */ /* === Order each non-principal column ================================== */ @@ -1550,33 +1550,33 @@ static inline void order_children just been computed in the approximate degree computation. Not user-callable. */ -template +template static void detect_super_cols ( /* === Parameters ======================================================= */ - colamd_col Col [], /* of size n_col+1 */ - Index A [], /* row indices of A */ - Index head [], /* head of degree lists and hash buckets */ - Index row_start, /* pointer to set of columns to check */ - Index row_length /* number of columns to check */ + colamd_col Col [], /* of size n_col+1 */ + IndexType A [], /* row indices of A */ + IndexType head [], /* head of degree lists and hash buckets */ + IndexType row_start, /* pointer to set of columns to check */ + IndexType row_length /* number of columns to check */ ) { /* === Local variables ================================================== */ - Index hash ; /* hash value for a column */ - Index *rp ; /* pointer to a row */ - Index c ; /* a column index */ - Index super_c ; /* column index of the column to absorb into */ - Index *cp1 ; /* column pointer for column super_c */ - Index *cp2 ; /* column pointer for column c */ - Index length ; /* length of column super_c */ - Index prev_c ; /* column preceding c in hash bucket */ - Index i ; /* loop counter */ - Index *rp_end ; /* pointer to the end of the row */ - Index col ; /* a column index in the row to check */ - Index head_column ; /* first column in hash bucket or degree list */ - Index first_col ; /* first column in hash bucket */ + IndexType hash ; /* hash value for a column */ + IndexType *rp ; /* pointer to a row */ + IndexType c ; /* a column index */ + IndexType super_c ; /* column index of the column to absorb into */ + IndexType *cp1 ; /* column pointer for column super_c */ + IndexType *cp2 ; /* column pointer for column c */ + IndexType length ; /* length of column super_c */ + IndexType prev_c ; /* column preceding c in hash bucket */ + IndexType i ; /* loop counter */ + IndexType *rp_end ; /* pointer to the end of the row */ + IndexType col ; /* a column index in the row to check */ + IndexType head_column ; /* first column in hash bucket or degree list */ + IndexType first_col ; /* first column in hash bucket */ /* === Consider each column in the row ================================== */ @@ -1701,27 +1701,27 @@ static void detect_super_cols itself linear in the number of nonzeros in the input matrix. Not user-callable. */ -template -static Index garbage_collection /* returns the new value of pfree */ +template +static IndexType garbage_collection /* returns the new value of pfree */ ( /* === Parameters ======================================================= */ - Index n_row, /* number of rows */ - Index n_col, /* number of columns */ - Colamd_Row Row [], /* row info */ - colamd_col Col [], /* column info */ - Index A [], /* A [0 ... Alen-1] holds the matrix */ - Index *pfree /* &A [0] ... 
pfree is in use */ + IndexType n_row, /* number of rows */ + IndexType n_col, /* number of columns */ + Colamd_Row Row [], /* row info */ + colamd_col Col [], /* column info */ + IndexType A [], /* A [0 ... Alen-1] holds the matrix */ + IndexType *pfree /* &A [0] ... pfree is in use */ ) { /* === Local variables ================================================== */ - Index *psrc ; /* source pointer */ - Index *pdest ; /* destination pointer */ - Index j ; /* counter */ - Index r ; /* a row index */ - Index c ; /* a column index */ - Index length ; /* length of a row or column */ + IndexType *psrc ; /* source pointer */ + IndexType *pdest ; /* destination pointer */ + IndexType j ; /* counter */ + IndexType r ; /* a row index */ + IndexType c ; /* a column index */ + IndexType length ; /* length of a row or column */ /* === Defragment the columns =========================================== */ @@ -1734,7 +1734,7 @@ static Index garbage_collection /* returns the new value of pfree */ /* move and compact the column */ COLAMD_ASSERT (pdest <= psrc) ; - Col [c].start = (Index) (pdest - &A [0]) ; + Col [c].start = (IndexType) (pdest - &A [0]) ; length = Col [c].length ; for (j = 0 ; j < length ; j++) { @@ -1744,7 +1744,7 @@ static Index garbage_collection /* returns the new value of pfree */ *pdest++ = r ; } } - Col [c].length = (Index) (pdest - &A [Col [c].start]) ; + Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ; } } @@ -1791,7 +1791,7 @@ static Index garbage_collection /* returns the new value of pfree */ /* move and compact the row */ COLAMD_ASSERT (pdest <= psrc) ; - Row [r].start = (Index) (pdest - &A [0]) ; + Row [r].start = (IndexType) (pdest - &A [0]) ; length = Row [r].length ; for (j = 0 ; j < length ; j++) { @@ -1801,7 +1801,7 @@ static Index garbage_collection /* returns the new value of pfree */ *pdest++ = c ; } } - Row [r].length = (Index) (pdest - &A [Row [r].start]) ; + Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ; } } @@ -1810,7 +1810,7 @@ static Index garbage_collection /* returns the new value of pfree */ /* === Return the new value of pfree ==================================== */ - return ((Index) (pdest - &A [0])) ; + return ((IndexType) (pdest - &A [0])) ; } @@ -1822,18 +1822,18 @@ static Index garbage_collection /* returns the new value of pfree */ Clears the Row [].shared2.mark array, and returns the new tag_mark. Return value is the new tag_mark. Not user-callable. */ -template -static inline Index clear_mark /* return the new value for tag_mark */ +template +static inline IndexType clear_mark /* return the new value for tag_mark */ ( /* === Parameters ======================================================= */ - Index n_row, /* number of rows in A */ - Colamd_Row Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */ + IndexType n_row, /* number of rows in A */ + Colamd_Row Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */ ) { /* === Local variables ================================================== */ - Index r ; + IndexType r ; for (r = 0 ; r < n_row ; r++) { diff --git a/Eigen/src/OrderingMethods/Ordering.h b/Eigen/src/OrderingMethods/Ordering.h index f3c31f9cb..e88e637a4 100644 --- a/Eigen/src/OrderingMethods/Ordering.h +++ b/Eigen/src/OrderingMethods/Ordering.h @@ -111,12 +111,12 @@ class NaturalOrdering * Functor computing the \em column \em approximate \em minimum \em degree ordering * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()). 
*/ -template +template class COLAMDOrdering { public: - typedef PermutationMatrix PermutationType; - typedef Matrix IndexVector; + typedef PermutationMatrix PermutationType; + typedef Matrix IndexVector; /** Compute the permutation vector \a perm form the sparse matrix \a mat * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). @@ -126,26 +126,26 @@ class COLAMDOrdering { eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering"); - Index m = mat.rows(); - Index n = mat.cols(); - Index nnz = mat.nonZeros(); + StorageIndex m = StorageIndex(mat.rows()); + StorageIndex n = StorageIndex(mat.cols()); + StorageIndex nnz = StorageIndex(mat.nonZeros()); // Get the recommended value of Alen to be used by colamd - Index Alen = internal::colamd_recommended(nnz, m, n); + StorageIndex Alen = internal::colamd_recommended(nnz, m, n); // Set the default parameters double knobs [COLAMD_KNOBS]; - Index stats [COLAMD_STATS]; + StorageIndex stats [COLAMD_STATS]; internal::colamd_set_defaults(knobs); IndexVector p(n+1), A(Alen); - for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i]; - for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i]; + for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i]; + for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i]; // Call Colamd routine to compute the ordering - Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); + StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); EIGEN_UNUSED_VARIABLE(info); eigen_assert( info && "COLAMD failed " ); perm.resize(n); - for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i; + for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i; } }; diff --git a/Eigen/src/PaStiXSupport/PaStiXSupport.h b/Eigen/src/PaStiXSupport/PaStiXSupport.h index e20c9ba2a..4e73edf5b 100644 --- a/Eigen/src/PaStiXSupport/PaStiXSupport.h +++ b/Eigen/src/PaStiXSupport/PaStiXSupport.h @@ -308,7 +308,7 @@ void PastixBase::analyzePattern(ColSpMatrix& mat) if(m_size>0) clean(); - m_size = mat.rows(); + m_size = internal::convert_index(mat.rows()); m_perm.resize(m_size); m_invp.resize(m_size); @@ -337,7 +337,7 @@ void PastixBase::factorize(ColSpMatrix& mat) eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase"); m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT; m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT; - m_size = mat.rows(); + m_size = internal::convert_index(mat.rows()); internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(), mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data()); @@ -373,7 +373,7 @@ bool PastixBase::_solve_impl(const MatrixBase &b, MatrixBase &x m_iparm[IPARM_START_TASK] = API_TASK_SOLVE; m_iparm[IPARM_END_TASK] = API_TASK_REFINE; - internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0, + internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index(x.rows()), 0, 0, 0, m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data()); } diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h index 2580151de..e2d7f95f2 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -202,7 +202,7 @@ class SimplicialCholeskyBase : public 
SparseSolverBase void factorize(const MatrixType& a) { eigen_assert(a.rows()==a.cols()); - int size = a.cols(); + Index size = a.cols(); CholMatrixType tmp(size,size); ConstCholMatrixPtr pmat; @@ -226,7 +226,7 @@ class SimplicialCholeskyBase : public SparseSolverBase void analyzePattern(const MatrixType& a, bool doLDLT) { eigen_assert(a.rows()==a.cols()); - int size = a.cols(); + Index size = a.cols(); CholMatrixType tmp(size,size); ConstCholMatrixPtr pmat; ordering(a, pmat, tmp); diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h index 9e2e878e0..31e06995b 100644 --- a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h @@ -50,14 +50,14 @@ namespace Eigen { template void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT) { - const Index size = ap.rows(); + const StorageIndex size = StorageIndex(ap.rows()); m_matrix.resize(size, size); m_parent.resize(size); m_nonZerosPerCol.resize(size); - ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); + ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); - for(Index k = 0; k < size; ++k) + for(StorageIndex k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ @@ -65,7 +65,7 @@ void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrix m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { - Index i = it.index(); + StorageIndex i = it.index(); if(i < k) { /* follow path from i to root of etree, stop at flagged node */ @@ -84,7 +84,7 @@ void SimplicialCholeskyBase::analyzePattern_preordered(const CholMatrix /* construct Lp index array from m_nonZerosPerCol column counts */ StorageIndex* Lp = m_matrix.outerIndexPtr(); Lp[0] = 0; - for(Index k = 0; k < size; ++k) + for(StorageIndex k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1); m_matrix.resizeNonZeros(Lp[size]); @@ -104,10 +104,10 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(ap.rows()==ap.cols()); - const Index size = ap.rows(); - eigen_assert(m_parent.size()==size); - eigen_assert(m_nonZerosPerCol.size()==size); + eigen_assert(m_parent.size()==ap.rows()); + eigen_assert(m_nonZerosPerCol.size()==ap.rows()); + const StorageIndex size = StorageIndex(ap.rows()); const StorageIndex* Lp = m_matrix.outerIndexPtr(); StorageIndex* Li = m_matrix.innerIndexPtr(); Scalar* Lx = m_matrix.valuePtr(); @@ -119,16 +119,16 @@ void SimplicialCholeskyBase::factorize_preordered(const CholMatrixType& bool ok = true; m_diag.resize(DoLDLT ? 
size : 0); - for(Index k = 0; k < size; ++k) + for(StorageIndex k = 0; k < size; ++k) { // compute nonzero pattern of kth row of L, in topological order y[k] = 0.0; // Y(0:k) is now all zero - Index top = size; // stack for pattern is empty + StorageIndex top = size; // stack for pattern is empty tags[k] = k; // mark node k as visited m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { - Index i = it.index(); + StorageIndex i = it.index(); if(i <= k) { y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h index bba8a104b..454462ad5 100644 --- a/Eigen/src/SparseCore/CompressedStorage.h +++ b/Eigen/src/SparseCore/CompressedStorage.h @@ -131,7 +131,7 @@ class CompressedStorage /** \returns the stored value at index \a key * If the value does not exist, then the value \a defaultValue is returned without any insertion. */ - inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const + inline Scalar at(StorageIndex key, const Scalar& defaultValue = Scalar(0)) const { if (m_size==0) return defaultValue; @@ -144,7 +144,7 @@ class CompressedStorage } /** Like at(), but the search is performed in the range [start,end) */ - inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const + inline Scalar atInRange(Index start, Index end, StorageIndex key, const Scalar &defaultValue = Scalar(0)) const { if (start>=end) return defaultValue; @@ -159,7 +159,7 @@ class CompressedStorage /** \returns a reference to the value at index \a key * If the value does not exist, then the value \a defaultValue is inserted * such that the keys are sorted. 
*/ - inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0)) + inline Scalar& atWithInsertion(StorageIndex key, const Scalar& defaultValue = Scalar(0)) { Index id = searchLowerIndex(0,m_size,key); if (id>=m_size || m_indices[id]!=key) @@ -189,7 +189,7 @@ class CompressedStorage internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1); } m_size++; - m_indices[id] = convert_index(key); + m_indices[id] = key; m_values[id] = defaultValue; } return m_values[id]; diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h index 5256bf950..b8604a219 100644 --- a/Eigen/src/SparseCore/SparseBlock.h +++ b/Eigen/src/SparseCore/SparseBlock.h @@ -149,10 +149,10 @@ public: // update innerNonZeros if(!m_matrix.isCompressed()) for(Index j=0; j int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0) { typedef typename MatrixType::StorageIndex StorageIndex; - Index nc = mat.cols(); // Number of columns - Index m = mat.rows(); - Index diagSize = (std::min)(nc,m); + StorageIndex nc = convert_index(mat.cols()); // Number of columns + StorageIndex m = convert_index(mat.rows()); + StorageIndex diagSize = (std::min)(nc,m); IndexVector root(nc); // root of subtree of etree root.setZero(); IndexVector pp(nc); // disjoint sets pp.setZero(); // Initialize disjoint sets parent.resize(mat.cols()); //Compute first nonzero column in each row - StorageIndex row,col; firstRowElt.resize(m); firstRowElt.setConstant(nc); firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); bool found_diag; - for (col = 0; col < nc; col++) + for (StorageIndex col = 0; col < nc; col++) { - Index pcol = col; + StorageIndex pcol = col; if(perm) pcol = perm[col]; for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it) { - row = it.row(); + Index row = it.row(); firstRowElt(row) = (std::min)(firstRowElt(row), col); } } @@ -89,8 +88,8 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl except use (firstRowElt[r],c) in place of an edge (r,c) of A. Thus each row clique in A'*A is replaced by a star centered at its first vertex, which has the same fill. */ - Index rset, cset, rroot; - for (col = 0; col < nc; col++) + StorageIndex rset, cset, rroot; + for (StorageIndex col = 0; col < nc; col++) { found_diag = col>=m; pp(col) = col; @@ -99,7 +98,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl parent(col) = nc; /* The diagonal element is treated here even if it does not exist in the matrix * hence the loop is executed once more */ - Index pcol = col; + StorageIndex pcol = col; if(perm) pcol = perm[col]; for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it) { // A sequence of interleaved find and union is performed @@ -107,7 +106,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl if(it) i = it.index(); if (i == col) found_diag = true; - row = firstRowElt(i); + StorageIndex row = firstRowElt(i); if (row >= col) continue; rset = internal::etree_find(row, pp); // Find the name of the set containing row rroot = root(rset); @@ -128,9 +127,10 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France. 
*/ template -void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum) +void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum) { - Index current = n, first, next; + typedef typename IndexVector::Scalar StorageIndex; + StorageIndex current = n, first, next; while (postnum != n) { // No kid for the current node @@ -175,21 +175,21 @@ void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector * \param post postordered tree */ template -void treePostorder(Index n, IndexVector& parent, IndexVector& post) +void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post) { + typedef typename IndexVector::Scalar StorageIndex; IndexVector first_kid, next_kid; // Linked list of children - Index postnum; + StorageIndex postnum; // Allocate storage for working arrays and results first_kid.resize(n+1); next_kid.setZero(n+1); post.setZero(n+1); // Set up structure describing children - Index v, dad; first_kid.setConstant(-1); - for (v = n-1; v >= 0; v--) + for (StorageIndex v = n-1; v >= 0; v--) { - dad = parent(v); + StorageIndex dad = parent(v); next_kid(v) = first_kid(dad); first_kid(dad) = v; } diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h index 3cfd7ae9b..4cf4f1826 100644 --- a/Eigen/src/SparseCore/SparseMatrix.h +++ b/Eigen/src/SparseCore/SparseMatrix.h @@ -188,7 +188,7 @@ class SparseMatrix const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; - return m_data.atInRange(m_outerIndex[outer], end, inner); + return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner)); } /** \returns a non-const reference to the value of the matrix at position \a i, \a j @@ -211,7 +211,7 @@ class SparseMatrix eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); if(end<=start) return insert(row,col); - const Index p = m_data.searchLowerIndex(start,end-1,inner); + const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner)); if((pm_data.resize(rows()); - Eigen::Map(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); + Eigen::Map(&this->m_data.index(0), rows()).setLinSpaced(0, StorageIndex(rows()-1)); Eigen::Map(&this->m_data.value(0), rows()).setOnes(); - Eigen::Map(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); + Eigen::Map(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows())); } inline SparseMatrix& operator=(const SparseMatrix& other) { diff --git a/Eigen/src/SparseCore/SparsePermutation.h b/Eigen/src/SparseCore/SparsePermutation.h index 80e5c5fef..4be93c18c 100644 --- a/Eigen/src/SparseCore/SparsePermutation.h +++ b/Eigen/src/SparseCore/SparsePermutation.h @@ -61,7 +61,7 @@ struct permut_sparsematrix_product_retval for(Index j=0; j tmp(size,tmpCols); Eigen::Matrix tmpX(size,tmpCols); - for(int k=0; k(rhsCols-k, NbColsAtOnce); + Index actualCols = std::min(rhsCols-k, NbColsAtOnce); tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols); tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols)); dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView(); diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h index b1cc4df77..35bcec819 100644 --- a/Eigen/src/SparseCore/SparseVector.h 
+++ b/Eigen/src/SparseCore/SparseVector.h @@ -103,7 +103,7 @@ class SparseVector inline Scalar coeff(Index i) const { eigen_assert(i>=0 && i=0 && i::analyzePattern(const MatrixType& mat) if (!m_symmetricmode) { IndexVector post, iwork; // Post order etree - internal::treePostorder(m_mat.cols(), m_etree, post); + internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post); // Renumber etree in postorder @@ -479,7 +479,7 @@ void SparseLU::factorize(const MatrixType& matrix) else { //FIXME This should not be needed if the empty permutation is handled transparently m_perm_c.resize(matrix.cols()); - for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i; + for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i; } Index m = m_mat.rows(); diff --git a/Eigen/src/SparseLU/SparseLUImpl.h b/Eigen/src/SparseLU/SparseLUImpl.h index e735fd5c8..731d1652c 100644 --- a/Eigen/src/SparseLU/SparseLUImpl.h +++ b/Eigen/src/SparseLU/SparseLUImpl.h @@ -40,7 +40,7 @@ class SparseLUImpl Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu); Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu); template - void dfs_kernel(const Index jj, IndexVector& perm_r, + void dfs_kernel(const StorageIndex jj, IndexVector& perm_r, Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, Ref repfnz_col, IndexVector& xprune, Ref marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits); diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h index f7ffc2d9c..b37b93cf1 100644 --- a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h @@ -178,11 +178,11 @@ class MappedSuperNodalMatrix * \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L * */ -template -class MappedSuperNodalMatrix::InnerIterator +template +class MappedSuperNodalMatrix::InnerIterator { public: - InnerIterator(const MappedSuperNodalMatrix& mat, Eigen::Index outer) + InnerIterator(const MappedSuperNodalMatrix& mat, Index outer) : m_matrix(mat), m_outer(outer), m_supno(mat.colToSup()[outer]), diff --git a/Eigen/src/SparseLU/SparseLU_Utils.h b/Eigen/src/SparseLU/SparseLU_Utils.h index b48157d9f..9e3dab44d 100644 --- a/Eigen/src/SparseLU/SparseLU_Utils.h +++ b/Eigen/src/SparseLU/SparseLU_Utils.h @@ -53,7 +53,7 @@ void SparseLUImpl::fixupL(const Index n, const IndexVector& { Index fsupc, i, j, k, jstart; - Index nextl = 0; + StorageIndex nextl = 0; Index nsuper = (glu.supno)(n); // For each supernode diff --git a/Eigen/src/SparseLU/SparseLU_column_bmod.h b/Eigen/src/SparseLU/SparseLU_column_bmod.h index bda01dcb3..be190997d 100644 --- a/Eigen/src/SparseLU/SparseLU_column_bmod.h +++ b/Eigen/src/SparseLU/SparseLU_column_bmod.h @@ -138,7 +138,7 @@ Index SparseLUImpl::column_bmod(const Index jcol, const Ind glu.lusup.segment(nextlu,offset).setZero(); nextlu += offset; } - glu.xlusup(jcol + 1) = nextlu; // close L\U(*,jcol); + glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol); /* For more updates within the panel (also within the current supernode), * should start from the first column of the panel, or the first column diff --git a/Eigen/src/SparseLU/SparseLU_column_dfs.h b/Eigen/src/SparseLU/SparseLU_column_dfs.h index 17c9e6adb..c98b30e32 100644 --- 
a/Eigen/src/SparseLU/SparseLU_column_dfs.h +++ b/Eigen/src/SparseLU/SparseLU_column_dfs.h @@ -112,13 +112,13 @@ Index SparseLUImpl::column_dfs(const Index m, const Index j // krow was visited before, go to the next nonz; if (kmark == jcol) continue; - dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent, + dfs_kernel(StorageIndex(jcol), perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent, xplore, glu, nextl, krow, traits); } // for each nonzero ... - Index fsupc, jptr, jm1ptr, ito, ifrom, istop; - Index nsuper = glu.supno(jcol); - Index jcolp1 = jcol + 1; + Index fsupc; + StorageIndex nsuper = glu.supno(jcol); + StorageIndex jcolp1 = StorageIndex(jcol) + 1; Index jcolm1 = jcol - 1; // check to see if j belongs in the same supernode as j-1 @@ -129,8 +129,8 @@ Index SparseLUImpl::column_dfs(const Index m, const Index j else { fsupc = glu.xsup(nsuper); - jptr = glu.xlsub(jcol); // Not yet compressed - jm1ptr = glu.xlsub(jcolm1); + StorageIndex jptr = glu.xlsub(jcol); // Not yet compressed + StorageIndex jm1ptr = glu.xlsub(jcolm1); // Use supernodes of type T2 : see SuperLU paper if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU; @@ -148,13 +148,13 @@ Index SparseLUImpl::column_dfs(const Index m, const Index j { // starts a new supernode if ( (fsupc < jcolm1-1) ) { // >= 3 columns in nsuper - ito = glu.xlsub(fsupc+1); + StorageIndex ito = glu.xlsub(fsupc+1); glu.xlsub(jcolm1) = ito; - istop = ito + jptr - jm1ptr; + StorageIndex istop = ito + jptr - jm1ptr; xprune(jcolm1) = istop; // intialize xprune(jcol-1) glu.xlsub(jcol) = istop; - for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) + for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) glu.lsub(ito) = glu.lsub(ifrom); nextl = ito; // = istop + length(jcol) } @@ -166,8 +166,8 @@ Index SparseLUImpl::column_dfs(const Index m, const Index j // Tidy up the pointers before exit glu.xsup(nsuper+1) = jcolp1; glu.supno(jcolp1) = nsuper; - xprune(jcol) = nextl; // Intialize upper bound for pruning - glu.xlsub(jcolp1) = nextl; + xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning + glu.xlsub(jcolp1) = StorageIndex(nextl); return 0; } diff --git a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h index bf237951d..c32d8d8b1 100644 --- a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +++ b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h @@ -56,7 +56,7 @@ Index SparseLUImpl::copy_to_ucol(const Index jcol, const In // For each nonzero supernode segment of U[*,j] in topological order Index k = nseg - 1, i; - Index nextu = glu.xusub(jcol); + StorageIndex nextu = glu.xusub(jcol); Index kfnz, isub, segsize; Index new_next,irow; Index fsupc, mem; diff --git a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h index 4092f842f..6f75d500e 100644 --- a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +++ b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h @@ -48,15 +48,14 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe // The etree may not be postordered, but its heap ordered IndexVector post; - internal::treePostorder(n, et, post); // Post order etree + internal::treePostorder(StorageIndex(n), et, post); // Post order etree IndexVector inv_post(n+1); - Index i; - for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()??? + for (StorageIndex i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()??? 
// Renumber etree in postorder IndexVector iwork(n); IndexVector et_save(n+1); - for (i = 0; i < n; ++i) + for (Index i = 0; i < n; ++i) { iwork(post(i)) = post(et(i)); } @@ -78,7 +77,7 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe StorageIndex k; Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree Index nsuper_et = 0; // Number of relaxed snodes in the original etree - Index l; + StorageIndex l; for (j = 0; j < n; ) { parent = et(j); @@ -90,8 +89,8 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe } // Found a supernode in postordered etree, j is the last column ++nsuper_et_post; - k = n; - for (i = snode_start; i <= j; ++i) + k = StorageIndex(n); + for (Index i = snode_start; i <= j; ++i) k = (std::min)(k, inv_post(i)); l = inv_post(j); if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode @@ -102,7 +101,7 @@ void SparseLUImpl::heap_relax_snode (const Index n, IndexVe } else { - for (i = snode_start; i <= j; ++i) + for (Index i = snode_start; i <= j; ++i) { l = inv_post(i); if (descendants(i) == 0) diff --git a/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/Eigen/src/SparseLU/SparseLU_panel_dfs.h index f4a908ee5..155df7336 100644 --- a/Eigen/src/SparseLU/SparseLU_panel_dfs.h +++ b/Eigen/src/SparseLU/SparseLU_panel_dfs.h @@ -41,7 +41,7 @@ struct panel_dfs_traits panel_dfs_traits(Index jcol, StorageIndex* marker) : m_jcol(jcol), m_marker(marker) {} - bool update_segrep(Index krep, Index jj) + bool update_segrep(Index krep, StorageIndex jj) { if(m_marker[krep] template -void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& perm_r, +void SparseLUImpl::dfs_kernel(const StorageIndex jj, IndexVector& perm_r, Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, Ref repfnz_col, IndexVector& xprune, Ref marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu, @@ -67,14 +67,14 @@ void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& ) { - Index kmark = marker(krow); + StorageIndex kmark = marker(krow); // For each unmarked krow of jj marker(krow) = jj; - Index kperm = perm_r(krow); + StorageIndex kperm = perm_r(krow); if (kperm == emptyIdxLU ) { // krow is in L : place it in structure of L(*, jj) - panel_lsub(nextl_col++) = krow; // krow is indexed into A + panel_lsub(nextl_col++) = StorageIndex(krow); // krow is indexed into A traits.mem_expand(panel_lsub, nextl_col, kmark); } @@ -83,9 +83,9 @@ void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& // krow is in U : if its supernode-representative krep // has been explored, update repfnz(*) // krep = supernode representative of the current row - Index krep = glu.xsup(glu.supno(kperm)+1) - 1; + StorageIndex krep = glu.xsup(glu.supno(kperm)+1) - 1; // First nonzero element in the current column: - Index myfnz = repfnz_col(krep); + StorageIndex myfnz = repfnz_col(krep); if (myfnz != emptyIdxLU ) { @@ -96,26 +96,26 @@ void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& else { // Otherwise, perform dfs starting at krep - Index oldrep = emptyIdxLU; + StorageIndex oldrep = emptyIdxLU; parent(krep) = oldrep; repfnz_col(krep) = kperm; - Index xdfs = glu.xlsub(krep); + StorageIndex xdfs = glu.xlsub(krep); Index maxdfs = xprune(krep); - Index kpar; + StorageIndex kpar; do { // For each unmarked kchild of krep while (xdfs < maxdfs) { - Index kchild = glu.lsub(xdfs); + StorageIndex kchild = glu.lsub(xdfs); xdfs++; - Index chmark = marker(kchild); + StorageIndex chmark = marker(kchild); if (chmark != jj ) { marker(kchild) = jj; - Index chperm = 
perm_r(kchild); + StorageIndex chperm = perm_r(kchild); if (chperm == emptyIdxLU) { @@ -128,7 +128,7 @@ void SparseLUImpl::dfs_kernel(const Index jj, IndexVector& // case kchild is in U : // chrep = its supernode-rep. If its rep has been explored, // update its repfnz(*) - Index chrep = glu.xsup(glu.supno(chperm)+1) - 1; + StorageIndex chrep = glu.xsup(glu.supno(chperm)+1) - 1; myfnz = repfnz_col(chrep); if (myfnz != emptyIdxLU) @@ -227,7 +227,7 @@ void SparseLUImpl::panel_dfs(const Index m, const Index w, panel_dfs_traits traits(jcol, marker1.data()); // For each column in the panel - for (Index jj = jcol; jj < jcol + w; jj++) + for (StorageIndex jj = StorageIndex(jcol); jj < jcol + w; jj++) { nextl_col = (jj - jcol) * m; @@ -241,7 +241,7 @@ void SparseLUImpl::panel_dfs(const Index m, const Index w, Index krow = it.row(); dense_col(krow) = it.value(); - Index kmark = marker(krow); + StorageIndex kmark = marker(krow); if (kmark == jj) continue; // krow visited before, go to the next nonzero diff --git a/Eigen/src/SparseLU/SparseLU_pivotL.h b/Eigen/src/SparseLU/SparseLU_pivotL.h index 01f5ba4e9..562128b69 100644 --- a/Eigen/src/SparseLU/SparseLU_pivotL.h +++ b/Eigen/src/SparseLU/SparseLU_pivotL.h @@ -89,7 +89,7 @@ Index SparseLUImpl::pivotL(const Index jcol, const RealScal // Test for singularity if ( pivmax == 0.0 ) { pivrow = lsub_ptr[pivptr]; - perm_r(pivrow) = jcol; + perm_r(pivrow) = StorageIndex(jcol); return (jcol+1); } @@ -110,7 +110,7 @@ Index SparseLUImpl::pivotL(const Index jcol, const RealScal } // Record pivot row - perm_r(pivrow) = jcol; + perm_r(pivrow) = StorageIndex(jcol); // Interchange row subscripts if (pivptr != nsupc ) { diff --git a/Eigen/src/SparseLU/SparseLU_pruneL.h b/Eigen/src/SparseLU/SparseLU_pruneL.h index 13133fcc2..ad32fed5e 100644 --- a/Eigen/src/SparseLU/SparseLU_pruneL.h +++ b/Eigen/src/SparseLU/SparseLU_pruneL.h @@ -124,7 +124,7 @@ void SparseLUImpl::pruneL(const Index jcol, const IndexVect } } // end while - xprune(irep) = kmin; //Pruning + xprune(irep) = StorageIndex(kmin); //Pruning } // end if do_prune } // end pruning } // End for each U-segment diff --git a/Eigen/src/SparseLU/SparseLU_relax_snode.h b/Eigen/src/SparseLU/SparseLU_relax_snode.h index 21c182d56..c408d01b4 100644 --- a/Eigen/src/SparseLU/SparseLU_relax_snode.h +++ b/Eigen/src/SparseLU/SparseLU_relax_snode.h @@ -48,10 +48,10 @@ void SparseLUImpl::relax_snode (const Index n, IndexVector& { // compute the number of descendants of each node in the etree - Index j, parent; + Index parent; relax_end.setConstant(emptyIdxLU); descendants.setZero(); - for (j = 0; j < n; j++) + for (Index j = 0; j < n; j++) { parent = et(j); if (parent != n) // not the dummy root @@ -59,7 +59,7 @@ void SparseLUImpl::relax_snode (const Index n, IndexVector& } // Identify the relaxed supernodes by postorder traversal of the etree Index snode_start; // beginning of a snode - for (j = 0; j < n; ) + for (Index j = 0; j < n; ) { parent = et(j); snode_start = j; @@ -69,7 +69,7 @@ void SparseLUImpl::relax_snode (const Index n, IndexVector& parent = et(j); } // Found a supernode in postordered etree, j is the last column - relax_end(snode_start) = j; // Record last column + relax_end(snode_start) = StorageIndex(j); // Record last column j++; // Search for a new leaf while (descendants(j) != 0 && j < n) j++; diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h index 920b884e5..ce4a70454 100644 --- a/Eigen/src/SparseQR/SparseQR.h +++ b/Eigen/src/SparseQR/SparseQR.h @@ -296,7 +296,7 @@ void 
SparseQR::analyzePattern(const MatrixType& mat) if (!m_perm_c.size()) { m_perm_c.resize(n); - m_perm_c.indices().setLinSpaced(n, 0,n-1); + m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1)); } // Compute the column elimination tree of the permuted matrix @@ -327,8 +327,8 @@ void SparseQR::factorize(const MatrixType& mat) using std::abs; eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); - StorageIndex m = mat.rows(); - StorageIndex n = mat.cols(); + StorageIndex m = StorageIndex(mat.rows()); + StorageIndex n = StorageIndex(mat.cols()); StorageIndex diagSize = (std::min)(m,n); IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q @@ -406,7 +406,7 @@ void SparseQR::factorize(const MatrixType& mat) for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) { StorageIndex curIdx = nonzeroCol; - if(itp) curIdx = itp.row(); + if(itp) curIdx = StorageIndex(itp.row()); if(curIdx == nonzeroCol) found_diag = true; // Get the nonzeros indexes of the current column of R @@ -467,7 +467,7 @@ void SparseQR::factorize(const MatrixType& mat) { for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) { - StorageIndex iQ = itq.row(); + StorageIndex iQ = StorageIndex(itq.row()); if (mark(iQ) != col) { Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h index dcbd4ab71..3d30403c7 100644 --- a/Eigen/src/UmfPackSupport/UmfPackSupport.h +++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -313,8 +313,9 @@ class UmfPackLU : public SparseSolverBase > void analyzePattern_impl() { int errorCode = 0; - errorCode = umfpack_symbolic(m_copyMatrix.rows(), m_copyMatrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, - &m_symbolic, 0, 0); + errorCode = umfpack_symbolic(internal::convert_index(m_copyMatrix.rows()), + internal::convert_index(m_copyMatrix.cols()), + m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, &m_symbolic, 0, 0); m_isInitialized = true; m_info = errorCode ? 
InvalidInput : Success; diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp index b06956974..8021f4db6 100644 --- a/test/sparse_basic.cpp +++ b/test/sparse_basic.cpp @@ -398,8 +398,8 @@ template void sparse_basic(const SparseMatrixType& re refMat.setZero(); for(Index i=0;i(0,rows-1); - StorageIndex c = internal::random(0,cols-1); + StorageIndex r = internal::random(0,StorageIndex(rows-1)); + StorageIndex c = internal::random(0,StorageIndex(cols-1)); Scalar v = internal::random(); triplets.push_back(TripletType(r,c,v)); refMat(r,c) += v; diff --git a/test/spqr_support.cpp b/test/spqr_support.cpp index 901c42c40..baa25a0c2 100644 --- a/test/spqr_support.cpp +++ b/test/spqr_support.cpp @@ -37,7 +37,7 @@ template void test_spqr_scalar() SPQR solver; generate_sparse_rectangular_problem(A,dA); - int m = A.rows(); + Index m = A.rows(); b = DenseVector::Random(m); solver.compute(A); if (solver.info() != Success) diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h index 873f2bf2a..3e733e053 100644 --- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -54,7 +54,7 @@ namespace internal { */ template bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond, - int &iters, const int &restart, typename Dest::RealScalar & tol_error) { + Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) { using std::sqrt; using std::abs; @@ -65,10 +65,10 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType; RealScalar tol = tol_error; - const int maxIters = iters; + const Index maxIters = iters; iters = 0; - const int m = mat.rows(); + const Index m = mat.rows(); // residual and preconditioned residual const VectorType p0 = rhs - mat*x; @@ -97,14 +97,14 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition w(0)=(Scalar) beta; H.bottomLeftCorner(m - 1, 1) = e; - for (int k = 1; k <= restart; ++k) { + for (Index k = 1; k <= restart; ++k) { ++iters; VectorType v = VectorType::Unit(m, k - 1), workspace(m); // apply Householder reflections H_{1} ... H_{k-1} to v - for (int i = k - 1; i >= 0; --i) { + for (Index i = k - 1; i >= 0; --i) { v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } @@ -113,7 +113,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition v=precond.solve(t); // apply Householder reflections H_{k-1} ... 
H_{1} to v - for (int i = 0; i < k; ++i) { + for (Index i = 0; i < k; ++i) { v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); } @@ -133,7 +133,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition } if (k > 1) { - for (int i = 0; i < k - 1; ++i) { + for (Index i = 0; i < k - 1; ++i) { // apply old Givens rotations to v v.applyOnTheLeft(i, i + 1, G[i].adjoint()); } @@ -166,7 +166,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition // apply Householder reflection H_{k} to x_new x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data()); - for (int i = k - 2; i >= 0; --i) { + for (Index i = k - 2; i >= 0; --i) { x_new += y(i) * VectorType::Unit(m, i); // apply Householder reflection H_{i} to x_new x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); @@ -265,7 +265,7 @@ class GMRES : public IterativeSolverBase > using Base::m_isInitialized; private: - int m_restart; + Index m_restart; public: using Base::_solve_impl; @@ -295,19 +295,19 @@ public: /** Get the number of iterations after that a restart is performed. */ - int get_restart() { return m_restart; } + Index get_restart() { return m_restart; } /** Set the number of iterations after that a restart is performed. * \param restart number of iterations for a restarti, default is 30. */ - void set_restart(const int restart) { m_restart=restart; } + void set_restart(const Index restart) { m_restart=restart; } /** \internal */ template void _solve_with_guess_impl(const Rhs& b, Dest& x) const { bool failed = false; - for(int j=0; j EIGEN_DONT_INLINE void minres(const MatrixType& mat, const Rhs& rhs, Dest& x, - const Preconditioner& precond, int& iters, + const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; @@ -48,8 +48,8 @@ namespace Eigen { } // initialize - const int maxIters(iters); // initialize maxIters to iters - const int N(mat.cols()); // the size of the matrix + const Index maxIters(iters); // initialize maxIters to iters + const Index N(mat.cols()); // the size of the matrix const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) // Initialize preconditioned Lanczos From cc641aabb710ab002b6c641bd5c3e1deed0e634d Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 16 Feb 2015 14:46:51 +0100 Subject: [PATCH 4/5] Remove deprecated usage of expr::Index. 
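Note (editorial, not part of the patch): this commit systematically replaces the deprecated nested typedef `expr::Index` (i.e. `typename SomeExpr::Index`) with the global `Eigen::Index` in generic code, keeping `StorageIndex` only where an actual stored index type (sparse or transposition indices) is meant. The sketch below is purely illustrative of that usage pattern; the helper name countDiagonalNonZeros is hypothetical and does not appear anywhere in Eigen.

    // Minimal sketch, assuming Eigen >= 3.3 semantics: generic code used to
    // spell the index type as `typename MatrixType::Index`; after this series
    // the global Eigen::Index is used directly.
    #include <algorithm>
    #include <iostream>
    #include <Eigen/Dense>

    template <typename MatrixType>
    Eigen::Index countDiagonalNonZeros(const MatrixType& m)
    {
      typedef typename MatrixType::Scalar Scalar;
      Eigen::Index count = 0;                                 // global signed index type
      const Eigen::Index n = (std::min)(m.rows(), m.cols());  // rows()/cols() return Eigen::Index
      for (Eigen::Index i = 0; i < n; ++i)
        if (m(i, i) != Scalar(0))
          ++count;
      return count;
    }

    int main()
    {
      Eigen::Matrix3d A = Eigen::Matrix3d::Identity();
      std::cout << countDiagonalNonZeros(A) << "\n";          // prints 3
      return 0;
    }
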
--- Eigen/src/Cholesky/LDLT.h | 8 +-- Eigen/src/Cholesky/LLT.h | 19 +++--- Eigen/src/Cholesky/LLT_MKL.h | 10 +-- Eigen/src/Core/Assign_MKL.h | 1 - Eigen/src/Core/CwiseNullaryOp.h | 1 - Eigen/src/Core/DenseCoeffsBase.h | 6 +- Eigen/src/Core/GeneralProduct.h | 4 -- Eigen/src/Core/IO.h | 1 - Eigen/src/Core/Map.h | 2 - Eigen/src/Core/MapBase.h | 2 +- Eigen/src/Core/Redux.h | 4 -- Eigen/src/Core/SolveTriangular.h | 4 +- Eigen/src/Core/Transpose.h | 1 - Eigen/src/Core/Transpositions.h | 68 ++++++++----------- Eigen/src/Core/VectorwiseOp.h | 2 +- Eigen/src/Core/Visitor.h | 5 -- Eigen/src/Core/products/GeneralMatrixMatrix.h | 1 - .../products/GeneralMatrixMatrixTriangular.h | 3 - .../Core/products/SelfadjointMatrixMatrix.h | 1 - .../Core/products/SelfadjointMatrixVector.h | 1 - Eigen/src/Core/products/SelfadjointProduct.h | 2 - .../Core/products/TriangularMatrixMatrix.h | 1 - .../Core/products/TriangularMatrixVector.h | 2 - Eigen/src/Core/util/Macros.h | 1 - Eigen/src/Eigenvalues/ComplexEigenSolver.h | 2 +- Eigen/src/Eigenvalues/ComplexSchur.h | 2 +- Eigen/src/Eigenvalues/EigenSolver.h | 2 +- .../src/Eigenvalues/GeneralizedEigenSolver.h | 2 +- .../GeneralizedSelfAdjointEigenSolver.h | 1 - .../src/Eigenvalues/HessenbergDecomposition.h | 3 +- Eigen/src/Eigenvalues/RealQZ.h | 6 +- Eigen/src/Eigenvalues/RealSchur.h | 4 +- .../src/Eigenvalues/SelfAdjointEigenSolver.h | 7 +- Eigen/src/Eigenvalues/Tridiagonalization.h | 5 +- Eigen/src/Geometry/Homogeneous.h | 2 - Eigen/src/Geometry/OrthoMethods.h | 1 - Eigen/src/Geometry/Transform.h | 3 +- Eigen/src/Geometry/Umeyama.h | 1 - Eigen/src/Householder/BlockHouseholder.h | 3 - .../IterativeLinearSolvers/SolveWithGuess.h | 1 - Eigen/src/Jacobi/Jacobi.h | 5 +- Eigen/src/LU/PartialPivLU.h | 7 +- Eigen/src/QR/FullPivHouseholderQR.h | 1 - Eigen/src/QR/HouseholderQR.h | 5 +- Eigen/src/QR/HouseholderQR_MKL.h | 3 +- Eigen/src/SVD/BDCSVD.h | 1 - Eigen/src/SVD/JacobiSVD.h | 12 ---- Eigen/src/SVD/SVDBase.h | 1 + Eigen/src/SVD/UpperBidiagonalization.h | 9 +-- Eigen/src/SparseCore/SparseBlock.h | 2 +- Eigen/src/SparseCore/SparseView.h | 3 +- Eigen/src/SuperLUSupport/SuperLUSupport.h | 1 - Eigen/src/misc/Image.h | 2 - Eigen/src/misc/Kernel.h | 2 - 54 files changed, 80 insertions(+), 169 deletions(-) diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index e2a6ca2b2..f46f7b758 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -59,7 +59,7 @@ template class LDLT }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix TmpMatrixType; @@ -252,8 +252,7 @@ template<> struct ldlt_inplace using std::abs; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; - typedef typename TranspositionType::StorageIndexType IndexType; + typedef typename TranspositionType::StorageIndex IndexType; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); @@ -343,7 +342,6 @@ template<> struct ldlt_inplace using numext::isfinite; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; const Index size = mat.rows(); eigen_assert(mat.cols() == size && w.size()==size); @@ -451,7 +449,7 @@ template template LDLT& LDLT::rankUpdate(const MatrixBase& w, const typename 
NumTraits::Real& sigma) { - typedef typename TranspositionType::StorageIndexType IndexType; + typedef typename TranspositionType::StorageIndex IndexType; const Index size = w.rows(); if (m_isInitialized) { diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h index 5e0cf6c88..629c87161 100644 --- a/Eigen/src/Cholesky/LLT.h +++ b/Eigen/src/Cholesky/LLT.h @@ -59,7 +59,7 @@ template class LLT }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename MatrixType::StorageIndex StorageIndex; enum { @@ -184,12 +184,11 @@ namespace internal { template struct llt_inplace; template -static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) +static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) { using std::sqrt; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; typedef typename MatrixType::ColXpr ColXpr; typedef typename internal::remove_all::type ColXprCleaned; typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; @@ -258,10 +257,9 @@ template struct llt_inplace { typedef typename NumTraits::Real RealScalar; template - static typename MatrixType::Index unblocked(MatrixType& mat) + static Index unblocked(MatrixType& mat) { using std::sqrt; - typedef typename MatrixType::Index Index; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); @@ -285,9 +283,8 @@ template struct llt_inplace } template - static typename MatrixType::Index blocked(MatrixType& m) + static Index blocked(MatrixType& m) { - typedef typename MatrixType::Index Index; eigen_assert(m.rows()==m.cols()); Index size = m.rows(); if(size<32) @@ -318,7 +315,7 @@ template struct llt_inplace } template - static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } @@ -329,19 +326,19 @@ template struct llt_inplace typedef typename NumTraits::Real RealScalar; template - static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat) + static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat) { Transpose matt(mat); return llt_inplace::unblocked(matt); } template - static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat) + static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat) { Transpose matt(mat); return llt_inplace::blocked(matt); } template - static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { Transpose matt(mat); return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); diff --git a/Eigen/src/Cholesky/LLT_MKL.h b/Eigen/src/Cholesky/LLT_MKL.h index 64daa445c..09bf59d43 100644 --- a/Eigen/src/Cholesky/LLT_MKL.h +++ b/Eigen/src/Cholesky/LLT_MKL.h @@ -46,7 +46,7 @@ template struct mkl_llt; template<> struct mkl_llt \ { \ template \ - static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \ + static inline Index potrf(MatrixType& m, char uplo) \ { \ lapack_int matrix_order; \ lapack_int size, lda, info, 
StorageOrder; \ @@ -67,23 +67,23 @@ template<> struct mkl_llt \ template<> struct llt_inplace \ { \ template \ - static typename MatrixType::Index blocked(MatrixType& m) \ + static Index blocked(MatrixType& m) \ { \ return mkl_llt::potrf(m, 'L'); \ } \ template \ - static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ }; \ template<> struct llt_inplace \ { \ template \ - static typename MatrixType::Index blocked(MatrixType& m) \ + static Index blocked(MatrixType& m) \ { \ return mkl_llt::potrf(m, 'U'); \ } \ template \ - static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { \ Transpose matt(mat); \ return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); \ diff --git a/Eigen/src/Core/Assign_MKL.h b/Eigen/src/Core/Assign_MKL.h index 97134ffd7..a7b9e9293 100644 --- a/Eigen/src/Core/Assign_MKL.h +++ b/Eigen/src/Core/Assign_MKL.h @@ -84,7 +84,6 @@ template { typedef typename Derived1::Scalar Scalar; - typedef typename Derived1::Index Index; static inline void run(Derived1& dst, const CwiseUnaryOp& src) { // in case we want to (or have to) skip VML at runtime we can call: diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h index 05c4fedd0..009fd845d 100644 --- a/Eigen/src/Core/CwiseNullaryOp.h +++ b/Eigen/src/Core/CwiseNullaryOp.h @@ -753,7 +753,6 @@ struct setIdentity_impl template struct setIdentity_impl { - typedef typename Derived::Index Index; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) { diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h index 569fed956..f08380bed 100644 --- a/Eigen/src/Core/DenseCoeffsBase.h +++ b/Eigen/src/Core/DenseCoeffsBase.h @@ -583,14 +583,14 @@ namespace internal { template struct first_aligned_impl { - static inline typename Derived::Index run(const Derived&) + static inline Index run(const Derived&) { return 0; } }; template struct first_aligned_impl { - static inline typename Derived::Index run(const Derived& m) + static inline Index run(const Derived& m) { return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); } @@ -602,7 +602,7 @@ struct first_aligned_impl * documentation. 
*/ template -static inline typename Derived::Index first_aligned(const Derived& m) +static inline Index first_aligned(const Derived& m) { return first_aligned_impl diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h index 81750722c..7027130e0 100644 --- a/Eigen/src/Core/GeneralProduct.h +++ b/Eigen/src/Core/GeneralProduct.h @@ -221,7 +221,6 @@ template<> struct gemv_dense_sense_selector template static inline void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { - typedef typename Dest::Index Index; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef typename Dest::Scalar ResScalar; @@ -298,7 +297,6 @@ template<> struct gemv_dense_sense_selector template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { - typedef typename Dest::Index Index; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef typename Dest::Scalar ResScalar; @@ -352,7 +350,6 @@ template<> struct gemv_dense_sense_selector template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { - typedef typename Dest::Index Index; // TODO makes sure dest is sequentially stored in memory, otherwise use a temp const Index size = rhs.rows(); for(Index k=0; k struct gemv_dense_sense_selector template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { - typedef typename Dest::Index Index; // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp const Index rows = dest.rows(); for(Index i=0; i > : public traits { typedef traits TraitsBase; - typedef typename PlainObjectType::Index Index; - typedef typename PlainObjectType::Scalar Scalar; enum { InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 ? 
int(PlainObjectType::InnerStrideAtCompileTime) diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h index 8dca9796d..acac74aa4 100644 --- a/Eigen/src/Core/MapBase.h +++ b/Eigen/src/Core/MapBase.h @@ -178,7 +178,7 @@ template class MapBase typedef typename Base::Scalar Scalar; typedef typename Base::PacketScalar PacketScalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PointerType PointerType; using Base::derived; diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h index f6546917e..1a0a00481 100644 --- a/Eigen/src/Core/Redux.h +++ b/Eigen/src/Core/Redux.h @@ -191,7 +191,6 @@ template struct redux_impl { typedef typename Derived::Scalar Scalar; - typedef typename Derived::Index Index; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func) { @@ -217,7 +216,6 @@ struct redux_impl { typedef typename Derived::Scalar Scalar; typedef typename packet_traits::type PacketScalar; - typedef typename Derived::Index Index; static Scalar run(const Derived &mat, const Func& func) { @@ -275,7 +273,6 @@ struct redux_impl { typedef typename Derived::Scalar Scalar; typedef typename packet_traits::type PacketScalar; - typedef typename Derived::Index Index; EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func) { @@ -342,7 +339,6 @@ public: typedef _XprType XprType; EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {} - typedef typename XprType::Index Index; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; diff --git a/Eigen/src/Core/SolveTriangular.h b/Eigen/src/Core/SolveTriangular.h index f97048bda..9bac726f7 100644 --- a/Eigen/src/Core/SolveTriangular.h +++ b/Eigen/src/Core/SolveTriangular.h @@ -68,7 +68,7 @@ struct triangular_solver_selector if(!useRhsDirectly) MappedRhs(actualRhs,rhs.size()) = rhs; - triangular_solve_vector ::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs); @@ -82,7 +82,6 @@ template struct triangular_solver_selector { typedef typename Rhs::Scalar Scalar; - typedef typename Rhs::Index Index; typedef blas_traits LhsProductTraits; typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType; @@ -232,7 +231,6 @@ template struct triangular_solv { typedef typename remove_all::type RhsNestedCleaned; typedef ReturnByValue Base; - typedef typename Base::Index Index; triangular_solve_retval(const TriangularType& tri, const Rhs& rhs) : m_triangularMatrix(tri), m_rhs(rhs) diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h index 7e41769a3..2c967abca 100644 --- a/Eigen/src/Core/Transpose.h +++ b/Eigen/src/Core/Transpose.h @@ -232,7 +232,6 @@ struct inplace_transpose_selector { // PacketSize x Packet static void run(MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef typename internal::packet_traits::type Packet; - typedef typename MatrixType::Index Index; const Index PacketSize = internal::packet_traits::size; const Index Alignment = internal::evaluator::Flags&AlignedBit ? 
Aligned : Unaligned; PacketBlock A; diff --git a/Eigen/src/Core/Transpositions.h b/Eigen/src/Core/Transpositions.h index 77e7d6f45..e9b54c2ba 100644 --- a/Eigen/src/Core/Transpositions.h +++ b/Eigen/src/Core/Transpositions.h @@ -53,8 +53,8 @@ class TranspositionsBase public: typedef typename Traits::IndicesType IndicesType; - typedef typename IndicesType::Scalar StorageIndexType; - typedef typename IndicesType::Index Index; + typedef typename IndicesType::Scalar StorageIndex; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } @@ -82,17 +82,17 @@ class TranspositionsBase inline Index size() const { return indices().size(); } /** Direct access to the underlying index vector */ - inline const StorageIndexType& coeff(Index i) const { return indices().coeff(i); } + inline const StorageIndex& coeff(Index i) const { return indices().coeff(i); } /** Direct access to the underlying index vector */ - inline StorageIndexType& coeffRef(Index i) { return indices().coeffRef(i); } + inline StorageIndex& coeffRef(Index i) { return indices().coeffRef(i); } /** Direct access to the underlying index vector */ - inline const StorageIndexType& operator()(Index i) const { return indices()(i); } + inline const StorageIndex& operator()(Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ - inline StorageIndexType& operator()(Index i) { return indices()(i); } + inline StorageIndex& operator()(Index i) { return indices()(i); } /** Direct access to the underlying index vector */ - inline const StorageIndexType& operator[](Index i) const { return indices()(i); } + inline const StorageIndex& operator[](Index i) const { return indices()(i); } /** Direct access to the underlying index vector */ - inline StorageIndexType& operator[](Index i) { return indices()(i); } + inline StorageIndex& operator[](Index i) { return indices()(i); } /** const version of indices(). 
*/ const IndicesType& indices() const { return derived().indices(); } @@ -108,7 +108,7 @@ class TranspositionsBase /** Sets \c *this to represents an identity transformation */ void setIdentity() { - for(StorageIndexType i = 0; i < indices().size(); ++i) + for(StorageIndex i = 0; i < indices().size(); ++i) coeffRef(i) = i; } @@ -145,26 +145,23 @@ class TranspositionsBase }; namespace internal { -template -struct traits > +template +struct traits > { - typedef Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType; - typedef typename IndicesType::Index Index; - typedef _StorageIndexType StorageIndexType; + typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType; + typedef _StorageIndex StorageIndex; }; } -template -class Transpositions : public TranspositionsBase > +template +class Transpositions : public TranspositionsBase > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; - typedef typename IndicesType::Scalar StorageIndexType; - typedef typename IndicesType::Index Index; - + typedef typename IndicesType::Scalar StorageIndex; inline Transpositions() {} @@ -219,32 +216,30 @@ class Transpositions : public TranspositionsBase -struct traits,_PacketAccess> > +template +struct traits,_PacketAccess> > { - typedef Map, _PacketAccess> IndicesType; - typedef typename IndicesType::Index Index; - typedef _StorageIndexType StorageIndexType; + typedef Map, _PacketAccess> IndicesType; + typedef _StorageIndex StorageIndex; }; } -template -class Map,PacketAccess> - : public TranspositionsBase,PacketAccess> > +template +class Map,PacketAccess> + : public TranspositionsBase,PacketAccess> > { typedef internal::traits Traits; public: typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; - typedef typename IndicesType::Scalar StorageIndexType; - typedef typename IndicesType::Index Index; + typedef typename IndicesType::Scalar StorageIndex; - explicit inline Map(const StorageIndexType* indicesPtr) + explicit inline Map(const StorageIndex* indicesPtr) : m_indices(indicesPtr) {} - inline Map(const StorageIndexType* indicesPtr, Index size) + inline Map(const StorageIndex* indicesPtr, Index size) : m_indices(indicesPtr,size) {} @@ -281,8 +276,7 @@ namespace internal { template struct traits > { - typedef typename _IndicesType::Scalar StorageIndexType; - typedef typename _IndicesType::Index Index; + typedef typename _IndicesType::Scalar StorageIndex; typedef _IndicesType IndicesType; }; } @@ -296,8 +290,7 @@ class TranspositionsWrapper typedef TranspositionsBase Base; typedef typename Traits::IndicesType IndicesType; - typedef typename IndicesType::Scalar StorageIndexType; - typedef typename IndicesType::Index Index; + typedef typename IndicesType::Scalar StorageIndex; explicit inline TranspositionsWrapper(IndicesType& a_indices) : m_indices(a_indices) @@ -370,8 +363,7 @@ struct transposition_matrix_product_retval : public ReturnByValue > { typedef typename remove_all::type MatrixTypeNestedCleaned; - typedef typename TranspositionType::Index Index; - typedef typename TranspositionType::StorageIndexType StorageIndexType; + typedef typename TranspositionType::StorageIndex StorageIndex; transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix) : m_transpositions(tr), m_matrix(matrix) @@ -383,7 +375,7 @@ struct transposition_matrix_product_retval template inline void evalTo(Dest& dst) const { const Index size = 
m_transpositions.size(); - StorageIndexType j = 0; + StorageIndex j = 0; if(!(is_same::value && extract_data(dst) == extract_data(m_matrix))) dst = m_matrix; diff --git a/Eigen/src/Core/VectorwiseOp.h b/Eigen/src/Core/VectorwiseOp.h index a626310ec..b3dc0c224 100644 --- a/Eigen/src/Core/VectorwiseOp.h +++ b/Eigen/src/Core/VectorwiseOp.h @@ -159,7 +159,7 @@ template class VectorwiseOp typedef typename ExpressionType::Scalar Scalar; typedef typename ExpressionType::RealScalar RealScalar; - typedef typename ExpressionType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename internal::conditional::ret, ExpressionType, ExpressionType&>::type ExpressionTypeNested; typedef typename internal::remove_all::type ExpressionTypeNestedCleaned; diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h index 02bd4eff3..6b1ecae8b 100644 --- a/Eigen/src/Core/Visitor.h +++ b/Eigen/src/Core/Visitor.h @@ -41,7 +41,6 @@ struct visitor_impl template struct visitor_impl { - typedef typename Derived::Index Index; static inline void run(const Derived& mat, Visitor& visitor) { visitor.init(mat.coeff(0,0), 0, 0); @@ -60,7 +59,6 @@ class visitor_evaluator public: explicit visitor_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {} - typedef typename XprType::Index Index; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; @@ -124,7 +122,6 @@ namespace internal { template struct coeff_visitor { - typedef typename Derived::Index Index; typedef typename Derived::Scalar Scalar; Index row, col; Scalar res; @@ -144,7 +141,6 @@ struct coeff_visitor template struct min_coeff_visitor : coeff_visitor { - typedef typename Derived::Index Index; typedef typename Derived::Scalar Scalar; void operator() (const Scalar& value, Index i, Index j) { @@ -172,7 +168,6 @@ struct functor_traits > { template struct max_coeff_visitor : coeff_visitor { - typedef typename Derived::Index Index; typedef typename Derived::Scalar Scalar; void operator() (const Scalar& value, Index i, Index j) { diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h index fd9443cd2..8210ea584 100644 --- a/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -386,7 +386,6 @@ struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; - typedef typename Product::Index Index; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h index e55994900..a36eb2fe0 100644 --- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h @@ -198,7 +198,6 @@ struct general_product_to_triangular_selector static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha) { typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; typedef typename internal::remove_all::type Lhs; typedef internal::blas_traits LhsBlasTraits; @@ -243,8 +242,6 @@ struct general_product_to_triangular_selector { static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha) { - typedef typename MatrixType::Index Index; - typedef typename internal::remove_all::type Lhs; typedef internal::blas_traits LhsBlasTraits; typedef typename 
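
The Transpositions hunks above rename the nested StorageIndexType typedef to StorageIndex and demote the nested Index to a deprecated alias of Eigen::Index. At the user level the two types are used as follows (a hypothetical snippet, not taken from the patch):

// Illustrative example (not part of the patch).
#include <Eigen/Dense>

int main()
{
  Eigen::Transpositions<Eigen::Dynamic> tr(4);
  tr.setIdentity();

  // StorageIndex is the integer type actually stored (int by default),
  // while sizes and loop counters are plain Eigen::Index.
  typedef Eigen::Transpositions<Eigen::Dynamic>::StorageIndex StorageIndex;
  StorageIndex first = tr[0];
  Eigen::Index n = tr.size();

  return static_cast<int>(first) + static_cast<int>(n) == 4 ? 0 : 1;  // expect 0
}
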
LhsBlasTraits::DirectLinearAccessType ActualLhs; diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h index 4b6316d63..f84f54982 100644 --- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h @@ -474,7 +474,6 @@ template struct selfadjoint_product_impl { typedef typename Product::Scalar Scalar; - typedef typename Product::Index Index; typedef internal::blas_traits LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h index 372a44e47..5d6ef9913 100644 --- a/Eigen/src/Core/products/SelfadjointMatrixVector.h +++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h @@ -174,7 +174,6 @@ template struct selfadjoint_product_impl { typedef typename Product::Scalar Scalar; - typedef typename Product::Index Index; typedef internal::blas_traits LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; diff --git a/Eigen/src/Core/products/SelfadjointProduct.h b/Eigen/src/Core/products/SelfadjointProduct.h index 6ca4ae6c0..2af00058d 100644 --- a/Eigen/src/Core/products/SelfadjointProduct.h +++ b/Eigen/src/Core/products/SelfadjointProduct.h @@ -53,7 +53,6 @@ struct selfadjoint_product_selector static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha) { typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; typedef internal::blas_traits OtherBlasTraits; typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; typedef typename internal::remove_all::type _ActualOtherType; @@ -86,7 +85,6 @@ struct selfadjoint_product_selector static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha) { typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; typedef internal::blas_traits OtherBlasTraits; typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType; typedef typename internal::remove_all::type _ActualOtherType; diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h index 60c99dcd2..5f01eb5a8 100644 --- a/Eigen/src/Core/products/TriangularMatrixMatrix.h +++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h @@ -388,7 +388,6 @@ struct triangular_product_impl { template static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha) { - typedef typename Dest::Index Index; typedef typename Dest::Scalar Scalar; typedef internal::blas_traits LhsBlasTraits; diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h index 4d88a710b..7c014b72a 100644 --- a/Eigen/src/Core/products/TriangularMatrixVector.h +++ b/Eigen/src/Core/products/TriangularMatrixVector.h @@ -206,7 +206,6 @@ template struct trmv_selector template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { - typedef typename Dest::Index Index; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef typename Dest::Scalar ResScalar; @@ -283,7 +282,6 @@ template struct trmv_selector template static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha) { - typedef typename Dest::Index Index; typedef typename Lhs::Scalar LhsScalar; typedef typename 
Rhs::Scalar RhsScalar; typedef typename Dest::Scalar ResScalar; diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index 07923848a..e607cdd12 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -660,7 +660,6 @@ namespace Eigen { #define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ typedef typename Base::PacketScalar PacketScalar; \ - typedef Eigen::Index Index; \ enum { MaxRowsAtCompileTime = Eigen::internal::traits::MaxRowsAtCompileTime, \ MaxColsAtCompileTime = Eigen::internal::traits::MaxColsAtCompileTime}; \ using Base::derived; \ diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h index 25082546e..075a62848 100644 --- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h +++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h @@ -60,7 +60,7 @@ template class ComplexEigenSolver /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** \brief Complex scalar type for #MatrixType. * diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h index a3a5a4649..993ee7e1e 100644 --- a/Eigen/src/Eigenvalues/ComplexSchur.h +++ b/Eigen/src/Eigenvalues/ComplexSchur.h @@ -63,7 +63,7 @@ template class ComplexSchur /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** \brief Complex scalar type for \p _MatrixType. * diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h index 9372021ff..a63a42341 100644 --- a/Eigen/src/Eigenvalues/EigenSolver.h +++ b/Eigen/src/Eigenvalues/EigenSolver.h @@ -79,7 +79,7 @@ template class EigenSolver /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** \brief Complex scalar type for #MatrixType. * diff --git a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h index c20ea03e6..c9da6740a 100644 --- a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +++ b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h @@ -72,7 +72,7 @@ template class GeneralizedEigenSolver /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** \brief Complex scalar type for #MatrixType. * diff --git a/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h index 1ce1f5f58..5f6bb8289 100644 --- a/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +++ b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h @@ -50,7 +50,6 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT typedef SelfAdjointEigenSolver<_MatrixType> Base; public: - typedef typename Base::Index Index; typedef _MatrixType MatrixType; /** \brief Default constructor for fixed-size matrices. 
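
Each eigensolver class touched above (ComplexEigenSolver, ComplexSchur, EigenSolver, GeneralizedEigenSolver, and so on) now declares Index as a deprecated alias of Eigen::Index instead of deriving it from MatrixType. Existing client code that names the nested typedef keeps compiling, while new code can use the global typedef directly; a hypothetical illustration:

// Illustrative example (not part of the patch).
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  Eigen::EigenSolver<Eigen::MatrixXd> es(A);

  // Old style: the nested typedef, now merely an alias of Eigen::Index.
  Eigen::EigenSolver<Eigen::MatrixXd>::Index n_old = es.eigenvalues().size();

  // New style: use the global typedef directly.
  Eigen::Index n_new = es.eigenvalues().size();

  return (n_old == n_new && n_new == 5) ? 0 : 1;   // expect 0
}
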
diff --git a/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/Eigen/src/Eigenvalues/HessenbergDecomposition.h index 2615a9f23..87a5bcb69 100644 --- a/Eigen/src/Eigenvalues/HessenbergDecomposition.h +++ b/Eigen/src/Eigenvalues/HessenbergDecomposition.h @@ -71,7 +71,7 @@ template class HessenbergDecomposition /** \brief Scalar type for matrices of type #MatrixType. */ typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** \brief Type for vector of Householder coefficients. * @@ -337,7 +337,6 @@ namespace internal { template struct HessenbergDecompositionMatrixHReturnType : public ReturnByValue > { - typedef typename MatrixType::Index Index; public: /** \brief Constructor. * diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h index 128ef9028..ca75f2f50 100644 --- a/Eigen/src/Eigenvalues/RealQZ.h +++ b/Eigen/src/Eigenvalues/RealQZ.h @@ -67,7 +67,7 @@ namespace Eigen { }; typedef typename MatrixType::Scalar Scalar; typedef std::complex::Real> ComplexScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix EigenvalueType; typedef Matrix ColumnVectorType; @@ -276,7 +276,7 @@ namespace Eigen { /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */ template - inline typename MatrixType::Index RealQZ::findSmallSubdiagEntry(Index iu) + inline Index RealQZ::findSmallSubdiagEntry(Index iu) { using std::abs; Index res = iu; @@ -294,7 +294,7 @@ namespace Eigen { /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */ template - inline typename MatrixType::Index RealQZ::findSmallDiagEntry(Index f, Index l) + inline Index RealQZ::findSmallDiagEntry(Index f, Index l) { using std::abs; Index res = l; diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h index 51e61ba38..60ade50a0 100644 --- a/Eigen/src/Eigenvalues/RealSchur.h +++ b/Eigen/src/Eigenvalues/RealSchur.h @@ -64,7 +64,7 @@ template class RealSchur }; typedef typename MatrixType::Scalar Scalar; typedef std::complex::Real> ComplexScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix EigenvalueType; typedef Matrix ColumnVectorType; @@ -343,7 +343,7 @@ inline typename MatrixType::Scalar RealSchur::computeNormOfT() /** \internal Look for single small sub-diagonal element and returns its index */ template -inline typename MatrixType::Index RealSchur::findSmallSubdiagEntry(Index iu) +inline Index RealSchur::findSmallSubdiagEntry(Index iu) { using std::abs; Index res = iu; diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h index 54f60b197..66d1154cf 100644 --- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h @@ -21,7 +21,7 @@ class GeneralizedSelfAdjointEigenSolver; namespace internal { template struct direct_selfadjoint_eigenvalues; template -ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const typename MatrixType::Index maxIterations, bool computeEigenvectors, MatrixType& eivec); +ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec); } /** \eigenvalues_module \ingroup Eigenvalues_Module @@ -81,7 +81,7 @@ template 
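
In the RealQZ and RealSchur hunks above, out-of-class member definitions that used to return typename MatrixType::Index now simply return Index. A hypothetical class following the same convention (none of these names are Eigen's):

// Illustrative example (not part of the patch).
#include <Eigen/Dense>
#include <cmath>

template <typename MatrixType>
class DiagonalScanner
{
public:
  explicit DiagonalScanner(const MatrixType& m) : m_mat(m) {}
  Eigen::Index findSmallDiagEntry(typename MatrixType::RealScalar tol) const;
private:
  const MatrixType& m_mat;
};

// The return type no longer has to be spelled "typename MatrixType::Index".
template <typename MatrixType>
Eigen::Index DiagonalScanner<MatrixType>::findSmallDiagEntry(typename MatrixType::RealScalar tol) const
{
  using std::abs;
  for (Eigen::Index i = 0; i < m_mat.diagonalSize(); ++i)
    if (abs(m_mat.coeff(i, i)) < tol)
      return i;
  return -1;                                             // not found
}

int main()
{
  Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
  DiagonalScanner<Eigen::Matrix3d> scanner(m);
  return scanner.findSmallDiagEntry(0.5) < 0 ? 0 : 1;    // all entries are 1, so expect 0
}
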
class SelfAdjointEigenSolver /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** \brief Real scalar type for \p _MatrixType. * @@ -456,12 +456,11 @@ namespace internal { * \returns \c Success or \c NoConvergence */ template -ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const typename MatrixType::Index maxIterations, bool computeEigenvectors, MatrixType& eivec) +ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec) { using std::abs; ComputationInfo info; - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index n = diag.size(); diff --git a/Eigen/src/Eigenvalues/Tridiagonalization.h b/Eigen/src/Eigenvalues/Tridiagonalization.h index bedd1cb34..a6fb00b21 100644 --- a/Eigen/src/Eigenvalues/Tridiagonalization.h +++ b/Eigen/src/Eigenvalues/Tridiagonalization.h @@ -69,7 +69,7 @@ template class Tridiagonalization typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 enum { Size = MatrixType::RowsAtCompileTime, @@ -345,7 +345,6 @@ template void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs) { using numext::conj; - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; Index n = matA.rows(); @@ -437,7 +436,6 @@ struct tridiagonalization_inplace_selector { typedef typename Tridiagonalization::CoeffVectorType CoeffVectorType; typedef typename Tridiagonalization::HouseholderSequenceType HouseholderSequenceType; - typedef typename MatrixType::Index Index; template static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) { @@ -525,7 +523,6 @@ struct tridiagonalization_inplace_selector template struct TridiagonalizationMatrixTReturnType : public ReturnByValue > { - typedef typename MatrixType::Index Index; public: /** \brief Constructor. 
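
The SelfAdjointEigenSolver hunks above change the maxIterations parameter of computeFromTridiagonal_impl from typename MatrixType::Index to the global Index. That helper backs the public computeFromTridiagonal method; a sketch of calling it (the numeric values are made up):

// Illustrative example (not part of the patch).
#include <Eigen/Dense>

int main()
{
  // Tridiagonal symmetric matrix given by its diagonal and sub-diagonal.
  Eigen::VectorXd diag(4), subdiag(3);
  diag    << 4.0, 3.0, 2.0, 1.0;
  subdiag << 0.5, 0.5, 0.5;

  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
  es.computeFromTridiagonal(diag, subdiag, Eigen::EigenvaluesOnly);

  return es.info() == Eigen::Success ? 0 : 1;   // expect 0
}
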
* diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h index 7f1907542..f16451656 100644 --- a/Eigen/src/Geometry/Homogeneous.h +++ b/Eigen/src/Geometry/Homogeneous.h @@ -238,7 +238,6 @@ struct homogeneous_left_product_impl,Lhs> typedef typename traits::LhsMatrixType LhsMatrixType; typedef typename remove_all::type LhsMatrixTypeCleaned; typedef typename remove_all::type LhsMatrixTypeNested; - typedef typename MatrixType::Index Index; homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs) : m_lhs(take_matrix_for_product::run(lhs)), m_rhs(rhs) @@ -278,7 +277,6 @@ struct homogeneous_right_product_impl,Rhs> : public ReturnByValue,Rhs> > { typedef typename remove_all::type RhsNested; - typedef typename MatrixType::Index Index; homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {} diff --git a/Eigen/src/Geometry/OrthoMethods.h b/Eigen/src/Geometry/OrthoMethods.h index a245c79d3..6b2e57392 100644 --- a/Eigen/src/Geometry/OrthoMethods.h +++ b/Eigen/src/Geometry/OrthoMethods.h @@ -133,7 +133,6 @@ struct unitOrthogonal_selector typedef typename plain_matrix_type::type VectorType; typedef typename traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename Derived::Index Index; typedef Matrix Vector2; EIGEN_DEVICE_FUNC static inline VectorType run(const Derived& src) diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index 34d337c90..8c9d7049b 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -205,8 +205,7 @@ public: /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Eigen::Index StorageIndex; - /** \deprecated */ - typedef Eigen::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** type of the matrix used to represent the transformation */ typedef typename internal::make_proper_matrix_type::type MatrixType; /** constified MatrixType */ diff --git a/Eigen/src/Geometry/Umeyama.h b/Eigen/src/Geometry/Umeyama.h index 5e20662f8..8d9b7a154 100644 --- a/Eigen/src/Geometry/Umeyama.h +++ b/Eigen/src/Geometry/Umeyama.h @@ -97,7 +97,6 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo typedef typename internal::umeyama_transform_matrix_type::type TransformationMatrixType; typedef typename internal::traits::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename Derived::Index Index; EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) EIGEN_STATIC_ASSERT((internal::is_same::Scalar>::value), diff --git a/Eigen/src/Householder/BlockHouseholder.h b/Eigen/src/Householder/BlockHouseholder.h index 35dbf80a1..39bf8c83d 100644 --- a/Eigen/src/Householder/BlockHouseholder.h +++ b/Eigen/src/Householder/BlockHouseholder.h @@ -21,7 +21,6 @@ namespace internal { // template // void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) // { -// typedef typename TriangularFactorType::Index Index; // typedef typename VectorsType::Scalar Scalar; // const Index nbVecs = vectors.cols(); // eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); @@ -51,7 +50,6 @@ namespace internal { template void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) { - typedef typename TriangularFactorType::Index Index; const Index nbVecs = vectors.cols(); eigen_assert(triFactor.rows() 
== nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); @@ -80,7 +78,6 @@ void make_block_householder_triangular_factor(TriangularFactorType& triFactor, c template void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs, bool forward) { - typedef typename MatrixType::Index Index; enum { TFactorSize = MatrixType::ColsAtCompileTime }; Index nbVecs = vectors.cols(); Matrix T(nbVecs,nbVecs); diff --git a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h index 251c6fa1a..ef7efc9cf 100644 --- a/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +++ b/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h @@ -41,7 +41,6 @@ template class SolveWithGuess : public internal::generic_xpr_base, MatrixXpr, typename internal::traits::StorageKind>::type { public: - typedef typename RhsType::Index Index; typedef typename internal::traits::Scalar Scalar; typedef typename internal::traits::PlainObject PlainObject; typedef typename internal::generic_xpr_base, MatrixXpr, typename internal::traits::StorageKind>::type Base; diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h index da9fb53d0..25eabe984 100644 --- a/Eigen/src/Jacobi/Jacobi.h +++ b/Eigen/src/Jacobi/Jacobi.h @@ -62,7 +62,7 @@ template class JacobiRotation JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); } template - bool makeJacobi(const MatrixBase&, typename Derived::Index p, typename Derived::Index q); + bool makeJacobi(const MatrixBase&, Index p, Index q); bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z); void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0); @@ -123,7 +123,7 @@ bool JacobiRotation::makeJacobi(const RealScalar& x, const Scalar& y, co */ template template -inline bool JacobiRotation::makeJacobi(const MatrixBase& m, typename Derived::Index p, typename Derived::Index q) +inline bool JacobiRotation::makeJacobi(const MatrixBase& m, Index p, Index q) { return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q))); } @@ -300,7 +300,6 @@ namespace internal { template void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation& j) { - typedef typename VectorX::Index Index; typedef typename VectorX::Scalar Scalar; enum { PacketSize = packet_traits::size }; typedef typename packet_traits::type Packet; diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h index 43c2a716e..e57b36bc5 100644 --- a/Eigen/src/LU/PartialPivLU.h +++ b/Eigen/src/LU/PartialPivLU.h @@ -262,7 +262,6 @@ struct partial_lu_impl typedef Block MatrixType; typedef Block BlockType; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; /** \internal performs the LU decomposition in-place of the matrix \a lu * using an unblocked algorithm. @@ -409,13 +408,13 @@ struct partial_lu_impl /** \internal performs the LU decomposition with partial pivoting in-place. 
*/ template -void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndexType& nb_transpositions) +void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndex& nb_transpositions) { eigen_assert(lu.cols() == row_transpositions.size()); eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1); partial_lu_impl - + ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions); } @@ -434,7 +433,7 @@ PartialPivLU& PartialPivLU::compute(const MatrixType& ma m_rowsTranspositions.resize(size); - typename TranspositionType::StorageIndexType nb_transpositions; + typename TranspositionType::StorageIndex nb_transpositions; internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions); m_det_p = (nb_transpositions%2) ? -1 : 1; diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index a7a0d9138..7d5e58d2f 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -557,7 +557,6 @@ template struct FullPivHouseholderQRMatrixQReturnType : public ReturnByValue > { public: - typedef typename MatrixType::Index Index; typedef typename FullPivHouseholderQR::IntDiagSizeVectorType IntDiagSizeVectorType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef Matrix void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0) { - typedef typename MatrixQR::Index Index; typedef typename MatrixQR::Scalar Scalar; typedef typename MatrixQR::RealScalar RealScalar; Index rows = mat.rows(); @@ -264,11 +263,9 @@ template BlockType; diff --git a/Eigen/src/QR/HouseholderQR_MKL.h b/Eigen/src/QR/HouseholderQR_MKL.h index 8a3a7e406..84ab640a1 100644 --- a/Eigen/src/QR/HouseholderQR_MKL.h +++ b/Eigen/src/QR/HouseholderQR_MKL.h @@ -46,8 +46,7 @@ namespace internal { template \ struct householder_qr_inplace_blocked \ { \ - static void run(MatrixQR& mat, HCoeffs& hCoeffs, \ - typename MatrixQR::Index = 32, \ + static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \ typename MatrixQR::Scalar* = 0) \ { \ lapack_int m = (lapack_int) mat.rows(); \ diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h index dad59bcca..3fe17b27f 100644 --- a/Eigen/src/SVD/BDCSVD.h +++ b/Eigen/src/SVD/BDCSVD.h @@ -65,7 +65,6 @@ public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h index 444187ae7..fcf01f518 100644 --- a/Eigen/src/SVD/JacobiSVD.h +++ b/Eigen/src/SVD/JacobiSVD.h @@ -52,7 +52,6 @@ template class qr_preconditioner_impl { public: - typedef typename MatrixType::Index Index; void allocate(const JacobiSVD&) {} bool run(JacobiSVD&, const MatrixType&) { @@ -66,7 +65,6 @@ template class qr_preconditioner_impl { public: - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { @@ -107,7 +105,6 @@ template class qr_preconditioner_impl { public: - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { @@ -157,8 +154,6 @@ template class qr_preconditioner_impl { public: - typedef typename MatrixType::Index Index; - void 
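
The JacobiRotation hunk above switches makeJacobi from typename Derived::Index parameters to plain Index, and partial_lu_inplace now reports the number of transpositions through the transposition type's StorageIndex. A hypothetical makeJacobi call under the new signature:

// Illustrative example (not part of the patch).
#include <Eigen/Dense>
#include <cmath>

int main()
{
  Eigen::Matrix2d m;
  m << 2.0, 1.0,
       1.0, 3.0;

  // p and q are plain Eigen::Index values rather than Matrix2d::Index.
  Eigen::Index p = 0, q = 1;
  Eigen::JacobiRotation<double> rot;
  rot.makeJacobi(m, p, q);                   // rotation that annihilates m(p,q)
  m.applyOnTheLeft(p, q, rot.adjoint());
  m.applyOnTheRight(p, q, rot);

  return std::abs(m(p, q)) < 1e-12 ? 0 : 1;  // expect 0
}
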
allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) @@ -198,7 +193,6 @@ template class qr_preconditioner_impl { public: - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { @@ -257,8 +251,6 @@ template class qr_preconditioner_impl { public: - typedef typename MatrixType::Index Index; - void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) @@ -297,7 +289,6 @@ template class qr_preconditioner_impl { public: - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { @@ -359,7 +350,6 @@ template struct svd_precondition_2x2_block_to_be_real { typedef JacobiSVD SVD; - typedef typename SVD::Index Index; static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {} }; @@ -369,7 +359,6 @@ struct svd_precondition_2x2_block_to_be_real typedef JacobiSVD SVD; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename SVD::Index Index; static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q) { using std::sqrt; @@ -514,7 +503,6 @@ template class JacobiSVD typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef typename MatrixType::Index Index; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h index 95d378da9..8903755e7 100644 --- a/Eigen/src/SVD/SVDBase.h +++ b/Eigen/src/SVD/SVDBase.h @@ -53,6 +53,7 @@ public: typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h index eaa6bb86e..9dc470fd9 100644 --- a/Eigen/src/SVD/UpperBidiagonalization.h +++ b/Eigen/src/SVD/UpperBidiagonalization.h @@ -29,7 +29,7 @@ template class UpperBidiagonalization }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; - typedef typename MatrixType::Index Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix RowVectorType; typedef Matrix ColVectorType; typedef BandMatrix BidiagonalType; @@ -95,7 +95,6 @@ void upperbidiagonalization_inplace_unblocked(MatrixType& mat, typename MatrixType::RealScalar *upper_diagonal, typename MatrixType::Scalar* tempData = 0) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; Index rows = mat.rows(); @@ -153,13 +152,12 @@ template void upperbidiagonalization_blocked_helper(MatrixType& A, typename MatrixType::RealScalar *diagonal, typename MatrixType::RealScalar *upper_diagonal, - typename MatrixType::Index bs, + Index bs, Ref::Flags & RowMajorBit> > X, Ref::Flags & RowMajorBit> > Y) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; enum { StorageOrder = traits::Flags & RowMajorBit }; typedef InnerStride ColInnerStride; @@ -282,10 +280,9 @@ void upperbidiagonalization_blocked_helper(MatrixType& A, */ template void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal, - typename MatrixType::Index maxBlockSize=32, + Index 
maxBlockSize=32, typename MatrixType::Scalar* /*tempData*/ = 0) { - typedef typename MatrixType::Index Index; typedef typename MatrixType::Scalar Scalar; typedef Block BlockType; diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h index b8604a219..40dc1a2bd 100644 --- a/Eigen/src/SparseCore/SparseBlock.h +++ b/Eigen/src/SparseCore/SparseBlock.h @@ -158,7 +158,7 @@ public: matrix.outerIndexPtr()[m_outerStart+k] = p; p += tmp.innerVector(k).nonZeros(); } - std::ptrdiff_t offset = nnz - block_size; + StorageIndex offset = internal::convert_index(nnz - block_size); for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k) { matrix.outerIndexPtr()[k] += offset; diff --git a/Eigen/src/SparseCore/SparseView.h b/Eigen/src/SparseCore/SparseView.h index d6042d970..1c69aa458 100644 --- a/Eigen/src/SparseCore/SparseView.h +++ b/Eigen/src/SparseCore/SparseView.h @@ -78,7 +78,7 @@ struct unary_evaluator, IteratorBased> typedef typename XprType::Scalar Scalar; public: - EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, typename XprType::Index outer) + EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer) : EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view) { incrementToNonZero(); @@ -126,7 +126,6 @@ struct unary_evaluator, IndexBased> typedef SparseView XprType; protected: enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; - typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::Scalar Scalar; public: diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h index 8779eb74c..d182b59b3 100644 --- a/Eigen/src/SuperLUSupport/SuperLUSupport.h +++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -808,7 +808,6 @@ class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> > typedef _MatrixType MatrixType; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; - typedef typename Base::Index Index; public: using Base::_solve_impl; diff --git a/Eigen/src/misc/Image.h b/Eigen/src/misc/Image.h index 75c5f433a..b8b8a0455 100644 --- a/Eigen/src/misc/Image.h +++ b/Eigen/src/misc/Image.h @@ -38,7 +38,6 @@ template struct image_retval_base typedef _DecompositionType DecompositionType; typedef typename DecompositionType::MatrixType MatrixType; typedef ReturnByValue Base; - typedef typename Base::Index Index; image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix) : m_dec(dec), m_rank(dec.rank()), @@ -69,7 +68,6 @@ template struct image_retval_base typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ - typedef typename MatrixType::Index Index; \ typedef Eigen::internal::image_retval_base Base; \ using Base::dec; \ using Base::originalMatrix; \ diff --git a/Eigen/src/misc/Kernel.h b/Eigen/src/misc/Kernel.h index 4b03e44c1..bef5d6ff5 100644 --- a/Eigen/src/misc/Kernel.h +++ b/Eigen/src/misc/Kernel.h @@ -39,7 +39,6 @@ template struct kernel_retval_base { typedef _DecompositionType DecompositionType; typedef ReturnByValue Base; - typedef typename Base::Index Index; explicit kernel_retval_base(const DecompositionType& dec) : m_dec(dec), @@ -68,7 +67,6 @@ template struct kernel_retval_base typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ - typedef typename MatrixType::Index Index; \ 
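
The SparseBlock hunk above stores the outer-index offset as a StorageIndex obtained through internal::convert_index instead of as a std::ptrdiff_t, which is what silences the Index conversion warning there. The same Index-versus-StorageIndex distinction shows up in user code roughly like this (hypothetical snippet; the cast stands in for the internal convert_index helper):

// Illustrative example (not part of the patch).
#include <Eigen/Sparse>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;              // StorageIndex is int by default
  typedef SpMat::StorageIndex StorageIndex;

  SpMat A(10, 10);
  A.insert(2, 3) = 1.0;
  A.makeCompressed();

  Eigen::Index nnz = A.nonZeros();                        // sizes use the wide Eigen::Index
  StorageIndex offset = static_cast<StorageIndex>(nnz);   // explicit, warning-free narrowing

  return offset == 1 ? 0 : 1;                             // expect 0
}
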
typedef Eigen::internal::kernel_retval_base Base; \ using Base::dec; \ using Base::rank; \ From 45cbb0bbb1f66bbc923dd4dd1034b919f6b4a191 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud Date: Mon, 16 Feb 2015 15:05:41 +0100 Subject: [PATCH 5/5] The usage of DenseIndex is deprecated, so let's replace DenseIndex by Index --- Eigen/src/Core/BandMatrix.h | 2 +- Eigen/src/Core/DenseStorage.h | 96 +++++++++---------- Eigen/src/Core/GenericPacketMath.h | 4 +- Eigen/src/Core/Matrix.h | 2 +- Eigen/src/Core/Stride.h | 4 +- Eigen/src/Core/arch/AVX/Complex.h | 8 +- Eigen/src/Core/arch/AVX/PacketMath.h | 8 +- Eigen/src/Core/arch/AltiVec/Complex.h | 8 +- Eigen/src/Core/arch/AltiVec/PacketMath.h | 12 +-- Eigen/src/Core/arch/NEON/Complex.h | 8 +- Eigen/src/Core/arch/NEON/PacketMath.h | 12 +-- Eigen/src/Core/arch/SSE/Complex.h | 4 +- Eigen/src/Core/arch/SSE/PacketMath.h | 12 +-- Eigen/src/Core/functors/NullaryFunctors.h | 2 +- Eigen/src/Core/products/GeneralMatrixMatrix.h | 24 ++--- Eigen/src/Geometry/AlignedBox.h | 2 +- Eigen/src/Geometry/Hyperplane.h | 2 +- Eigen/src/Geometry/ParametrizedLine.h | 2 +- Eigen/src/Geometry/Quaternion.h | 7 +- Eigen/src/PardisoSupport/PardisoSupport.h | 68 ++++++------- Eigen/src/SVD/BDCSVD.h | 2 +- 21 files changed, 143 insertions(+), 146 deletions(-) diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h index d07ea7056..87c124fdf 100644 --- a/Eigen/src/Core/BandMatrix.h +++ b/Eigen/src/Core/BandMatrix.h @@ -179,7 +179,7 @@ struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; - typedef DenseIndex StorageIndex; + typedef Eigen::Index StorageIndex; enum { CoeffReadCost = NumTraits::ReadCost, RowsAtCompileTime = _Rows, diff --git a/Eigen/src/Core/DenseStorage.h b/Eigen/src/Core/DenseStorage.h index 852648639..4c37fadbd 100644 --- a/Eigen/src/Core/DenseStorage.h +++ b/Eigen/src/Core/DenseStorage.h @@ -140,12 +140,12 @@ template class DenseSt if (this != &other) m_data = other.m_data; return *this; } - EIGEN_DEVICE_FUNC DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} + EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } - EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;} - EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;} - EIGEN_DEVICE_FUNC void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} - EIGEN_DEVICE_FUNC void resize(DenseIndex,DenseIndex,DenseIndex) {} + EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;} + EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;} + EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {} + EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {} EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; @@ -158,12 +158,12 @@ template class DenseStorage class DenseStorage class DenseStorage { internal::plain_array m_data; - DenseIndex m_rows; - DenseIndex m_cols; + Index m_rows; + Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) @@ -199,13 +199,13 @@ template class DenseStorage class DenseStorage class DenseStorage { internal::plain_array m_data; - DenseIndex m_rows; + Index m_rows; public: EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) @@ -229,12 +229,12 @@ template class DenseStorage class DenseStorage class DenseStorage { 
internal::plain_array m_data; - DenseIndex m_cols; + Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) @@ -258,12 +258,12 @@ template class DenseStorage class DenseStorage class DenseStorage { T *m_data; - DenseIndex m_rows; - DenseIndex m_cols; + Index m_rows; + Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0), m_cols(0) {} - DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) + DenseStorage(Index size, Index nbRows, Index nbCols) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } DenseStorage(const DenseStorage& other) @@ -317,15 +317,15 @@ template class DenseStorage(m_data, m_rows*m_cols); } void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } - EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;} - EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;} - void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) + EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;} + EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;} + void conservativeResize(Index size, Index nbRows, Index nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); m_rows = nbRows; m_cols = nbCols; } - void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) + void resize(Index size, Index nbRows, Index nbCols) { if(size != m_rows*m_cols) { @@ -347,11 +347,11 @@ template class DenseStorage class DenseStorage { T *m_data; - DenseIndex m_cols; + Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} - DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(nbCols) + DenseStorage(Index size, Index, Index nbCols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto(_Rows*other.m_cols)) @@ -385,14 +385,14 @@ template class DenseStorage(m_data, _Rows*m_cols); } void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } - EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;} - EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;} - void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols) + EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;} + EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;} + void conservativeResize(Index size, Index, Index nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, _Rows*m_cols); m_cols = nbCols; } - EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols) + EIGEN_STRONG_INLINE void resize(Index size, Index, Index nbCols) { if(size != _Rows*m_cols) { @@ -413,11 +413,11 @@ template class DenseStorage class DenseStorage { T *m_data; - DenseIndex m_rows; + Index m_rows; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), 
m_rows(0) {} - DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows) + DenseStorage(Index size, Index nbRows, Index) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto(other.m_rows*_Cols)) @@ -451,14 +451,14 @@ template class DenseStorage(m_data, _Cols*m_rows); } void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } - EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;} - EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;} - void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex) + EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;} + EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;} + void conservativeResize(Index size, Index nbRows, Index) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); m_rows = nbRows; } - EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex) + EIGEN_STRONG_INLINE void resize(Index size, Index nbRows, Index) { if(size != m_rows*_Cols) { diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h index 8759cd06c..74e1174ae 100644 --- a/Eigen/src/Core/GenericPacketMath.h +++ b/Eigen/src/Core/GenericPacketMath.h @@ -236,10 +236,10 @@ template EIGEN_DEVICE_FUNC inline void pstore( template EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from) { (*to) = from; } - template EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, DenseIndex /*stride*/) + template EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/) { return ploadu(from); } - template EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, DenseIndex /*stride*/) + template EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/) { pstore(to, from); } /** \internal tries to do cache prefetching of \a addr */ diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h index 94b1a966e..a10d1856f 100644 --- a/Eigen/src/Core/Matrix.h +++ b/Eigen/src/Core/Matrix.h @@ -107,7 +107,7 @@ struct traits > { typedef _Scalar Scalar; typedef Dense StorageKind; - typedef DenseIndex StorageIndex; + typedef Eigen::Index StorageIndex; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = _Rows, diff --git a/Eigen/src/Core/Stride.h b/Eigen/src/Core/Stride.h index e46faad34..9a2f4f1eb 100644 --- a/Eigen/src/Core/Stride.h +++ b/Eigen/src/Core/Stride.h @@ -44,7 +44,7 @@ template class Stride { public: - typedef DenseIndex Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 enum { InnerStrideAtCompileTime = _InnerStrideAtCompileTime, OuterStrideAtCompileTime = _OuterStrideAtCompileTime @@ -91,7 +91,6 @@ class InnerStride : public Stride<0, Value> { typedef Stride<0, Value> Base; public: - typedef DenseIndex Index; EIGEN_DEVICE_FUNC InnerStride() : Base() {} EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code }; @@ -103,7 +102,6 @@ class OuterStride : public Stride { typedef Stride Base; public: - typedef DenseIndex Index; EIGEN_DEVICE_FUNC OuterStride() : Base() {} EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break valid code }; diff --git a/Eigen/src/Core/arch/AVX/Complex.h b/Eigen/src/Core/arch/AVX/Complex.h index 
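
The DenseStorage and Stride hunks above replace every DenseIndex by the global Index, so sizes, strides and resize arguments are all expressed in one type. For user code nothing changes except that Eigen::Index is now the canonical spelling (hypothetical snippet):

// Illustrative example (not part of the patch).
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd m;
  Eigen::Index rows = 3, cols = 4;    // DenseIndex remains only as an alias of this type
  m.resize(rows, cols);               // DenseStorage::resize now takes Index arguments
  m.setZero();
  return m.rows() * m.cols() == 12 ? 0 : 1;   // expect 0
}
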
aa5aa1e34..003a1fc3c 100644 --- a/Eigen/src/Core/arch/AVX/Complex.h +++ b/Eigen/src/Core/arch/AVX/Complex.h @@ -92,7 +92,7 @@ template<> EIGEN_STRONG_INLINE Packet4cf ploaddup(const std::complex< template<> EIGEN_STRONG_INLINE void pstore >(std::complex* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); } template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); } -template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather, Packet4cf>(const std::complex* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather, Packet4cf>(const std::complex* from, Index stride) { return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]), std::imag(from[2*stride]), std::real(from[2*stride]), @@ -100,7 +100,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather, Packe std::imag(from[0*stride]), std::real(from[0*stride]))); } -template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet4cf>(std::complex* to, const Packet4cf& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet4cf>(std::complex* to, const Packet4cf& from, Index stride) { __m128 low = _mm256_extractf128_ps(from.v, 0); to[stride*0] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)), @@ -310,13 +310,13 @@ template<> EIGEN_STRONG_INLINE Packet2cd ploaddup(const std::complex< template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); } template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); } -template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather, Packet2cd>(const std::complex* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather, Packet2cd>(const std::complex* from, Index stride) { return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]), std::imag(from[0*stride]), std::real(from[0*stride]))); } -template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cd>(std::complex* to, const Packet2cd& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cd>(std::complex* to, const Packet2cd& from, Index stride) { __m128d low = _mm256_extractf128_pd(from.v, 0); to[stride*0] = std::complex(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1))); diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h index be66a502a..ff6cc6b56 100644 --- a/Eigen/src/Core/arch/AVX/PacketMath.h +++ b/Eigen/src/Core/arch/AVX/PacketMath.h @@ -226,17 +226,17 @@ template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet8i& // NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available // NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4); -template<> EIGEN_DEVICE_FUNC inline Packet8f pgather(const float* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet8f pgather(const float* from, Index stride) { return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride], from[3*stride], from[2*stride], from[1*stride], from[0*stride]); } -template<> EIGEN_DEVICE_FUNC inline Packet4d pgather(const double* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4d pgather(const double* from, Index stride) { return 
_mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet8f& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet8f& from, Index stride) { __m128 low = _mm256_extractf128_ps(from, 0); to[stride*0] = _mm_cvtss_f32(low); @@ -250,7 +250,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, co to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)); to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet4d& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet4d& from, Index stride) { __m128d low = _mm256_extractf128_pd(from, 0); to[stride*0] = _mm_cvtsd_f64(low); diff --git a/Eigen/src/Core/arch/AltiVec/Complex.h b/Eigen/src/Core/arch/AltiVec/Complex.h index f9b93a42b..565d2ece0 100644 --- a/Eigen/src/Core/arch/AltiVec/Complex.h +++ b/Eigen/src/Core/arch/AltiVec/Complex.h @@ -67,14 +67,14 @@ template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, Index stride) { std::complex EIGEN_ALIGN16 af[2]; af[0] = from[0*stride]; af[1] = from[1*stride]; return Packet2cf(vec_ld(0, (const float*)af)); } -template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) { std::complex EIGEN_ALIGN16 af[2]; vec_st(from.v, 0, (float*)af); @@ -285,14 +285,14 @@ template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex< template<> EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) { /* here we really have to use unaligned loads :( */ return ploadu(&from); } -template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>(const std::complex* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>(const std::complex* from, Index stride) { std::complex EIGEN_ALIGN16 af[2]; af[0] = from[0*stride]; af[1] = from[1*stride]; return pload(af); } -template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>(std::complex* to, const Packet1cd& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>(std::complex* to, const Packet1cd& from, Index stride) { std::complex EIGEN_ALIGN16 af[2]; pstore >(af, from); diff --git a/Eigen/src/Core/arch/AltiVec/PacketMath.h b/Eigen/src/Core/arch/AltiVec/PacketMath.h index 6b68fc7a5..d647427ce 100755 --- a/Eigen/src/Core/arch/AltiVec/PacketMath.h +++ b/Eigen/src/Core/arch/AltiVec/PacketMath.h @@ -252,7 +252,7 @@ pbroadcast4(const int *a, a3 = vec_splat(a3, 3); } -template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) { float EIGEN_ALIGN16 af[4]; af[0] = from[0*stride]; @@ -261,7 +261,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const floa af[3] = from[3*stride]; return pload(af); } -template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, Index stride) { int EIGEN_ALIGN16 ai[4]; ai[0] = from[0*stride]; @@ -270,7 
+270,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* f ai[3] = from[3*stride]; return pload(ai); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, Index stride) { float EIGEN_ALIGN16 af[4]; pstore(af, from); @@ -279,7 +279,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, co to[2*stride] = af[2]; to[3*stride] = af[3]; } -template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, Index stride) { int EIGEN_ALIGN16 ai[4]; pstore((int *)ai, from); @@ -793,14 +793,14 @@ pbroadcast4(const double *a, a2 = vec_splat_dbl(a3, 0); a3 = vec_splat_dbl(a3, 1); } -template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) { double EIGEN_ALIGN16 af[2]; af[0] = from[0*stride]; af[1] = from[1*stride]; return pload(af); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) { double EIGEN_ALIGN16 af[2]; pstore(af, from); diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h index 0fdcb0741..154daa7a7 100644 --- a/Eigen/src/Core/arch/NEON/Complex.h +++ b/Eigen/src/Core/arch/NEON/Complex.h @@ -112,7 +112,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex< template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); } template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); } -template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, Index stride) { Packet4f res; res = vsetq_lane_f32(std::real(from[0*stride]), res, 0); @@ -122,7 +122,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packe return Packet2cf(res); } -template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) { to[stride*0] = std::complex(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1)); to[stride*1] = std::complex(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3)); @@ -363,7 +363,7 @@ template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex< template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_ARM_PREFETCH((double *)addr); } -template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>(const std::complex* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>(const std::complex* from, Index stride) { Packet2d res; res = vsetq_lane_f64(std::real(from[0*stride]), res, 0); @@ -371,7 +371,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Pack return Packet1cd(res); } -template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>(std::complex* to, const Packet1cd& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter, 
Packet1cd>(std::complex* to, const Packet1cd& from, Index stride) { to[stride*0] = std::complex(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1)); } diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h index 559682cf7..8149aed7f 100644 --- a/Eigen/src/Core/arch/NEON/PacketMath.h +++ b/Eigen/src/Core/arch/NEON/PacketMath.h @@ -250,7 +250,7 @@ template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& f template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); } template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); } -template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) { Packet4f res; res = vsetq_lane_f32(from[0*stride], res, 0); @@ -259,7 +259,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const floa res = vsetq_lane_f32(from[3*stride], res, 3); return res; } -template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, Index stride) { Packet4i res; res = vsetq_lane_s32(from[0*stride], res, 0); @@ -269,14 +269,14 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* f return res; } -template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, Index stride) { to[stride*0] = vgetq_lane_f32(from, 0); to[stride*1] = vgetq_lane_f32(from, 1); to[stride*2] = vgetq_lane_f32(from, 2); to[stride*3] = vgetq_lane_f32(from, 3); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, Index stride) { to[stride*0] = vgetq_lane_s32(from, 0); to[stride*1] = vgetq_lane_s32(from, 1); @@ -606,14 +606,14 @@ template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); } -template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) { Packet2d res; res = vsetq_lane_f64(from[0*stride], res, 0); res = vsetq_lane_f64(from[1*stride], res, 1); return res; } -template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) { to[stride*0] = vgetq_lane_f64(from, 0); to[stride*1] = vgetq_lane_f64(from, 1); diff --git a/Eigen/src/Core/arch/SSE/Complex.h b/Eigen/src/Core/arch/SSE/Complex.h index 565e448fe..acb49abf8 100644 --- a/Eigen/src/Core/arch/SSE/Complex.h +++ b/Eigen/src/Core/arch/SSE/Complex.h @@ -115,13 +115,13 @@ template<> EIGEN_STRONG_INLINE void pstore >(std::complex EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); } -template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, Index stride) { 
return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]), std::imag(from[0*stride]), std::real(from[0*stride]))); } -template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) { to[stride*0] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)), _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1))); diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h index 898cb9ab0..86d94dffa 100755 --- a/Eigen/src/Core/arch/SSE/PacketMath.h +++ b/Eigen/src/Core/arch/SSE/PacketMath.h @@ -387,32 +387,32 @@ template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast(to), Packet2d(_mm_castps_pd(from))); } template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast(to), Packet2d(_mm_castsi128_pd(from))); } -template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) { return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]); } -template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) { return _mm_set_pd(from[1*stride], from[0*stride]); } -template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, Index stride) { return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, Index stride) { to[stride*0] = _mm_cvtss_f32(from); to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1)); to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2)); to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3)); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) { to[stride*0] = _mm_cvtsd_f64(from); to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1)); } -template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, DenseIndex stride) +template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, Index stride) { to[stride*0] = _mm_cvtsi128_si32(from); to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1)); diff --git a/Eigen/src/Core/functors/NullaryFunctors.h b/Eigen/src/Core/functors/NullaryFunctors.h index be03fbf52..2362b3a7f 100644 --- a/Eigen/src/Core/functors/NullaryFunctors.h +++ b/Eigen/src/Core/functors/NullaryFunctors.h @@ -112,7 +112,7 @@ template struct functor_traits< linspaced_o template struct linspaced_op { typedef typename packet_traits::type Packet; - linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? 
Scalar() : (high-low)/Scalar(num_steps-1))) {} + linspaced_op(const Scalar& low, const Scalar& high, Index num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {} template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); } diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h index 8210ea584..44e44b986 100644 --- a/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -257,9 +257,9 @@ class level3_blocking LhsScalar* m_blockA; RhsScalar* m_blockB; - DenseIndex m_mc; - DenseIndex m_nc; - DenseIndex m_kc; + Index m_mc; + Index m_nc; + Index m_kc; public: @@ -267,9 +267,9 @@ class level3_blocking : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {} - inline DenseIndex mc() const { return m_mc; } - inline DenseIndex nc() const { return m_nc; } - inline DenseIndex kc() const { return m_kc; } + inline Index mc() const { return m_mc; } + inline Index nc() const { return m_nc; } + inline Index kc() const { return m_kc; } inline LhsScalar* blockA() { return m_blockA; } inline RhsScalar* blockB() { return m_blockB; } @@ -299,7 +299,7 @@ class gemm_blocking_spacem_mc = ActualRows; this->m_nc = ActualCols; @@ -326,12 +326,12 @@ class gemm_blocking_space::type RhsScalar; typedef gebp_traits Traits; - DenseIndex m_sizeA; - DenseIndex m_sizeB; + Index m_sizeA; + Index m_sizeB; public: - gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, int num_threads, bool l3_blocking) + gemm_blocking_space(Index rows, Index cols, Index depth, int num_threads, bool l3_blocking) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; @@ -343,8 +343,8 @@ class gemm_blocking_spacem_mc; - DenseIndex n = this->m_nc; + Index m = this->m_mc; + Index n = this->m_nc; computeProductBlockingSizes(this->m_kc, m, n, num_threads); } diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h index d6c5c1293..b7c02e8db 100644 --- a/Eigen/src/Geometry/AlignedBox.h +++ b/Eigen/src/Geometry/AlignedBox.h @@ -32,7 +32,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim }; typedef _Scalar Scalar; typedef NumTraits ScalarTraits; - typedef DenseIndex Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename ScalarTraits::Real RealScalar; typedef typename ScalarTraits::NonInteger NonInteger; typedef Matrix VectorType; diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h index 00b7c4300..2d076d7f8 100644 --- a/Eigen/src/Geometry/Hyperplane.h +++ b/Eigen/src/Geometry/Hyperplane.h @@ -41,7 +41,7 @@ public: }; typedef _Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef DenseIndex Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix VectorType; typedef Matrix::Real RealScalar; - typedef DenseIndex Index; + typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix VectorType; /** Default constructor without initialization */ diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h index 508eba767..e1ad803bb 100644 --- a/Eigen/src/Geometry/Quaternion.h +++ b/Eigen/src/Geometry/Quaternion.h @@ -724,7 +724,6 @@ template struct quaternionbase_assign_impl { typedef typename Other::Scalar Scalar; - typedef DenseIndex Index; template static inline void run(QuaternionBase& 
q, const Other& mat)
   {
     using std::sqrt;
@@ -742,13 +741,13 @@ struct quaternionbase_assign_impl
     }
     else
     {
-      DenseIndex i = 0;
+      Index i = 0;
       if (mat.coeff(1,1) > mat.coeff(0,0))
         i = 1;
       if (mat.coeff(2,2) > mat.coeff(i,i))
         i = 2;
-      DenseIndex j = (i+1)%3;
-      DenseIndex k = (j+1)%3;
 
+      Index j = (i+1)%3;
+      Index k = (j+1)%3;
       t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
       q.coeffs().coeffRef(i) = Scalar(0.5) * t;
diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h
index 7c75dcb7f..7ab2e3e6b 100644
--- a/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -40,13 +40,13 @@ template class PardisoLDLT;
 namespace internal
 {
-  template<typename Index>
+  template<typename IndexType>
   struct pardiso_run_selector
   {
-    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
-                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
+                      IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
     {
-      Index error = 0;
+      IndexType error = 0;
       ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
       return error;
     }
@@ -54,11 +54,11 @@ namespace internal
   template<>
   struct pardiso_run_selector<long long int>
   {
-    typedef long long int Index;
-    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
-                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    typedef long long int IndexType;
+    static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
+                      IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
     {
-      Index error = 0;
+      IndexType error = 0;
       ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
       return error;
     }
@@ -72,7 +72,7 @@ namespace internal
     typedef _MatrixType MatrixType;
     typedef typename _MatrixType::Scalar Scalar;
     typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
+    typedef typename _MatrixType::StorageIndex StorageIndex;
   };
   template<typename _MatrixType>
@@ -81,7 +81,7 @@ namespace internal
     typedef _MatrixType MatrixType;
     typedef typename _MatrixType::Scalar Scalar;
     typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
+    typedef typename _MatrixType::StorageIndex StorageIndex;
   };
   template<typename _MatrixType>
@@ -90,7 +90,7 @@ namespace internal
     typedef _MatrixType MatrixType;
     typedef typename _MatrixType::Scalar Scalar;
     typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
+    typedef typename _MatrixType::StorageIndex StorageIndex;
   };
 }
@@ -111,18 +111,18 @@ class PardisoImpl : public SparseSolveBase
     typedef typename Traits::Scalar Scalar;
     typedef typename Traits::RealScalar RealScalar;
     typedef typename Traits::StorageIndex StorageIndex;
-    typedef SparseMatrix<Scalar, RowMajor, Index> SparseMatrixType;
+    typedef SparseMatrix<Scalar, RowMajor, StorageIndex> SparseMatrixType;
     typedef Matrix<Scalar, Dynamic, 1> VectorType;
-    typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
-    typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
-    typedef Array<Index, 64, 1, DontAlign> ParameterType;
+    typedef Matrix<StorageIndex, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<StorageIndex, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+    typedef Array<StorageIndex, 64, 1, DontAlign> ParameterType;
     enum {
ScalarIsComplex = NumTraits::IsComplex }; PardisoImpl() { - eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type"); + eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type"); m_iparm.setZero(); m_msglvl = 0; // No output m_isInitialized = false; @@ -181,7 +181,7 @@ class PardisoImpl : public SparseSolveBase { if(m_isInitialized) // Factorization ran at least once { - internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0, + internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0, m_iparm.data(), m_msglvl, 0, 0); } } @@ -261,9 +261,9 @@ Derived& PardisoImpl::compute(const MatrixType& a) derived().getMatrix(a); Index error; - error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 12, m_size, - m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), - m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 12, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); manageErrorCode(error); m_analysisIsOk = true; @@ -284,9 +284,9 @@ Derived& PardisoImpl::analyzePattern(const MatrixType& a) derived().getMatrix(a); Index error; - error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 11, m_size, - m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), - m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 11, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); manageErrorCode(error); m_analysisIsOk = true; @@ -304,9 +304,9 @@ Derived& PardisoImpl::factorize(const MatrixType& a) derived().getMatrix(a); Index error; - error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 22, m_size, - m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), - m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 22, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); manageErrorCode(error); m_factorizationIsOk = true; @@ -348,10 +348,10 @@ bool PardisoImpl::_solve_impl(const MatrixBase &b, MatrixBase::run(m_pt, 1, 1, m_type, 33, m_size, - m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), - m_perm.data(), nrhs, m_iparm.data(), m_msglvl, - rhs_ptr, x.derived().data()); + error = internal::pardiso_run_selector::run(m_pt, 1, 1, m_type, 33, m_size, + m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), + m_perm.data(), nrhs, m_iparm.data(), m_msglvl, + rhs_ptr, x.derived().data()); return error==0; } @@ -424,7 +424,7 @@ class PardisoLLT : public PardisoImpl< PardisoLLT > protected: typedef PardisoImpl< PardisoLLT > Base; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::RealScalar RealScalar; using Base::pardisoInit; using Base::m_matrix; @@ -454,7 +454,7 @@ class PardisoLLT : public PardisoImpl< PardisoLLT > void getMatrix(const MatrixType& matrix) { // PARDISO supports only upper, row-major matrices - PermutationMatrix p_null; + PermutationMatrix 
p_null; m_matrix.resize(matrix.rows(), matrix.cols()); m_matrix.template selfadjointView() = matrix.template selfadjointView().twistedBy(p_null); } @@ -482,7 +482,7 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT > protected: typedef PardisoImpl< PardisoLDLT > Base; typedef typename Base::Scalar Scalar; - typedef typename Base::Index Index; + typedef typename Base::StorageIndex StorageIndex; typedef typename Base::RealScalar RealScalar; using Base::pardisoInit; using Base::m_matrix; @@ -510,7 +510,7 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT > void getMatrix(const MatrixType& matrix) { // PARDISO supports only upper, row-major matrices - PermutationMatrix p_null; + PermutationMatrix p_null; m_matrix.resize(matrix.rows(), matrix.cols()); m_matrix.template selfadjointView() = matrix.template selfadjointView().twistedBy(p_null); } diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h index 3fe17b27f..fd7c8a4b2 100644 --- a/Eigen/src/SVD/BDCSVD.h +++ b/Eigen/src/SVD/BDCSVD.h @@ -306,7 +306,7 @@ void BDCSVD::structured_update(Block A, co { // If the matrices are large enough, let's exploit the sparse structure of A by // splitting it in half (wrt n1), and packing the non-zero columns. - DenseIndex n2 = n - n1; + Index n2 = n - n1; MatrixXr A1(n1,n), A2(n2,n), B1(n,n), B2(n,n); Index k1=0, k2=0; for(Index j=0; j
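As a rough illustration of the split these hunks carry through, and not part of the patch itself, the following standalone C++ sketch shows how the two index types are meant to be used once the rename is in place. It assumes the post-change API only: Eigen::Index for sizes, loop counters and packet strides, and SparseMatrix<>::StorageIndex (defaulting to int) for the integer type stored inside sparse matrices; it also assumes a C++11 compiler for static_assert.

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  // Eigen::Index is the global signed index type used for sizes and loop
  // counters; it is also the stride argument type of the pgather/pscatter
  // kernels patched in the arch/ files above (formerly DenseIndex).
  Eigen::Index rows = 5, cols = 5;

  // StorageIndex is the integer type held in a sparse matrix's inner/outer
  // index arrays; it replaces the old per-class Sparse Index typedef and
  // defaults to int.
  typedef Eigen::SparseMatrix<double> SpMat;
  static_assert(sizeof(SpMat::StorageIndex) == sizeof(int),
                "default StorageIndex is expected to be int");

  SpMat A(rows, cols);
  A.insert(0, 0) = 1.0;
  A.makeCompressed();

  Eigen::Index nnz = A.nonZeros();                       // sizes come back as Index
  const SpMat::StorageIndex* inner = A.innerIndexPtr();  // raw buffers hold StorageIndex
  std::cout << nnz << " " << inner[0] << std::endl;
  return 0;
}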