Merged in chtz/eigen-indexconversion (pull request PR-92)

bug #877, bug #572: Get rid of Index conversion warnings. Summary of changes:

- Introduce a global typedef Eigen::Index (defaulting to std::ptrdiff_t), deprecating Eigen::DenseIndex and AnyExpr<>::Index.

- Eigen::Index is used throughout the API to represent indices, offsets, and sizes.

- Classes that store an array of indices use the StorageIndex type for the stored values; it is a template parameter of the class and defaults to int.

- Methods that *explicitly* set or return an element of such an array take or return a StorageIndex; in all other cases, the Index type is used (see the sketch below).
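
To make the convention above concrete, here is a minimal, hypothetical sketch (not part of this patch; the ColumnMarkers class and its members are invented for illustration) of a container that follows the new rules: the public interface uses Eigen::Index for sizes and positions, while the stored index array uses the StorageIndex template parameter, converting explicitly at the boundary.

#include <Eigen/Core>   // provides Eigen::Index (std::ptrdiff_t by default)
#include <cstddef>
#include <vector>

// Hypothetical container storing one index per column, following the new convention.
template<typename _StorageIndex = int>
class ColumnMarkers
{
  public:
    typedef _StorageIndex StorageIndex;                // type of the *stored* indices
    // Sizes and positions exposed by the API use Eigen::Index.
    explicit ColumnMarkers(Eigen::Index cols)
      : m_markers(static_cast<std::size_t>(cols), StorageIndex(0)) {}
    Eigen::Index cols() const { return static_cast<Eigen::Index>(m_markers.size()); }
    // Explicitly reading or writing a stored element takes/returns StorageIndex...
    StorageIndex marker(Eigen::Index col) const { return m_markers[static_cast<std::size_t>(col)]; }
    void setMarker(Eigen::Index col, StorageIndex value) { m_markers[static_cast<std::size_t>(col)] = value; }
    // ...while everything that is merely an index, offset, or size stays an Eigen::Index.
  private:
    std::vector<StorageIndex> m_markers;
};

For instance, SparseMatrix<Scalar,Options,StorageIndex> follows the same pattern after this change: rows() and cols() return Index, while the stored inner indices exposed by innerIndexPtr() are of type StorageIndex.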
Gael Guennebaud 2015-02-16 15:29:00 +01:00
commit 98604576d1
176 changed files with 1495 additions and 1662 deletions

View File

@ -59,7 +59,8 @@ template<typename _MatrixType, int _UpLo> class LDLT
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
@ -251,8 +252,7 @@ template<> struct ldlt_inplace<Lower>
using std::abs;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename TranspositionType::StorageIndexType IndexType;
typedef typename TranspositionType::StorageIndex IndexType;
eigen_assert(mat.rows()==mat.cols());
const Index size = mat.rows();
@ -342,7 +342,6 @@ template<> struct ldlt_inplace<Lower>
using numext::isfinite;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
const Index size = mat.rows();
eigen_assert(mat.cols() == size && w.size()==size);
@ -450,7 +449,7 @@ template<typename MatrixType, int _UpLo>
template<typename Derived>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
{
typedef typename TranspositionType::StorageIndexType IndexType;
typedef typename TranspositionType::StorageIndex IndexType;
const Index size = w.rows();
if (m_isInitialized)
{

View File

@ -59,7 +59,8 @@ template<typename _MatrixType, int _UpLo> class LLT
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef typename MatrixType::StorageIndex StorageIndex;
enum {
PacketSize = internal::packet_traits<Scalar>::size,
@ -183,12 +184,11 @@ namespace internal {
template<typename Scalar, int UpLo> struct llt_inplace;
template<typename MatrixType, typename VectorType>
static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)
static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)
{
using std::sqrt;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::ColXpr ColXpr;
typedef typename internal::remove_all<ColXpr>::type ColXprCleaned;
typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;
@ -257,10 +257,9 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
{
typedef typename NumTraits<Scalar>::Real RealScalar;
template<typename MatrixType>
static typename MatrixType::Index unblocked(MatrixType& mat)
static Index unblocked(MatrixType& mat)
{
using std::sqrt;
typedef typename MatrixType::Index Index;
eigen_assert(mat.rows()==mat.cols());
const Index size = mat.rows();
@ -284,9 +283,8 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
}
template<typename MatrixType>
static typename MatrixType::Index blocked(MatrixType& m)
static Index blocked(MatrixType& m)
{
typedef typename MatrixType::Index Index;
eigen_assert(m.rows()==m.cols());
Index size = m.rows();
if(size<32)
@ -317,7 +315,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
}
template<typename MatrixType, typename VectorType>
static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
{
return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);
}
@ -328,19 +326,19 @@ template<typename Scalar> struct llt_inplace<Scalar, Upper>
typedef typename NumTraits<Scalar>::Real RealScalar;
template<typename MatrixType>
static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat)
static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat)
{
Transpose<MatrixType> matt(mat);
return llt_inplace<Scalar, Lower>::unblocked(matt);
}
template<typename MatrixType>
static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat)
static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat)
{
Transpose<MatrixType> matt(mat);
return llt_inplace<Scalar, Lower>::blocked(matt);
}
template<typename MatrixType, typename VectorType>
static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
{
Transpose<MatrixType> matt(mat);
return llt_inplace<Scalar, Lower>::rankUpdate(matt, vec.conjugate(), sigma);

View File

@ -46,7 +46,7 @@ template<typename Scalar> struct mkl_llt;
template<> struct mkl_llt<EIGTYPE> \
{ \
template<typename MatrixType> \
static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \
static inline Index potrf(MatrixType& m, char uplo) \
{ \
lapack_int matrix_order; \
lapack_int size, lda, info, StorageOrder; \
@ -67,23 +67,23 @@ template<> struct mkl_llt<EIGTYPE> \
template<> struct llt_inplace<EIGTYPE, Lower> \
{ \
template<typename MatrixType> \
static typename MatrixType::Index blocked(MatrixType& m) \
static Index blocked(MatrixType& m) \
{ \
return mkl_llt<EIGTYPE>::potrf(m, 'L'); \
} \
template<typename MatrixType, typename VectorType> \
static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
{ return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \
}; \
template<> struct llt_inplace<EIGTYPE, Upper> \
{ \
template<typename MatrixType> \
static typename MatrixType::Index blocked(MatrixType& m) \
static Index blocked(MatrixType& m) \
{ \
return mkl_llt<EIGTYPE>::potrf(m, 'U'); \
} \
template<typename MatrixType, typename VectorType> \
static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
{ \
Transpose<MatrixType> matt(mat); \
return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \

View File

@ -48,8 +48,8 @@ void cholmod_configure_matrix(CholmodType& mat)
/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
* Note that the data are shared.
*/
template<typename _Scalar, int _Options, typename _Index>
cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
template<typename _Scalar, int _Options, typename _StorageIndex>
cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_StorageIndex>& mat)
{
cholmod_sparse res;
res.nzmax = mat.nonZeros();
@ -74,11 +74,11 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
res.dtype = 0;
res.stype = -1;
if (internal::is_same<_Index,int>::value)
if (internal::is_same<_StorageIndex,int>::value)
{
res.itype = CHOLMOD_INT;
}
else if (internal::is_same<_Index,UF_long>::value)
else if (internal::is_same<_StorageIndex,UF_long>::value)
{
res.itype = CHOLMOD_LONG;
}
@ -138,12 +138,12 @@ cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
* The data are not copied but shared. */
template<typename Scalar, int Flags, typename Index>
MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
template<typename Scalar, int Flags, typename StorageIndex>
MappedSparseMatrix<Scalar,Flags,StorageIndex> viewAsEigen(cholmod_sparse& cm)
{
return MappedSparseMatrix<Scalar,Flags,Index>
(cm.nrow, cm.ncol, static_cast<Index*>(cm.p)[cm.ncol],
static_cast<Index*>(cm.p), static_cast<Index*>(cm.i),static_cast<Scalar*>(cm.x) );
return MappedSparseMatrix<Scalar,Flags,StorageIndex>
(cm.nrow, cm.ncol, static_cast<StorageIndex*>(cm.p)[cm.ncol],
static_cast<StorageIndex*>(cm.p), static_cast<StorageIndex*>(cm.i),static_cast<Scalar*>(cm.x) );
}
enum CholmodMode {
@ -169,7 +169,7 @@ class CholmodBase : public SparseSolverBase<Derived>
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef MatrixType CholMatrixType;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
public:
@ -195,8 +195,8 @@ class CholmodBase : public SparseSolverBase<Derived>
cholmod_finish(&m_cholmod);
}
inline Index cols() const { return m_cholmodFactor->n; }
inline Index rows() const { return m_cholmodFactor->n; }
inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
/** \brief Reports whether previous computation was successful.
*

View File

@ -50,7 +50,6 @@ template<typename Derived> class ArrayBase
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;

View File

@ -179,20 +179,20 @@ struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Stop, Sto
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { }
};
template<typename Kernel, int Index, int Stop>
template<typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_DefaultTraversal_InnerUnrolling
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer)
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer)
{
kernel.assignCoeffByOuterInner(outer, Index);
copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Index+1, Stop>::run(kernel, outer);
kernel.assignCoeffByOuterInner(outer, Index_);
copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Index_+1, Stop>::run(kernel, outer);
}
};
template<typename Kernel, int Stop>
struct copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Stop, Stop>
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index) { }
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { }
};
/***********************
@ -246,13 +246,13 @@ struct copy_using_evaluator_innervec_CompleteUnrolling<Kernel, Stop, Stop>
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { }
};
template<typename Kernel, int Index, int Stop>
template<typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_innervec_InnerUnrolling
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, typename Kernel::Index outer)
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer)
{
kernel.template assignPacketByOuterInner<Aligned, Aligned>(outer, Index);
enum { NextIndex = Index + packet_traits<typename Kernel::Scalar>::size };
kernel.template assignPacketByOuterInner<Aligned, Aligned>(outer, Index_);
enum { NextIndex = Index_ + packet_traits<typename Kernel::Scalar>::size };
copy_using_evaluator_innervec_InnerUnrolling<Kernel, NextIndex, Stop>::run(kernel, outer);
}
};
@ -260,7 +260,7 @@ struct copy_using_evaluator_innervec_InnerUnrolling
template<typename Kernel, int Stop>
struct copy_using_evaluator_innervec_InnerUnrolling<Kernel, Stop, Stop>
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, typename Kernel::Index) { }
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { }
};
/***************************************************************************
@ -283,8 +283,6 @@ struct dense_assignment_loop<Kernel, DefaultTraversal, NoUnrolling>
{
EIGEN_DEVICE_FUNC static void run(Kernel &kernel)
{
typedef typename Kernel::Index Index;
for(Index outer = 0; outer < kernel.outerSize(); ++outer) {
for(Index inner = 0; inner < kernel.innerSize(); ++inner) {
kernel.assignCoeffByOuterInner(outer, inner);
@ -306,7 +304,7 @@ struct dense_assignment_loop<Kernel, DefaultTraversal, CompleteUnrolling>
template<typename Kernel>
struct dense_assignment_loop<Kernel, DefaultTraversal, InnerUnrolling>
{
typedef typename Kernel::Index Index;
typedef typename Kernel::StorageIndex StorageIndex;
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
{
typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
@ -330,7 +328,7 @@ struct unaligned_dense_assignment_loop
{
// if IsAligned = true, then do nothing
template <typename Kernel>
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, typename Kernel::Index, typename Kernel::Index) {}
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {}
};
template <>
@ -342,16 +340,16 @@ struct unaligned_dense_assignment_loop<false>
#if EIGEN_COMP_MSVC
template <typename Kernel>
static EIGEN_DONT_INLINE void run(Kernel &kernel,
typename Kernel::Index start,
typename Kernel::Index end)
Index start,
Index end)
#else
template <typename Kernel>
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel,
typename Kernel::Index start,
typename Kernel::Index end)
Index start,
Index end)
#endif
{
for (typename Kernel::Index index = start; index < end; ++index)
for (Index index = start; index < end; ++index)
kernel.assignCoeff(index);
}
};
@ -361,8 +359,6 @@ struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, NoUnrolling>
{
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
{
typedef typename Kernel::Index Index;
const Index size = kernel.size();
typedef packet_traits<typename Kernel::Scalar> PacketTraits;
enum {
@ -386,7 +382,7 @@ struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, NoUnrolling>
template<typename Kernel>
struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, CompleteUnrolling>
{
typedef typename Kernel::Index Index;
typedef typename Kernel::StorageIndex StorageIndex;
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
{
typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
@ -409,8 +405,6 @@ struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, NoUnrolling>
{
EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel)
{
typedef typename Kernel::Index Index;
const Index innerSize = kernel.innerSize();
const Index outerSize = kernel.outerSize();
const Index packetSize = packet_traits<typename Kernel::Scalar>::size;
@ -433,7 +427,7 @@ struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, CompleteUnrolling
template<typename Kernel>
struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, InnerUnrolling>
{
typedef typename Kernel::Index Index;
typedef typename Kernel::StorageIndex StorageIndex;
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
{
typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
@ -452,7 +446,6 @@ struct dense_assignment_loop<Kernel, LinearTraversal, NoUnrolling>
{
EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel)
{
typedef typename Kernel::Index Index;
const Index size = kernel.size();
for(Index i = 0; i < size; ++i)
kernel.assignCoeff(i);
@ -478,7 +471,6 @@ struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling>
{
EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel)
{
typedef typename Kernel::Index Index;
typedef packet_traits<typename Kernel::Scalar> PacketTraits;
enum {
packetSize = PacketTraits::size,
@ -533,7 +525,7 @@ public:
typedef DstEvaluatorTypeT DstEvaluatorType;
typedef SrcEvaluatorTypeT SrcEvaluatorType;
typedef typename DstEvaluatorType::Scalar Scalar;
typedef typename DstEvaluatorType::Index Index;
typedef typename DstEvaluatorType::StorageIndex StorageIndex;
typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;
@ -731,8 +723,8 @@ EIGEN_DEVICE_FUNC void call_assignment_no_alias(Dst& dst, const Src& src, const
&& int(Dst::SizeAtCompileTime) != 1
};
typename Dst::Index dstRows = NeedToTranspose ? src.cols() : src.rows();
typename Dst::Index dstCols = NeedToTranspose ? src.rows() : src.cols();
Index dstRows = NeedToTranspose ? src.cols() : src.rows();
Index dstCols = NeedToTranspose ? src.rows() : src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);

View File

@ -84,7 +84,6 @@ template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal,
struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, InnerVectorizedTraversal>
{
typedef typename Derived1::Scalar Scalar;
typedef typename Derived1::Index Index;
static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
{
// in case we want to (or have to) skip VML at runtime we can call:

View File

@ -32,7 +32,7 @@ class BandMatrixBase : public EigenBase<Derived>
};
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
typedef typename DenseMatrixType::Index Index;
typedef typename DenseMatrixType::StorageIndex StorageIndex;
typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;
typedef EigenBase<Derived> Base;
@ -179,7 +179,7 @@ struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >
{
typedef _Scalar Scalar;
typedef Dense StorageKind;
typedef DenseIndex Index;
typedef Eigen::Index StorageIndex;
enum {
CoeffReadCost = NumTraits<Scalar>::ReadCost,
RowsAtCompileTime = _Rows,
@ -201,7 +201,7 @@ class BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Sub
public:
typedef typename internal::traits<BandMatrix>::Scalar Scalar;
typedef typename internal::traits<BandMatrix>::Index Index;
typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex;
typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;
explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs)
@ -241,7 +241,7 @@ struct traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Opt
{
typedef typename _CoefficientsType::Scalar Scalar;
typedef typename _CoefficientsType::StorageKind StorageKind;
typedef typename _CoefficientsType::Index Index;
typedef typename _CoefficientsType::StorageIndex StorageIndex;
enum {
CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost,
RowsAtCompileTime = _Rows,
@ -264,7 +264,7 @@ class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsT
typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;
typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;
typedef typename internal::traits<BandMatrixWrapper>::Index Index;
typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex;
explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs)
: m_coeffs(coeffs),
@ -312,7 +312,7 @@ template<typename Scalar, int Size, int Options>
class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>
{
typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base;
typedef typename Base::Index Index;
typedef typename Base::StorageIndex StorageIndex;
public:
explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {}

View File

@ -154,7 +154,7 @@ class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
: public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel>
{
typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
public:
typedef Impl Base;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
@ -306,13 +306,13 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
}
EIGEN_DEVICE_FUNC
Index startRow() const
StorageIndex startRow() const
{
return m_startRow.value();
}
EIGEN_DEVICE_FUNC
Index startCol() const
StorageIndex startCol() const
{
return m_startCol.value();
}
@ -320,10 +320,10 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
protected:
const typename XprType::Nested m_xpr;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
const internal::variable_if_dynamic<StorageIndex, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<StorageIndex, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols;
};
/** \internal Internal implementation of dense Blocks in the direct access case.*/

View File

@ -130,7 +130,7 @@ inline bool DenseBase<Derived>::any() const
* \sa all(), any()
*/
template<typename Derived>
inline typename DenseBase<Derived>::Index DenseBase<Derived>::count() const
inline Eigen::Index DenseBase<Derived>::count() const
{
return derived().template cast<bool>().template cast<Index>().sum();
}

View File

@ -28,7 +28,6 @@ template<typename XprType>
struct CommaInitializer
{
typedef typename XprType::Scalar Scalar;
typedef typename XprType::Index Index;
EIGEN_DEVICE_FUNC
inline CommaInitializer(XprType& xpr, const Scalar& s)

View File

@ -111,7 +111,8 @@ struct evaluator_base
typedef evaluator<ExpressionType> type;
typedef evaluator<ExpressionType> nestedType;
typedef typename traits<ExpressionType>::Index Index;
// FIXME is it really useful?
typedef typename traits<ExpressionType>::StorageIndex StorageIndex;
// TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
typedef traits<ExpressionType> ExpressionTraits;
};
@ -128,7 +129,6 @@ struct evaluator<PlainObjectBase<Derived> >
: evaluator_base<Derived>
{
typedef PlainObjectBase<Derived> PlainObjectType;
typedef typename PlainObjectType::Index Index;
typedef typename PlainObjectType::Scalar Scalar;
typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
typedef typename PlainObjectType::PacketScalar PacketScalar;
@ -264,7 +264,6 @@ struct unary_evaluator<Transpose<ArgType>, IndexBased>
EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -343,7 +342,6 @@ struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
: m_functor(n.functor())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -394,7 +392,6 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
m_argImpl(op.nestedExpression())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -469,7 +466,6 @@ struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBase
m_rhsImpl(xpr.rhs())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -522,7 +518,6 @@ struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
m_argImpl(op.nestedExpression())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@ -563,7 +558,6 @@ struct mapbase_evaluator : evaluator_base<Derived>
{
typedef Derived XprType;
typedef typename XprType::PointerType PointerType;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -760,7 +754,6 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
m_startCol(block.startCol())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -865,7 +858,6 @@ struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
m_elseImpl(select.elseMatrix())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
inline EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const
@ -898,7 +890,6 @@ struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
: evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
{
typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType;
enum {
@ -981,7 +972,6 @@ struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
: m_expr(expr)
{}
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const
@ -1016,7 +1006,6 @@ struct evaluator_wrapper_base
EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
typedef typename ArgType::Index Index;
typedef typename ArgType::Scalar Scalar;
typedef typename ArgType::CoeffReturnType CoeffReturnType;
typedef typename ArgType::PacketScalar PacketScalar;
@ -1103,7 +1092,6 @@ struct unary_evaluator<Reverse<ArgType, Direction> >
: evaluator_base<Reverse<ArgType, Direction> >
{
typedef Reverse<ArgType, Direction> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -1219,7 +1207,6 @@ struct evaluator<Diagonal<ArgType, DiagIndex> >
m_index(diagonal.index())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
// FIXME having to check whether ArgType is sparse here is not very nice.
typedef typename internal::conditional<!internal::is_same<typename ArgType::StorageKind,Sparse>::value,

View File

@ -36,7 +36,6 @@ protected:
typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType;
typedef typename internal::evaluator<XprType>::type EvaluatorType;
typedef typename internal::traits<XprType>::Scalar Scalar;
typedef typename internal::traits<XprType>::Index Index;
public:
/** Construct an iterator over the \a outerId -th row or column of \a xpr */
InnerIterator(const XprType &xpr, const Index &outerId)
@ -77,7 +76,6 @@ class inner_iterator_selector<XprType, IndexBased>
protected:
typedef typename evaluator<XprType>::type EvaluatorType;
typedef typename traits<XprType>::Scalar Scalar;
typedef typename traits<XprType>::Index Index;
enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
public:
@ -115,7 +113,6 @@ class inner_iterator_selector<XprType, IteratorBased>
protected:
typedef typename evaluator<XprType>::InnerIterator Base;
typedef typename evaluator<XprType>::type EvaluatorType;
typedef typename traits<XprType>::Index Index;
public:
EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/)

View File

@ -59,8 +59,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind,
typename traits<Rhs>::StorageKind,
BinaryOp>::ret StorageKind;
typedef typename promote_index_type<typename traits<Lhs>::Index,
typename traits<Rhs>::Index>::type Index;
typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
typename traits<Rhs>::StorageIndex>::type StorageIndex;
typedef typename Lhs::Nested LhsNested;
typedef typename Rhs::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested;

View File

@ -753,7 +753,6 @@ struct setIdentity_impl
template<typename Derived>
struct setIdentity_impl<Derived, true>
{
typedef typename Derived::Index Index;
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Derived& run(Derived& m)
{

View File

@ -58,17 +58,20 @@ template<typename Derived> class DenseBase
typedef typename internal::traits<Derived>::StorageKind StorageKind;
/** \brief The type of indices
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
* \sa \ref TopicPreprocessorDirectives.
*/
typedef typename internal::traits<Derived>::Index Index;
/**
* \brief The type used to store indices
* \details This typedef is relevant for types that store multiple indices such as
* PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index
* \sa \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase.
*/
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseCoeffsBase<Derived> Base;
typedef internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
typename NumTraits<typename internal::traits<Derived>::Scalar>::Real> Base;
using Base::derived;
using Base::const_cast_derived;
using Base::rows;

View File

@ -35,7 +35,6 @@ class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
{
public:
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
@ -287,7 +286,6 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@ -450,7 +448,6 @@ class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived
public:
typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@ -525,7 +522,6 @@ class DenseCoeffsBase<Derived, DirectWriteAccessors>
public:
typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@ -587,14 +583,14 @@ namespace internal {
template<typename Derived, bool JustReturnZero>
struct first_aligned_impl
{
static inline typename Derived::Index run(const Derived&)
static inline Index run(const Derived&)
{ return 0; }
};
template<typename Derived>
struct first_aligned_impl<Derived, false>
{
static inline typename Derived::Index run(const Derived& m)
static inline Index run(const Derived& m)
{
return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size());
}
@ -606,7 +602,7 @@ struct first_aligned_impl<Derived, false>
* documentation.
*/
template<typename Derived>
static inline typename Derived::Index first_aligned(const Derived& m)
static inline Index first_aligned(const Derived& m)
{
return first_aligned_impl
<Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)>

View File

@ -140,12 +140,12 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
if (this != &other) m_data = other.m_data;
return *this;
}
EIGEN_DEVICE_FUNC DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
EIGEN_DEVICE_FUNC void resize(DenseIndex,DenseIndex,DenseIndex) {}
EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
};
@ -158,12 +158,12 @@ template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0
EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) {}
EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) {}
EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; }
EIGEN_DEVICE_FUNC DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {}
EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
EIGEN_DEVICE_FUNC void resize(DenseIndex,DenseIndex,DenseIndex) {}
EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
EIGEN_DEVICE_FUNC const T *data() const { return 0; }
EIGEN_DEVICE_FUNC T *data() { return 0; }
};
@ -182,8 +182,8 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic,
template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>
{
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows;
DenseIndex m_cols;
Index m_rows;
Index m_cols;
public:
EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
@ -199,13 +199,13 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
DenseStorage(Index, Index nbRows, Index nbCols) : m_rows(nbRows), m_cols(nbCols) {}
void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
EIGEN_DEVICE_FUNC DenseIndex rows() const {return m_rows;}
EIGEN_DEVICE_FUNC DenseIndex cols() const {return m_cols;}
void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols() const {return m_cols;}
void conservativeResize(Index, Index nbRows, Index nbCols) { m_rows = nbRows; m_cols = nbCols; }
void resize(Index, Index nbRows, Index nbCols) { m_rows = nbRows; m_cols = nbCols; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
};
@ -214,7 +214,7 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>
{
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows;
Index m_rows;
public:
EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {}
explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
@ -229,12 +229,12 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
DenseStorage(Index, Index nbRows, Index) : m_rows(nbRows) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return _Cols;}
void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return _Cols;}
void conservativeResize(Index, Index nbRows, Index) { m_rows = nbRows; }
void resize(Index, Index nbRows, Index) { m_rows = nbRows; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
};
@ -243,7 +243,7 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>
{
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_cols;
Index m_cols;
public:
EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {}
explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
@ -258,12 +258,12 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
}
return *this;
}
DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
DenseStorage(Index, Index, Index nbCols) : m_cols(nbCols) {}
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return _Rows;}
EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
EIGEN_DEVICE_FUNC Index rows(void) const {return _Rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
void conservativeResize(Index, Index, Index nbCols) { m_cols = nbCols; }
void resize(Index, Index, Index nbCols) { m_cols = nbCols; }
EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
};
@ -272,13 +272,13 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>
{
T *m_data;
DenseIndex m_rows;
DenseIndex m_cols;
Index m_rows;
Index m_cols;
public:
EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {}
DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
DenseStorage(Index size, Index nbRows, Index nbCols)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
DenseStorage(const DenseStorage& other)
@ -317,15 +317,15 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
void conservativeResize(Index size, Index nbRows, Index nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
m_rows = nbRows;
m_cols = nbCols;
}
void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
void resize(Index size, Index nbRows, Index nbCols)
{
if(size != m_rows*m_cols)
{
@ -347,11 +347,11 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>
{
T *m_data;
DenseIndex m_cols;
Index m_cols;
public:
EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {}
explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
DenseStorage(Index size, Index, Index nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
DenseStorage(const DenseStorage& other)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(_Rows*other.m_cols))
@ -385,14 +385,14 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;}
void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
void conservativeResize(Index size, Index, Index nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
m_cols = nbCols;
}
EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols)
EIGEN_STRONG_INLINE void resize(Index size, Index, Index nbCols)
{
if(size != _Rows*m_cols)
{
@ -413,11 +413,11 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>
{
T *m_data;
DenseIndex m_rows;
Index m_rows;
public:
EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {}
explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
DenseStorage(Index size, Index nbRows, Index) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
DenseStorage(const DenseStorage& other)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*_Cols))
@ -451,14 +451,14 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
#endif
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;}
void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
void conservativeResize(Index size, Index nbRows, Index)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
m_rows = nbRows;
}
EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex)
EIGEN_STRONG_INLINE void resize(Index size, Index nbRows, Index)
{
if(size != m_rows*_Cols)
{

View File

@ -77,8 +77,8 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
EIGEN_DEVICE_FUNC
inline Index rows() const
{
return m_index.value()<0 ? numext::mini(Index(m_matrix.cols()),Index(m_matrix.rows()+m_index.value()))
: numext::mini(Index(m_matrix.rows()),Index(m_matrix.cols()-m_index.value()));
return m_index.value()<0 ? numext::mini<Index>(m_matrix.cols(),m_matrix.rows()+m_index.value())
: numext::mini<Index>(m_matrix.rows(),m_matrix.cols()-m_index.value());
}
EIGEN_DEVICE_FUNC

View File

@ -22,7 +22,7 @@ class DiagonalBase : public EigenBase<Derived>
typedef typename DiagonalVectorType::Scalar Scalar;
typedef typename DiagonalVectorType::RealScalar RealScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
enum {
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
@ -108,7 +108,6 @@ struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
{
typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
typedef DiagonalShape StorageKind;
typedef DenseIndex Index;
enum {
Flags = LvalueBit | NoPreferredStorageOrderBit
};
@ -124,7 +123,7 @@ class DiagonalMatrix
typedef const DiagonalMatrix& Nested;
typedef _Scalar Scalar;
typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
typedef typename internal::traits<DiagonalMatrix>::Index Index;
typedef typename internal::traits<DiagonalMatrix>::StorageIndex StorageIndex;
#endif
protected:
@ -230,7 +229,7 @@ struct traits<DiagonalWrapper<_DiagonalVectorType> >
{
typedef _DiagonalVectorType DiagonalVectorType;
typedef typename DiagonalVectorType::Scalar Scalar;
typedef typename DiagonalVectorType::Index Index;
typedef typename DiagonalVectorType::StorageIndex StorageIndex;
typedef DiagonalShape StorageKind;
typedef typename traits<DiagonalVectorType>::XprKind XprKind;
enum {

View File

@ -13,7 +13,9 @@
namespace Eigen {
/** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
/** \class EigenBase
*
* Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
*
* In other words, an EigenBase object is an object that can be copied into a MatrixBase.
*
@ -27,8 +29,15 @@ template<typename Derived> struct EigenBase
{
// typedef typename internal::plain_matrix_type<Derived>::type PlainObject;
/** \brief The interface type of indices
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
* \deprecated Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead.
* \sa StorageIndex, \ref TopicPreprocessorDirectives.
*/
typedef Eigen::Index Index;
// FIXME is it needed?
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
/** \returns a reference to the derived object */
EIGEN_DEVICE_FUNC

View File

@ -221,7 +221,6 @@ template<> struct gemv_dense_sense_selector<OnTheRight,ColMajor,true>
template<typename Lhs, typename Rhs, typename Dest>
static inline void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Index Index;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef typename Dest::Scalar ResScalar;
@ -298,7 +297,6 @@ template<> struct gemv_dense_sense_selector<OnTheRight,RowMajor,true>
template<typename Lhs, typename Rhs, typename Dest>
static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Index Index;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef typename Dest::Scalar ResScalar;
@ -352,7 +350,6 @@ template<> struct gemv_dense_sense_selector<OnTheRight,ColMajor,false>
template<typename Lhs, typename Rhs, typename Dest>
static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Index Index;
// TODO makes sure dest is sequentially stored in memory, otherwise use a temp
const Index size = rhs.rows();
for(Index k=0; k<size; ++k)
@ -365,7 +362,6 @@ template<> struct gemv_dense_sense_selector<OnTheRight,RowMajor,false>
template<typename Lhs, typename Rhs, typename Dest>
static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Index Index;
// TODO makes sure rhs is sequentially stored in memory, otherwise use a temp
const Index rows = dest.rows();
for(Index i=0; i<rows; ++i)

View File

@ -236,10 +236,10 @@ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from)
{ (*to) = from; }
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, DenseIndex /*stride*/)
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/)
{ return ploadu<Packet>(from); }
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, DenseIndex /*stride*/)
template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/)
{ pstore(to, from); }
/** \internal tries to do cache prefetching of \a addr */

View File

@ -164,7 +164,6 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
typename Derived::Nested m = _m;
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Index Index;
Index width = 0;

View File

@ -45,7 +45,7 @@ template<typename XprType>
class Inverse : public InverseImpl<XprType,typename internal::traits<XprType>::StorageKind>
{
public:
typedef typename XprType::Index Index;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename XprType::PlainObject PlainObject;
typedef typename internal::nested<XprType>::type XprTypeNested;
typedef typename internal::remove_all<XprTypeNested>::type XprTypeNestedCleaned;

View File

@ -70,8 +70,6 @@ struct traits<Map<PlainObjectType, MapOptions, StrideType> >
: public traits<PlainObjectType>
{
typedef traits<PlainObjectType> TraitsBase;
typedef typename PlainObjectType::Index Index;
typedef typename PlainObjectType::Scalar Scalar;
enum {
InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
? int(PlainObjectType::InnerStrideAtCompileTime)

View File

@ -37,7 +37,6 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
};
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@ -179,7 +178,7 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
typedef typename Base::Scalar Scalar;
typedef typename Base::PacketScalar PacketScalar;
typedef typename Base::Index Index;
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::PointerType PointerType;
using Base::derived;

View File

@ -107,7 +107,7 @@ struct traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
{
typedef _Scalar Scalar;
typedef Dense StorageKind;
typedef DenseIndex Index;
typedef Eigen::Index StorageIndex;
typedef MatrixXpr XprKind;
enum {
RowsAtCompileTime = _Rows,

View File

@ -52,7 +52,7 @@ template<typename Derived> class MatrixBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
typedef MatrixBase StorageBaseType;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;

View File

@ -66,11 +66,10 @@ class PermutationBase : public EigenBase<Derived>
MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
};
typedef typename Traits::StorageIndexType StorageIndexType;
typedef typename Traits::Index Index;
typedef Matrix<StorageIndexType,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
typedef typename Traits::StorageIndex StorageIndex;
typedef Matrix<StorageIndex,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
DenseMatrixType;
typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,StorageIndexType>
typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,StorageIndex>
PlainPermutationType;
using Base::derived;
typedef Transpose<PermutationBase> TransposeReturnType;
@ -148,7 +147,8 @@ class PermutationBase : public EigenBase<Derived>
/** Sets *this to be the identity permutation matrix */
void setIdentity()
{
for(StorageIndexType i = 0; i < size(); ++i)
StorageIndex n = StorageIndex(size());
for(StorageIndex i = 0; i < n; ++i)
indices().coeffRef(i) = i;
}
@ -174,8 +174,8 @@ class PermutationBase : public EigenBase<Derived>
eigen_assert(i>=0 && j>=0 && i<size() && j<size());
for(Index k = 0; k < size(); ++k)
{
if(indices().coeff(k) == i) indices().coeffRef(k) = StorageIndexType(j);
else if(indices().coeff(k) == j) indices().coeffRef(k) = StorageIndexType(i);
if(indices().coeff(k) == i) indices().coeffRef(k) = StorageIndex(j);
else if(indices().coeff(k) == j) indices().coeffRef(k) = StorageIndex(i);
}
return derived();
}
@ -263,7 +263,7 @@ class PermutationBase : public EigenBase<Derived>
*
* \param SizeAtCompileTime the number of rows/cols, or Dynamic
* \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
* \param StorageIndexType the integer type of the indices
* \param StorageIndex the integer type of the indices
*
* This class represents a permutation matrix, internally stored as a vector of integers.
*
@ -271,19 +271,18 @@ class PermutationBase : public EigenBase<Derived>
*/
namespace internal {
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType> >
: traits<Matrix<_StorageIndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>
struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex> >
: traits<Matrix<_StorageIndex,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
{
typedef PermutationStorage StorageKind;
typedef Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
typedef typename IndicesType::Index Index;
typedef _StorageIndexType StorageIndexType;
typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
typedef _StorageIndex StorageIndex;
};
}
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>
class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex> >
{
typedef PermutationBase<PermutationMatrix> Base;
typedef internal::traits<PermutationMatrix> Traits;
@ -293,8 +292,7 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
#ifndef EIGEN_PARSED_BY_DOXYGEN
typedef typename Traits::IndicesType IndicesType;
typedef typename Traits::StorageIndexType StorageIndexType;
typedef typename Traits::Index Index;
typedef typename Traits::StorageIndex StorageIndex;
#endif
inline PermutationMatrix()
@ -304,7 +302,7 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
*/
explicit inline PermutationMatrix(Index size) : m_indices(size)
{
eigen_internal_assert(size <= NumTraits<StorageIndexType>::highest());
eigen_internal_assert(size <= NumTraits<StorageIndex>::highest());
}
/** Copy constructor. */
@ -376,9 +374,9 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
PermutationMatrix(const Transpose<PermutationBase<Other> >& other)
: m_indices(other.nestedPermutation().size())
{
eigen_internal_assert(m_indices.size() <= NumTraits<StorageIndexType>::highest());
StorageIndexType end = StorageIndexType(m_indices.size());
for (StorageIndexType i=0; i<end;++i)
eigen_internal_assert(m_indices.size() <= NumTraits<StorageIndex>::highest());
StorageIndex end = StorageIndex(m_indices.size());
for (StorageIndex i=0; i<end;++i)
m_indices.coeffRef(other.nestedPermutation().indices().coeff(i)) = i;
}
template<typename Lhs,typename Rhs>
@ -396,20 +394,19 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
namespace internal {
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int _PacketAccess>
struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType>,_PacketAccess> >
: traits<Matrix<_StorageIndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess>
struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess> >
: traits<Matrix<_StorageIndex,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
{
typedef PermutationStorage StorageKind;
typedef Map<const Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
typedef typename IndicesType::Index Index;
typedef _StorageIndexType StorageIndexType;
typedef Map<const Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
typedef _StorageIndex StorageIndex;
};
}
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int _PacketAccess>
class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType>,_PacketAccess>
: public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndexType>,_PacketAccess> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess>
class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess>
: public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess> >
{
typedef PermutationBase<Map> Base;
typedef internal::traits<Map> Traits;
@ -417,15 +414,14 @@ class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageInd
#ifndef EIGEN_PARSED_BY_DOXYGEN
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar StorageIndexType;
typedef typename IndicesType::Index Index;
typedef typename IndicesType::Scalar StorageIndex;
#endif
inline Map(const StorageIndexType* indicesPtr)
inline Map(const StorageIndex* indicesPtr)
: m_indices(indicesPtr)
{}
inline Map(const StorageIndexType* indicesPtr, Index size)
inline Map(const StorageIndex* indicesPtr, Index size)
: m_indices(indicesPtr,size)
{}
@ -479,8 +475,7 @@ struct traits<PermutationWrapper<_IndicesType> >
{
typedef PermutationStorage StorageKind;
typedef typename _IndicesType::Scalar Scalar;
typedef typename _IndicesType::Scalar StorageIndexType;
typedef typename _IndicesType::Index Index;
typedef typename _IndicesType::Scalar StorageIndex;
typedef _IndicesType IndicesType;
enum {
RowsAtCompileTime = _IndicesType::SizeAtCompileTime,
@ -558,7 +553,7 @@ struct permut_matrix_product_retval
: public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
{
typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
: m_permutation(perm), m_matrix(matrix)
@ -650,7 +645,7 @@ class Transpose<PermutationBase<Derived> >
MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
};
typedef typename Traits::Scalar Scalar;
typedef typename Traits::Index Index;
typedef typename Traits::StorageIndex StorageIndex;
#endif
Transpose(const PermutationType& p) : m_permutation(p) {}

View File

@ -95,7 +95,6 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
typedef typename internal::dense_xpr_base<Derived>::type Base;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
@ -846,7 +845,6 @@ namespace internal {
template <typename Derived, typename OtherDerived, bool IsVector>
struct conservative_resize_like_impl
{
typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index rows, Index cols)
{
if (_this.rows() == rows && _this.cols() == cols) return;
@ -912,7 +910,6 @@ struct conservative_resize_like_impl<Derived,OtherDerived,true>
{
using conservative_resize_like_impl<Derived,OtherDerived,false>::run;
typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index size)
{
const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;

View File

@ -67,8 +67,8 @@ struct traits<Product<Lhs, Rhs, Option> >
typedef typename product_promote_storage_type<typename LhsTraits::StorageKind,
typename RhsTraits::StorageKind,
internal::product_type<Lhs,Rhs>::ret>::ret StorageKind;
typedef typename promote_index_type<typename LhsTraits::Index,
typename RhsTraits::Index>::type Index;
typedef typename promote_index_type<typename LhsTraits::StorageIndex,
typename RhsTraits::StorageIndex>::type StorageIndex;
enum {
RowsAtCompileTime = LhsTraits::RowsAtCompileTime,
@ -149,7 +149,6 @@ class dense_product_base<Lhs, Rhs, Option, InnerProduct>
public:
using Base::derived;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
operator const Scalar() const
{

View File

@ -210,7 +210,6 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
template<typename Dst, typename Lhs, typename Rhs, typename Func>
EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&)
{
typedef typename Dst::Index Index;
typename evaluator<Rhs>::type rhsEval(rhs);
// FIXME make sure lhs is sequentially stored
// FIXME not very good if rhs is real and lhs complex while alpha is real too
@ -222,8 +221,8 @@ EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, cons
// Row major result
template<typename Dst, typename Lhs, typename Rhs, typename Func>
EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&) {
typedef typename Dst::Index Index;
EIGEN_DONT_INLINE void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&)
{
typename evaluator<Lhs>::type lhsEval(lhs);
// FIXME make sure rhs is sequentially stored
// FIXME not very good if lhs is real and rhs complex while alpha is real too
@ -374,7 +373,6 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
: evaluator_base<Product<Lhs, Rhs, LazyProduct> >
{
typedef Product<Lhs, Rhs, LazyProduct> XprType;
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
@ -526,7 +524,6 @@ struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProduc
template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
{
etor_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
@ -537,7 +534,6 @@ struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, Load
template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
{
etor_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
@ -548,7 +544,6 @@ struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, Load
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
{
res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
@ -558,7 +553,6 @@ struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
{
res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
@ -568,7 +562,6 @@ struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
{
eigen_assert(innerDim>0 && "you are using a non initialized matrix");
@ -581,7 +574,6 @@ struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
{
eigen_assert(innerDim>0 && "you are using a non initialized matrix");
@ -670,7 +662,6 @@ template<typename MatrixType, typename DiagonalType, typename Derived, int Produ
struct diagonal_product_evaluator_base
: evaluator_base<Derived>
{
typedef typename MatrixType::Index Index;
typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
public:
@ -735,7 +726,6 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
using Base::coeff;
using Base::packet_impl;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::PacketScalar PacketScalar;
typedef Product<Lhs, Rhs, ProductKind> XprType;
@ -783,7 +773,6 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape,
using Base::coeff;
using Base::packet_impl;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::PacketScalar PacketScalar;
typedef Product<Lhs, Rhs, ProductKind> XprType;

View File

@ -191,7 +191,6 @@ template<typename Func, typename Derived>
struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename Derived::Index Index;
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
{
@ -217,7 +216,6 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
typedef typename Derived::Index Index;
static Scalar run(const Derived &mat, const Func& func)
{
@ -275,7 +273,6 @@ struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
typedef typename Derived::Index Index;
EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func)
{
@ -342,7 +339,6 @@ public:
typedef _XprType XprType;
EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;

View File

@ -58,8 +58,7 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
/** \brief The type of coefficients in this matrix */
typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
enum {
Mode = internal::traits<SelfAdjointView>::Mode,
@ -224,7 +223,6 @@ public:
typedef typename Base::DstEvaluatorType DstEvaluatorType;
typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::AssignmentTraits AssignmentTraits;

View File

@ -48,6 +48,7 @@ struct traits<Solve<Decomposition, RhsType> >
: traits<typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject>
{
typedef typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject PlainObject;
typedef typename promote_index_type<typename Decomposition::StorageIndex, typename RhsType::StorageIndex>::type StorageIndex;
typedef traits<PlainObject> BaseTraits;
enum {
Flags = BaseTraits::Flags & RowMajorBit,
@ -62,8 +63,8 @@ template<typename Decomposition, typename RhsType>
class Solve : public SolveImpl<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>
{
public:
typedef typename RhsType::Index Index;
typedef typename internal::traits<Solve>::PlainObject PlainObject;
typedef typename internal::traits<Solve>::StorageIndex StorageIndex;
Solve(const Decomposition &dec, const RhsType &rhs)
: m_dec(dec), m_rhs(rhs)

View File

@ -68,7 +68,7 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>
if(!useRhsDirectly)
MappedRhs(actualRhs,rhs.size()) = rhs;
triangular_solve_vector<LhsScalar, RhsScalar, typename Lhs::Index, Side, Mode, LhsProductTraits::NeedToConjugate,
triangular_solve_vector<LhsScalar, RhsScalar, Index, Side, Mode, LhsProductTraits::NeedToConjugate,
(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor>
::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs);
@ -82,7 +82,6 @@ template<typename Lhs, typename Rhs, int Side, int Mode>
struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
{
typedef typename Rhs::Scalar Scalar;
typedef typename Rhs::Index Index;
typedef blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
@ -232,7 +231,6 @@ template<int Side, typename TriangularType, typename Rhs> struct triangular_solv
{
typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
typedef ReturnByValue<triangular_solve_retval> Base;
typedef typename Base::Index Index;
triangular_solve_retval(const TriangularType& tri, const Rhs& rhs)
: m_triangularMatrix(tri), m_rhs(rhs)

View File

@ -55,7 +55,6 @@ inline typename NumTraits<typename traits<Derived>::Scalar>::Real
blueNorm_impl(const EigenBase<Derived>& _vec)
{
typedef typename Derived::RealScalar RealScalar;
typedef typename Derived::Index Index;
using std::pow;
using std::sqrt;
using std::abs;

View File

@ -44,7 +44,7 @@ template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>
class Stride
{
public:
typedef DenseIndex Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
InnerStrideAtCompileTime = _InnerStrideAtCompileTime,
OuterStrideAtCompileTime = _OuterStrideAtCompileTime
@ -91,7 +91,6 @@ class InnerStride : public Stride<0, Value>
{
typedef Stride<0, Value> Base;
public:
typedef DenseIndex Index;
EIGEN_DEVICE_FUNC InnerStride() : Base() {}
EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code
};
@ -103,7 +102,6 @@ class OuterStride : public Stride<Value, 0>
{
typedef Stride<Value, 0> Base;
public:
typedef DenseIndex Index;
EIGEN_DEVICE_FUNC OuterStride() : Base() {}
EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break valid code
};

View File

@ -28,7 +28,6 @@ protected:
public:
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::DstXprType DstXprType;
typedef swap_assign_op<Scalar> Functor;

View File

@ -29,14 +29,10 @@ namespace Eigen {
namespace internal {
template<typename MatrixType>
struct traits<Transpose<MatrixType> >
struct traits<Transpose<MatrixType> > : public traits<MatrixType>
{
typedef typename traits<MatrixType>::Scalar Scalar;
typedef typename traits<MatrixType>::Index Index;
typedef typename nested<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
typedef typename traits<MatrixType>::StorageKind StorageKind;
typedef typename traits<MatrixType>::XprKind XprKind;
enum {
RowsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTime = MatrixType::RowsAtCompileTime,
@ -236,7 +232,6 @@ struct inplace_transpose_selector<MatrixType,true,true> { // PacketSize x Packet
static void run(MatrixType& m) {
typedef typename MatrixType::Scalar Scalar;
typedef typename internal::packet_traits<typename MatrixType::Scalar>::type Packet;
typedef typename MatrixType::Index Index;
const Index PacketSize = internal::packet_traits<Scalar>::size;
const Index Alignment = internal::evaluator<MatrixType>::Flags&AlignedBit ? Aligned : Unaligned;
PacketBlock<Packet> A;

View File

@ -53,8 +53,8 @@ class TranspositionsBase
public:
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar StorageIndexType;
typedef typename IndicesType::Index Index;
typedef typename IndicesType::Scalar StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
@ -82,17 +82,17 @@ class TranspositionsBase
inline Index size() const { return indices().size(); }
/** Direct access to the underlying index vector */
inline const StorageIndexType& coeff(Index i) const { return indices().coeff(i); }
inline const StorageIndex& coeff(Index i) const { return indices().coeff(i); }
/** Direct access to the underlying index vector */
inline StorageIndexType& coeffRef(Index i) { return indices().coeffRef(i); }
inline StorageIndex& coeffRef(Index i) { return indices().coeffRef(i); }
/** Direct access to the underlying index vector */
inline const StorageIndexType& operator()(Index i) const { return indices()(i); }
inline const StorageIndex& operator()(Index i) const { return indices()(i); }
/** Direct access to the underlying index vector */
inline StorageIndexType& operator()(Index i) { return indices()(i); }
inline StorageIndex& operator()(Index i) { return indices()(i); }
/** Direct access to the underlying index vector */
inline const StorageIndexType& operator[](Index i) const { return indices()(i); }
inline const StorageIndex& operator[](Index i) const { return indices()(i); }
/** Direct access to the underlying index vector */
inline StorageIndexType& operator[](Index i) { return indices()(i); }
inline StorageIndex& operator[](Index i) { return indices()(i); }
/** const version of indices(). */
const IndicesType& indices() const { return derived().indices(); }
@ -108,7 +108,7 @@ class TranspositionsBase
/** Sets \c *this to represent an identity transformation */
void setIdentity()
{
for(StorageIndexType i = 0; i < indices().size(); ++i)
for(StorageIndex i = 0; i < indices().size(); ++i)
coeffRef(i) = i;
}
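A hedged usage sketch of the accessors above (hypothetical user code): individual entries are StorageIndex, while sizes are reported as Eigen::Index.
#include <Eigen/Dense>

void transpositions_demo()
{
  Eigen::Transpositions<Eigen::Dynamic, Eigen::Dynamic, int> T(3);
  T.setIdentity();
  T[0] = 2;                                   // entry type is StorageIndex (int here)
  Eigen::Index n = T.size();                  // sizes use Eigen::Index
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd B = T * A;                  // applies the row transposition (0,2) to A
  (void)n; (void)B;
}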
@ -145,26 +145,23 @@ class TranspositionsBase
};
namespace internal {
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>
struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >
{
typedef Matrix<_StorageIndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
typedef typename IndicesType::Index Index;
typedef _StorageIndexType StorageIndexType;
typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
typedef _StorageIndex StorageIndex;
};
}
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType>
class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>
class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >
{
typedef internal::traits<Transpositions> Traits;
public:
typedef TranspositionsBase<Transpositions> Base;
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar StorageIndexType;
typedef typename IndicesType::Index Index;
typedef typename IndicesType::Scalar StorageIndex;
inline Transpositions() {}
@ -219,32 +216,30 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
namespace internal {
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int _PacketAccess>
struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType>,_PacketAccess> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess>
struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,_PacketAccess> >
{
typedef Map<const Matrix<_StorageIndexType,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
typedef typename IndicesType::Index Index;
typedef _StorageIndexType StorageIndexType;
typedef Map<const Matrix<_StorageIndex,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
typedef _StorageIndex StorageIndex;
};
}
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndexType, int PacketAccess>
class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType>,PacketAccess>
: public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndexType>,PacketAccess> >
template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int PacketAccess>
class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,PacketAccess>
: public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,PacketAccess> >
{
typedef internal::traits<Map> Traits;
public:
typedef TranspositionsBase<Map> Base;
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar StorageIndexType;
typedef typename IndicesType::Index Index;
typedef typename IndicesType::Scalar StorageIndex;
explicit inline Map(const StorageIndexType* indicesPtr)
explicit inline Map(const StorageIndex* indicesPtr)
: m_indices(indicesPtr)
{}
inline Map(const StorageIndexType* indicesPtr, Index size)
inline Map(const StorageIndex* indicesPtr, Index size)
: m_indices(indicesPtr,size)
{}
@ -281,8 +276,7 @@ namespace internal {
template<typename _IndicesType>
struct traits<TranspositionsWrapper<_IndicesType> >
{
typedef typename _IndicesType::Scalar StorageIndexType;
typedef typename _IndicesType::Index Index;
typedef typename _IndicesType::Scalar StorageIndex;
typedef _IndicesType IndicesType;
};
}
@ -296,8 +290,7 @@ class TranspositionsWrapper
typedef TranspositionsBase<TranspositionsWrapper> Base;
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar StorageIndexType;
typedef typename IndicesType::Index Index;
typedef typename IndicesType::Scalar StorageIndex;
explicit inline TranspositionsWrapper(IndicesType& a_indices)
: m_indices(a_indices)
@ -370,8 +363,7 @@ struct transposition_matrix_product_retval
: public ReturnByValue<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
{
typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
typedef typename TranspositionType::Index Index;
typedef typename TranspositionType::StorageIndexType StorageIndexType;
typedef typename TranspositionType::StorageIndex StorageIndex;
transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
: m_transpositions(tr), m_matrix(matrix)
@ -383,7 +375,7 @@ struct transposition_matrix_product_retval
template<typename Dest> inline void evalTo(Dest& dst) const
{
const Index size = m_transpositions.size();
StorageIndexType j = 0;
StorageIndex j = 0;
if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
dst = m_matrix;

View File

@ -45,7 +45,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
};
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::traits<Derived>::FullMatrixType DenseMatrixType;
typedef DenseMatrixType DenseType;
typedef Derived const& Nested;
@ -199,7 +199,6 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
public:
typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
typedef typename internal::traits<TriangularView>::Index Index;
typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned NestedExpression;
enum {
@ -325,7 +324,6 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
using Base::derived;
typedef typename internal::traits<TriangularViewType>::StorageKind StorageKind;
typedef typename internal::traits<TriangularViewType>::Index Index;
enum {
Mode = _Mode,
@ -688,7 +686,6 @@ public:
typedef typename Base::DstEvaluatorType DstEvaluatorType;
typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::AssignmentTraits AssignmentTraits;
@ -831,7 +828,6 @@ struct triangular_assignment_loop<Kernel, Mode, 0, SetOpposite>
template<typename Kernel, unsigned int Mode, bool SetOpposite>
struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>
{
typedef typename Kernel::Index Index;
typedef typename Kernel::Scalar Scalar;
EIGEN_DEVICE_FUNC
static inline void run(Kernel &kernel)

View File

@ -159,7 +159,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
typedef typename ExpressionType::Scalar Scalar;
typedef typename ExpressionType::RealScalar RealScalar;
typedef typename ExpressionType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
ExpressionType, ExpressionType&>::type ExpressionTypeNested;
typedef typename internal::remove_all<ExpressionTypeNested>::type ExpressionTypeNestedCleaned;

View File

@ -41,7 +41,6 @@ struct visitor_impl<Visitor, Derived, 1>
template<typename Visitor, typename Derived>
struct visitor_impl<Visitor, Derived, Dynamic>
{
typedef typename Derived::Index Index;
static inline void run(const Derived& mat, Visitor& visitor)
{
visitor.init(mat.coeff(0,0), 0, 0);
@ -60,7 +59,6 @@ class visitor_evaluator
public:
explicit visitor_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
@ -124,7 +122,6 @@ namespace internal {
template <typename Derived>
struct coeff_visitor
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
Index row, col;
Scalar res;
@ -144,7 +141,6 @@ struct coeff_visitor
template <typename Derived>
struct min_coeff_visitor : coeff_visitor<Derived>
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
void operator() (const Scalar& value, Index i, Index j)
{
@ -172,7 +168,6 @@ struct functor_traits<min_coeff_visitor<Scalar> > {
template <typename Derived>
struct max_coeff_visitor : coeff_visitor<Derived>
{
typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
void operator() (const Scalar& value, Index i, Index j)
{

View File

@ -92,7 +92,7 @@ template<> EIGEN_STRONG_INLINE Packet4cf ploaddup<Packet4cf>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, Index stride)
{
return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),
std::imag(from[2*stride]), std::real(from[2*stride]),
@ -100,7 +100,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packe
std::imag(from[0*stride]), std::real(from[0*stride])));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, Index stride)
{
__m128 low = _mm256_extractf128_ps(from.v, 0);
to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),
@ -310,13 +310,13 @@ template<> EIGEN_STRONG_INLINE Packet2cd ploaddup<Packet2cd>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet2cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet2cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather<std::complex<double>, Packet2cd>(const std::complex<double>* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather<std::complex<double>, Packet2cd>(const std::complex<double>* from, Index stride)
{
return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]),
std::imag(from[0*stride]), std::real(from[0*stride])));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet2cd>(std::complex<double>* to, const Packet2cd& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet2cd>(std::complex<double>* to, const Packet2cd& from, Index stride)
{
__m128d low = _mm256_extractf128_pd(from.v, 0);
to[stride*0] = std::complex<double>(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)));

View File

@ -233,17 +233,17 @@ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i&
// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
__m128 low = _mm256_extractf128_ps(from, 0);
to[stride*0] = _mm_cvtss_f32(low);
@ -257,7 +257,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, co
to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
__m128d low = _mm256_extractf128_pd(from, 0);
to[stride*0] = _mm_cvtsd_f64(low);

View File

@ -67,14 +67,14 @@ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<flo
return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
{
std::complex<float> EIGEN_ALIGN16 af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return Packet2cf(vec_ld(0, (const float*)af));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
{
std::complex<float> EIGEN_ALIGN16 af[2];
vec_st(from.v, 0, (float*)af);
@ -285,14 +285,14 @@ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<
template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
{
std::complex<double> EIGEN_ALIGN16 af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet1cd>(af);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
{
std::complex<double> EIGEN_ALIGN16 af[2];
pstore<std::complex<double> >(af, from);

View File

@ -252,7 +252,7 @@ pbroadcast4<Packet4i>(const int *a,
a3 = vec_splat(a3, 3);
}
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
float EIGEN_ALIGN16 af[4];
af[0] = from[0*stride];
@ -261,7 +261,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
af[3] = from[3*stride];
return pload<Packet4f>(af);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
int EIGEN_ALIGN16 ai[4];
ai[0] = from[0*stride];
@ -270,7 +270,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
ai[3] = from[3*stride];
return pload<Packet4i>(ai);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
float EIGEN_ALIGN16 af[4];
pstore<float>(af, from);
@ -279,7 +279,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, co
to[2*stride] = af[2];
to[3*stride] = af[3];
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
int EIGEN_ALIGN16 ai[4];
pstore<int>((int *)ai, from);
@ -793,14 +793,14 @@ pbroadcast4<Packet2d>(const double *a,
a2 = vec_splat_dbl(a3, 0);
a3 = vec_splat_dbl(a3, 1);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
double EIGEN_ALIGN16 af[2];
af[0] = from[0*stride];
af[1] = from[1*stride];
return pload<Packet2d>(af);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
double EIGEN_ALIGN16 af[2];
pstore<double>(af, from);

View File

@ -112,7 +112,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
{
Packet4f res;
res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);
@ -122,7 +122,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packe
return Packet2cf(res);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
{
to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
@ -363,7 +363,7 @@ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<
template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ARM_PREFETCH((double *)addr); }
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
{
Packet2d res;
res = vsetq_lane_f64(std::real(from[0*stride]), res, 0);
@ -371,7 +371,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Pack
return Packet1cd(res);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
{
to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1));
}

View File

@ -250,7 +250,7 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
Packet4f res;
res = vsetq_lane_f32(from[0*stride], res, 0);
@ -259,7 +259,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
res = vsetq_lane_f32(from[3*stride], res, 3);
return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
Packet4i res;
res = vsetq_lane_s32(from[0*stride], res, 0);
@ -269,14 +269,14 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
return res;
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
to[stride*0] = vgetq_lane_f32(from, 0);
to[stride*1] = vgetq_lane_f32(from, 1);
to[stride*2] = vgetq_lane_f32(from, 2);
to[stride*3] = vgetq_lane_f32(from, 3);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
to[stride*0] = vgetq_lane_s32(from, 0);
to[stride*1] = vgetq_lane_s32(from, 1);
@ -606,14 +606,14 @@ template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d&
template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
Packet2d res;
res = vsetq_lane_f64(from[0*stride], res, 0);
res = vsetq_lane_f64(from[1*stride], res, 1);
return res;
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
to[stride*0] = vgetq_lane_f64(from, 0);
to[stride*1] = vgetq_lane_f64(from, 1);

View File

@ -115,13 +115,13 @@ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<f
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); }
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
{
return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]),
std::imag(from[0*stride]), std::real(from[0*stride])));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
{
to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)),
_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1)));

View File

@ -387,32 +387,32 @@ template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d&
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castps_pd(from))); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castsi128_pd(from))); }
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
return _mm_set_pd(from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
to[stride*0] = _mm_cvtss_f32(from);
to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
to[stride*0] = _mm_cvtsd_f64(from);
to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
to[stride*0] = _mm_cvtsi128_si32(from);
to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));

View File

@ -112,7 +112,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
template <typename Scalar, bool RandomAccess> struct linspaced_op
{
typedef typename packet_traits<Scalar>::type Packet;
linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}
linspaced_op(const Scalar& low, const Scalar& high, Index num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}
template<typename Index>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }

View File

@ -257,9 +257,9 @@ class level3_blocking
LhsScalar* m_blockA;
RhsScalar* m_blockB;
DenseIndex m_mc;
DenseIndex m_nc;
DenseIndex m_kc;
Index m_mc;
Index m_nc;
Index m_kc;
public:
@ -267,9 +267,9 @@ class level3_blocking
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline DenseIndex mc() const { return m_mc; }
inline DenseIndex nc() const { return m_nc; }
inline DenseIndex kc() const { return m_kc; }
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
@ -299,7 +299,7 @@ class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, M
public:
gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/, int /*num_threads*/, bool /*full_rows = false*/)
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, int /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
@ -326,12 +326,12 @@ class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, M
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
DenseIndex m_sizeA;
DenseIndex m_sizeB;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, int num_threads, bool l3_blocking)
gemm_blocking_space(Index rows, Index cols, Index depth, int num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
@ -343,8 +343,8 @@ class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, M
}
else // no l3 blocking
{
DenseIndex m = this->m_mc;
DenseIndex n = this->m_nc;
Index m = this->m_mc;
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
}
@ -386,7 +386,6 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Product<Lhs,Rhs>::Index Index;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;

View File

@ -198,7 +198,6 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,true>
static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
@ -243,8 +242,6 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
{
static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Index Index;
typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;

View File

@ -474,7 +474,6 @@ template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,RhsMode,false>
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Product<Lhs,Rhs>::Index Index;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;

View File

@ -174,7 +174,6 @@ template<typename Lhs, int LhsMode, typename Rhs>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Product<Lhs,Rhs>::Index Index;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;

View File

@ -53,7 +53,6 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef internal::blas_traits<OtherType> OtherBlasTraits;
typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;
@ -86,7 +85,6 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef internal::blas_traits<OtherType> OtherBlasTraits;
typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;

View File

@ -388,7 +388,6 @@ struct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
{
template<typename Dest> static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Index Index;
typedef typename Dest::Scalar Scalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;

View File

@ -206,7 +206,6 @@ template<int Mode> struct trmv_selector<Mode,ColMajor>
template<typename Lhs, typename Rhs, typename Dest>
static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Index Index;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef typename Dest::Scalar ResScalar;
@ -283,7 +282,6 @@ template<int Mode> struct trmv_selector<Mode,RowMajor>
template<typename Lhs, typename Rhs, typename Dest>
static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
{
typedef typename Dest::Index Index;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef typename Dest::Scalar ResScalar;

View File

@ -648,7 +648,7 @@ namespace Eigen {
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::internal::nested<Derived>::type Nested; \
typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived>::Index Index; \
typedef typename Eigen::internal::traits<Derived>::StorageIndex StorageIndex; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
@ -658,21 +658,10 @@ namespace Eigen {
#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
typedef typename Base::PacketScalar PacketScalar; \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::internal::nested<Derived>::type Nested; \
typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived>::Index Index; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
enum { MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime}; \
using Base::derived; \
using Base::const_cast_derived;

View File

@ -26,8 +26,25 @@ namespace Eigen {
typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
/**
* \brief The Index type as used for the API.
* \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
* \sa \ref TopicPreprocessorDirectives, StorageIndex.
*/
typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index;
namespace internal {
template<typename IndexDest, typename IndexSrc>
EIGEN_DEVICE_FUNC
inline IndexDest convert_index(const IndexSrc& idx) {
// for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value too big for target type");
return IndexDest(idx);
}
//classes inheriting no_assignment_operator don't generate a default operator=.
class no_assignment_operator
{

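As an illustrative sketch only (not part of this change set): with the split introduced here, sizes and offsets travel through the API as Eigen::Index, while a container's stored integers use its StorageIndex, and internal::convert_index bridges the two. The sparse matrix and the int storage type below are merely example choices, assuming an Eigen 3.3-style tree.

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(100, 100);   // StorageIndex defaults to int
  Eigen::Index n = A.cols();                 // API-level index, std::ptrdiff_t by default

  // Narrowing an API-level index to the storage type, as convert_index does internally;
  // in debug builds the value is asserted to fit into the destination type.
  typedef Eigen::SparseMatrix<double>::StorageIndex SI;
  SI ns = Eigen::internal::convert_index<SI>(n);

  std::cout << "cols: " << n << ", stored as: " << ns << std::endl;
  return 0;
}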
View File

@ -60,7 +60,7 @@ template<typename _MatrixType> class ComplexEigenSolver
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*

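A hedged usage sketch, not taken from the patch: code that used to spell out the solver's member Index typedef can simply switch to the global Eigen::Index; the 4x4 random matrix is only for illustration.

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(4, 4);
  Eigen::ComplexEigenSolver<Eigen::MatrixXcd> es(A);

  // Prefer the global Eigen::Index over the deprecated ComplexEigenSolver<...>::Index.
  for (Eigen::Index i = 0; i < es.eigenvalues().size(); ++i)
    std::cout << es.eigenvalues()[i] << std::endl;
  return 0;
}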
View File

@ -63,7 +63,7 @@ template<typename _MatrixType> class ComplexSchur
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for \p _MatrixType.
*

View File

@ -79,7 +79,7 @@ template<typename _MatrixType> class EigenSolver
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*

View File

@ -72,7 +72,7 @@ template<typename _MatrixType> class GeneralizedEigenSolver
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*

View File

@ -50,7 +50,6 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT
typedef SelfAdjointEigenSolver<_MatrixType> Base;
public:
typedef typename Base::Index Index;
typedef _MatrixType MatrixType;
/** \brief Default constructor for fixed-size matrices.

View File

@ -71,7 +71,7 @@ template<typename _MatrixType> class HessenbergDecomposition
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Type for vector of Householder coefficients.
*
@ -337,7 +337,6 @@ namespace internal {
template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >
{
typedef typename MatrixType::Index Index;
public:
/** \brief Constructor.
*

View File

@ -67,7 +67,7 @@ namespace Eigen {
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
@ -276,7 +276,7 @@ namespace Eigen {
/** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */
template<typename MatrixType>
inline typename MatrixType::Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
inline Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
@ -294,7 +294,7 @@ namespace Eigen {
/** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */
template<typename MatrixType>
inline typename MatrixType::Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
inline Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
{
using std::abs;
Index res = l;

View File

@ -64,7 +64,7 @@ template<typename _MatrixType> class RealSchur
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
@ -343,7 +343,7 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
inline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;

View File

@ -21,7 +21,7 @@ class GeneralizedSelfAdjointEigenSolver;
namespace internal {
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
template<typename MatrixType, typename DiagType, typename SubDiagType>
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const typename MatrixType::Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
@ -81,7 +81,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Real scalar type for \p _MatrixType.
*
@ -456,12 +456,11 @@ namespace internal {
* \returns \c Success or \c NoConvergence
*/
template<typename MatrixType, typename DiagType, typename SubDiagType>
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const typename MatrixType::Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
{
using std::abs;
ComputationInfo info;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
Index n = diag.size();

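For context, a small sketch of how this code is typically reached from user land; the public computeFromTridiagonal entry point and the concrete tridiagonal values are assumptions for illustration, not part of this hunk.

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  // A symmetric tridiagonal matrix given by its diagonal and sub-diagonal.
  Eigen::VectorXd diag(4), subdiag(3);
  diag    << 4, 3, 2, 1;
  subdiag << 1, 1, 1;

  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
  es.computeFromTridiagonal(diag, subdiag, Eigen::ComputeEigenvectors);  // assumed public wrapper

  if (es.info() == Eigen::Success)
    std::cout << es.eigenvalues().transpose() << std::endl;
  return 0;
}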
View File

@ -69,7 +69,7 @@ template<typename _MatrixType> class Tridiagonalization
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
Size = MatrixType::RowsAtCompileTime,
@ -345,7 +345,6 @@ template<typename MatrixType, typename CoeffVectorType>
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
{
using numext::conj;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
Index n = matA.rows();
@ -437,7 +436,6 @@ struct tridiagonalization_inplace_selector
{
typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
typedef typename MatrixType::Index Index;
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
@ -525,7 +523,6 @@ struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
template<typename MatrixType> struct TridiagonalizationMatrixTReturnType
: public ReturnByValue<TridiagonalizationMatrixTReturnType<MatrixType> >
{
typedef typename MatrixType::Index Index;
public:
/** \brief Constructor.
*

View File

@ -32,7 +32,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar;
typedef NumTraits<Scalar> ScalarTraits;
typedef DenseIndex Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef typename ScalarTraits::Real RealScalar;
typedef typename ScalarTraits::NonInteger NonInteger;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;

View File

@ -238,7 +238,6 @@ struct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>
typedef typename traits<homogeneous_left_product_impl>::LhsMatrixType LhsMatrixType;
typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
typedef typename remove_all<typename LhsMatrixTypeCleaned::Nested>::type LhsMatrixTypeNested;
typedef typename MatrixType::Index Index;
homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)
: m_lhs(take_matrix_for_product<Lhs>::run(lhs)),
m_rhs(rhs)
@ -278,7 +277,6 @@ struct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>
: public ReturnByValue<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
{
typedef typename remove_all<typename Rhs::Nested>::type RhsNested;
typedef typename MatrixType::Index Index;
homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)
: m_lhs(lhs), m_rhs(rhs)
{}

View File

@ -41,7 +41,7 @@ public:
};
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseIndex Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
? Dynamic

View File

@ -133,7 +133,6 @@ struct unitOrthogonal_selector
typedef typename plain_matrix_type<Derived>::type VectorType;
typedef typename traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename Derived::Index Index;
typedef Matrix<Scalar,2,1> Vector2;
EIGEN_DEVICE_FUNC
static inline VectorType run(const Derived& src)

View File

@ -37,7 +37,7 @@ public:
};
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseIndex Index;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
/** Default constructor without initialization */

View File

@ -724,7 +724,6 @@ template<typename Other>
struct quaternionbase_assign_impl<Other,3,3>
{
typedef typename Other::Scalar Scalar;
typedef DenseIndex Index;
template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat)
{
using std::sqrt;
@ -742,13 +741,13 @@ struct quaternionbase_assign_impl<Other,3,3>
}
else
{
DenseIndex i = 0;
Index i = 0;
if (mat.coeff(1,1) > mat.coeff(0,0))
i = 1;
if (mat.coeff(2,2) > mat.coeff(i,i))
i = 2;
DenseIndex j = (i+1)%3;
DenseIndex k = (j+1)%3;
Index j = (i+1)%3;
Index k = (j+1)%3;
t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
q.coeffs().coeffRef(i) = Scalar(0.5) * t;

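The working indices above now use Eigen::Index, but the user-facing conversion is unchanged. A minimal sketch (angle and axis arbitrary) of the matrix-to-quaternion path this specialization implements:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  Eigen::Matrix3d R =
      Eigen::AngleAxisd(0.25 * 3.14159265358979323846, Eigen::Vector3d::UnitZ()).toRotationMatrix();

  // Constructing a quaternion from a 3x3 rotation matrix dispatches to
  // quaternionbase_assign_impl<Other,3,3> shown above.
  Eigen::Quaterniond q(R);
  std::cout << q.coeffs().transpose() << std::endl;
  return 0;
}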
View File

@ -66,7 +66,7 @@ template<typename _Scalar, int _Dim, int _Mode, int _Options>
struct traits<Transform<_Scalar,_Dim,_Mode,_Options> >
{
typedef _Scalar Scalar;
typedef DenseIndex Index;
typedef Eigen::Index StorageIndex;
typedef Dense StorageKind;
enum {
Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1,
@ -204,7 +204,8 @@ public:
};
/** the scalar type of the coefficients */
typedef _Scalar Scalar;
typedef DenseIndex Index;
typedef Eigen::Index StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** type of the matrix used to represent the transformation */
typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;
/** constified MatrixType */

View File

@ -97,7 +97,6 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename Derived::Index Index;
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),

View File

@ -21,7 +21,6 @@ namespace internal {
// template<typename TriangularFactorType,typename VectorsType,typename CoeffsType>
// void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)
// {
// typedef typename TriangularFactorType::Index Index;
// typedef typename VectorsType::Scalar Scalar;
// const Index nbVecs = vectors.cols();
// eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);
@ -51,7 +50,6 @@ namespace internal {
template<typename TriangularFactorType,typename VectorsType,typename CoeffsType>
void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)
{
typedef typename TriangularFactorType::Index Index;
const Index nbVecs = vectors.cols();
eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);
@ -80,7 +78,6 @@ void make_block_householder_triangular_factor(TriangularFactorType& triFactor, c
template<typename MatrixType,typename VectorsType,typename CoeffsType>
void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs, bool forward)
{
typedef typename MatrixType::Index Index;
enum { TFactorSize = MatrixType::ColsAtCompileTime };
Index nbVecs = vectors.cols();
Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, RowMajor> T(nbVecs,nbVecs);

View File

@ -60,7 +60,7 @@ template<typename VectorsType, typename CoeffsType, int Side>
struct traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef typename VectorsType::Scalar Scalar;
typedef typename VectorsType::Index Index;
typedef typename VectorsType::StorageIndex StorageIndex;
typedef typename VectorsType::StorageKind StorageKind;
enum {
RowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::RowsAtCompileTime
@ -87,7 +87,6 @@ struct hseq_side_dependent_impl
{
typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
typedef typename VectorsType::Index Index;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
@ -100,7 +99,6 @@ struct hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight>
{
typedef Transpose<Block<const VectorsType, 1, Dynamic> > EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;
typedef typename VectorsType::Index Index;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{
Index start = k+1+h.m_shift;
@ -131,7 +129,6 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
MaxColsAtCompileTime = internal::traits<HouseholderSequence>::MaxColsAtCompileTime
};
typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
typedef typename VectorsType::Index Index;
typedef HouseholderSequence<
typename internal::conditional<NumTraits<Scalar>::IsComplex,

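HouseholderSequence now draws its StorageIndex from the vectors expression, while all sizes remain Eigen::Index. The most common way such a sequence reaches user code is via HouseholderQR, sketched below for illustration (HouseholderQR itself is not part of this hunk):

#include <Eigen/QR>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);

  // qr.householderQ() returns a HouseholderSequence; multiplying it by the thin
  // identity yields the thin Q factor without forming the full 5x5 matrix.
  Eigen::MatrixXd thinQ = qr.householderQ() * Eigen::MatrixXd::Identity(5, 3);
  std::cout << thinQ.rows() << "x" << thinQ.cols() << std::endl;
  return 0;
}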
View File

@ -34,9 +34,8 @@ class DiagonalPreconditioner
{
typedef _Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef typename Vector::Index Index;
public:
typedef typename Vector::StorageIndex StorageIndex;
// this typedef is only to export the scalar type and compile-time dimensions to solve_retval
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;

View File

@ -27,7 +27,7 @@ namespace internal {
*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, int& iters,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
@ -36,9 +36,9 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
typedef typename Dest::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> VectorType;
RealScalar tol = tol_error;
int maxIters = iters;
Index maxIters = iters;
int n = mat.cols();
Index n = mat.cols();
VectorType r = rhs - mat * x;
VectorType r0 = r;
@ -61,8 +61,8 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
RealScalar tol2 = tol*tol;
RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
int i = 0;
int restarts = 0;
Index i = 0;
Index restarts = 0;
while ( r.squaredNorm()/rhs_sqnorm > tol2 && i<maxIters )
{
@ -155,7 +155,6 @@ class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
@ -183,7 +182,7 @@ public:
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
for(int j=0; j<b.cols(); ++j)
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;

View File

@ -26,7 +26,7 @@ namespace internal {
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, int& iters,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
@ -36,9 +36,9 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
typedef Matrix<Scalar,Dynamic,1> VectorType;
RealScalar tol = tol_error;
int maxIters = iters;
Index maxIters = iters;
int n = mat.cols();
Index n = mat.cols();
VectorType residual = rhs - mat * x; //initial residual
@ -64,7 +64,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
VectorType z(n), tmp(n);
RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
int i = 0;
Index i = 0;
while(i < maxIters)
{
tmp.noalias() = mat * p; // the bottleneck of the algorithm
@ -153,7 +153,6 @@ class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixTy
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
@ -191,7 +190,7 @@ public:
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
for(int j=0; j<b.cols(); ++j)
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;

View File

@ -25,7 +25,7 @@ namespace internal {
* \param ind The array of indices for the elements in @p row
* \param ncut The number of largest elements to keep
**/
template <typename VectorV, typename VectorI, typename Index>
template <typename VectorV, typename VectorI>
Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
{
typedef typename VectorV::RealScalar RealScalar;
@ -105,7 +105,7 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar> >
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef SparseMatrix<Scalar,RowMajor> FactorType;
typedef SparseMatrix<Scalar,ColMajor> PermutType;
typedef typename FactorType::Index Index;
typedef typename FactorType::StorageIndex StorageIndex;
public:
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
@ -189,8 +189,8 @@ protected:
bool m_analysisIsOk;
bool m_factorizationIsOk;
ComputationInfo m_info;
PermutationMatrix<Dynamic,Dynamic,Index> m_P; // Fill-reducing permutation
PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // Inverse permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // Fill-reducing permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // Inverse permutation
};
/**
@ -218,14 +218,14 @@ template<typename _MatrixType>
void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
{
// Compute the Fill-reducing permutation
SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
// Symmetrize the pattern
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
// on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
AtA.prune(keep_diag());
internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P); // Then compute the AMD ordering...
internal::minimum_degree_ordering<Scalar, StorageIndex>(AtA, m_P); // Then compute the AMD ordering...
m_Pinv = m_P.inverse(); // ... and the inverse permutation
@ -239,6 +239,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
using std::sqrt;
using std::swap;
using std::abs;
using internal::convert_index;
eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
Index n = amat.cols(); // Size of the matrix
@ -250,7 +251,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
// Apply the fill-reducing permutation
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
SparseMatrix<Scalar,RowMajor, Index> mat;
SparseMatrix<Scalar,RowMajor, StorageIndex> mat;
mat = amat.twistedBy(m_Pinv);
// Initialization
@ -259,7 +260,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
u.fill(0);
// number of largest elements to keep in each row:
Index fill_in = static_cast<Index> (amat.nonZeros()*m_fillfactor)/n+1;
Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1;
if (fill_in > n) fill_in = n;
// number of largest nonzero elements to keep in the L and the U part of the current row:
@ -274,9 +275,9 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
Index sizeu = 1; // number of nonzero elements in the upper part of the current row
Index sizel = 0; // number of nonzero elements in the lower part of the current row
ju(ii) = ii;
ju(ii) = convert_index<StorageIndex>(ii);
u(ii) = 0;
jr(ii) = ii;
jr(ii) = convert_index<StorageIndex>(ii);
RealScalar rownorm = 0;
typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
@ -286,9 +287,9 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
if (k < ii)
{
// copy the lower part
ju(sizel) = k;
ju(sizel) = convert_index<StorageIndex>(k);
u(sizel) = j_it.value();
jr(k) = sizel;
jr(k) = convert_index<StorageIndex>(sizel);
++sizel;
}
else if (k == ii)
@ -299,9 +300,9 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
{
// copy the upper part
Index jpos = ii + sizeu;
ju(jpos) = k;
ju(jpos) = convert_index<StorageIndex>(k);
u(jpos) = j_it.value();
jr(k) = jpos;
jr(k) = convert_index<StorageIndex>(jpos);
++sizeu;
}
rownorm += numext::abs2(j_it.value());
@ -331,7 +332,8 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
// swap the two locations
Index j = ju(jj);
swap(ju(jj), ju(k));
jr(minrow) = jj; jr(j) = k;
jr(minrow) = convert_index<StorageIndex>(jj);
jr(j) = convert_index<StorageIndex>(k);
swap(u(jj), u(k));
}
// Reset this location
@ -355,8 +357,8 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
for (; ki_it; ++ki_it)
{
Scalar prod = fact * ki_it.value();
Index j = ki_it.index();
Index jpos = jr(j);
Index j = ki_it.index();
Index jpos = jr(j);
if (jpos == -1) // fill-in element
{
Index newpos;
@ -372,16 +374,16 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
sizel++;
eigen_internal_assert(sizel<=ii);
}
ju(newpos) = j;
ju(newpos) = convert_index<StorageIndex>(j);
u(newpos) = -prod;
jr(j) = newpos;
jr(j) = convert_index<StorageIndex>(newpos);
}
else
u(jpos) -= prod;
}
// store the pivot element
u(len) = fact;
ju(len) = minrow;
u(len) = fact;
ju(len) = convert_index<StorageIndex>(minrow);
++len;
jj++;

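Only the stored integer type changes here; the preconditioner is used exactly as before. A minimal, self-contained sketch (matrix values are arbitrary) combining IncompleteLUT with BiCGSTAB:

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <vector>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  const int n = 100;

  // Simple tridiagonal test matrix.
  std::vector<Eigen::Triplet<double> > coeffs;
  for (int i = 0; i < n; ++i)
  {
    coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i > 0)     coeffs.push_back(Eigen::Triplet<double>(i, i - 1, -1.0));
    if (i < n - 1) coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
  }
  SpMat A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::BiCGSTAB<SpMat, Eigen::IncompleteLUT<double> > solver;
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);
  std::cout << "iterations: " << solver.iterations()
            << ", estimated error: " << solver.error() << std::endl;
  return 0;
}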
View File

@ -28,7 +28,7 @@ public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
public:
@ -121,6 +121,7 @@ public:
/** \internal */
Index rows() const { return mp_matrix.rows(); }
/** \internal */
Index cols() const { return mp_matrix.cols(); }
@ -144,7 +145,7 @@ public:
* It is either the value set by setMaxIterations or, by default,
* twice the number of columns of the matrix.
*/
int maxIterations() const
Index maxIterations() const
{
return (m_maxIterations<0) ? 2*mp_matrix.cols() : m_maxIterations;
}
@ -152,14 +153,14 @@ public:
/** Sets the max number of iterations.
* Default is twice the number of columns of the matrix.
*/
Derived& setMaxIterations(int maxIters)
Derived& setMaxIterations(Index maxIters)
{
m_maxIterations = maxIters;
return derived();
}
/** \returns the number of iterations performed during the last solve */
int iterations() const
Index iterations() const
{
eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
return m_iterations;
@ -199,11 +200,11 @@ public:
{
eigen_assert(rows()==b.rows());
int rhsCols = b.cols();
int size = b.rows();
Index rhsCols = b.cols();
Index size = b.rows();
Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
Eigen::Matrix<DestScalar,Dynamic,1> tx(size);
for(int k=0; k<rhsCols; ++k)
for(Index k=0; k<rhsCols; ++k)
{
tb = b.col(k);
tx = derived().solve(tb);
@ -232,11 +233,11 @@ protected:
Ref<const MatrixType> mp_matrix;
Preconditioner m_preconditioner;
int m_maxIterations;
Index m_maxIterations;
RealScalar m_tolerance;
mutable RealScalar m_error;
mutable int m_iterations;
mutable Index m_iterations;
mutable ComputationInfo m_info;
mutable bool m_analysisIsOk, m_factorizationIsOk;
};

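The user-visible effect of the int to Index switch above, sketched with ConjugateGradient; the solver choice and the tiny diagonal matrix are just examples.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  SpMat A(3, 3);
  A.insert(0, 0) = 4.0; A.insert(1, 1) = 3.0; A.insert(2, 2) = 2.0;
  A.makeCompressed();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(3);

  Eigen::ConjugateGradient<SpMat, Eigen::Lower> cg;
  cg.setMaxIterations(Eigen::Index(50));   // now takes an Index instead of an int
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);

  Eigen::Index iters = cg.iterations();    // and returns an Index as well
  std::cout << "#iterations: " << iters << ", estimated error: " << cg.error() << std::endl;
  return 0;
}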
View File

@ -41,7 +41,6 @@ template<typename Decomposition, typename RhsType, typename GuessType>
class SolveWithGuess : public internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type
{
public:
typedef typename RhsType::Index Index;
typedef typename internal::traits<SolveWithGuess>::Scalar Scalar;
typedef typename internal::traits<SolveWithGuess>::PlainObject PlainObject;
typedef typename internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type Base;

View File

@ -62,7 +62,7 @@ template<typename Scalar> class JacobiRotation
JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }
template<typename Derived>
bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
bool makeJacobi(const MatrixBase<Derived>&, Index p, Index q);
bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);
void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
@ -123,7 +123,7 @@ bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, co
*/
template<typename Scalar>
template<typename Derived>
inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Index p, Index q)
{
return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
}
@ -300,7 +300,6 @@ namespace internal {
template<typename VectorX, typename VectorY, typename OtherScalar>
void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j)
{
typedef typename VectorX::Index Index;
typedef typename VectorX::Scalar Scalar;
enum { PacketSize = packet_traits<Scalar>::size };
typedef typename packet_traits<Scalar>::type Packet;

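makeJacobi now takes plain Eigen::Index arguments. A short sketch following the documented usage pattern (values arbitrary): compute a rotation annihilating the off-diagonal entry of a selfadjoint 2x2 matrix and apply it on both sides.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 2.0, 1.0,
       1.0, 3.0;

  Eigen::JacobiRotation<double> J;
  Eigen::Index p = 0, q = 1;            // plain Index, no Derived::Index needed
  J.makeJacobi(m, p, q);

  m.applyOnTheLeft(p, q, J.adjoint());
  m.applyOnTheRight(p, q, J);
  std::cout << m << std::endl;          // m(p,q) is now numerically zero
  return 0;
}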
View File

@ -66,9 +66,10 @@ template<typename _MatrixType> class FullPivLU
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
typedef typename MatrixType::Index Index;
typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename internal::plain_row_type<MatrixType, StorageIndex>::type IntRowVectorType;
typedef typename internal::plain_col_type<MatrixType, StorageIndex>::type IntColVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;
typedef typename MatrixType::PlainObject PlainObject;

View File

@ -72,7 +72,8 @@ template<typename _MatrixType> class PartialPivLU
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
typedef typename MatrixType::Index Index;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
typedef typename MatrixType::PlainObject PlainObject;
@ -261,7 +262,6 @@ struct partial_lu_impl
typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
/** \internal performs the LU decomposition in-place of the matrix \a lu
* using an unblocked algorithm.
@ -408,13 +408,13 @@ struct partial_lu_impl
/** \internal performs the LU decomposition with partial pivoting in-place.
*/
template<typename MatrixType, typename TranspositionType>
void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndexType& nb_transpositions)
void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndex& nb_transpositions)
{
eigen_assert(lu.cols() == row_transpositions.size());
eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
partial_lu_impl
<typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndexType>
<typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
}
@ -433,7 +433,7 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& ma
m_rowsTranspositions.resize(size);
typename TranspositionType::StorageIndexType nb_transpositions;
typename TranspositionType::StorageIndex nb_transpositions;
internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
m_det_p = (nb_transpositions%2) ? -1 : 1;

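nb_transpositions is now counted in the transpositions' StorageIndex; for users the decomposition itself is unchanged. A brief sketch (random matrix for illustration only):

#include <Eigen/LU>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);

  Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
  Eigen::VectorXd x = lu.solve(b);

  // The sign of the determinant reflects the parity of the row transpositions
  // counted above (m_det_p).
  std::cout << "det(A) = " << lu.determinant()
            << ", residual = " << (A * x - b).norm() << std::endl;
  return 0;
}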
View File

@ -41,10 +41,10 @@ template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1&
template<typename T0, typename T1> inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); }
/* clear w */
template<typename Index>
static int cs_wclear (Index mark, Index lemax, Index *w, Index n)
template<typename StorageIndex>
static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
{
Index k;
StorageIndex k;
if(mark < 2 || (mark + lemax < 0))
{
for(k = 0; k < n; k++)
@ -56,10 +56,10 @@ static int cs_wclear (Index mark, Index lemax, Index *w, Index n)
}
/* depth-first search and postorder of a tree rooted at node j */
template<typename Index>
Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack)
template<typename StorageIndex>
StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
{
int i, p, top = 0;
StorageIndex i, p, top = 0;
if(!head || !next || !post || !stack) return (-1); /* check inputs */
stack[0] = j; /* place j on the stack */
while (top >= 0) /* while (stack is not empty) */
@ -87,40 +87,39 @@ Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Ind
* \returns the permutation P reducing the fill-in of the input matrix \a C
* The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
* On exit the values of C are destroyed */
template<typename Scalar, typename Index>
void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
template<typename Scalar, typename StorageIndex>
void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm)
{
using std::sqrt;
int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;
unsigned int h;
StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;
Index n = C.cols();
dense = std::max<Index> (16, Index(10 * sqrt(double(n)))); /* find dense threshold */
dense = std::min<Index> (n-2, dense);
StorageIndex n = StorageIndex(C.cols());
dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */
dense = (std::min)(n-2, dense);
Index cnz = C.nonZeros();
StorageIndex cnz = StorageIndex(C.nonZeros());
perm.resize(n+1);
t = cnz + cnz/5 + 2*n; /* add elbow room to C */
C.resizeNonZeros(t);
// get workspace
ei_declare_aligned_stack_constructed_variable(Index,W,8*(n+1),0);
Index* len = W;
Index* nv = W + (n+1);
Index* next = W + 2*(n+1);
Index* head = W + 3*(n+1);
Index* elen = W + 4*(n+1);
Index* degree = W + 5*(n+1);
Index* w = W + 6*(n+1);
Index* hhead = W + 7*(n+1);
Index* last = perm.indices().data(); /* use P as workspace for last */
ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);
StorageIndex* len = W;
StorageIndex* nv = W + (n+1);
StorageIndex* next = W + 2*(n+1);
StorageIndex* head = W + 3*(n+1);
StorageIndex* elen = W + 4*(n+1);
StorageIndex* degree = W + 5*(n+1);
StorageIndex* w = W + 6*(n+1);
StorageIndex* hhead = W + 7*(n+1);
StorageIndex* last = perm.indices().data(); /* use P as workspace for last */
/* --- Initialize quotient graph ---------------------------------------- */
Index* Cp = C.outerIndexPtr();
Index* Ci = C.innerIndexPtr();
StorageIndex* Cp = C.outerIndexPtr();
StorageIndex* Ci = C.innerIndexPtr();
for(k = 0; k < n; k++)
len[k] = Cp[k+1] - Cp[k];
len[n] = 0;
@ -137,7 +136,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
elen[i] = 0; // Ek of node i is empty
degree[i] = len[i]; // degree of node i
}
mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
mark = internal::cs_wclear<StorageIndex>(0, 0, w, n); /* clear w */
elen[n] = -2; /* n is a dead element */
Cp[n] = -1; /* n is a root of assembly tree */
w[n] = 0; /* n is a dead element */
@ -252,7 +251,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
elen[k] = -2; /* k is now an element */
/* --- Find set differences ----------------------------------------- */
mark = internal::cs_wclear<Index>(mark, lemax, w, n); /* clear w if necessary */
mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n); /* clear w if necessary */
for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
{
i = Ci[pk];
@ -322,7 +321,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
}
else
{
degree[i] = std::min<Index> (degree[i], d); /* update degree(i) */
degree[i] = std::min<StorageIndex> (degree[i], d); /* update degree(i) */
Ci[pn] = Ci[p3]; /* move first node to end */
Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */
Ci[p1] = k; /* add k as 1st element in of Ei */
@ -330,12 +329,12 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
h %= n; /* finalize hash of i */
next[i] = hhead[h]; /* place i in hash bucket */
hhead[h] = i;
last[i] = h; /* save hash of i in last[i] */
last[i] = h; /* save hash of i in last[i] */
}
} /* scan2 is done */
degree[k] = dk; /* finalize |Lk| */
lemax = std::max<Index>(lemax, dk);
mark = internal::cs_wclear<Index>(mark+lemax, lemax, w, n); /* clear w */
lemax = std::max<StorageIndex>(lemax, dk);
mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n); /* clear w */
/* --- Supernode detection ------------------------------------------ */
for(pk = pk1; pk < pk2; pk++)
@ -383,12 +382,12 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
nv[i] = nvi; /* restore nv[i] */
d = degree[i] + dk - nvi; /* compute external degree(i) */
d = std::min<Index> (d, n - nel - nvi);
d = std::min<StorageIndex> (d, n - nel - nvi);
if(head[d] != -1) last[head[d]] = i;
next[i] = head[d]; /* put i back in degree list */
last[i] = -1;
head[d] = i;
mindeg = std::min<Index> (mindeg, d); /* find new minimum degree */
mindeg = std::min<StorageIndex> (mindeg, d); /* find new minimum degree */
degree[i] = d;
Ci[p++] = i; /* place i in Lk */
}
@ -421,7 +420,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
}
for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
{
if(Cp[i] == -1) k = internal::cs_tdfs<Index>(i, k, head, next, perm.indices().data(), w);
if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
}
perm.indices().conservativeResize(n);

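The same routine is reachable through the public AMDOrdering wrapper of the OrderingMethods module; the wrapper call and the toy matrix below are assumptions for illustration, not part of this hunk.

#include <Eigen/Sparse>
#include <Eigen/OrderingMethods>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;   // StorageIndex is int by default
  SpMat A(4, 4);
  A.insert(0, 0) = 4.0; A.insert(1, 1) = 4.0; A.insert(2, 2) = 4.0; A.insert(3, 3) = 4.0;
  A.insert(3, 0) = 1.0; A.insert(0, 3) = 1.0;  // keep the pattern symmetric
  A.makeCompressed();

  Eigen::AMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
  ordering(A, perm);   // symmetrizes the pattern, then calls minimum_degree_ordering

  std::cout << perm.indices().transpose() << std::endl;
  return 0;
}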
View File

@ -135,54 +135,54 @@ namespace internal {
/* ========================================================================== */
// == Row and Column structures ==
template <typename Index>
template <typename IndexType>
struct colamd_col
{
Index start ; /* index for A of first row in this column, or DEAD */
IndexType start ; /* index for A of first row in this column, or DEAD */
/* if column is dead */
Index length ; /* number of rows in this column */
IndexType length ; /* number of rows in this column */
union
{
Index thickness ; /* number of original columns represented by this */
IndexType thickness ; /* number of original columns represented by this */
/* col, if the column is alive */
Index parent ; /* parent in parent tree super-column structure, if */
IndexType parent ; /* parent in parent tree super-column structure, if */
/* the column is dead */
} shared1 ;
union
{
Index score ; /* the score used to maintain heap, if col is alive */
Index order ; /* pivot ordering of this column, if col is dead */
IndexType score ; /* the score used to maintain heap, if col is alive */
IndexType order ; /* pivot ordering of this column, if col is dead */
} shared2 ;
union
{
Index headhash ; /* head of a hash bucket, if col is at the head of */
IndexType headhash ; /* head of a hash bucket, if col is at the head of */
/* a degree list */
Index hash ; /* hash value, if col is not in a degree list */
Index prev ; /* previous column in degree list, if col is in a */
IndexType hash ; /* hash value, if col is not in a degree list */
IndexType prev ; /* previous column in degree list, if col is in a */
/* degree list (but not at the head of a degree list) */
} shared3 ;
union
{
Index degree_next ; /* next column, if col is in a degree list */
Index hash_next ; /* next column, if col is in a hash list */
IndexType degree_next ; /* next column, if col is in a degree list */
IndexType hash_next ; /* next column, if col is in a hash list */
} shared4 ;
};
template <typename Index>
template <typename IndexType>
struct Colamd_Row
{
Index start ; /* index for A of first col in this row */
Index length ; /* number of principal columns in this row */
IndexType start ; /* index for A of first col in this row */
IndexType length ; /* number of principal columns in this row */
union
{
Index degree ; /* number of principal & non-principal columns in row */
Index p ; /* used as a row pointer in init_rows_cols () */
IndexType degree ; /* number of principal & non-principal columns in row */
IndexType p ; /* used as a row pointer in init_rows_cols () */
} shared1 ;
union
{
Index mark ; /* for computing set differences and marking dead rows*/
Index first_column ;/* first column in row (used in garbage collection) */
IndexType mark ; /* for computing set differences and marking dead rows*/
IndexType first_column ;/* first column in row (used in garbage collection) */
} shared2 ;
};
@ -202,38 +202,38 @@ struct Colamd_Row
This macro is not needed when using symamd.
Explicit typecast to Index added Sept. 23, 2002, COLAMD version 2.2, to avoid
Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid
gcc -pedantic warning messages.
*/
template <typename Index>
inline Index colamd_c(Index n_col)
{ return Index( ((n_col) + 1) * sizeof (colamd_col<Index>) / sizeof (Index) ) ; }
template <typename IndexType>
inline IndexType colamd_c(IndexType n_col)
{ return IndexType( ((n_col) + 1) * sizeof (colamd_col<IndexType>) / sizeof (IndexType) ) ; }
template <typename Index>
inline Index colamd_r(Index n_row)
{ return Index(((n_row) + 1) * sizeof (Colamd_Row<Index>) / sizeof (Index)); }
template <typename IndexType>
inline IndexType colamd_r(IndexType n_row)
{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row<IndexType>) / sizeof (IndexType)); }
// Prototypes of non-user callable routines
template <typename Index>
static Index init_rows_cols (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> col [], Index A [], Index p [], Index stats[COLAMD_STATS] );
template <typename IndexType>
static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] );
template <typename Index>
static void init_scoring (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], double knobs[COLAMD_KNOBS], Index *p_n_row2, Index *p_n_col2, Index *p_max_deg);
template <typename IndexType>
static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg);
template <typename Index>
static Index find_ordering (Index n_row, Index n_col, Index Alen, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], Index n_col2, Index max_deg, Index pfree);
template <typename IndexType>
static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree);
template <typename Index>
static void order_children (Index n_col, colamd_col<Index> Col [], Index p []);
template <typename IndexType>
static void order_children (IndexType n_col, colamd_col<IndexType> Col [], IndexType p []);
template <typename Index>
static void detect_super_cols (colamd_col<Index> Col [], Index A [], Index head [], Index row_start, Index row_length ) ;
template <typename IndexType>
static void detect_super_cols (colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ;
template <typename Index>
static Index garbage_collection (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index *pfree) ;
template <typename IndexType>
static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType *pfree) ;
template <typename Index>
static inline Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
template <typename IndexType>
static inline IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row [] ) ;
/* === No debugging ========================================================= */
@ -260,8 +260,8 @@ static inline Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
* \param n_col number of columns in A
* \return recommended value of Alen for use by colamd
*/
template <typename Index>
inline Index colamd_recommended ( Index nnz, Index n_row, Index n_col)
template <typename IndexType>
inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col)
{
if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
return (-1);
@ -325,22 +325,22 @@ static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
* \param knobs parameter settings for colamd
* \param stats colamd output statistics and error codes
*/
template <typename Index>
static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, double knobs[COLAMD_KNOBS], Index stats[COLAMD_STATS])
template <typename IndexType>
static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS])
{
/* === Local variables ================================================== */
Index i ; /* loop index */
Index nnz ; /* nonzeros in A */
Index Row_size ; /* size of Row [], in integers */
Index Col_size ; /* size of Col [], in integers */
Index need ; /* minimum required length of A */
Colamd_Row<Index> *Row ; /* pointer into A of Row [0..n_row] array */
colamd_col<Index> *Col ; /* pointer into A of Col [0..n_col] array */
Index n_col2 ; /* number of non-dense, non-empty columns */
Index n_row2 ; /* number of non-dense, non-empty rows */
Index ngarbage ; /* number of garbage collections performed */
Index max_deg ; /* maximum row degree */
IndexType i ; /* loop index */
IndexType nnz ; /* nonzeros in A */
IndexType Row_size ; /* size of Row [], in integers */
IndexType Col_size ; /* size of Col [], in integers */
IndexType need ; /* minimum required length of A */
Colamd_Row<IndexType> *Row ; /* pointer into A of Row [0..n_row] array */
colamd_col<IndexType> *Col ; /* pointer into A of Col [0..n_col] array */
IndexType n_col2 ; /* number of non-dense, non-empty columns */
IndexType n_row2 ; /* number of non-dense, non-empty rows */
IndexType ngarbage ; /* number of garbage collections performed */
IndexType max_deg ; /* maximum row degree */
double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
@ -431,8 +431,8 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou
}
Alen -= Col_size + Row_size ;
Col = (colamd_col<Index> *) &A [Alen] ;
Row = (Colamd_Row<Index> *) &A [Alen + Col_size] ;
Col = (colamd_col<IndexType> *) &A [Alen] ;
Row = (Colamd_Row<IndexType> *) &A [Alen + Col_size] ;
/* === Construct the row and column data structures ===================== */
@ -485,29 +485,29 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou
column form of the matrix. Returns false if the matrix is invalid,
true otherwise. Not user-callable.
*/
template <typename Index>
static Index init_rows_cols /* returns true if OK, or false otherwise */
template <typename IndexType>
static IndexType init_rows_cols /* returns true if OK, or false otherwise */
(
/* === Parameters ======================================================= */
Index n_row, /* number of rows of A */
Index n_col, /* number of columns of A */
Colamd_Row<Index> Row [], /* of size n_row+1 */
colamd_col<Index> Col [], /* of size n_col+1 */
Index A [], /* row indices of A, of size Alen */
Index p [], /* pointers to columns in A, of size n_col+1 */
Index stats [COLAMD_STATS] /* colamd statistics */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* row indices of A, of size Alen */
IndexType p [], /* pointers to columns in A, of size n_col+1 */
IndexType stats [COLAMD_STATS] /* colamd statistics */
)
{
/* === Local variables ================================================== */
Index col ; /* a column index */
Index row ; /* a row index */
Index *cp ; /* a column pointer */
Index *cp_end ; /* a pointer to the end of a column */
Index *rp ; /* a row pointer */
Index *rp_end ; /* a pointer to the end of a row */
Index last_row ; /* previous row */
IndexType col ; /* a column index */
IndexType row ; /* a row index */
IndexType *cp ; /* a column pointer */
IndexType *cp_end ; /* a pointer to the end of a column */
IndexType *rp ; /* a row pointer */
IndexType *rp_end ; /* a pointer to the end of a row */
IndexType last_row ; /* previous row */
/* === Initialize columns, and check column pointers ==================== */
@ -701,40 +701,40 @@ static Index init_rows_cols /* returns true if OK, or false otherwise */
Kills dense or empty columns and rows, calculates an initial score for
each column, and places all columns in the degree lists. Not user-callable.
*/
template <typename Index>
template <typename IndexType>
static void init_scoring
(
/* === Parameters ======================================================= */
Index n_row, /* number of rows of A */
Index n_col, /* number of columns of A */
Colamd_Row<Index> Row [], /* of size n_row+1 */
colamd_col<Index> Col [], /* of size n_col+1 */
Index A [], /* column form and row form of A */
Index head [], /* of size n_col+1 */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* column form and row form of A */
IndexType head [], /* of size n_col+1 */
double knobs [COLAMD_KNOBS],/* parameters */
Index *p_n_row2, /* number of non-dense, non-empty rows */
Index *p_n_col2, /* number of non-dense, non-empty columns */
Index *p_max_deg /* maximum row degree */
IndexType *p_n_row2, /* number of non-dense, non-empty rows */
IndexType *p_n_col2, /* number of non-dense, non-empty columns */
IndexType *p_max_deg /* maximum row degree */
)
{
/* === Local variables ================================================== */
Index c ; /* a column index */
Index r, row ; /* a row index */
Index *cp ; /* a column pointer */
Index deg ; /* degree of a row or column */
Index *cp_end ; /* a pointer to the end of a column */
Index *new_cp ; /* new column pointer */
Index col_length ; /* length of pruned column */
Index score ; /* current column score */
Index n_col2 ; /* number of non-dense, non-empty columns */
Index n_row2 ; /* number of non-dense, non-empty rows */
Index dense_row_count ; /* remove rows with more entries than this */
Index dense_col_count ; /* remove cols with more entries than this */
Index min_score ; /* smallest column score */
Index max_deg ; /* maximum row degree */
Index next_col ; /* Used to add to degree list.*/
IndexType c ; /* a column index */
IndexType r, row ; /* a row index */
IndexType *cp ; /* a column pointer */
IndexType deg ; /* degree of a row or column */
IndexType *cp_end ; /* a pointer to the end of a column */
IndexType *new_cp ; /* new column pointer */
IndexType col_length ; /* length of pruned column */
IndexType score ; /* current column score */
IndexType n_col2 ; /* number of non-dense, non-empty columns */
IndexType n_row2 ; /* number of non-dense, non-empty rows */
IndexType dense_row_count ; /* remove rows with more entries than this */
IndexType dense_col_count ; /* remove cols with more entries than this */
IndexType min_score ; /* smallest column score */
IndexType max_deg ; /* maximum row degree */
IndexType next_col ; /* Used to add to degree list.*/
/* === Extract knobs ==================================================== */
@ -845,7 +845,7 @@ static void init_scoring
score = COLAMD_MIN (score, n_col) ;
}
/* determine pruned column length */
col_length = (Index) (new_cp - &A [Col [c].start]) ;
col_length = (IndexType) (new_cp - &A [Col [c].start]) ;
if (col_length == 0)
{
/* a newly-made null column (all rows in this col are "dense" */
@ -938,56 +938,56 @@ static void init_scoring
(no supercolumns on input). Uses a minimum approximate column minimum
degree ordering method. Not user-callable.
*/
template <typename Index>
static Index find_ordering /* return the number of garbage collections */
template <typename IndexType>
static IndexType find_ordering /* return the number of garbage collections */
(
/* === Parameters ======================================================= */
Index n_row, /* number of rows of A */
Index n_col, /* number of columns of A */
Index Alen, /* size of A, 2*nnz + n_col or larger */
Colamd_Row<Index> Row [], /* of size n_row+1 */
colamd_col<Index> Col [], /* of size n_col+1 */
Index A [], /* column form and row form of A */
Index head [], /* of size n_col+1 */
Index n_col2, /* Remaining columns to order */
Index max_deg, /* Maximum row degree */
Index pfree /* index of first free slot (2*nnz on entry) */
IndexType n_row, /* number of rows of A */
IndexType n_col, /* number of columns of A */
IndexType Alen, /* size of A, 2*nnz + n_col or larger */
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* column form and row form of A */
IndexType head [], /* of size n_col+1 */
IndexType n_col2, /* Remaining columns to order */
IndexType max_deg, /* Maximum row degree */
IndexType pfree /* index of first free slot (2*nnz on entry) */
)
{
/* === Local variables ================================================== */
Index k ; /* current pivot ordering step */
Index pivot_col ; /* current pivot column */
Index *cp ; /* a column pointer */
Index *rp ; /* a row pointer */
Index pivot_row ; /* current pivot row */
Index *new_cp ; /* modified column pointer */
Index *new_rp ; /* modified row pointer */
Index pivot_row_start ; /* pointer to start of pivot row */
Index pivot_row_degree ; /* number of columns in pivot row */
Index pivot_row_length ; /* number of supercolumns in pivot row */
Index pivot_col_score ; /* score of pivot column */
Index needed_memory ; /* free space needed for pivot row */
Index *cp_end ; /* pointer to the end of a column */
Index *rp_end ; /* pointer to the end of a row */
Index row ; /* a row index */
Index col ; /* a column index */
Index max_score ; /* maximum possible score */
Index cur_score ; /* score of current column */
IndexType k ; /* current pivot ordering step */
IndexType pivot_col ; /* current pivot column */
IndexType *cp ; /* a column pointer */
IndexType *rp ; /* a row pointer */
IndexType pivot_row ; /* current pivot row */
IndexType *new_cp ; /* modified column pointer */
IndexType *new_rp ; /* modified row pointer */
IndexType pivot_row_start ; /* pointer to start of pivot row */
IndexType pivot_row_degree ; /* number of columns in pivot row */
IndexType pivot_row_length ; /* number of supercolumns in pivot row */
IndexType pivot_col_score ; /* score of pivot column */
IndexType needed_memory ; /* free space needed for pivot row */
IndexType *cp_end ; /* pointer to the end of a column */
IndexType *rp_end ; /* pointer to the end of a row */
IndexType row ; /* a row index */
IndexType col ; /* a column index */
IndexType max_score ; /* maximum possible score */
IndexType cur_score ; /* score of current column */
unsigned int hash ; /* hash value for supernode detection */
Index head_column ; /* head of hash bucket */
Index first_col ; /* first column in hash bucket */
Index tag_mark ; /* marker value for mark array */
Index row_mark ; /* Row [row].shared2.mark */
Index set_difference ; /* set difference size of row with pivot row */
Index min_score ; /* smallest column score */
Index col_thickness ; /* "thickness" (no. of columns in a supercol) */
Index max_mark ; /* maximum value of tag_mark */
Index pivot_col_thickness ; /* number of columns represented by pivot col */
Index prev_col ; /* Used by Dlist operations. */
Index next_col ; /* Used by Dlist operations. */
Index ngarbage ; /* number of garbage collections performed */
IndexType head_column ; /* head of hash bucket */
IndexType first_col ; /* first column in hash bucket */
IndexType tag_mark ; /* marker value for mark array */
IndexType row_mark ; /* Row [row].shared2.mark */
IndexType set_difference ; /* set difference size of row with pivot row */
IndexType min_score ; /* smallest column score */
IndexType col_thickness ; /* "thickness" (no. of columns in a supercol) */
IndexType max_mark ; /* maximum value of tag_mark */
IndexType pivot_col_thickness ; /* number of columns represented by pivot col */
IndexType prev_col ; /* Used by Dlist operations. */
IndexType next_col ; /* Used by Dlist operations. */
IndexType ngarbage ; /* number of garbage collections performed */
/* === Initialization and clear mark ==================================== */
@@ -1277,7 +1277,7 @@ static Index find_ordering /* return the number of garbage collections */
}
/* recompute the column's length */
Col [col].length = (Index) (new_cp - &A [Col [col].start]) ;
Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ;
/* === Further mass elimination ================================= */
@@ -1325,7 +1325,7 @@ static Index find_ordering /* return the number of garbage collections */
Col [col].shared4.hash_next = first_col ;
/* save hash function in Col [col].shared3.hash */
Col [col].shared3.hash = (Index) hash ;
Col [col].shared3.hash = (IndexType) hash ;
COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
}
}
@@ -1420,7 +1420,7 @@ static Index find_ordering /* return the number of garbage collections */
/* update pivot row length to reflect any cols that were killed */
/* during super-col detection and mass elimination */
Row [pivot_row].start = pivot_row_start ;
Row [pivot_row].length = (Index) (new_rp - &A[pivot_row_start]) ;
Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ;
Row [pivot_row].shared1.degree = pivot_row_degree ;
Row [pivot_row].shared2.mark = 0 ;
/* pivot row is no longer dead */
@@ -1449,22 +1449,22 @@ static Index find_ordering /* return the number of garbage collections */
taken by this routine is O (n_col), that is, linear in the number of
columns. Not user-callable.
*/
template <typename Index>
template <typename IndexType>
static inline void order_children
(
/* === Parameters ======================================================= */
Index n_col, /* number of columns of A */
colamd_col<Index> Col [], /* of size n_col+1 */
Index p [] /* p [0 ... n_col-1] is the column permutation*/
IndexType n_col, /* number of columns of A */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType p [] /* p [0 ... n_col-1] is the column permutation*/
)
{
/* === Local variables ================================================== */
Index i ; /* loop counter for all columns */
Index c ; /* column index */
Index parent ; /* index of column's parent */
Index order ; /* column's order */
IndexType i ; /* loop counter for all columns */
IndexType c ; /* column index */
IndexType parent ; /* index of column's parent */
IndexType order ; /* column's order */
/* === Order each non-principal column ================================== */
@@ -1550,33 +1550,33 @@ static inline void order_children
just been computed in the approximate degree computation.
Not user-callable.
*/
template <typename Index>
template <typename IndexType>
static void detect_super_cols
(
/* === Parameters ======================================================= */
colamd_col<Index> Col [], /* of size n_col+1 */
Index A [], /* row indices of A */
Index head [], /* head of degree lists and hash buckets */
Index row_start, /* pointer to set of columns to check */
Index row_length /* number of columns to check */
colamd_col<IndexType> Col [], /* of size n_col+1 */
IndexType A [], /* row indices of A */
IndexType head [], /* head of degree lists and hash buckets */
IndexType row_start, /* pointer to set of columns to check */
IndexType row_length /* number of columns to check */
)
{
/* === Local variables ================================================== */
Index hash ; /* hash value for a column */
Index *rp ; /* pointer to a row */
Index c ; /* a column index */
Index super_c ; /* column index of the column to absorb into */
Index *cp1 ; /* column pointer for column super_c */
Index *cp2 ; /* column pointer for column c */
Index length ; /* length of column super_c */
Index prev_c ; /* column preceding c in hash bucket */
Index i ; /* loop counter */
Index *rp_end ; /* pointer to the end of the row */
Index col ; /* a column index in the row to check */
Index head_column ; /* first column in hash bucket or degree list */
Index first_col ; /* first column in hash bucket */
IndexType hash ; /* hash value for a column */
IndexType *rp ; /* pointer to a row */
IndexType c ; /* a column index */
IndexType super_c ; /* column index of the column to absorb into */
IndexType *cp1 ; /* column pointer for column super_c */
IndexType *cp2 ; /* column pointer for column c */
IndexType length ; /* length of column super_c */
IndexType prev_c ; /* column preceding c in hash bucket */
IndexType i ; /* loop counter */
IndexType *rp_end ; /* pointer to the end of the row */
IndexType col ; /* a column index in the row to check */
IndexType head_column ; /* first column in hash bucket or degree list */
IndexType first_col ; /* first column in hash bucket */
/* === Consider each column in the row ================================== */
@@ -1701,27 +1701,27 @@ static void detect_super_cols
itself linear in the number of nonzeros in the input matrix.
Not user-callable.
*/
template <typename Index>
static Index garbage_collection /* returns the new value of pfree */
template <typename IndexType>
static IndexType garbage_collection /* returns the new value of pfree */
(
/* === Parameters ======================================================= */
Index n_row, /* number of rows */
Index n_col, /* number of columns */
Colamd_Row<Index> Row [], /* row info */
colamd_col<Index> Col [], /* column info */
Index A [], /* A [0 ... Alen-1] holds the matrix */
Index *pfree /* &A [0] ... pfree is in use */
IndexType n_row, /* number of rows */
IndexType n_col, /* number of columns */
Colamd_Row<IndexType> Row [], /* row info */
colamd_col<IndexType> Col [], /* column info */
IndexType A [], /* A [0 ... Alen-1] holds the matrix */
IndexType *pfree /* &A [0] ... pfree is in use */
)
{
/* === Local variables ================================================== */
Index *psrc ; /* source pointer */
Index *pdest ; /* destination pointer */
Index j ; /* counter */
Index r ; /* a row index */
Index c ; /* a column index */
Index length ; /* length of a row or column */
IndexType *psrc ; /* source pointer */
IndexType *pdest ; /* destination pointer */
IndexType j ; /* counter */
IndexType r ; /* a row index */
IndexType c ; /* a column index */
IndexType length ; /* length of a row or column */
/* === Defragment the columns =========================================== */
@@ -1734,7 +1734,7 @@ static Index garbage_collection /* returns the new value of pfree */
/* move and compact the column */
COLAMD_ASSERT (pdest <= psrc) ;
Col [c].start = (Index) (pdest - &A [0]) ;
Col [c].start = (IndexType) (pdest - &A [0]) ;
length = Col [c].length ;
for (j = 0 ; j < length ; j++)
{
@@ -1744,7 +1744,7 @@ static Index garbage_collection /* returns the new value of pfree */
*pdest++ = r ;
}
}
Col [c].length = (Index) (pdest - &A [Col [c].start]) ;
Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ;
}
}
@@ -1791,7 +1791,7 @@ static Index garbage_collection /* returns the new value of pfree */
/* move and compact the row */
COLAMD_ASSERT (pdest <= psrc) ;
Row [r].start = (Index) (pdest - &A [0]) ;
Row [r].start = (IndexType) (pdest - &A [0]) ;
length = Row [r].length ;
for (j = 0 ; j < length ; j++)
{
@@ -1801,7 +1801,7 @@ static Index garbage_collection /* returns the new value of pfree */
*pdest++ = c ;
}
}
Row [r].length = (Index) (pdest - &A [Row [r].start]) ;
Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ;
}
}
@@ -1810,7 +1810,7 @@ static Index garbage_collection /* returns the new value of pfree */
/* === Return the new value of pfree ==================================== */
return ((Index) (pdest - &A [0])) ;
return ((IndexType) (pdest - &A [0])) ;
}
@@ -1822,18 +1822,18 @@ static Index garbage_collection /* returns the new value of pfree */
Clears the Row [].shared2.mark array, and returns the new tag_mark.
Return value is the new tag_mark. Not user-callable.
*/
template <typename Index>
static inline Index clear_mark /* return the new value for tag_mark */
template <typename IndexType>
static inline IndexType clear_mark /* return the new value for tag_mark */
(
/* === Parameters ======================================================= */
Index n_row, /* number of rows in A */
Colamd_Row<Index> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
IndexType n_row, /* number of rows in A */
Colamd_Row<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
)
{
/* === Local variables ================================================== */
Index r ;
IndexType r ;
for (r = 0 ; r < n_row ; r++)
{

View File

@@ -111,12 +111,12 @@ class NaturalOrdering
* Functor computing the \em column \em approximate \em minimum \em degree ordering
* The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()).
*/
template<typename Index>
template<typename StorageIndex>
class COLAMDOrdering
{
public:
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
typedef Matrix<Index, Dynamic, 1> IndexVector;
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;
/** Compute the permutation vector \a perm from the sparse matrix \a mat
* \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
@@ -126,26 +126,26 @@ class COLAMDOrdering
{
eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
Index m = mat.rows();
Index n = mat.cols();
Index nnz = mat.nonZeros();
StorageIndex m = StorageIndex(mat.rows());
StorageIndex n = StorageIndex(mat.cols());
StorageIndex nnz = StorageIndex(mat.nonZeros());
// Get the recommended value of Alen to be used by colamd
Index Alen = internal::colamd_recommended(nnz, m, n);
StorageIndex Alen = internal::colamd_recommended(nnz, m, n);
// Set the default parameters
double knobs [COLAMD_KNOBS];
Index stats [COLAMD_STATS];
StorageIndex stats [COLAMD_STATS];
internal::colamd_set_defaults(knobs);
IndexVector p(n+1), A(Alen);
for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
// Call Colamd routine to compute the ordering
Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
EIGEN_UNUSED_VARIABLE(info);
eigen_assert( info && "COLAMD failed " );
perm.resize(n);
for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i;
for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i;
}
};
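For context, a minimal usage sketch of the COLAMDOrdering functor after this change, assuming a double, column-major SparseMatrix whose default StorageIndex is int; the 5x5 matrix and its entries are purely illustrative:

#include <Eigen/Sparse>
#include <Eigen/OrderingMethods>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;            // column-major, StorageIndex defaults to int
  SpMat A(5,5);
  A.insert(0,0) = 1; A.insert(1,1) = 2; A.insert(2,0) = 3;
  A.insert(3,3) = 4; A.insert(4,2) = 5; A.insert(2,4) = 6;
  A.makeCompressed();                                   // COLAMDOrdering requires compressed storage

  Eigen::COLAMDOrdering<int> ordering;
  Eigen::COLAMDOrdering<int>::PermutationType perm;     // PermutationMatrix<Dynamic,Dynamic,int>
  ordering(A, perm);                                    // fill-reducing column permutation
  // perm.indices() now holds the permutation as a vector of int (the StorageIndex type).
  return 0;
}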

View File

@@ -43,7 +43,7 @@ namespace internal
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::Index Index;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
@@ -52,7 +52,7 @@ namespace internal
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::Index Index;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
@@ -61,7 +61,7 @@ namespace internal
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::Index Index;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
@@ -138,7 +138,7 @@ class PastixBase : public SparseSolverBase<Derived>
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef SparseMatrix<Scalar, ColMajor> ColSpMatrix;
@@ -162,7 +162,7 @@ class PastixBase : public SparseSolverBase<Derived>
* The statistics related to the different phases of factorization and solve are saved here as well
* \sa analyzePattern() factorize()
*/
Array<Index,IPARM_SIZE,1>& iparm()
Array<StorageIndex,IPARM_SIZE,1>& iparm()
{
return m_iparm;
}
@@ -242,8 +242,8 @@ class PastixBase : public SparseSolverBase<Derived>
mutable int m_comm; // The MPI communicator identifier
mutable Matrix<int,IPARM_SIZE,1> m_iparm; // integer vector for the input parameters
mutable Matrix<double,DPARM_SIZE,1> m_dparm; // Scalar vector for the input parameters
mutable Matrix<Index,Dynamic,1> m_perm; // Permutation vector
mutable Matrix<Index,Dynamic,1> m_invp; // Inverse permutation vector
mutable Matrix<StorageIndex,Dynamic,1> m_perm; // Permutation vector
mutable Matrix<StorageIndex,Dynamic,1> m_invp; // Inverse permutation vector
mutable int m_size; // Size of the matrix
};
@@ -308,7 +308,7 @@ void PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)
if(m_size>0)
clean();
m_size = mat.rows();
m_size = internal::convert_index<int>(mat.rows());
m_perm.resize(m_size);
m_invp.resize(m_size);
@@ -337,7 +337,7 @@ void PastixBase<Derived>::factorize(ColSpMatrix& mat)
eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
m_size = mat.rows();
m_size = internal::convert_index<int>(mat.rows());
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
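The two m_size assignments just above now funnel the value returned by rows() through internal::convert_index<int>(), since the API-level index type can be wider than the int that PaStiX expects. As a rough illustration of what such a checked narrowing amounts to, here is a stand-alone sketch; the helper name convert_index_sketch and the plain assert are placeholders, not Eigen's actual implementation:

#include <cassert>
#include <cstddef>
#include <limits>

// Hypothetical helper mimicking a checked narrowing cast from a wide index
// type (e.g. std::ptrdiff_t) to a narrower one (e.g. int).
template<typename IndexDest, typename IndexSrc>
inline IndexDest convert_index_sketch(IndexSrc idx)
{
  assert(idx >= IndexSrc(0) &&
         idx <= IndexSrc(std::numeric_limits<IndexDest>::max()) &&
         "index value too large for the target index type");
  return static_cast<IndexDest>(idx);
}

int main()
{
  std::ptrdiff_t rows = 42;                        // a wide, API-level row count
  int m_size = convert_index_sketch<int>(rows);    // narrowed to the int PaStiX works with
  return m_size == 42 ? 0 : 1;
}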
@@ -373,7 +373,7 @@ bool PastixBase<Base>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x
m_iparm[IPARM_START_TASK] = API_TASK_SOLVE;
m_iparm[IPARM_END_TASK] = API_TASK_REFINE;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0,
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index<int>(x.rows()), 0, 0, 0,
m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
}
@@ -409,7 +409,7 @@ class PastixLU : public PastixBase< PastixLU<_MatrixType> >
typedef _MatrixType MatrixType;
typedef PastixBase<PastixLU<MatrixType> > Base;
typedef typename Base::ColSpMatrix ColSpMatrix;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::StorageIndex StorageIndex;
public:
PastixLU() : Base()

View File

@@ -40,13 +40,13 @@ template<typename _MatrixType, int Options=Upper> class PardisoLDLT;
namespace internal
{
template<typename Index>
template<typename IndexType>
struct pardiso_run_selector
{
static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
{
Index error = 0;
IndexType error = 0;
::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
return error;
}
@@ -54,11 +54,11 @@ namespace internal
template<>
struct pardiso_run_selector<long long int>
{
typedef long long int Index;
static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
typedef long long int IndexType;
static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
{
Index error = 0;
IndexType error = 0;
::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
return error;
}
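The choice between the 32-bit ::pardiso and 64-bit ::pardiso_64 entry points above is made entirely by specializing the selector on the index type. The following toy sketch reproduces that dispatch pattern with hypothetical names and printf stand-ins for the MKL calls:

#include <cstdio>

// Primary template: stands in for the 32-bit ::pardiso path.
template<typename IndexType>
struct run_selector_sketch
{
  static IndexType run(IndexType n)
  {
    std::printf("32-bit backend, n = %d\n", static_cast<int>(n));
    return 0;
  }
};

// Specialization on long long: stands in for the 64-bit ::pardiso_64 path.
template<>
struct run_selector_sketch<long long int>
{
  typedef long long int IndexType;
  static IndexType run(IndexType n)
  {
    std::printf("64-bit backend, n = %lld\n", n);
    return 0;
  }
};

int main()
{
  run_selector_sketch<int>::run(8);            // resolves to the primary template
  run_selector_sketch<long long int>::run(8);  // resolves to the 64-bit specialization
  return 0;
}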
@@ -72,7 +72,7 @@ namespace internal
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::Index Index;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
@@ -81,7 +81,7 @@ namespace internal
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::Index Index;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
@@ -90,7 +90,7 @@ namespace internal
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::Index Index;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
}
@@ -110,19 +110,19 @@ class PardisoImpl : public SparseSolverBase<PardisoImpl<Derived>
typedef typename Traits::MatrixType MatrixType;
typedef typename Traits::Scalar Scalar;
typedef typename Traits::RealScalar RealScalar;
typedef typename Traits::Index Index;
typedef SparseMatrix<Scalar,RowMajor,Index> SparseMatrixType;
typedef typename Traits::StorageIndex StorageIndex;
typedef SparseMatrix<Scalar,RowMajor,StorageIndex> SparseMatrixType;
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef Array<Index,64,1,DontAlign> ParameterType;
typedef Matrix<StorageIndex, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<StorageIndex, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
typedef Array<StorageIndex,64,1,DontAlign> ParameterType;
enum {
ScalarIsComplex = NumTraits<Scalar>::IsComplex
};
PardisoImpl()
{
eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type");
eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type");
m_iparm.setZero();
m_msglvl = 0; // No output
m_isInitialized = false;
@@ -181,7 +181,7 @@ class PardisoImpl : public SparseSolverBase<PardisoImpl<Derived>
{
if(m_isInitialized) // Factorization ran at least once
{
internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
m_iparm.data(), m_msglvl, 0, 0);
}
}
@@ -261,9 +261,9 @@ Derived& PardisoImpl<Derived>::compute(const MatrixType& a)
derived().getMatrix(a);
Index error;
error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 12, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 12, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
manageErrorCode(error);
m_analysisIsOk = true;
@@ -284,9 +284,9 @@ Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)
derived().getMatrix(a);
Index error;
error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 11, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 11, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
manageErrorCode(error);
m_analysisIsOk = true;
@@ -304,9 +304,9 @@ Derived& PardisoImpl<Derived>::factorize(const MatrixType& a)
derived().getMatrix(a);
Index error;
error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 22, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 22, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
manageErrorCode(error);
m_factorizationIsOk = true;
@@ -348,10 +348,10 @@ bool PardisoImpl<Base>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XD
}
Index error;
error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 33, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
rhs_ptr, x.derived().data());
error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 33, m_size,
m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
rhs_ptr, x.derived().data());
return error==0;
}
@@ -424,7 +424,7 @@ class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
protected:
typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
@@ -454,7 +454,7 @@ class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
void getMatrix(const MatrixType& matrix)
{
// PARDISO supports only upper, row-major matrices
PermutationMatrix<Dynamic,Dynamic,Index> p_null;
PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;
m_matrix.resize(matrix.rows(), matrix.cols());
m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
}
@@ -482,7 +482,7 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
protected:
typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::Index Index;
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::RealScalar RealScalar;
using Base::pardisoInit;
using Base::m_matrix;
@@ -510,7 +510,7 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
void getMatrix(const MatrixType& matrix)
{
// PARDISO supports only upper, row-major matrices
PermutationMatrix<Dynamic,Dynamic,Index> p_null;
PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;
m_matrix.resize(matrix.rows(), matrix.cols());
m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
}

View File

@@ -57,7 +57,8 @@ template<typename _MatrixType> class ColPivHouseholderQR
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
@@ -69,7 +70,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
private:
typedef typename PermutationType::Index PermIndexType;
typedef typename PermutationType::StorageIndex PermIndexType;
public:

View File

@@ -66,10 +66,11 @@ template<typename _MatrixType> class FullPivHouseholderQR
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<Index, 1,
typedef Matrix<StorageIndex, 1,
EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,
EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
@@ -556,7 +557,6 @@ template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType
: public ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
{
public:
typedef typename MatrixType::Index Index;
typedef typename FullPivHouseholderQR<MatrixType>::IntDiagSizeVectorType IntDiagSizeVectorType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<typename MatrixType::Scalar, 1, MatrixType::RowsAtCompileTime, RowMajor, 1,

View File

@@ -53,7 +53,8 @@ template<typename _MatrixType> class HouseholderQR
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
// FIXME should be int
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
@@ -224,7 +225,6 @@ namespace internal {
template<typename MatrixQR, typename HCoeffs>
void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0)
{
typedef typename MatrixQR::Index Index;
typedef typename MatrixQR::Scalar Scalar;
typedef typename MatrixQR::RealScalar RealScalar;
Index rows = mat.rows();
@@ -263,11 +263,9 @@ template<typename MatrixQR, typename HCoeffs,
struct householder_qr_inplace_blocked
{
// This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h
static void run(MatrixQR& mat, HCoeffs& hCoeffs,
typename MatrixQR::Index maxBlockSize=32,
static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index maxBlockSize=32,
typename MatrixQR::Scalar* tempData = 0)
{
typedef typename MatrixQR::Index Index;
typedef typename MatrixQR::Scalar Scalar;
typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;

Some files were not shown because too many files have changed in this diff.