the Index types change.

As discussed on the list (too long to explain here).
Benoit Jacob 2010-05-30 16:00:58 -04:00
parent faa3ff3be6
commit aaaade4b3d
158 changed files with 3137 additions and 2878 deletions
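For context, the practical effect of this change on user code is that sizes and indices of dense objects are expressed through each type's `Index` typedef rather than a hard-coded `int`. A minimal user-side sketch (not part of this commit; it assumes the post-change public API, where `MatrixXd::Index` resolves to the configured dense index type):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 3);

  // Loop counters use the matrix's Index typedef instead of int, so the code
  // keeps working if the index type is configured to be wider than int.
  typedef Eigen::MatrixXd::Index Index;
  for (Index j = 0; j < m.cols(); ++j)
    for (Index i = 0; i < m.rows(); ++i)
      std::cout << m(i, j) << (i + 1 == m.rows() ? '\n' : ' ');

  return 0;
}
```

The diffs below apply the same substitution throughout the dense core: `int` parameters, members, and loop variables become `Index`, and helpers such as `ei_int_if_dynamic` are generalized accordingly (renamed to `ei_variable_if_dynamic`).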

View File

@@ -101,7 +101,7 @@ class Array
 * is called a null matrix. This constructor is the unique way to create null matrices: resizing
 * a matrix to 0 is not supported.
 *
-* \sa resize(int,int)
+* \sa resize(Index,Index)
 */
 EIGEN_STRONG_INLINE explicit Array() : Base()
 {
@@ -126,7 +126,7 @@ class Array
 * it is redundant to pass the dimension here, so it makes more sense to use the default
 * constructor Matrix() instead.
 */
-EIGEN_STRONG_INLINE explicit Array(int dim)
+EIGEN_STRONG_INLINE explicit Array(Index dim)
 : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
 {
 Base::_check_template_params();
@@ -149,7 +149,7 @@ class Array
 * This is useful for dynamic-size matrices. For fixed-size matrices,
 * it is redundant to pass these parameters, so one should use the default constructor
 * Matrix() instead. */
-Array(int rows, int cols);
+Array(Index rows, Index cols);
 /** constructs an initialized 2D vector with given coefficients */
 Array(const Scalar& x, const Scalar& y);
 #endif
@@ -217,8 +217,8 @@ class Array
 void swap(ArrayBase<OtherDerived> EIGEN_REF_TO_TEMPORARY other)
 { this->_swap(other.derived()); }
-inline int innerStride() const { return 1; }
+inline Index innerStride() const { return 1; }
-inline int outerStride() const { return this->innerSize(); }
+inline Index outerStride() const { return this->innerSize(); }
 #ifdef EIGEN_ARRAY_PLUGIN
 #include EIGEN_ARRAY_PLUGIN

View File

@@ -60,8 +60,11 @@ template<typename Derived> class ArrayBase
 using ei_special_scalar_op_base<Derived,typename ei_traits<Derived>::Scalar,
 typename NumTraits<typename ei_traits<Derived>::Scalar>::Real>::operator*;
+typedef typename ei_traits<Derived>::StorageKind StorageKind;
+typedef typename ei_index<StorageKind>::type Index;
 typedef typename ei_traits<Derived>::Scalar Scalar;
 typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+typedef typename NumTraits<Scalar>::Real RealScalar;
 typedef DenseBase<Derived> Base;
 using Base::RowsAtCompileTime;
@@ -88,7 +91,6 @@ template<typename Derived> class ArrayBase
 using Base::operator*=;
 using Base::operator/=;
-typedef typename Base::RealScalar RealScalar;
 typedef typename Base::CoeffReturnType CoeffReturnType;
 #endif // not EIGEN_PARSED_BY_DOXYGEN
@@ -161,8 +163,8 @@ template<typename Derived> class ArrayBase
 ArrayBase() : Base() {}
 private:
-explicit ArrayBase(int);
+explicit ArrayBase(Index);
-ArrayBase(int,int);
+ArrayBase(Index,Index);
 template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&);
 };

View File

@@ -53,51 +53,51 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
 inline ArrayWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
-inline int rows() const { return m_expression.rows(); }
+inline Index rows() const { return m_expression.rows(); }
-inline int cols() const { return m_expression.cols(); }
+inline Index cols() const { return m_expression.cols(); }
-inline int outerStride() const { return m_expression.outerStride(); }
+inline Index outerStride() const { return m_expression.outerStride(); }
-inline int innerStride() const { return m_expression.innerStride(); }
+inline Index innerStride() const { return m_expression.innerStride(); }
-inline const CoeffReturnType coeff(int row, int col) const
+inline const CoeffReturnType coeff(Index row, Index col) const
 {
 return m_expression.coeff(row, col);
 }
-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 return m_expression.const_cast_derived().coeffRef(row, col);
 }
-inline const CoeffReturnType coeff(int index) const
+inline const CoeffReturnType coeff(Index index) const
 {
 return m_expression.coeff(index);
 }
-inline Scalar& coeffRef(int index)
+inline Scalar& coeffRef(Index index)
 {
 return m_expression.const_cast_derived().coeffRef(index);
 }
 template<int LoadMode>
-inline const PacketScalar packet(int row, int col) const
+inline const PacketScalar packet(Index row, Index col) const
 {
 return m_expression.template packet<LoadMode>(row, col);
 }
 template<int LoadMode>
-inline void writePacket(int row, int col, const PacketScalar& x)
+inline void writePacket(Index row, Index col, const PacketScalar& x)
 {
 m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
 }
 template<int LoadMode>
-inline const PacketScalar packet(int index) const
+inline const PacketScalar packet(Index index) const
 {
 return m_expression.template packet<LoadMode>(index);
 }
 template<int LoadMode>
-inline void writePacket(int index, const PacketScalar& x)
+inline void writePacket(Index index, const PacketScalar& x)
 {
 m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
 }
@@ -138,51 +138,51 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
 inline MatrixWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
-inline int rows() const { return m_expression.rows(); }
+inline Index rows() const { return m_expression.rows(); }
-inline int cols() const { return m_expression.cols(); }
+inline Index cols() const { return m_expression.cols(); }
-inline int outerStride() const { return m_expression.outerStride(); }
+inline Index outerStride() const { return m_expression.outerStride(); }
-inline int innerStride() const { return m_expression.innerStride(); }
+inline Index innerStride() const { return m_expression.innerStride(); }
-inline const CoeffReturnType coeff(int row, int col) const
+inline const CoeffReturnType coeff(Index row, Index col) const
 {
 return m_expression.coeff(row, col);
 }
-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 return m_expression.const_cast_derived().coeffRef(row, col);
 }
-inline const CoeffReturnType coeff(int index) const
+inline const CoeffReturnType coeff(Index index) const
 {
 return m_expression.coeff(index);
 }
-inline Scalar& coeffRef(int index)
+inline Scalar& coeffRef(Index index)
 {
 return m_expression.const_cast_derived().coeffRef(index);
 }
 template<int LoadMode>
-inline const PacketScalar packet(int row, int col) const
+inline const PacketScalar packet(Index row, Index col) const
 {
 return m_expression.template packet<LoadMode>(row, col);
 }
 template<int LoadMode>
-inline void writePacket(int row, int col, const PacketScalar& x)
+inline void writePacket(Index row, Index col, const PacketScalar& x)
 {
 m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
 }
 template<int LoadMode>
-inline const PacketScalar packet(int index) const
+inline const PacketScalar packet(Index index) const
 {
 return m_expression.template packet<LoadMode>(index);
 }
 template<int LoadMode>
-inline void writePacket(int index, const PacketScalar& x)
+inline void writePacket(Index index, const PacketScalar& x)
 {
 m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
 }

View File

@@ -97,8 +97,8 @@ inline bool DenseBase<Derived>::all() const
 >::run(derived());
 else
 {
-for(int j = 0; j < cols(); ++j)
+for(Index j = 0; j < cols(); ++j)
-for(int i = 0; i < rows(); ++i)
+for(Index i = 0; i < rows(); ++i)
 if (!coeff(i, j)) return false;
 return true;
 }
@@ -121,8 +121,8 @@ inline bool DenseBase<Derived>::any() const
 >::run(derived());
 else
 {
-for(int j = 0; j < cols(); ++j)
+for(Index j = 0; j < cols(); ++j)
-for(int i = 0; i < rows(); ++i)
+for(Index i = 0; i < rows(); ++i)
 if (coeff(i, j)) return true;
 return false;
 }
@@ -135,9 +135,9 @@ inline bool DenseBase<Derived>::any() const
 * \sa all(), any()
 */
 template<typename Derived>
-inline int DenseBase<Derived>::count() const
+inline typename DenseBase<Derived>::Index DenseBase<Derived>::count() const
 {
-return derived().template cast<bool>().template cast<int>().sum();
+return derived().template cast<bool>().template cast<Index>().sum();
 }
 #endif // EIGEN_ALLANDANY_H

View File

@@ -27,7 +27,8 @@
 template<typename Scalar> struct ei_scalar_random_op {
 EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_random_op)
-inline const Scalar operator() (int, int = 0) const { return ei_random<Scalar>(); }
+template<typename Index>
+inline const Scalar operator() (Index, Index = 0) const { return ei_random<Scalar>(); }
 };
 template<typename Scalar>
 struct ei_functor_traits<ei_scalar_random_op<Scalar> >
@@ -51,11 +52,11 @@ struct ei_functor_traits<ei_scalar_random_op<Scalar> >
 * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
 * behavior with expressions involving random matrices.
 *
-* \sa MatrixBase::setRandom(), MatrixBase::Random(int), MatrixBase::Random()
+* \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random()
 */
 template<typename Derived>
 inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
-DenseBase<Derived>::Random(int rows, int cols)
+DenseBase<Derived>::Random(Index rows, Index cols)
 {
 return NullaryExpr(rows, cols, ei_scalar_random_op<Scalar>());
 }
@@ -80,11 +81,11 @@ DenseBase<Derived>::Random(int rows, int cols)
 * a temporary vector whenever it is nested in a larger expression. This prevents unexpected
 * behavior with expressions involving random matrices.
 *
-* \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random()
+* \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random()
 */
 template<typename Derived>
 inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
-DenseBase<Derived>::Random(int size)
+DenseBase<Derived>::Random(Index size)
 {
 return NullaryExpr(size, ei_scalar_random_op<Scalar>());
 }
@@ -103,7 +104,7 @@ DenseBase<Derived>::Random(int size)
 * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
 * behavior with expressions involving random matrices.
 *
-* \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random(int)
+* \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index)
 */
 template<typename Derived>
 inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
@@ -119,7 +120,7 @@ DenseBase<Derived>::Random()
 * Example: \include MatrixBase_setRandom.cpp
 * Output: \verbinclude MatrixBase_setRandom.out
 *
-* \sa class CwiseNullaryOp, setRandom(int), setRandom(int,int)
+* \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
 */
 template<typename Derived>
 inline Derived& DenseBase<Derived>::setRandom()
@@ -134,11 +135,11 @@ inline Derived& DenseBase<Derived>::setRandom()
 * Example: \include Matrix_setRandom_int.cpp
 * Output: \verbinclude Matrix_setRandom_int.out
 *
-* \sa MatrixBase::setRandom(), setRandom(int,int), class CwiseNullaryOp, MatrixBase::Random()
+* \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random()
 */
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setRandom(int size)
+DenseStorageBase<Derived>::setRandom(Index size)
 {
 resize(size);
 return setRandom();
@@ -152,11 +153,11 @@ DenseStorageBase<Derived>::setRandom(int size)
 * Example: \include Matrix_setRandom_int_int.cpp
 * Output: \verbinclude Matrix_setRandom_int_int.out
 *
-* \sa MatrixBase::setRandom(), setRandom(int), class CwiseNullaryOp, MatrixBase::Random()
+* \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random()
 */
 template<typename Derived>
 EIGEN_STRONG_INLINE Derived&
-DenseStorageBase<Derived>::setRandom(int rows, int cols)
+DenseStorageBase<Derived>::setRandom(Index rows, Index cols)
 {
 resize(rows, cols);
 return setRandom();

View File

@@ -90,28 +90,28 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
 THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
 }
-inline int rows() const { return m_matrix.rows() * m_rowFactor.value(); }
+inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
-inline int cols() const { return m_matrix.cols() * m_colFactor.value(); }
+inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
-inline Scalar coeff(int row, int col) const
+inline Scalar coeff(Index row, Index col) const
 {
 // try to avoid using modulo; this is a pure optimization strategy
-const int actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
+const Index actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
 : RowFactor==1 ? row
 : row%m_matrix.rows();
-const int actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
+const Index actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
 : ColFactor==1 ? col
 : col%m_matrix.cols();
 return m_matrix.coeff(actual_row, actual_col);
 }
 template<int LoadMode>
-inline PacketScalar packet(int row, int col) const
+inline PacketScalar packet(Index row, Index col) const
 {
-const int actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
+const Index actual_row = ei_traits<MatrixType>::RowsAtCompileTime==1 ? 0
 : RowFactor==1 ? row
 : row%m_matrix.rows();
-const int actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
+const Index actual_col = ei_traits<MatrixType>::ColsAtCompileTime==1 ? 0
 : ColFactor==1 ? col
 : col%m_matrix.cols();
@@ -121,8 +121,8 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
 protected:
 const typename MatrixType::Nested m_matrix;
-const ei_int_if_dynamic<RowFactor> m_rowFactor;
+const ei_variable_if_dynamic<Index, RowFactor> m_rowFactor;
-const ei_int_if_dynamic<ColFactor> m_colFactor;
+const ei_variable_if_dynamic<Index, ColFactor> m_colFactor;
 };
 /** \nonstableyet
@@ -131,7 +131,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
 * Example: \include MatrixBase_replicate.cpp
 * Output: \verbinclude MatrixBase_replicate.out
 *
-* \sa VectorwiseOp::replicate(), DenseBase::replicate(int,int), class Replicate
+* \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
 */
 template<typename Derived>
 template<int RowFactor, int ColFactor>
@@ -151,7 +151,7 @@ DenseBase<Derived>::replicate() const
 */
 template<typename Derived>
 inline const Replicate<Derived,Dynamic,Dynamic>
-DenseBase<Derived>::replicate(int rowFactor,int colFactor) const
+DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
 {
 return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
 }
@@ -166,7 +166,7 @@ DenseBase<Derived>::replicate(int rowFactor,int colFactor) const
 */
 template<typename ExpressionType, int Direction>
 const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
-VectorwiseOp<ExpressionType,Direction>::replicate(int factor) const
+VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
 {
 return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
 (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);

View File

@@ -103,33 +103,33 @@ template<typename MatrixType, int Direction> class Reverse
 EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)
-inline int rows() const { return m_matrix.rows(); }
+inline Index rows() const { return m_matrix.rows(); }
-inline int cols() const { return m_matrix.cols(); }
+inline Index cols() const { return m_matrix.cols(); }
-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row,
 ReverseCol ? m_matrix.cols() - col - 1 : col);
 }
-inline const Scalar coeff(int row, int col) const
+inline const Scalar coeff(Index row, Index col) const
 {
 return m_matrix.coeff(ReverseRow ? m_matrix.rows() - row - 1 : row,
 ReverseCol ? m_matrix.cols() - col - 1 : col);
 }
-inline const Scalar coeff(int index) const
+inline const Scalar coeff(Index index) const
 {
 return m_matrix.coeff(m_matrix.size() - index - 1);
 }
-inline Scalar& coeffRef(int index)
+inline Scalar& coeffRef(Index index)
 {
 return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1);
 }
 template<int LoadMode>
-inline const PacketScalar packet(int row, int col) const
+inline const PacketScalar packet(Index row, Index col) const
 {
 return reverse_packet::run(m_matrix.template packet<LoadMode>(
 ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
@@ -137,7 +137,7 @@ template<typename MatrixType, int Direction> class Reverse
 }
 template<int LoadMode>
-inline void writePacket(int row, int col, const PacketScalar& x)
+inline void writePacket(Index row, Index col, const PacketScalar& x)
 {
 m_matrix.const_cast_derived().template writePacket<LoadMode>(
 ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
@@ -146,13 +146,13 @@ template<typename MatrixType, int Direction> class Reverse
 }
 template<int LoadMode>
-inline const PacketScalar packet(int index) const
+inline const PacketScalar packet(Index index) const
 {
 return ei_preverse(m_matrix.template packet<LoadMode>( m_matrix.size() - index - PacketSize ));
 }
 template<int LoadMode>
-inline void writePacket(int index, const PacketScalar& x)
+inline void writePacket(Index index, const PacketScalar& x)
 {
 m_matrix.const_cast_derived().template writePacket<LoadMode>(m_matrix.size() - index - PacketSize, ei_preverse(x));
 }

View File

@@ -81,10 +81,10 @@ class Select : ei_no_assignment_operator,
 ei_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
 }
-int rows() const { return m_condition.rows(); }
+Index rows() const { return m_condition.rows(); }
-int cols() const { return m_condition.cols(); }
+Index cols() const { return m_condition.cols(); }
-const Scalar coeff(int i, int j) const
+const Scalar coeff(Index i, Index j) const
 {
 if (m_condition.coeff(i,j))
 return m_then.coeff(i,j);
@@ -92,7 +92,7 @@ class Select : ei_no_assignment_operator,
 return m_else.coeff(i,j);
 }
-const Scalar coeff(int i) const
+const Scalar coeff(Index i) const
 {
 if (m_condition.coeff(i))
 return m_then.coeff(i);

View File

@@ -89,10 +89,10 @@ class PartialReduxExpr : ei_no_assignment_operator,
 PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
 : m_matrix(mat), m_functor(func) {}
-int rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
+Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
-int cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
+Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
-const Scalar coeff(int i, int j) const
+const Scalar coeff(Index i, Index j) const
 {
 if (Direction==Vertical)
 return m_functor(m_matrix.col(j));
@@ -100,7 +100,7 @@ class PartialReduxExpr : ei_no_assignment_operator,
 return m_functor(m_matrix.row(i));
 }
-const Scalar coeff(int index) const
+const Scalar coeff(Index index) const
 {
 if (Direction==Vertical)
 return m_functor(m_matrix.col(index));
@@ -177,7 +177,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 {
 public:
-typedef typename ei_traits<ExpressionType>::Scalar Scalar;
+typedef typename ExpressionType::Scalar Scalar;
+typedef typename ExpressionType::Index Index;
 typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
 ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
@@ -209,14 +210,14 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 typedef typename ei_meta_if<Direction==Vertical,
 typename ExpressionType::ColXpr,
 typename ExpressionType::RowXpr>::ret SubVector;
-SubVector subVector(int i)
+SubVector subVector(Index i)
 {
 return SubVector(m_matrix.derived(),i);
 }
 /** \internal
 * \returns the number of subvectors in the direction \c Direction */
-int subVectors() const
+Index subVectors() const
 { return Direction==Vertical?m_matrix.cols():m_matrix.rows(); }
 template<typename OtherDerived> struct ExtendedType {
@@ -362,7 +363,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 * Output: \verbinclude PartialRedux_count.out
 *
 * \sa DenseBase::count() */
-const PartialReduxExpr<ExpressionType, ei_member_count<int>, Direction> count() const
+const PartialReduxExpr<ExpressionType, ei_member_count<Index>, Direction> count() const
 { return _expression(); }
 /** \returns a row (or column) vector expression of the product
@@ -387,7 +388,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 { return Reverse<ExpressionType, Direction>( _expression() ); }
 typedef Replicate<ExpressionType,Direction==Vertical?Dynamic:1,Direction==Horizontal?Dynamic:1> ReplicateReturnType;
-const ReplicateReturnType replicate(int factor) const;
+const ReplicateReturnType replicate(Index factor) const;
 /** \nonstableyet
 * \return an expression of the replication of each column (or row) of \c *this
@@ -395,11 +396,11 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 * Example: \include DirectionWise_replicate.cpp
 * Output: \verbinclude DirectionWise_replicate.out
 *
-* \sa VectorwiseOp::replicate(int), DenseBase::replicate(), class Replicate
+* \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate
 */
 // NOTE implemented here because of sunstudio's compilation errors
 template<int Factor> const Replicate<ExpressionType,(IsVertical?Factor:1),(IsHorizontal?Factor:1)>
-replicate(int factor = Factor) const
+replicate(Index factor = Factor) const
 {
 return Replicate<ExpressionType,Direction==Vertical?Factor:1,Direction==Horizontal?Factor:1>
 (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
@@ -413,7 +414,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
 //ei_assert((m_matrix.isNull()) == (other.isNull())); FIXME
-for(int j=0; j<subVectors(); ++j)
+for(Index j=0; j<subVectors(); ++j)
 subVector(j) = other;
 return const_cast<ExpressionType&>(m_matrix);
 }
@@ -423,7 +424,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 ExpressionType& operator+=(const DenseBase<OtherDerived>& other)
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
-for(int j=0; j<subVectors(); ++j)
+for(Index j=0; j<subVectors(); ++j)
 subVector(j) += other.derived();
 return const_cast<ExpressionType&>(m_matrix);
 }
@@ -433,7 +434,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
 ExpressionType& operator-=(const DenseBase<OtherDerived>& other)
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
-for(int j=0; j<subVectors(); ++j)
+for(Index j=0; j<subVectors(); ++j)
 subVector(j) -= other.derived();
 return const_cast<ExpressionType&>(m_matrix);
 }

View File

@@ -65,7 +65,8 @@ template<typename _MatrixType> class LDLT
 };
 typedef typename MatrixType::Scalar Scalar;
 typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
-typedef typename ei_plain_col_type<MatrixType, int>::type IntColVectorType;
+typedef typename MatrixType::Index Index;
+typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
 typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
 /** \brief Default Constructor.
@@ -81,7 +82,7 @@ template<typename _MatrixType> class LDLT
 * according to the specified problem \a size.
 * \sa LDLT()
 */
-LDLT(int size) : m_matrix(size, size),
+LDLT(Index size) : m_matrix(size, size),
 m_p(size),
 m_transpositions(size),
 m_temporary(size),
@@ -168,8 +169,8 @@ template<typename _MatrixType> class LDLT
 MatrixType reconstructedMatrix() const;
-inline int rows() const { return m_matrix.rows(); }
+inline Index rows() const { return m_matrix.rows(); }
-inline int cols() const { return m_matrix.cols(); }
+inline Index cols() const { return m_matrix.cols(); }
 protected:
 /** \internal
@@ -182,7 +183,7 @@ template<typename _MatrixType> class LDLT
 IntColVectorType m_p;
 IntColVectorType m_transpositions; // FIXME do we really need to store permanently the transpositions?
 TmpMatrixType m_temporary;
-int m_sign;
+Index m_sign;
 bool m_isInitialized;
 };
@@ -192,7 +193,7 @@ template<typename MatrixType>
 LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
 {
 ei_assert(a.rows()==a.cols());
-const int size = a.rows();
+const Index size = a.rows();
 m_matrix = a;
@@ -215,10 +216,10 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
 // have optimal alignment.
 m_temporary.resize(size);
-for (int j = 0; j < size; ++j)
+for (Index j = 0; j < size; ++j)
 {
 // Find largest diagonal element
-int index_of_biggest_in_corner;
+Index index_of_biggest_in_corner;
 biggest_in_corner = m_matrix.diagonal().tail(size-j).cwiseAbs()
 .maxCoeff(&index_of_biggest_in_corner);
 index_of_biggest_in_corner += j;
@@ -236,7 +237,7 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
 // Finish early if the matrix is not full rank.
 if(biggest_in_corner < cutoff)
 {
-for(int i = j; i < size; i++) m_transpositions.coeffRef(i) = i;
+for(Index i = j; i < size; i++) m_transpositions.coeffRef(i) = i;
 break;
 }
@@ -256,7 +257,7 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
 RealScalar Djj = ei_real(m_matrix.coeff(j,j) - m_matrix.row(j).head(j).dot(m_matrix.col(j).head(j)));
 m_matrix.coeffRef(j,j) = Djj;
-int endSize = size - j - 1;
+Index endSize = size - j - 1;
 if (endSize > 0) {
 m_temporary.tail(endSize).noalias() = m_matrix.block(j+1,0, endSize, j)
 * m_matrix.col(j).head(j).conjugate();
@@ -272,8 +273,8 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
 }
 // Reverse applied swaps to get P matrix.
-for(int k = 0; k < size; ++k) m_p.coeffRef(k) = k;
+for(Index k = 0; k < size; ++k) m_p.coeffRef(k) = k;
-for(int k = size-1; k >= 0; --k) {
+for(Index k = size-1; k >= 0; --k) {
 std::swap(m_p.coeffRef(k), m_p.coeffRef(m_transpositions.coeff(k)));
 }
@@ -310,11 +311,11 @@ template<typename Derived>
 bool LDLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
 {
 ei_assert(m_isInitialized && "LDLT is not initialized.");
-const int size = m_matrix.rows();
+const Index size = m_matrix.rows();
 ei_assert(size == bAndX.rows());
 // z = P b
-for(int i = 0; i < size; ++i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
+for(Index i = 0; i < size; ++i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
 // y = L^-1 z
 //matrixL().solveInPlace(bAndX);
@@ -327,7 +328,7 @@ bool LDLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
 m_matrix.adjoint().template triangularView<UnitUpper>().solveInPlace(bAndX);
 // x = P^T u
-for (int i = size-1; i >= 0; --i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
+for (Index i = size-1; i >= 0; --i) bAndX.row(m_transpositions.coeff(i)).swap(bAndX.row(i));
 return true;
 }
@@ -339,12 +340,12 @@ template<typename MatrixType>
 MatrixType LDLT<MatrixType>::reconstructedMatrix() const
 {
 ei_assert(m_isInitialized && "LDLT is not initialized.");
-const int size = m_matrix.rows();
+const Index size = m_matrix.rows();
 MatrixType res(size,size);
 res.setIdentity();
 // PI
-for(int i = 0; i < size; ++i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
+for(Index i = 0; i < size; ++i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
 // L^* P
 res = matrixL().adjoint() * res;
 // D(L^*P)
@@ -352,7 +353,7 @@ MatrixType LDLT<MatrixType>::reconstructedMatrix() const
 // L(DL^*P)
 res = matrixL() * res;
 // P^T (LDL^*P)
-for (int i = size-1; i >= 0; --i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
+for (Index i = size-1; i >= 0; --i) res.row(m_transpositions.coeff(i)).swap(res.row(i));
 return res;
 }

View File

@@ -65,6 +65,7 @@ template<typename _MatrixType, int _UpLo> class LLT
 };
 typedef typename MatrixType::Scalar Scalar;
 typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+typedef typename MatrixType::Index Index;
 enum {
 PacketSize = ei_packet_traits<Scalar>::size,
@@ -88,7 +89,7 @@ template<typename _MatrixType, int _UpLo> class LLT
 * according to the specified problem \a size.
 * \sa LLT()
 */
-LLT(int size) : m_matrix(size, size),
+LLT(Index size) : m_matrix(size, size),
 m_isInitialized(false) {}
 LLT(const MatrixType& matrix)
@@ -149,8 +150,8 @@ template<typename _MatrixType, int _UpLo> class LLT
 MatrixType reconstructedMatrix() const;
-inline int rows() const { return m_matrix.rows(); }
+inline Index rows() const { return m_matrix.rows(); }
-inline int cols() const { return m_matrix.cols(); }
+inline Index cols() const { return m_matrix.cols(); }
 protected:
 /** \internal
@@ -171,11 +172,12 @@ template<> struct ei_llt_inplace<Lower>
 {
 typedef typename MatrixType::Scalar Scalar;
 typedef typename MatrixType::RealScalar RealScalar;
+typedef typename MatrixType::Index Index;
 ei_assert(mat.rows()==mat.cols());
-const int size = mat.rows();
+const Index size = mat.rows();
-for(int k = 0; k < size; ++k)
+for(Index k = 0; k < size; ++k)
 {
-int rs = size-k-1; // remaining size
+Index rs = size-k-1; // remaining size
 Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
 Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
@@ -195,19 +197,20 @@ template<> struct ei_llt_inplace<Lower>
 template<typename MatrixType>
 static bool blocked(MatrixType& m)
 {
+typedef typename MatrixType::Index Index;
 ei_assert(m.rows()==m.cols());
-int size = m.rows();
+Index size = m.rows();
 if(size<32)
 return unblocked(m);
-int blockSize = size/8;
+Index blockSize = size/8;
 blockSize = (blockSize/16)*16;
-blockSize = std::min(std::max(blockSize,8), 128);
+blockSize = std::min(std::max(blockSize,Index(8)), Index(128));
-for (int k=0; k<size; k+=blockSize)
+for (Index k=0; k<size; k+=blockSize)
 {
-int bs = std::min(blockSize, size-k);
+Index bs = std::min(blockSize, size-k);
-int rs = size - k - bs;
+Index rs = size - k - bs;
 Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs);
 Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs);
@@ -266,7 +269,7 @@ template<typename MatrixType, int _UpLo>
 LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 {
 assert(a.rows()==a.cols());
-const int size = a.rows();
+const Index size = a.rows();
 m_matrix.resize(size, size);
 m_matrix = a;

View File

@@ -254,12 +254,13 @@ struct ei_assign_impl;
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, DefaultTraversal, NoUnrolling>
 {
+typedef typename Derived1::Index Index;
 inline static void run(Derived1 &dst, const Derived2 &src)
 {
-const int innerSize = dst.innerSize();
+const Index innerSize = dst.innerSize();
-const int outerSize = dst.outerSize();
+const Index outerSize = dst.outerSize();
-for(int outer = 0; outer < outerSize; ++outer)
+for(Index outer = 0; outer < outerSize; ++outer)
-for(int inner = 0; inner < innerSize; ++inner)
+for(Index inner = 0; inner < innerSize; ++inner)
 dst.copyCoeffByOuterInner(outer, inner, src);
 }
 };
@@ -277,10 +278,11 @@ struct ei_assign_impl<Derived1, Derived2, DefaultTraversal, CompleteUnrolling>
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, DefaultTraversal, InnerUnrolling>
 {
+typedef typename Derived1::Index Index;
 EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
 {
-const int outerSize = dst.outerSize();
+const Index outerSize = dst.outerSize();
-for(int outer = 0; outer < outerSize; ++outer)
+for(Index outer = 0; outer < outerSize; ++outer)
 ei_assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
 ::run(dst, src, outer);
 }
@@ -293,10 +295,11 @@ struct ei_assign_impl<Derived1, Derived2, DefaultTraversal, InnerUnrolling>
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, LinearTraversal, NoUnrolling>
 {
+typedef typename Derived1::Index Index;
 inline static void run(Derived1 &dst, const Derived2 &src)
 {
-const int size = dst.size();
+const Index size = dst.size();
-for(int i = 0; i < size; ++i)
+for(Index i = 0; i < size; ++i)
 dst.copyCoeff(i, src);
 }
 };
@@ -318,13 +321,14 @@ struct ei_assign_impl<Derived1, Derived2, LinearTraversal, CompleteUnrolling>
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, InnerVectorizedTraversal, NoUnrolling>
 {
+typedef typename Derived1::Index Index;
 inline static void run(Derived1 &dst, const Derived2 &src)
 {
-const int innerSize = dst.innerSize();
+const Index innerSize = dst.innerSize();
-const int outerSize = dst.outerSize();
+const Index outerSize = dst.outerSize();
-const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
-for(int outer = 0; outer < outerSize; ++outer)
+for(Index outer = 0; outer < outerSize; ++outer)
-for(int inner = 0; inner < innerSize; inner+=packetSize)
+for(Index inner = 0; inner < innerSize; inner+=packetSize)
 dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, inner, src);
 }
 };
@@ -342,10 +346,11 @@ struct ei_assign_impl<Derived1, Derived2, InnerVectorizedTraversal, CompleteUnro
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, InnerVectorizedTraversal, InnerUnrolling>
 {
+typedef typename Derived1::Index Index;
 EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
 {
-const int outerSize = dst.outerSize();
+const Index outerSize = dst.outerSize();
-for(int outer = 0; outer < outerSize; ++outer)
+for(Index outer = 0; outer < outerSize; ++outer)
 ei_assign_innervec_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
 ::run(dst, src, outer);
 }
@@ -359,7 +364,7 @@ template <bool IsAligned = false>
 struct ei_unaligned_assign_impl
 {
 template <typename Derived, typename OtherDerived>
-static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, int, int) {}
+static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {}
 };
 template <>
@@ -369,13 +374,13 @@ struct ei_unaligned_assign_impl<false>
 // packet access path.
 #ifdef _MSC_VER
 template <typename Derived, typename OtherDerived>
-static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end)
+static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
 #else
 template <typename Derived, typename OtherDerived>
-static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, int start, int end)
+static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
 #endif
 {
-for (int index = start; index < end; ++index)
+for (typename Derived::Index index = start; index < end; ++index)
 dst.copyCoeff(index, src);
 }
 };
@@ -383,17 +388,18 @@ struct ei_unaligned_assign_impl<false>
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling>
 {
+typedef typename Derived1::Index Index;
 EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
 {
-const int size = dst.size();
+const Index size = dst.size();
-const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
-const int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
+const Index alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
 : ei_first_aligned(&dst.coeffRef(0), size);
-const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
+const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
 ei_unaligned_assign_impl<ei_assign_traits<Derived1,Derived2>::DstIsAligned!=0>::run(src,dst,0,alignedStart);
-for(int index = alignedStart; index < alignedEnd; index += packetSize)
+for(Index index = alignedStart; index < alignedEnd; index += packetSize)
 {
 dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::JointAlignment>(index, src);
 }
@@ -405,11 +411,12 @@ struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, CompleteUnrolling>
 {
+typedef typename Derived1::Index Index;
 EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
 {
-const int size = Derived1::SizeAtCompileTime;
+const Index size = Derived1::SizeAtCompileTime;
-const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
-const int alignedSize = (size/packetSize)*packetSize;
+const Index alignedSize = (size/packetSize)*packetSize;
 ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
 ei_assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
@@ -423,32 +430,33 @@ struct ei_assign_impl<Derived1, Derived2, LinearVectorizedTraversal, CompleteUnr
 template<typename Derived1, typename Derived2>
 struct ei_assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling>
 {
+typedef typename Derived1::Index Index;
 inline static void run(Derived1 &dst, const Derived2 &src)
 {
-const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
+const Index packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
-const int packetAlignedMask = packetSize - 1;
+const Index packetAlignedMask = packetSize - 1;
-const int innerSize = dst.innerSize();
+const Index innerSize = dst.innerSize();
-const int outerSize = dst.outerSize();
+const Index outerSize = dst.outerSize();
-const int alignedStep = (packetSize - dst.outerStride() % packetSize) & packetAlignedMask;
+const Index alignedStep = (packetSize - dst.outerStride() % packetSize) & packetAlignedMask;
-int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
+Index alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
 : ei_first_aligned(&dst.coeffRef(0,0), innerSize);
-for(int outer = 0; outer < outerSize; ++outer)
+for(Index outer = 0; outer < outerSize; ++outer)
 {
-const int alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
+const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
 // do the non-vectorizable part of the assignment
-for(int inner = 0; inner<alignedStart ; ++inner)
+for(Index inner = 0; inner<alignedStart ; ++inner)
 dst.copyCoeffByOuterInner(outer, inner, src);
 // do the vectorizable part of the assignment
-for(int inner = alignedStart; inner<alignedEnd; inner+=packetSize)
+for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)
 dst.template copyPacketByOuterInner<Derived2, Aligned, Unaligned>(outer, inner, src);
 // do the non-vectorizable part of the assignment
-for(int inner = alignedEnd; inner<innerSize ; ++inner)
+for(Index inner = alignedEnd; inner<innerSize ; ++inner)
 dst.copyCoeffByOuterInner(outer, inner, src);
-alignedStart = std::min<int>((alignedStart+alignedStep)%packetSize, innerSize);
+alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
 }
 }
 };
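The assignment kernels above each pull in `typedef typename Derived1::Index Index;` so that their loop counters match the expression's index type. A hypothetical snippet (the helper name and its use of the public `DenseBase` API are illustrative, not part of this commit) showing the same pattern in generic user code:

```cpp
#include <Eigen/Dense>
#include <iostream>

// Hypothetical helper: generic code over any dense expression takes its loop
// counters from the expression's Index typedef, mirroring the ei_assign_impl
// specializations above.
template<typename Derived>
typename Derived::Scalar coeffSum(const Eigen::DenseBase<Derived>& x)
{
  typedef typename Derived::Index Index;  // whatever index type Eigen is configured with
  typename Derived::Scalar s(0);
  for (Index j = 0; j < x.cols(); ++j)    // rows()/cols() also return Index
    for (Index i = 0; i < x.rows(); ++i)
      s += x.coeff(i, j);
  return s;
}

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Constant(3, 4, 0.5);
  std::cout << coeffSum(m) << '\n';  // 12 coefficients of 0.5 -> prints 6
  return 0;
}
```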

View File

@@ -46,6 +46,7 @@ template<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options
 struct ei_traits<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
 {
 typedef _Scalar Scalar;
+typedef Dense StorageKind;
 enum {
 CoeffReadCost = NumTraits<Scalar>::ReadCost,
 RowsAtCompileTime = Rows,
@@ -71,6 +72,7 @@ class BandMatrix : public EigenBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Opt
 };
 typedef typename ei_traits<BandMatrix>::Scalar Scalar;
 typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
+typedef typename DenseMatrixType::Index Index;
 protected:
 enum {
@@ -83,7 +85,7 @@ class BandMatrix : public EigenBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Opt
 public:
-inline BandMatrix(int rows=Rows, int cols=Cols, int supers=Supers, int subs=Subs)
+inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs)
 : m_data(1+supers+subs,cols),
 m_rows(rows), m_supers(supers), m_subs(subs)
 {
@@ -91,32 +93,32 @@ class BandMatrix : public EigenBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Opt
 }
 /** \returns the number of columns */
-inline int rows() const { return m_rows.value(); }
+inline Index rows() const { return m_rows.value(); }
 /** \returns the number of rows */
-inline int cols() const { return m_data.cols(); }
+inline Index cols() const { return m_data.cols(); }
 /** \returns the number of super diagonals */
-inline int supers() const { return m_supers.value(); }
+inline Index supers() const { return m_supers.value(); }
 /** \returns the number of sub diagonals */
-inline int subs() const { return m_subs.value(); }
+inline Index subs() const { return m_subs.value(); }
 /** \returns a vector expression of the \a i -th column,
 * only the meaningful part is returned.
 * \warning the internal storage must be column major. */
-inline Block<DataType,Dynamic,1> col(int i)
+inline Block<DataType,Dynamic,1> col(Index i)
 {
 EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-int start = 0;
+Index start = 0;
-int len = m_data.rows();
+Index len = m_data.rows();
 if (i<=supers())
 {
 start = supers()-i;
-len = std::min(rows(),std::max(0,m_data.rows() - (supers()-i)));
+len = std::min(rows(),std::max<Index>(0,m_data.rows() - (supers()-i)));
 }
 else if (i>=rows()-subs())
-len = std::max(0,m_data.rows() - (i + 1 - rows() + subs()));
+len = std::max<Index>(0,m_data.rows() - (i + 1 - rows() + subs()));
 return Block<DataType,Dynamic,1>(m_data, start, i, len, 1);
 }
@@ -146,30 +148,30 @@ class BandMatrix : public EigenBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Opt
 BuildType>::ret Type;
 };
-/** \returns a vector expression of the \a Index -th sub or super diagonal */
+/** \returns a vector expression of the \a N -th sub or super diagonal */
-template<int Index> inline typename DiagonalIntReturnType<Index>::Type diagonal()
+template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()
 {
-return typename DiagonalIntReturnType<Index>::BuildType(m_data, supers()-Index, std::max(0,Index), 1, diagonalLength(Index));
+return typename DiagonalIntReturnType<N>::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N));
 }
-/** \returns a vector expression of the \a Index -th sub or super diagonal */
+/** \returns a vector expression of the \a N -th sub or super diagonal */
-template<int Index> inline const typename DiagonalIntReturnType<Index>::Type diagonal() const
+template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const
 {
-return typename DiagonalIntReturnType<Index>::BuildType(m_data, supers()-Index, std::max(0,Index), 1, diagonalLength(Index));
+return typename DiagonalIntReturnType<N>::BuildType(m_data, supers()-N, std::max(0,N), 1, diagonalLength(N));
 }
 /** \returns a vector expression of the \a i -th sub or super diagonal */
-inline Block<DataType,1,Dynamic> diagonal(int i)
+inline Block<DataType,1,Dynamic> diagonal(Index i)
 {
 ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
-return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i));
+return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
 }
 /** \returns a vector expression of the \a i -th sub or super diagonal */
-inline const Block<DataType,1,Dynamic> diagonal(int i) const
+inline const Block<DataType,1,Dynamic> diagonal(Index i) const
 {
 ei_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
-return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max(0,i), 1, diagonalLength(i));
+return Block<DataType,1,Dynamic>(m_data, supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
 }
 template<typename Dest> inline void evalTo(Dest& dst) const
@@ -177,9 +179,9 @@ class BandMatrix : public EigenBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Opt
 dst.resize(rows(),cols());
 dst.setZero();
 dst.diagonal() = diagonal();
-for (int i=1; i<=supers();++i)
+for (Index i=1; i<=supers();++i)
 dst.diagonal(i) = diagonal(i);
-for (int i=1; i<=subs();++i)
+for (Index i=1; i<=subs();++i)
 dst.diagonal(-i) = diagonal(-i);
} }
@ -192,13 +194,13 @@ class BandMatrix : public EigenBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Opt
protected: protected:
inline int diagonalLength(int i) const inline Index diagonalLength(Index i) const
{ return i<0 ? std::min(cols(),rows()+i) : std::min(rows(),cols()-i); } { return i<0 ? std::min(cols(),rows()+i) : std::min(rows(),cols()-i); }
DataType m_data; DataType m_data;
ei_int_if_dynamic<Rows> m_rows; ei_variable_if_dynamic<Index, Rows> m_rows;
ei_int_if_dynamic<Supers> m_supers; ei_variable_if_dynamic<Index, Supers> m_supers;
ei_int_if_dynamic<Subs> m_subs; ei_variable_if_dynamic<Index, Subs> m_subs;
}; };
/** \nonstableyet /** \nonstableyet
@ -216,8 +218,9 @@ template<typename Scalar, int Size, int Options>
class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>
{ {
typedef BandMatrix<Scalar,Size,Size,1,Options&SelfAdjoint?0:1,Options|RowMajor> Base; typedef BandMatrix<Scalar,Size,Size,1,Options&SelfAdjoint?0:1,Options|RowMajor> Base;
typedef typename Base::Index Index;
public: public:
TridiagonalMatrix(int size = Size) : Base(size,size,1,1) {} TridiagonalMatrix(Index size = Size) : Base(size,size,1,1) {}
inline typename Base::template DiagonalIntReturnType<1>::Type super() inline typename Base::template DiagonalIntReturnType<1>::Type super()
{ return Base::template diagonal<1>(); } { return Base::template diagonal<1>(); }
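For orientation, a small sketch of the BandMatrix interface touched above (constructor, diagonal(), evalTo()). It assumes the class is reachable as Eigen::BandMatrix with a defaulted Options parameter in this revision; treat it as an illustration, not a supported public API:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      // 5x5 band matrix with 1 super-diagonal and 2 sub-diagonals
      Eigen::BandMatrix<double, Eigen::Dynamic, Eigen::Dynamic, 1, 2> b(5, 5, 1, 2);
      b.diagonal().setConstant(2.0);     // main diagonal
      b.diagonal(1).setConstant(-1.0);   // super-diagonal
      b.diagonal(-1).setConstant(-1.0);  // first sub-diagonal
      b.diagonal(-2).setConstant(0.5);   // second sub-diagonal

      Eigen::MatrixXd dense;
      b.evalTo(dense);                   // expand to a dense matrix, as evalTo() above does
      std::cout << dense << std::endl;
      return 0;
    }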


@ -36,7 +36,7 @@
* \param _DirectAccessStatus \internal used for partial specialization * \param _DirectAccessStatus \internal used for partial specialization
* *
* This class represents an expression of either a fixed-size or dynamic-size block. It is the return * This class represents an expression of either a fixed-size or dynamic-size block. It is the return
* type of DenseBase::block(int,int,int,int) and DenseBase::block<int,int>(int,int) and * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
* most of the time this is the only way it is used. * most of the time this is the only way it is used.
* *
* However, if you want to directly manipulate block expressions, * However, if you want to directly manipulate block expressions,
@ -55,7 +55,7 @@
* \include class_FixedBlock.cpp * \include class_FixedBlock.cpp
* Output: \verbinclude class_FixedBlock.out * Output: \verbinclude class_FixedBlock.out
* *
* \sa DenseBase::block(int,int,int,int), DenseBase::block(int,int), class VectorBlock * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
*/ */
template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess>
struct ei_traits<Block<XprType, BlockRows, BlockCols, HasDirectAccess> > : ei_traits<XprType> struct ei_traits<Block<XprType, BlockRows, BlockCols, HasDirectAccess> > : ei_traits<XprType>
@ -110,7 +110,7 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
/** Column or Row constructor /** Column or Row constructor
*/ */
inline Block(const XprType& xpr, int i) inline Block(const XprType& xpr, Index i)
: m_xpr(xpr), : m_xpr(xpr),
// It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
// and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
@ -128,7 +128,7 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
/** Fixed-size constructor /** Fixed-size constructor
*/ */
inline Block(const XprType& xpr, int startRow, int startCol) inline Block(const XprType& xpr, Index startRow, Index startCol)
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
m_blockRows(BlockRows), m_blockCols(BlockCols) m_blockRows(BlockRows), m_blockCols(BlockCols)
{ {
@ -140,8 +140,8 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
/** Dynamic-size constructor /** Dynamic-size constructor
*/ */
inline Block(const XprType& xpr, inline Block(const XprType& xpr,
int startRow, int startCol, Index startRow, Index startCol,
int blockRows, int blockCols) Index blockRows, Index blockCols)
: m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
m_blockRows(blockRows), m_blockCols(blockCols) m_blockRows(blockRows), m_blockCols(blockCols)
{ {
@ -153,28 +153,28 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
inline int rows() const { return m_blockRows.value(); } inline Index rows() const { return m_blockRows.value(); }
inline int cols() const { return m_blockCols.value(); } inline Index cols() const { return m_blockCols.value(); }
inline Scalar& coeffRef(int row, int col) inline Scalar& coeffRef(Index row, Index col)
{ {
return m_xpr.const_cast_derived() return m_xpr.const_cast_derived()
.coeffRef(row + m_startRow.value(), col + m_startCol.value()); .coeffRef(row + m_startRow.value(), col + m_startCol.value());
} }
inline const CoeffReturnType coeff(int row, int col) const inline const CoeffReturnType coeff(Index row, Index col) const
{ {
return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value()); return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value());
} }
inline Scalar& coeffRef(int index) inline Scalar& coeffRef(Index index)
{ {
return m_xpr.const_cast_derived() return m_xpr.const_cast_derived()
.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
} }
inline const CoeffReturnType coeff(int index) const inline const CoeffReturnType coeff(Index index) const
{ {
return m_xpr return m_xpr
.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@ -182,21 +182,21 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
} }
template<int LoadMode> template<int LoadMode>
inline PacketScalar packet(int row, int col) const inline PacketScalar packet(Index row, Index col) const
{ {
return m_xpr.template packet<Unaligned> return m_xpr.template packet<Unaligned>
(row + m_startRow.value(), col + m_startCol.value()); (row + m_startRow.value(), col + m_startCol.value());
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int row, int col, const PacketScalar& x) inline void writePacket(Index row, Index col, const PacketScalar& x)
{ {
m_xpr.const_cast_derived().template writePacket<Unaligned> m_xpr.const_cast_derived().template writePacket<Unaligned>
(row + m_startRow.value(), col + m_startCol.value(), x); (row + m_startRow.value(), col + m_startCol.value(), x);
} }
template<int LoadMode> template<int LoadMode>
inline PacketScalar packet(int index) const inline PacketScalar packet(Index index) const
{ {
return m_xpr.template packet<Unaligned> return m_xpr.template packet<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@ -204,7 +204,7 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int index, const PacketScalar& x) inline void writePacket(Index index, const PacketScalar& x)
{ {
m_xpr.const_cast_derived().template writePacket<Unaligned> m_xpr.const_cast_derived().template writePacket<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@ -214,17 +214,17 @@ template<typename XprType, int BlockRows, int BlockCols, bool HasDirectAccess> c
#ifdef EIGEN_PARSED_BY_DOXYGEN #ifdef EIGEN_PARSED_BY_DOXYGEN
/** \sa MapBase::data() */ /** \sa MapBase::data() */
inline const Scalar* data() const; inline const Scalar* data() const;
inline int innerStride() const; inline Index innerStride() const;
inline int outerStride() const; inline Index outerStride() const;
#endif #endif
protected: protected:
const typename XprType::Nested m_xpr; const typename XprType::Nested m_xpr;
const ei_int_if_dynamic<XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow; const ei_variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const ei_int_if_dynamic<XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol; const ei_variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const ei_int_if_dynamic<RowsAtCompileTime> m_blockRows; const ei_variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
const ei_int_if_dynamic<ColsAtCompileTime> m_blockCols; const ei_variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
}; };
/** \internal */ /** \internal */
@ -241,7 +241,7 @@ class Block<XprType,BlockRows,BlockCols,true>
/** Column or Row constructor /** Column or Row constructor
*/ */
inline Block(const XprType& xpr, int i) inline Block(const XprType& xpr, Index i)
: Base(&xpr.const_cast_derived().coeffRef( : Base(&xpr.const_cast_derived().coeffRef(
(BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0, (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
(BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
@ -257,7 +257,7 @@ class Block<XprType,BlockRows,BlockCols,true>
/** Fixed-size constructor /** Fixed-size constructor
*/ */
inline Block(const XprType& xpr, int startRow, int startCol) inline Block(const XprType& xpr, Index startRow, Index startCol)
: Base(&xpr.const_cast_derived().coeffRef(startRow,startCol)), m_xpr(xpr) : Base(&xpr.const_cast_derived().coeffRef(startRow,startCol)), m_xpr(xpr)
{ {
ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
@ -268,8 +268,8 @@ class Block<XprType,BlockRows,BlockCols,true>
/** Dynamic-size constructor /** Dynamic-size constructor
*/ */
inline Block(const XprType& xpr, inline Block(const XprType& xpr,
int startRow, int startCol, Index startRow, Index startCol,
int blockRows, int blockCols) Index blockRows, Index blockCols)
: Base(&xpr.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols), : Base(&xpr.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols),
m_xpr(xpr) m_xpr(xpr)
{ {
@ -281,7 +281,7 @@ class Block<XprType,BlockRows,BlockCols,true>
} }
/** \sa MapBase::innerStride() */ /** \sa MapBase::innerStride() */
inline int innerStride() const inline Index innerStride() const
{ {
return ei_traits<Block>::HasSameStorageOrderAsXprType return ei_traits<Block>::HasSameStorageOrderAsXprType
? m_xpr.innerStride() ? m_xpr.innerStride()
@ -289,7 +289,7 @@ class Block<XprType,BlockRows,BlockCols,true>
} }
/** \sa MapBase::outerStride() */ /** \sa MapBase::outerStride() */
inline int outerStride() const inline Index outerStride() const
{ {
return m_outerStride; return m_outerStride;
} }
@ -302,7 +302,7 @@ class Block<XprType,BlockRows,BlockCols,true>
#ifndef EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal used by allowAligned() */ /** \internal used by allowAligned() */
inline Block(const XprType& xpr, const Scalar* data, int blockRows, int blockCols) inline Block(const XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
: Base(data, blockRows, blockCols), m_xpr(xpr) : Base(data, blockRows, blockCols), m_xpr(xpr)
{ {
init(); init();
@ -335,19 +335,19 @@ class Block<XprType,BlockRows,BlockCols,true>
* when it is applied to a fixed-size matrix, it inherits a fixed maximal size, * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation. * which means that evaluating it does not cause a dynamic memory allocation.
* *
* \sa class Block, block(int,int) * \sa class Block, block(Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline Block<Derived> DenseBase<Derived> inline Block<Derived> DenseBase<Derived>
::block(int startRow, int startCol, int blockRows, int blockCols) ::block(Index startRow, Index startCol, Index blockRows, Index blockCols)
{ {
return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols); return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols);
} }
/** This is the const version of block(int,int,int,int). */ /** This is the const version of block(Index,Index,Index,Index). */
template<typename Derived> template<typename Derived>
inline const Block<Derived> DenseBase<Derived> inline const Block<Derived> DenseBase<Derived>
::block(int startRow, int startCol, int blockRows, int blockCols) const ::block(Index startRow, Index startCol, Index blockRows, Index blockCols) const
{ {
return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols); return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols);
} }
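A minimal usage sketch of the dynamic-size block() defined above, assuming only the usual <Eigen/Dense> header; sizes and offsets are arbitrary:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(5, 5);
      Eigen::MatrixXd sub = m.block(1, 2, 2, 3); // 2x3 block starting at row 1, column 2
      m.block(0, 0, 2, 2).setZero();             // block() is writable: zero the top-left 2x2
      std::cout << sub << "\n\n" << m << std::endl;
      return 0;
    }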
@ -363,19 +363,19 @@ inline const Block<Derived> DenseBase<Derived>
* Example: \include MatrixBase_topRightCorner_int_int.cpp * Example: \include MatrixBase_topRightCorner_int_int.cpp
* Output: \verbinclude MatrixBase_topRightCorner_int_int.out * Output: \verbinclude MatrixBase_topRightCorner_int_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline Block<Derived> DenseBase<Derived> inline Block<Derived> DenseBase<Derived>
::topRightCorner(int cRows, int cCols) ::topRightCorner(Index cRows, Index cCols)
{ {
return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols); return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
} }
/** This is the const version of topRightCorner(int, int).*/ /** This is the const version of topRightCorner(Index, Index).*/
template<typename Derived> template<typename Derived>
inline const Block<Derived> inline const Block<Derived>
DenseBase<Derived>::topRightCorner(int cRows, int cCols) const DenseBase<Derived>::topRightCorner(Index cRows, Index cCols) const
{ {
return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols); return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
} }
@ -387,7 +387,7 @@ DenseBase<Derived>::topRightCorner(int cRows, int cCols) const
* Example: \include MatrixBase_template_int_int_topRightCorner.cpp * Example: \include MatrixBase_template_int_int_topRightCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out * Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int CRows, int CCols> template<int CRows, int CCols>
@ -417,19 +417,19 @@ DenseBase<Derived>::topRightCorner() const
* Example: \include MatrixBase_topLeftCorner_int_int.cpp * Example: \include MatrixBase_topLeftCorner_int_int.cpp
* Output: \verbinclude MatrixBase_topLeftCorner_int_int.out * Output: \verbinclude MatrixBase_topLeftCorner_int_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline Block<Derived> DenseBase<Derived> inline Block<Derived> DenseBase<Derived>
::topLeftCorner(int cRows, int cCols) ::topLeftCorner(Index cRows, Index cCols)
{ {
return Block<Derived>(derived(), 0, 0, cRows, cCols); return Block<Derived>(derived(), 0, 0, cRows, cCols);
} }
/** This is the const version of topLeftCorner(int, int).*/ /** This is the const version of topLeftCorner(Index, Index).*/
template<typename Derived> template<typename Derived>
inline const Block<Derived> inline const Block<Derived>
DenseBase<Derived>::topLeftCorner(int cRows, int cCols) const DenseBase<Derived>::topLeftCorner(Index cRows, Index cCols) const
{ {
return Block<Derived>(derived(), 0, 0, cRows, cCols); return Block<Derived>(derived(), 0, 0, cRows, cCols);
} }
@ -441,7 +441,7 @@ DenseBase<Derived>::topLeftCorner(int cRows, int cCols) const
* Example: \include MatrixBase_template_int_int_topLeftCorner.cpp * Example: \include MatrixBase_template_int_int_topLeftCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int CRows, int CCols> template<int CRows, int CCols>
@ -473,19 +473,19 @@ DenseBase<Derived>::topLeftCorner() const
* Example: \include MatrixBase_bottomRightCorner_int_int.cpp * Example: \include MatrixBase_bottomRightCorner_int_int.cpp
* Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out * Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline Block<Derived> DenseBase<Derived> inline Block<Derived> DenseBase<Derived>
::bottomRightCorner(int cRows, int cCols) ::bottomRightCorner(Index cRows, Index cCols)
{ {
return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols); return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
} }
/** This is the const version of bottomRightCorner(int, int).*/ /** This is the const version of bottomRightCorner(Index, Index).*/
template<typename Derived> template<typename Derived>
inline const Block<Derived> inline const Block<Derived>
DenseBase<Derived>::bottomRightCorner(int cRows, int cCols) const DenseBase<Derived>::bottomRightCorner(Index cRows, Index cCols) const
{ {
return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols); return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
} }
@ -497,7 +497,7 @@ DenseBase<Derived>::bottomRightCorner(int cRows, int cCols) const
* Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp * Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int CRows, int CCols> template<int CRows, int CCols>
@ -527,19 +527,19 @@ DenseBase<Derived>::bottomRightCorner() const
* Example: \include MatrixBase_bottomLeftCorner_int_int.cpp * Example: \include MatrixBase_bottomLeftCorner_int_int.cpp
* Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out * Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline Block<Derived> DenseBase<Derived> inline Block<Derived> DenseBase<Derived>
::bottomLeftCorner(int cRows, int cCols) ::bottomLeftCorner(Index cRows, Index cCols)
{ {
return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols); return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
} }
/** This is the const version of bottomLeftCorner(int, int).*/ /** This is the const version of bottomLeftCorner(Index, Index).*/
template<typename Derived> template<typename Derived>
inline const Block<Derived> inline const Block<Derived>
DenseBase<Derived>::bottomLeftCorner(int cRows, int cCols) const DenseBase<Derived>::bottomLeftCorner(Index cRows, Index cCols) const
{ {
return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols); return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
} }
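The four dynamic-size corner helpers above in one short sketch (standard <Eigen/Dense> assumed):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);
      std::cout << m.topLeftCorner(2, 2)    << "\n\n";
      std::cout << m.topRightCorner(2, 3)   << "\n\n";
      std::cout << m.bottomLeftCorner(3, 2) << "\n\n";
      m.bottomRightCorner(2, 2).setZero();  // corners are writable as well
      std::cout << m << std::endl;
      return 0;
    }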
@ -551,7 +551,7 @@ DenseBase<Derived>::bottomLeftCorner(int cRows, int cCols) const
* Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp * Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int CRows, int CCols> template<int CRows, int CCols>
@ -579,19 +579,19 @@ DenseBase<Derived>::bottomLeftCorner() const
* Example: \include MatrixBase_topRows_int.cpp * Example: \include MatrixBase_topRows_int.cpp
* Output: \verbinclude MatrixBase_topRows_int.out * Output: \verbinclude MatrixBase_topRows_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline typename DenseBase<Derived>::RowsBlockXpr DenseBase<Derived> inline typename DenseBase<Derived>::RowsBlockXpr DenseBase<Derived>
::topRows(int n) ::topRows(Index n)
{ {
return RowsBlockXpr(derived(), 0, 0, n, cols()); return RowsBlockXpr(derived(), 0, 0, n, cols());
} }
/** This is the const version of topRows(int).*/ /** This is the const version of topRows(Index).*/
template<typename Derived> template<typename Derived>
inline const typename DenseBase<Derived>::RowsBlockXpr inline const typename DenseBase<Derived>::RowsBlockXpr
DenseBase<Derived>::topRows(int n) const DenseBase<Derived>::topRows(Index n) const
{ {
return RowsBlockXpr(derived(), 0, 0, n, cols()); return RowsBlockXpr(derived(), 0, 0, n, cols());
} }
@ -603,7 +603,7 @@ DenseBase<Derived>::topRows(int n) const
* Example: \include MatrixBase_template_int_topRows.cpp * Example: \include MatrixBase_template_int_topRows.cpp
* Output: \verbinclude MatrixBase_template_int_topRows.out * Output: \verbinclude MatrixBase_template_int_topRows.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int N> template<int N>
@ -633,19 +633,19 @@ DenseBase<Derived>::topRows() const
* Example: \include MatrixBase_bottomRows_int.cpp * Example: \include MatrixBase_bottomRows_int.cpp
* Output: \verbinclude MatrixBase_bottomRows_int.out * Output: \verbinclude MatrixBase_bottomRows_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline typename DenseBase<Derived>::RowsBlockXpr DenseBase<Derived> inline typename DenseBase<Derived>::RowsBlockXpr DenseBase<Derived>
::bottomRows(int n) ::bottomRows(Index n)
{ {
return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); return RowsBlockXpr(derived(), rows() - n, 0, n, cols());
} }
/** This is the const version of bottomRows(int).*/ /** This is the const version of bottomRows(Index).*/
template<typename Derived> template<typename Derived>
inline const typename DenseBase<Derived>::RowsBlockXpr inline const typename DenseBase<Derived>::RowsBlockXpr
DenseBase<Derived>::bottomRows(int n) const DenseBase<Derived>::bottomRows(Index n) const
{ {
return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); return RowsBlockXpr(derived(), rows() - n, 0, n, cols());
} }
@ -657,7 +657,7 @@ DenseBase<Derived>::bottomRows(int n) const
* Example: \include MatrixBase_template_int_bottomRows.cpp * Example: \include MatrixBase_template_int_bottomRows.cpp
* Output: \verbinclude MatrixBase_template_int_bottomRows.out * Output: \verbinclude MatrixBase_template_int_bottomRows.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int N> template<int N>
@ -687,19 +687,19 @@ DenseBase<Derived>::bottomRows() const
* Example: \include MatrixBase_leftCols_int.cpp * Example: \include MatrixBase_leftCols_int.cpp
* Output: \verbinclude MatrixBase_leftCols_int.out * Output: \verbinclude MatrixBase_leftCols_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline typename DenseBase<Derived>::ColsBlockXpr DenseBase<Derived> inline typename DenseBase<Derived>::ColsBlockXpr DenseBase<Derived>
::leftCols(int n) ::leftCols(Index n)
{ {
return ColsBlockXpr(derived(), 0, 0, rows(), n); return ColsBlockXpr(derived(), 0, 0, rows(), n);
} }
/** This is the const version of leftCols(int).*/ /** This is the const version of leftCols(Index).*/
template<typename Derived> template<typename Derived>
inline const typename DenseBase<Derived>::ColsBlockXpr inline const typename DenseBase<Derived>::ColsBlockXpr
DenseBase<Derived>::leftCols(int n) const DenseBase<Derived>::leftCols(Index n) const
{ {
return ColsBlockXpr(derived(), 0, 0, rows(), n); return ColsBlockXpr(derived(), 0, 0, rows(), n);
} }
@ -711,7 +711,7 @@ DenseBase<Derived>::leftCols(int n) const
* Example: \include MatrixBase_template_int_leftCols.cpp * Example: \include MatrixBase_template_int_leftCols.cpp
* Output: \verbinclude MatrixBase_template_int_leftCols.out * Output: \verbinclude MatrixBase_template_int_leftCols.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int N> template<int N>
@ -741,19 +741,19 @@ DenseBase<Derived>::leftCols() const
* Example: \include MatrixBase_rightCols_int.cpp * Example: \include MatrixBase_rightCols_int.cpp
* Output: \verbinclude MatrixBase_rightCols_int.out * Output: \verbinclude MatrixBase_rightCols_int.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline typename DenseBase<Derived>::ColsBlockXpr DenseBase<Derived> inline typename DenseBase<Derived>::ColsBlockXpr DenseBase<Derived>
::rightCols(int n) ::rightCols(Index n)
{ {
return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); return ColsBlockXpr(derived(), 0, cols() - n, rows(), n);
} }
/** This is the const version of rightCols(int).*/ /** This is the const version of rightCols(Index).*/
template<typename Derived> template<typename Derived>
inline const typename DenseBase<Derived>::ColsBlockXpr inline const typename DenseBase<Derived>::ColsBlockXpr
DenseBase<Derived>::rightCols(int n) const DenseBase<Derived>::rightCols(Index n) const
{ {
return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); return ColsBlockXpr(derived(), 0, cols() - n, rows(), n);
} }
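A short sketch of the row/column segment helpers above, topRows(), bottomRows(), leftCols() and rightCols() (standard <Eigen/Dense> assumed):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 5);
      std::cout << m.topRows(2)    << "\n\n";   // first 2 rows
      std::cout << m.bottomRows(1) << "\n\n";   // last row
      std::cout << m.leftCols(3)   << "\n\n";   // first 3 columns
      std::cout << m.rightCols(2)  << std::endl; // last 2 columns
      return 0;
    }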
@ -765,7 +765,7 @@ DenseBase<Derived>::rightCols(int n) const
* Example: \include MatrixBase_template_int_rightCols.cpp * Example: \include MatrixBase_template_int_rightCols.cpp
* Output: \verbinclude MatrixBase_template_int_rightCols.out * Output: \verbinclude MatrixBase_template_int_rightCols.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int N> template<int N>
@ -802,21 +802,21 @@ DenseBase<Derived>::rightCols() const
* \note since block is a templated member, the keyword template has to be used * \note since block is a templated member, the keyword template has to be used
* if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int BlockRows, int BlockCols> template<int BlockRows, int BlockCols>
inline Block<Derived, BlockRows, BlockCols> inline Block<Derived, BlockRows, BlockCols>
DenseBase<Derived>::block(int startRow, int startCol) DenseBase<Derived>::block(Index startRow, Index startCol)
{ {
return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol); return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
} }
/** This is the const version of block<>(int, int). */ /** This is the const version of block<>(Index, Index). */
template<typename Derived> template<typename Derived>
template<int BlockRows, int BlockCols> template<int BlockRows, int BlockCols>
inline const Block<Derived, BlockRows, BlockCols> inline const Block<Derived, BlockRows, BlockCols>
DenseBase<Derived>::block(int startRow, int startCol) const DenseBase<Derived>::block(Index startRow, Index startCol) const
{ {
return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol); return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
} }
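A sketch of the fixed-size block<BlockRows,BlockCols>() overload above, including the template keyword that the note further up requires inside templated code (standard <Eigen/Dense> assumed; the helper function is hypothetical):

    #include <Eigen/Dense>
    #include <iostream>

    template<typename MatrixType>
    void zeroTopLeft(MatrixType& m)
    {
      // inside a template, the 'template' keyword is required, as noted above
      m.template block<2,2>(0, 0).setZero();
    }

    int main()
    {
      Eigen::Matrix4d m = Eigen::Matrix4d::Random();
      Eigen::Matrix2d c = m.block<2,2>(1, 1); // fixed-size 2x2 block at (1,1)
      zeroTopLeft(m);
      std::cout << c << "\n\n" << m << std::endl;
      return 0;
    }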
@ -829,7 +829,7 @@ DenseBase<Derived>::block(int startRow, int startCol) const
* \sa row(), class Block */ * \sa row(), class Block */
template<typename Derived> template<typename Derived>
inline typename DenseBase<Derived>::ColXpr inline typename DenseBase<Derived>::ColXpr
DenseBase<Derived>::col(int i) DenseBase<Derived>::col(Index i)
{ {
return ColXpr(derived(), i); return ColXpr(derived(), i);
} }
@ -837,7 +837,7 @@ DenseBase<Derived>::col(int i)
/** This is the const version of col(). */ /** This is the const version of col(). */
template<typename Derived> template<typename Derived>
inline const typename DenseBase<Derived>::ColXpr inline const typename DenseBase<Derived>::ColXpr
DenseBase<Derived>::col(int i) const DenseBase<Derived>::col(Index i) const
{ {
return ColXpr(derived(), i); return ColXpr(derived(), i);
} }
@ -850,7 +850,7 @@ DenseBase<Derived>::col(int i) const
* \sa col(), class Block */ * \sa col(), class Block */
template<typename Derived> template<typename Derived>
inline typename DenseBase<Derived>::RowXpr inline typename DenseBase<Derived>::RowXpr
DenseBase<Derived>::row(int i) DenseBase<Derived>::row(Index i)
{ {
return RowXpr(derived(), i); return RowXpr(derived(), i);
} }
@ -858,7 +858,7 @@ DenseBase<Derived>::row(int i)
/** This is the const version of row(). */ /** This is the const version of row(). */
template<typename Derived> template<typename Derived>
inline const typename DenseBase<Derived>::RowXpr inline const typename DenseBase<Derived>::RowXpr
DenseBase<Derived>::row(int i) const DenseBase<Derived>::row(Index i) const
{ {
return RowXpr(derived(), i); return RowXpr(derived(), i);
} }
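A minimal sketch of col() and row() as defined above; both return writable expressions (standard <Eigen/Dense> assumed):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
      m.col(2) = Eigen::Vector3d(1, 2, 3);   // col(i) is writable
      Eigen::RowVector3d r = m.row(0);       // row(i) as a read expression
      std::cout << m << "\n\n" << r << std::endl;
      return 0;
    }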


@ -39,7 +39,9 @@
template<typename XprType> template<typename XprType>
struct CommaInitializer struct CommaInitializer
{ {
typedef typename ei_traits<XprType>::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename XprType::Index Index;
inline CommaInitializer(XprType& xpr, const Scalar& s) inline CommaInitializer(XprType& xpr, const Scalar& s)
: m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
{ {
@ -113,9 +115,9 @@ struct CommaInitializer
inline XprType& finished() { return m_xpr; } inline XprType& finished() { return m_xpr; }
XprType& m_xpr; // target expression XprType& m_xpr; // target expression
int m_row; // current row id Index m_row; // current row id
int m_col; // current col id Index m_col; // current col id
int m_currentBlockRows; // current block height Index m_currentBlockRows; // current block height
}; };
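The struct above backs Eigen's comma-initializer syntax; a minimal usage sketch (standard <Eigen/Dense> assumed):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m;
      m << 1, 2, 3,          // operator<< starts a CommaInitializer,
           4, 5, 6,          // operator, keeps appending coefficients,
           7, 8, 9;          // the destructor checks the matrix is full

      // finished() returns the filled expression so it can be used inline
      Eigen::MatrixXd t = (Eigen::MatrixXd(2, 2) << 0, 1,
                                                    1, 0).finished();
      std::cout << m << "\n\n" << t << std::endl;
      return 0;
    }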
/** \anchor MatrixBaseCommaInitRef /** \anchor MatrixBaseCommaInitRef


@ -123,14 +123,14 @@ class CwiseBinaryOp : ei_no_assignment_operator,
ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
} }
EIGEN_STRONG_INLINE int rows() const { EIGEN_STRONG_INLINE Index rows() const {
// return the fixed size type if available to enable compile time optimizations // return the fixed size type if available to enable compile time optimizations
if (ei_traits<typename ei_cleantype<LhsNested>::type>::RowsAtCompileTime==Dynamic) if (ei_traits<typename ei_cleantype<LhsNested>::type>::RowsAtCompileTime==Dynamic)
return m_rhs.rows(); return m_rhs.rows();
else else
return m_lhs.rows(); return m_lhs.rows();
} }
EIGEN_STRONG_INLINE int cols() const { EIGEN_STRONG_INLINE Index cols() const {
// return the fixed size type if available to enable compile time optimizations // return the fixed size type if available to enable compile time optimizations
if (ei_traits<typename ei_cleantype<LhsNested>::type>::ColsAtCompileTime==Dynamic) if (ei_traits<typename ei_cleantype<LhsNested>::type>::ColsAtCompileTime==Dynamic)
return m_rhs.cols(); return m_rhs.cols();
@ -161,27 +161,27 @@ class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
typedef typename ei_dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base; typedef typename ei_dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE( Derived ) EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{ {
return derived().functor()(derived().lhs().coeff(row, col), return derived().functor()(derived().lhs().coeff(row, col),
derived().rhs().coeff(row, col)); derived().rhs().coeff(row, col));
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{ {
return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(row, col), return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(row, col),
derived().rhs().template packet<LoadMode>(row, col)); derived().rhs().template packet<LoadMode>(row, col));
} }
EIGEN_STRONG_INLINE const Scalar coeff(int index) const EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{ {
return derived().functor()(derived().lhs().coeff(index), return derived().functor()(derived().lhs().coeff(index),
derived().rhs().coeff(index)); derived().rhs().coeff(index));
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int index) const EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{ {
return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index), return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index),
derived().rhs().template packet<LoadMode>(index)); derived().rhs().template packet<LoadMode>(index));
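Coefficient-wise binary operators return CwiseBinaryOp expressions like the one above; a short sketch (sums and differences come from MatrixBase itself, and the cwiseProduct shorthand is assumed to be available in this revision):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector3d a(1, 2, 3), b(4, 5, 6);
      // each of these builds a CwiseBinaryOp expression; evaluation happens
      // only when the result is assigned or streamed
      std::cout << (a + b).transpose()           << "\n";
      std::cout << (a - b).transpose()           << "\n";
      std::cout << a.cwiseProduct(b).transpose() << std::endl;
      return 0;
    }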


@ -63,7 +63,7 @@ class CwiseNullaryOp : ei_no_assignment_operator,
typedef typename ei_dense_xpr_base<CwiseNullaryOp>::type Base; typedef typename ei_dense_xpr_base<CwiseNullaryOp>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
CwiseNullaryOp(int rows, int cols, const NullaryOp& func = NullaryOp()) CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
: m_rows(rows), m_cols(cols), m_functor(func) : m_rows(rows), m_cols(cols), m_functor(func)
{ {
ei_assert(rows >= 0 ei_assert(rows >= 0
@ -72,34 +72,34 @@ class CwiseNullaryOp : ei_no_assignment_operator,
&& (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
} }
EIGEN_STRONG_INLINE int rows() const { return m_rows.value(); } EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
EIGEN_STRONG_INLINE int cols() const { return m_cols.value(); } EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
EIGEN_STRONG_INLINE const Scalar coeff(int rows, int cols) const EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const
{ {
return m_functor(rows, cols); return m_functor(rows, cols);
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{ {
return m_functor.packetOp(row, col); return m_functor.packetOp(row, col);
} }
EIGEN_STRONG_INLINE const Scalar coeff(int index) const EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{ {
return m_functor(index); return m_functor(index);
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int index) const EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{ {
return m_functor.packetOp(index); return m_functor.packetOp(index);
} }
protected: protected:
const ei_int_if_dynamic<RowsAtCompileTime> m_rows; const ei_variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
const ei_int_if_dynamic<ColsAtCompileTime> m_cols; const ei_variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
const NullaryOp m_functor; const NullaryOp m_functor;
}; };
@ -120,7 +120,7 @@ class CwiseNullaryOp : ei_no_assignment_operator,
template<typename Derived> template<typename Derived>
template<typename CustomNullaryOp> template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived> EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
DenseBase<Derived>::NullaryExpr(int rows, int cols, const CustomNullaryOp& func) DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
{ {
return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func); return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func);
} }
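A sketch of NullaryExpr() with a user-defined functor. The functor name and its defaulted second parameter, which lets the same operator() serve both the coeff(Index) and coeff(Index,Index) calls shown earlier in this file, are assumptions for illustration; it also relies on the generic functor-traits fallback:

    #include <Eigen/Dense>
    #include <iostream>

    // hypothetical functor: i-th coefficient is i^2; the defaulted second
    // argument covers the two-argument (row, col) call form for a column vector
    struct SquaresOp
    {
      double operator()(int i, int = 0) const { return double(i) * double(i); }
    };

    int main()
    {
      Eigen::VectorXd v = Eigen::VectorXd::NullaryExpr(6, SquaresOp());
      std::cout << v.transpose() << std::endl;  // 0 1 4 9 16 25
      return 0;
    }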
@ -143,7 +143,7 @@ DenseBase<Derived>::NullaryExpr(int rows, int cols, const CustomNullaryOp& func)
template<typename Derived> template<typename Derived>
template<typename CustomNullaryOp> template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived> EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
DenseBase<Derived>::NullaryExpr(int size, const CustomNullaryOp& func) DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func); if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func);
@ -182,7 +182,7 @@ DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(int rows, int cols, const Scalar& value) DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
{ {
return DenseBase<Derived>::NullaryExpr(rows, cols, ei_scalar_constant_op<Scalar>(value)); return DenseBase<Derived>::NullaryExpr(rows, cols, ei_scalar_constant_op<Scalar>(value));
} }
@ -204,7 +204,7 @@ DenseBase<Derived>::Constant(int rows, int cols, const Scalar& value)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Constant(int size, const Scalar& value) DenseBase<Derived>::Constant(Index size, const Scalar& value)
{ {
return DenseBase<Derived>::NullaryExpr(size, ei_scalar_constant_op<Scalar>(value)); return DenseBase<Derived>::NullaryExpr(size, ei_scalar_constant_op<Scalar>(value));
} }
@ -239,11 +239,11 @@ DenseBase<Derived>::Constant(const Scalar& value)
* Example: \include DenseBase_LinSpaced_seq.cpp * Example: \include DenseBase_LinSpaced_seq.cpp
* Output: \verbinclude DenseBase_LinSpaced_seq.out * Output: \verbinclude DenseBase_LinSpaced_seq.out
* *
* \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Scalar,Scalar,int), CwiseNullaryOp * \sa setLinSpaced(const Scalar&,const Scalar&,Index), LinSpaced(Scalar,Scalar,Index), CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size) DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return DenseBase<Derived>::NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size)); return DenseBase<Derived>::NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size));
@ -259,11 +259,11 @@ DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& hig
* Example: \include DenseBase_LinSpaced.cpp * Example: \include DenseBase_LinSpaced.cpp
* Output: \verbinclude DenseBase_LinSpaced.out * Output: \verbinclude DenseBase_LinSpaced.out
* *
* \sa setLinSpaced(const Scalar&,const Scalar&,int), LinSpaced(Sequential_t,const Scalar&,const Scalar&,int), CwiseNullaryOp * \sa setLinSpaced(const Scalar&,const Scalar&,Index), LinSpaced(Sequential_t,const Scalar&,const Scalar&,Index), CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high, int size) DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high, Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return DenseBase<Derived>::NullaryExpr(size, ei_linspaced_op<Scalar,true>(low,high,size)); return DenseBase<Derived>::NullaryExpr(size, ei_linspaced_op<Scalar,true>(low,high,size));
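A minimal sketch of LinSpaced(); note the (low, high, size) argument order declared above (standard <Eigen/Dense> assumed):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // 5 evenly spaced values from 0 to 1, following the (low, high, size) signature above
      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(0.0, 1.0, 5);
      std::cout << v.transpose() << std::endl;  // 0 0.25 0.5 0.75 1
      return 0;
    }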
@ -274,8 +274,8 @@ template<typename Derived>
bool DenseBase<Derived>::isApproxToConstant bool DenseBase<Derived>::isApproxToConstant
(const Scalar& value, RealScalar prec) const (const Scalar& value, RealScalar prec) const
{ {
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
for(int i = 0; i < rows(); ++i) for(Index i = 0; i < rows(); ++i)
if(!ei_isApprox(this->coeff(i, j), value, prec)) if(!ei_isApprox(this->coeff(i, j), value, prec))
return false; return false;
return true; return true;
@ -303,7 +303,7 @@ EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& value)
/** Sets all coefficients in this expression to \a value. /** Sets all coefficients in this expression to \a value.
* *
* \sa fill(), setConstant(int,const Scalar&), setConstant(int,int,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value) EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value)
@ -318,11 +318,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value
* Example: \include Matrix_setConstant_int.cpp * Example: \include Matrix_setConstant_int.cpp
* Output: \verbinclude Matrix_setConstant_int.out * Output: \verbinclude Matrix_setConstant_int.out
* *
* \sa MatrixBase::setConstant(const Scalar&), setConstant(int,int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_STRONG_INLINE Derived&
DenseStorageBase<Derived>::setConstant(int size, const Scalar& value) DenseStorageBase<Derived>::setConstant(Index size, const Scalar& value)
{ {
resize(size); resize(size);
return setConstant(value); return setConstant(value);
@ -336,11 +336,11 @@ DenseStorageBase<Derived>::setConstant(int size, const Scalar& value)
* Example: \include Matrix_setConstant_int_int.cpp * Example: \include Matrix_setConstant_int_int.cpp
* Output: \verbinclude Matrix_setConstant_int_int.out * Output: \verbinclude Matrix_setConstant_int_int.out
* *
* \sa MatrixBase::setConstant(const Scalar&), setConstant(int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_STRONG_INLINE Derived&
DenseStorageBase<Derived>::setConstant(int rows, int cols, const Scalar& value) DenseStorageBase<Derived>::setConstant(Index rows, Index cols, const Scalar& value)
{ {
resize(rows, cols); resize(rows, cols);
return setConstant(value); return setConstant(value);
@ -359,7 +359,7 @@ DenseStorageBase<Derived>::setConstant(int rows, int cols, const Scalar& value)
* \sa CwiseNullaryOp * \sa CwiseNullaryOp
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high, int size) EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high, Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return derived() = Derived::NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size)); return derived() = Derived::NullaryExpr(size, ei_linspaced_op<Scalar,false>(low,high,size));
@ -379,11 +379,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low,
* Example: \include MatrixBase_zero_int_int.cpp * Example: \include MatrixBase_zero_int_int.cpp
* Output: \verbinclude MatrixBase_zero_int_int.out * Output: \verbinclude MatrixBase_zero_int_int.out
* *
* \sa Zero(), Zero(int) * \sa Zero(), Zero(Index)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero(int rows, int cols) DenseBase<Derived>::Zero(Index rows, Index cols)
{ {
return Constant(rows, cols, Scalar(0)); return Constant(rows, cols, Scalar(0));
} }
@ -402,11 +402,11 @@ DenseBase<Derived>::Zero(int rows, int cols)
* Example: \include MatrixBase_zero_int.cpp * Example: \include MatrixBase_zero_int.cpp
* Output: \verbinclude MatrixBase_zero_int.out * Output: \verbinclude MatrixBase_zero_int.out
* *
* \sa Zero(), Zero(int,int) * \sa Zero(), Zero(Index,Index)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Zero(int size) DenseBase<Derived>::Zero(Index size)
{ {
return Constant(size, Scalar(0)); return Constant(size, Scalar(0));
} }
@ -419,7 +419,7 @@ DenseBase<Derived>::Zero(int size)
* Example: \include MatrixBase_zero.cpp * Example: \include MatrixBase_zero.cpp
* Output: \verbinclude MatrixBase_zero.out * Output: \verbinclude MatrixBase_zero.out
* *
* \sa Zero(int), Zero(int,int) * \sa Zero(Index), Zero(Index,Index)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
@ -439,8 +439,8 @@ DenseBase<Derived>::Zero()
template<typename Derived> template<typename Derived>
bool DenseBase<Derived>::isZero(RealScalar prec) const bool DenseBase<Derived>::isZero(RealScalar prec) const
{ {
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
for(int i = 0; i < rows(); ++i) for(Index i = 0; i < rows(); ++i)
if(!ei_isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec)) if(!ei_isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec))
return false; return false;
return true; return true;
@ -466,11 +466,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
* Example: \include Matrix_setZero_int.cpp * Example: \include Matrix_setZero_int.cpp
* Output: \verbinclude Matrix_setZero_int.out * Output: \verbinclude Matrix_setZero_int.out
* *
* \sa DenseBase::setZero(), setZero(int,int), class CwiseNullaryOp, DenseBase::Zero() * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_STRONG_INLINE Derived&
DenseStorageBase<Derived>::setZero(int size) DenseStorageBase<Derived>::setZero(Index size)
{ {
resize(size); resize(size);
return setConstant(Scalar(0)); return setConstant(Scalar(0));
@ -484,11 +484,11 @@ DenseStorageBase<Derived>::setZero(int size)
* Example: \include Matrix_setZero_int_int.cpp * Example: \include Matrix_setZero_int_int.cpp
* Output: \verbinclude Matrix_setZero_int_int.out * Output: \verbinclude Matrix_setZero_int_int.out
* *
* \sa DenseBase::setZero(), setZero(int), class CwiseNullaryOp, DenseBase::Zero() * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_STRONG_INLINE Derived&
DenseStorageBase<Derived>::setZero(int rows, int cols) DenseStorageBase<Derived>::setZero(Index rows, Index cols)
{ {
resize(rows, cols); resize(rows, cols);
return setConstant(Scalar(0)); return setConstant(Scalar(0));
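The setConstant(Index, ...), setZero(Index) and setZero(Index, Index) overloads above resize first and then fill; a short sketch (standard <Eigen/Dense> assumed):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd m;          // empty
      m.setZero(3, 4);            // resizes to 3x4, then fills with 0
      Eigen::VectorXd v;
      v.setConstant(5, 2.5);      // resizes to length 5, then fills with 2.5
      std::cout << m << "\n\n" << v.transpose() << std::endl;
      return 0;
    }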
@ -508,11 +508,11 @@ DenseStorageBase<Derived>::setZero(int rows, int cols)
* Example: \include MatrixBase_ones_int_int.cpp * Example: \include MatrixBase_ones_int_int.cpp
* Output: \verbinclude MatrixBase_ones_int_int.out * Output: \verbinclude MatrixBase_ones_int_int.out
* *
* \sa Ones(), Ones(int), isOnes(), class Ones * \sa Ones(), Ones(Index), isOnes(), class Ones
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones(int rows, int cols) DenseBase<Derived>::Ones(Index rows, Index cols)
{ {
return Constant(rows, cols, Scalar(1)); return Constant(rows, cols, Scalar(1));
} }
@ -531,11 +531,11 @@ DenseBase<Derived>::Ones(int rows, int cols)
* Example: \include MatrixBase_ones_int.cpp * Example: \include MatrixBase_ones_int.cpp
* Output: \verbinclude MatrixBase_ones_int.out * Output: \verbinclude MatrixBase_ones_int.out
* *
* \sa Ones(), Ones(int,int), isOnes(), class Ones * \sa Ones(), Ones(Index,Index), isOnes(), class Ones
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
DenseBase<Derived>::Ones(int size) DenseBase<Derived>::Ones(Index size)
{ {
return Constant(size, Scalar(1)); return Constant(size, Scalar(1));
} }
@ -548,7 +548,7 @@ DenseBase<Derived>::Ones(int size)
* Example: \include MatrixBase_ones.cpp * Example: \include MatrixBase_ones.cpp
* Output: \verbinclude MatrixBase_ones.out * Output: \verbinclude MatrixBase_ones.out
* *
* \sa Ones(int), Ones(int,int), isOnes(), class Ones * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
@ -592,11 +592,11 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
* Example: \include Matrix_setOnes_int.cpp * Example: \include Matrix_setOnes_int.cpp
* Output: \verbinclude Matrix_setOnes_int.out * Output: \verbinclude Matrix_setOnes_int.out
* *
* \sa MatrixBase::setOnes(), setOnes(int,int), class CwiseNullaryOp, MatrixBase::Ones() * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_STRONG_INLINE Derived&
DenseStorageBase<Derived>::setOnes(int size) DenseStorageBase<Derived>::setOnes(Index size)
{ {
resize(size); resize(size);
return setConstant(Scalar(1)); return setConstant(Scalar(1));
@ -610,11 +610,11 @@ DenseStorageBase<Derived>::setOnes(int size)
* Example: \include Matrix_setOnes_int_int.cpp * Example: \include Matrix_setOnes_int_int.cpp
* Output: \verbinclude Matrix_setOnes_int_int.out * Output: \verbinclude Matrix_setOnes_int_int.out
* *
* \sa MatrixBase::setOnes(), setOnes(int), class CwiseNullaryOp, MatrixBase::Ones() * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& EIGEN_STRONG_INLINE Derived&
DenseStorageBase<Derived>::setOnes(int rows, int cols) DenseStorageBase<Derived>::setOnes(Index rows, Index cols)
{ {
resize(rows, cols); resize(rows, cols);
return setConstant(Scalar(1)); return setConstant(Scalar(1));
@ -638,7 +638,7 @@ DenseStorageBase<Derived>::setOnes(int rows, int cols)
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity(int rows, int cols) MatrixBase<Derived>::Identity(Index rows, Index cols)
{ {
return DenseBase<Derived>::NullaryExpr(rows, cols, ei_scalar_identity_op<Scalar>()); return DenseBase<Derived>::NullaryExpr(rows, cols, ei_scalar_identity_op<Scalar>());
} }
@ -651,7 +651,7 @@ MatrixBase<Derived>::Identity(int rows, int cols)
* Example: \include MatrixBase_identity.cpp * Example: \include MatrixBase_identity.cpp
* Output: \verbinclude MatrixBase_identity.out * Output: \verbinclude MatrixBase_identity.out
* *
* \sa Identity(int,int), setIdentity(), isIdentity() * \sa Identity(Index,Index), setIdentity(), isIdentity()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
@ -668,15 +668,15 @@ MatrixBase<Derived>::Identity()
* Example: \include MatrixBase_isIdentity.cpp * Example: \include MatrixBase_isIdentity.cpp
* Output: \verbinclude MatrixBase_isIdentity.out * Output: \verbinclude MatrixBase_isIdentity.out
* *
* \sa class CwiseNullaryOp, Identity(), Identity(int,int), setIdentity() * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
*/ */
template<typename Derived> template<typename Derived>
bool MatrixBase<Derived>::isIdentity bool MatrixBase<Derived>::isIdentity
(RealScalar prec) const (RealScalar prec) const
{ {
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
{ {
for(int i = 0; i < rows(); ++i) for(Index i = 0; i < rows(); ++i)
{ {
if(i == j) if(i == j)
{ {
@ -705,11 +705,12 @@ struct ei_setIdentity_impl
template<typename Derived> template<typename Derived>
struct ei_setIdentity_impl<Derived, true> struct ei_setIdentity_impl<Derived, true>
{ {
typedef typename Derived::Index Index;
static EIGEN_STRONG_INLINE Derived& run(Derived& m) static EIGEN_STRONG_INLINE Derived& run(Derived& m)
{ {
m.setZero(); m.setZero();
const int size = std::min(m.rows(), m.cols()); const Index size = std::min(m.rows(), m.cols());
for(int i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
return m; return m;
} }
}; };
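For illustration only, here is roughly what the specialized run() above amounts to when written against the public API; a sketch assuming a dynamic-size MatrixXd, with the size and loop counter carried as Index:

#include <Eigen/Dense>
#include <algorithm>

void makeIdentityLike(Eigen::MatrixXd& m)
{
  typedef Eigen::MatrixXd::Index Index;
  m.setZero();
  const Index size = std::min(m.rows(), m.cols());
  for(Index i = 0; i < size; ++i)
    m(i, i) = 1.0;                      // same effect as m.setIdentity()
}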
@ -719,7 +720,7 @@ struct ei_setIdentity_impl<Derived, true>
* Example: \include MatrixBase_setIdentity.cpp * Example: \include MatrixBase_setIdentity.cpp
* Output: \verbinclude MatrixBase_setIdentity.out * Output: \verbinclude MatrixBase_setIdentity.out
* *
* \sa class CwiseNullaryOp, Identity(), Identity(int,int), isIdentity() * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity() EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
@ -738,7 +739,7 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(int rows, int cols) EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
{ {
derived().resize(rows, cols); derived().resize(rows, cols);
return setIdentity(); return setIdentity();
@ -748,10 +749,10 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(int rows, int cols
* *
* \only_for_vectors * \only_for_vectors
* *
* \sa MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int size, int i) EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index size, Index i)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(size,size), i); return BasisReturnType(SquareMatrixType::Identity(size,size), i);
@ -763,10 +764,10 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* *
* This variant is for fixed-size vector only. * This variant is for fixed-size vector only.
* *
* \sa MatrixBase::Unit(int,int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int i) EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(),i); return BasisReturnType(SquareMatrixType::Identity(),i);
@ -776,7 +777,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* *
* \only_for_vectors * \only_for_vectors
* *
* \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX() EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
@ -786,7 +787,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* *
* \only_for_vectors * \only_for_vectors
* *
* \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY() EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
@ -796,7 +797,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* *
* \only_for_vectors * \only_for_vectors
* *
* \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ() EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
@ -806,7 +807,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
* *
* \only_for_vectors * \only_for_vectors
* *
* \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/ */
template<typename Derived> template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW() EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
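A hedged example (not from the diff) of the unit-vector helpers whose declarations appear above; the size and basis-index parameters are now Index:

#include <Eigen/Dense>

void unitVectors()
{
  typedef Eigen::VectorXd::Index Index;
  Index n = 5, k = 2;
  Eigen::VectorXd e = Eigen::VectorXd::Unit(n, k);   // Unit(Index,Index): k-th canonical basis vector of size n
  Eigen::Vector3d x = Eigen::Vector3d::UnitX();      // fixed-size variants are used as before
  (void)e; (void)x;
}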
@ -76,8 +76,8 @@ class CwiseUnaryOp : ei_no_assignment_operator,
inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
: m_xpr(xpr), m_functor(func) {} : m_xpr(xpr), m_functor(func) {}
EIGEN_STRONG_INLINE int rows() const { return m_xpr.rows(); } EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_xpr.cols(); } EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
/** \returns the functor representing the unary operation */ /** \returns the functor representing the unary operation */
const UnaryOp& functor() const { return m_functor; } const UnaryOp& functor() const { return m_functor; }
@ -100,32 +100,31 @@ class CwiseUnaryOp : ei_no_assignment_operator,
template<typename UnaryOp, typename XprType> template<typename UnaryOp, typename XprType>
class CwiseUnaryOpImpl<UnaryOp,XprType,Dense> class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
: public ei_dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type : public ei_dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
{ {
typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
public: public:
typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
typedef typename ei_dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base; typedef typename ei_dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived) EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{ {
return derived().functor()(derived().nestedExpression().coeff(row, col)); return derived().functor()(derived().nestedExpression().coeff(row, col));
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{ {
return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col)); return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col));
} }
EIGEN_STRONG_INLINE const Scalar coeff(int index) const EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{ {
return derived().functor()(derived().nestedExpression().coeff(index)); return derived().functor()(derived().nestedExpression().coeff(index));
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int index) const EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{ {
return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index)); return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
} }
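A small sketch, not part of the commit, of coefficient access on a unary expression; -m is assumed to be a CwiseUnaryOp (as in Eigen's expression templates), and the loop counters use Index rather than int:

#include <Eigen/Dense>

float sumOfNegated(const Eigen::MatrixXf& m)
{
  typedef Eigen::MatrixXf::Index Index;
  float s = 0.f;
  for(Index j = 0; j < m.cols(); ++j)
    for(Index i = 0; i < m.rows(); ++i)
      s += (-m).coeff(i, j);            // CwiseUnaryOpImpl::coeff(Index,Index)
  return s;
}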
@ -74,8 +74,8 @@ class CwiseUnaryView : ei_no_assignment_operator,
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)
EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); } EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); } EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
/** \returns the functor representing unary operation */ /** \returns the functor representing unary operation */
const ViewOp& functor() const { return m_functor; } const ViewOp& functor() const { return m_functor; }
@ -98,40 +98,39 @@ template<typename ViewOp, typename MatrixType>
class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense> class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
: public ei_dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type : public ei_dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type
{ {
typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
public: public:
typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
typedef typename ei_dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base; typedef typename ei_dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;
inline int innerStride() const EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
inline Index innerStride() const
{ {
return derived().nestedExpression().innerStride() * sizeof(typename ei_traits<MatrixType>::Scalar) / sizeof(Scalar); return derived().nestedExpression().innerStride() * sizeof(typename ei_traits<MatrixType>::Scalar) / sizeof(Scalar);
} }
inline int outerStride() const inline Index outerStride() const
{ {
return derived().nestedExpression().outerStride(); return derived().nestedExpression().outerStride();
} }
EIGEN_DENSE_PUBLIC_INTERFACE(Derived) EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
EIGEN_STRONG_INLINE CoeffReturnType coeff(int row, int col) const
{ {
return derived().functor()(derived().nestedExpression().coeff(row, col)); return derived().functor()(derived().nestedExpression().coeff(row, col));
} }
EIGEN_STRONG_INLINE CoeffReturnType coeff(int index) const EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{ {
return derived().functor()(derived().nestedExpression().coeff(index)); return derived().functor()(derived().nestedExpression().coeff(index));
} }
EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col) EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
{ {
return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col)); return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col));
} }
EIGEN_STRONG_INLINE Scalar& coeffRef(int index) EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{ {
return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index)); return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index));
} }
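For orientation only: assuming .real() on a non-const complex matrix is implemented as a CwiseUnaryView (as in current Eigen), its innerStride() is the nested stride scaled by sizeof(std::complex<double>)/sizeof(double), exactly as computed above. A hedged sketch:

#include <Eigen/Dense>

void realViewStride(Eigen::MatrixXcd& m)
{
  typedef Eigen::MatrixXcd::Index Index;
  Index s = m.real().innerStride();     // 1 * sizeof(std::complex<double>)/sizeof(double) == 2
  (void)s;
}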
@ -50,8 +50,12 @@ template<typename Derived> class DenseBase
class InnerIterator; class InnerIterator;
typedef typename ei_traits<Derived>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar; typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar; typedef typename ei_packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseCoeffsBase<Derived> Base; typedef DenseCoeffsBase<Derived> Base;
using Base::derived; using Base::derived;
using Base::const_cast_derived; using Base::const_cast_derived;
@ -168,19 +172,9 @@ template<typename Derived> class DenseBase
OuterStrideAtCompileTime = ei_outer_stride_at_compile_time<Derived>::ret OuterStrideAtCompileTime = ei_outer_stride_at_compile_time<Derived>::ret
}; };
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is the "real scalar" type; if the \a Scalar type is already real numbers
* (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
* \a Scalar is \a std::complex<T> then RealScalar is \a T.
*
* \sa class NumTraits
*/
typedef typename NumTraits<Scalar>::Real RealScalar;
#endif // not EIGEN_PARSED_BY_DOXYGEN
/** \returns the number of nonzero coefficients which is in practice the number /** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */ * of stored coefficients. */
inline int nonZeros() const { return size(); } inline Index nonZeros() const { return size(); }
/** \returns true if either the number of rows or the number of columns is equal to 1. /** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns * In other words, this function returns
* \code rows()==1 || cols()==1 \endcode * \code rows()==1 || cols()==1 \endcode
@ -191,7 +185,7 @@ template<typename Derived> class DenseBase
* \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
* with respect to the storage order, i.e., the number of columns for a column-major matrix, * with respect to the storage order, i.e., the number of columns for a column-major matrix,
* and the number of rows for a row-major matrix. */ * and the number of rows for a row-major matrix. */
int outerSize() const Index outerSize() const
{ {
return IsVectorAtCompileTime ? 1 return IsVectorAtCompileTime ? 1
: int(IsRowMajor) ? this->rows() : this->cols(); : int(IsRowMajor) ? this->rows() : this->cols();
@ -202,7 +196,7 @@ template<typename Derived> class DenseBase
* \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
* with respect to the storage order, i.e., the number of rows for a column-major matrix, * with respect to the storage order, i.e., the number of rows for a column-major matrix,
* and the number of columns for a row-major matrix. */ * and the number of columns for a row-major matrix. */
int innerSize() const Index innerSize() const
{ {
return IsVectorAtCompileTime ? this->size() return IsVectorAtCompileTime ? this->size()
: int(IsRowMajor) ? this->cols() : this->rows(); : int(IsRowMajor) ? this->cols() : this->rows();
@ -212,7 +206,7 @@ template<typename Derived> class DenseBase
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else. * nothing else.
*/ */
void resize(int size) void resize(Index size)
{ {
EIGEN_ONLY_USED_FOR_DEBUG(size); EIGEN_ONLY_USED_FOR_DEBUG(size);
ei_assert(size == this->size() ei_assert(size == this->size()
@ -222,7 +216,7 @@ template<typename Derived> class DenseBase
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else. * nothing else.
*/ */
void resize(int rows, int cols) void resize(Index rows, Index cols)
{ {
EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols); EIGEN_ONLY_USED_FOR_DEBUG(cols);
@ -301,41 +295,41 @@ template<typename Derived> class DenseBase
public: public:
#endif #endif
RowXpr row(int i); RowXpr row(Index i);
const RowXpr row(int i) const; const RowXpr row(Index i) const;
ColXpr col(int i); ColXpr col(Index i);
const ColXpr col(int i) const; const ColXpr col(Index i) const;
Block<Derived> block(int startRow, int startCol, int blockRows, int blockCols); Block<Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols);
const Block<Derived> block(int startRow, int startCol, int blockRows, int blockCols) const; const Block<Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols) const;
VectorBlock<Derived> segment(int start, int size); VectorBlock<Derived> segment(Index start, Index size);
const VectorBlock<Derived> segment(int start, int size) const; const VectorBlock<Derived> segment(Index start, Index size) const;
VectorBlock<Derived> head(int size); VectorBlock<Derived> head(Index size);
const VectorBlock<Derived> head(int size) const; const VectorBlock<Derived> head(Index size) const;
VectorBlock<Derived> tail(int size); VectorBlock<Derived> tail(Index size);
const VectorBlock<Derived> tail(int size) const; const VectorBlock<Derived> tail(Index size) const;
Block<Derived> topLeftCorner(int cRows, int cCols); Block<Derived> topLeftCorner(Index cRows, Index cCols);
const Block<Derived> topLeftCorner(int cRows, int cCols) const; const Block<Derived> topLeftCorner(Index cRows, Index cCols) const;
Block<Derived> topRightCorner(int cRows, int cCols); Block<Derived> topRightCorner(Index cRows, Index cCols);
const Block<Derived> topRightCorner(int cRows, int cCols) const; const Block<Derived> topRightCorner(Index cRows, Index cCols) const;
Block<Derived> bottomLeftCorner(int cRows, int cCols); Block<Derived> bottomLeftCorner(Index cRows, Index cCols);
const Block<Derived> bottomLeftCorner(int cRows, int cCols) const; const Block<Derived> bottomLeftCorner(Index cRows, Index cCols) const;
Block<Derived> bottomRightCorner(int cRows, int cCols); Block<Derived> bottomRightCorner(Index cRows, Index cCols);
const Block<Derived> bottomRightCorner(int cRows, int cCols) const; const Block<Derived> bottomRightCorner(Index cRows, Index cCols) const;
RowsBlockXpr topRows(int n); RowsBlockXpr topRows(Index n);
const RowsBlockXpr topRows(int n) const; const RowsBlockXpr topRows(Index n) const;
RowsBlockXpr bottomRows(int n); RowsBlockXpr bottomRows(Index n);
const RowsBlockXpr bottomRows(int n) const; const RowsBlockXpr bottomRows(Index n) const;
ColsBlockXpr leftCols(int n); ColsBlockXpr leftCols(Index n);
const ColsBlockXpr leftCols(int n) const; const ColsBlockXpr leftCols(Index n) const;
ColsBlockXpr rightCols(int n); ColsBlockXpr rightCols(Index n);
const ColsBlockXpr rightCols(int n) const; const ColsBlockXpr rightCols(Index n) const;
template<int CRows, int CCols> Block<Derived, CRows, CCols> topLeftCorner(); template<int CRows, int CCols> Block<Derived, CRows, CCols> topLeftCorner();
template<int CRows, int CCols> const Block<Derived, CRows, CCols> topLeftCorner() const; template<int CRows, int CCols> const Block<Derived, CRows, CCols> topLeftCorner() const;
@ -356,9 +350,9 @@ template<typename Derived> class DenseBase
template<int NCols> const typename NColsBlockXpr<NCols>::Type rightCols() const; template<int NCols> const typename NColsBlockXpr<NCols>::Type rightCols() const;
template<int BlockRows, int BlockCols> template<int BlockRows, int BlockCols>
Block<Derived, BlockRows, BlockCols> block(int startRow, int startCol); Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol);
template<int BlockRows, int BlockCols> template<int BlockRows, int BlockCols>
const Block<Derived, BlockRows, BlockCols> block(int startRow, int startCol) const; const Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol) const;
template<int Size> VectorBlock<Derived,Size> head(void); template<int Size> VectorBlock<Derived,Size> head(void);
template<int Size> const VectorBlock<Derived,Size> head() const; template<int Size> const VectorBlock<Derived,Size> head() const;
@ -366,8 +360,8 @@ template<typename Derived> class DenseBase
template<int Size> VectorBlock<Derived,Size> tail(); template<int Size> VectorBlock<Derived,Size> tail();
template<int Size> const VectorBlock<Derived,Size> tail() const; template<int Size> const VectorBlock<Derived,Size> tail() const;
template<int Size> VectorBlock<Derived,Size> segment(int start); template<int Size> VectorBlock<Derived,Size> segment(Index start);
template<int Size> const VectorBlock<Derived,Size> segment(int start) const; template<int Size> const VectorBlock<Derived,Size> segment(Index start) const;
Diagonal<Derived,0> diagonal(); Diagonal<Derived,0> diagonal();
const Diagonal<Derived,0> diagonal() const; const Diagonal<Derived,0> diagonal() const;
@ -375,8 +369,8 @@ template<typename Derived> class DenseBase
template<int Index> Diagonal<Derived,Index> diagonal(); template<int Index> Diagonal<Derived,Index> diagonal();
template<int Index> const Diagonal<Derived,Index> diagonal() const; template<int Index> const Diagonal<Derived,Index> diagonal() const;
Diagonal<Derived, Dynamic> diagonal(int index); Diagonal<Derived, Dynamic> diagonal(Index index);
const Diagonal<Derived, Dynamic> diagonal(int index) const; const Diagonal<Derived, Dynamic> diagonal(Index index) const;
template<unsigned int Mode> TriangularView<Derived, Mode> part(); template<unsigned int Mode> TriangularView<Derived, Mode> part();
template<unsigned int Mode> const TriangularView<Derived, Mode> part() const; template<unsigned int Mode> const TriangularView<Derived, Mode> part() const;
@ -388,37 +382,37 @@ template<typename Derived> class DenseBase
template<unsigned int UpLo> const SelfAdjointView<Derived, UpLo> selfadjointView() const; template<unsigned int UpLo> const SelfAdjointView<Derived, UpLo> selfadjointView() const;
static const ConstantReturnType static const ConstantReturnType
Constant(int rows, int cols, const Scalar& value); Constant(Index rows, Index cols, const Scalar& value);
static const ConstantReturnType static const ConstantReturnType
Constant(int size, const Scalar& value); Constant(Index size, const Scalar& value);
static const ConstantReturnType static const ConstantReturnType
Constant(const Scalar& value); Constant(const Scalar& value);
static const SequentialLinSpacedReturnType static const SequentialLinSpacedReturnType
LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, int size); LinSpaced(Sequential_t, const Scalar& low, const Scalar& high, Index size);
static const RandomAccessLinSpacedReturnType static const RandomAccessLinSpacedReturnType
LinSpaced(const Scalar& low, const Scalar& high, int size); LinSpaced(const Scalar& low, const Scalar& high, Index size);
template<typename CustomNullaryOp> template<typename CustomNullaryOp>
static const CwiseNullaryOp<CustomNullaryOp, Derived> static const CwiseNullaryOp<CustomNullaryOp, Derived>
NullaryExpr(int rows, int cols, const CustomNullaryOp& func); NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
template<typename CustomNullaryOp> template<typename CustomNullaryOp>
static const CwiseNullaryOp<CustomNullaryOp, Derived> static const CwiseNullaryOp<CustomNullaryOp, Derived>
NullaryExpr(int size, const CustomNullaryOp& func); NullaryExpr(Index size, const CustomNullaryOp& func);
template<typename CustomNullaryOp> template<typename CustomNullaryOp>
static const CwiseNullaryOp<CustomNullaryOp, Derived> static const CwiseNullaryOp<CustomNullaryOp, Derived>
NullaryExpr(const CustomNullaryOp& func); NullaryExpr(const CustomNullaryOp& func);
static const ConstantReturnType Zero(int rows, int cols); static const ConstantReturnType Zero(Index rows, Index cols);
static const ConstantReturnType Zero(int size); static const ConstantReturnType Zero(Index size);
static const ConstantReturnType Zero(); static const ConstantReturnType Zero();
static const ConstantReturnType Ones(int rows, int cols); static const ConstantReturnType Ones(Index rows, Index cols);
static const ConstantReturnType Ones(int size); static const ConstantReturnType Ones(Index size);
static const ConstantReturnType Ones(); static const ConstantReturnType Ones();
void fill(const Scalar& value); void fill(const Scalar& value);
Derived& setConstant(const Scalar& value); Derived& setConstant(const Scalar& value);
Derived& setLinSpaced(const Scalar& low, const Scalar& high, int size); Derived& setLinSpaced(const Scalar& low, const Scalar& high, Index size);
Derived& setZero(); Derived& setZero();
Derived& setOnes(); Derived& setOnes();
Derived& setRandom(); Derived& setRandom();
@ -471,11 +465,11 @@ template<typename Derived> class DenseBase
typename ei_traits<Derived>::Scalar minCoeff() const; typename ei_traits<Derived>::Scalar minCoeff() const;
typename ei_traits<Derived>::Scalar maxCoeff() const; typename ei_traits<Derived>::Scalar maxCoeff() const;
typename ei_traits<Derived>::Scalar minCoeff(int* row, int* col) const; typename ei_traits<Derived>::Scalar minCoeff(Index* row, Index* col) const;
typename ei_traits<Derived>::Scalar maxCoeff(int* row, int* col) const; typename ei_traits<Derived>::Scalar maxCoeff(Index* row, Index* col) const;
typename ei_traits<Derived>::Scalar minCoeff(int* index) const; typename ei_traits<Derived>::Scalar minCoeff(Index* index) const;
typename ei_traits<Derived>::Scalar maxCoeff(int* index) const; typename ei_traits<Derived>::Scalar maxCoeff(Index* index) const;
template<typename BinaryOp> template<typename BinaryOp>
typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
@ -490,15 +484,15 @@ template<typename Derived> class DenseBase
bool all(void) const; bool all(void) const;
bool any(void) const; bool any(void) const;
int count() const; Index count() const;
const VectorwiseOp<Derived,Horizontal> rowwise() const; const VectorwiseOp<Derived,Horizontal> rowwise() const;
VectorwiseOp<Derived,Horizontal> rowwise(); VectorwiseOp<Derived,Horizontal> rowwise();
const VectorwiseOp<Derived,Vertical> colwise() const; const VectorwiseOp<Derived,Vertical> colwise() const;
VectorwiseOp<Derived,Vertical> colwise(); VectorwiseOp<Derived,Vertical> colwise();
static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int rows, int cols); static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(Index rows, Index cols);
static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int size); static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(Index size);
static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(); static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random();
template<typename ThenDerived,typename ElseDerived> template<typename ThenDerived,typename ElseDerived>
@ -518,7 +512,7 @@ template<typename Derived> class DenseBase
template<int RowFactor, int ColFactor> template<int RowFactor, int ColFactor>
const Replicate<Derived,RowFactor,ColFactor> replicate() const; const Replicate<Derived,RowFactor,ColFactor> replicate() const;
const Replicate<Derived,Dynamic,Dynamic> replicate(int rowFactor,int colFactor) const; const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFactor,Index colFactor) const;
Eigen::Reverse<Derived, BothDirections> reverse(); Eigen::Reverse<Derived, BothDirections> reverse();
const Eigen::Reverse<Derived, BothDirections> reverse() const; const Eigen::Reverse<Derived, BothDirections> reverse() const;
@ -526,8 +520,8 @@ template<typename Derived> class DenseBase
#ifdef EIGEN2_SUPPORT #ifdef EIGEN2_SUPPORT
Block<Derived> corner(CornerType type, int cRows, int cCols); Block<Derived> corner(CornerType type, Index cRows, Index cCols);
const Block<Derived> corner(CornerType type, int cRows, int cCols) const; const Block<Derived> corner(CornerType type, Index cRows, Index cCols) const;
template<int CRows, int CCols> template<int CRows, int CCols>
Block<Derived, CRows, CCols> corner(CornerType type); Block<Derived, CRows, CCols> corner(CornerType type);
template<int CRows, int CCols> template<int CRows, int CCols>
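A brief sketch, not part of the diff, of the minCoeff()/maxCoeff() overloads declared above; positions are now reported through Index* instead of int*, and the single-index overload is used on a vector here:

#include <Eigen/Dense>

void extrema(const Eigen::MatrixXd& m, const Eigen::VectorXd& v)
{
  Eigen::MatrixXd::Index r, c;
  Eigen::VectorXd::Index i;
  double lo = m.minCoeff(&r, &c);       // minCoeff(Index*, Index*)
  double hi = v.maxCoeff(&i);           // maxCoeff(Index*)
  (void)lo; (void)hi;
}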
@ -29,7 +29,10 @@ template<typename Derived, bool EnableDirectAccessAPI>
class DenseCoeffsBase : public EigenBase<Derived> class DenseCoeffsBase : public EigenBase<Derived>
{ {
public: public:
typedef typename ei_traits<Derived>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar; typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
typedef typename ei_meta_if<ei_has_direct_access<Derived>::ret, const Scalar&, Scalar>::ret CoeffReturnType; typedef typename ei_meta_if<ei_has_direct_access<Derived>::ret, const Scalar&, Scalar>::ret CoeffReturnType;
typedef EigenBase<Derived> Base; typedef EigenBase<Derived> Base;
@ -38,7 +41,7 @@ class DenseCoeffsBase : public EigenBase<Derived>
using Base::size; using Base::size;
using Base::derived; using Base::derived;
EIGEN_STRONG_INLINE int rowIndexByOuterInner(int outer, int inner) const EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
{ {
return int(Derived::RowsAtCompileTime) == 1 ? 0 return int(Derived::RowsAtCompileTime) == 1 ? 0
: int(Derived::ColsAtCompileTime) == 1 ? inner : int(Derived::ColsAtCompileTime) == 1 ? inner
@ -46,7 +49,7 @@ class DenseCoeffsBase : public EigenBase<Derived>
: inner; : inner;
} }
EIGEN_STRONG_INLINE int colIndexByOuterInner(int outer, int inner) const EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
{ {
return int(Derived::ColsAtCompileTime) == 1 ? 0 return int(Derived::ColsAtCompileTime) == 1 ? 0
: int(Derived::RowsAtCompileTime) == 1 ? inner : int(Derived::RowsAtCompileTime) == 1 ? inner
@ -55,27 +58,27 @@ class DenseCoeffsBase : public EigenBase<Derived>
} }
/** Short version: don't use this function, use /** Short version: don't use this function, use
* \link operator()(int,int) const \endlink instead. * \link operator()(Index,Index) const \endlink instead.
* *
* Long version: this function is similar to * Long version: this function is similar to
* \link operator()(int,int) const \endlink, but without the assertion. * \link operator()(Index,Index) const \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing * Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the * repeated coefficient access. Only use this when it is guaranteed that the
* parameters \a row and \a col are in range. * parameters \a row and \a col are in range.
* *
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
* function equivalent to \link operator()(int,int) const \endlink. * function equivalent to \link operator()(Index,Index) const \endlink.
* *
* \sa operator()(int,int) const, coeffRef(int,int), coeff(int) const * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
*/ */
EIGEN_STRONG_INLINE const CoeffReturnType coeff(int row, int col) const EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
{ {
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
return derived().coeff(row, col); return derived().coeff(row, col);
} }
EIGEN_STRONG_INLINE const CoeffReturnType coeffByOuterInner(int outer, int inner) const EIGEN_STRONG_INLINE const CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
{ {
return coeff(rowIndexByOuterInner(outer, inner), return coeff(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner)); colIndexByOuterInner(outer, inner));
@ -83,9 +86,9 @@ class DenseCoeffsBase : public EigenBase<Derived>
/** \returns the coefficient at the given row and column. /** \returns the coefficient at the given row and column.
* *
* \sa operator()(int,int), operator[](int) * \sa operator()(Index,Index), operator[](Index)
*/ */
EIGEN_STRONG_INLINE const CoeffReturnType operator()(int row, int col) const EIGEN_STRONG_INLINE const CoeffReturnType operator()(Index row, Index col) const
{ {
ei_assert(row >= 0 && row < rows() ei_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
@ -93,22 +96,22 @@ class DenseCoeffsBase : public EigenBase<Derived>
} }
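To make the distinction drawn above concrete: operator()(Index,Index) range-checks through ei_assert, while coeff(Index,Index) is the unchecked fast path. A minimal sketch, assuming a MatrixXd:

#include <Eigen/Dense>
#include <algorithm>

double traceOf(const Eigen::MatrixXd& m)
{
  typedef Eigen::MatrixXd::Index Index;
  const Index n = std::min(m.rows(), m.cols());
  double t = 0.0;
  for(Index i = 0; i < n; ++i)
    t += m(i, i);                       // operator()(Index,Index) const: asserted access
  // m.coeff(i, i) would skip the assertion; only safe when i is known to be in range
  return t;
}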
/** Short version: don't use this function, use /** Short version: don't use this function, use
* \link operator[](int) const \endlink instead. * \link operator[](Index) const \endlink instead.
* *
* Long version: this function is similar to * Long version: this function is similar to
* \link operator[](int) const \endlink, but without the assertion. * \link operator[](Index) const \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing * Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the * repeated coefficient access. Only use this when it is guaranteed that the
* parameter \a index is in range. * parameter \a index is in range.
* *
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
* function equivalent to \link operator[](int) const \endlink. * function equivalent to \link operator[](Index) const \endlink.
* *
* \sa operator[](int) const, coeffRef(int), coeff(int,int) const * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
*/ */
EIGEN_STRONG_INLINE const CoeffReturnType EIGEN_STRONG_INLINE const CoeffReturnType
coeff(int index) const coeff(Index index) const
{ {
ei_internal_assert(index >= 0 && index < size()); ei_internal_assert(index >= 0 && index < size());
return derived().coeff(index); return derived().coeff(index);
@ -119,12 +122,12 @@ class DenseCoeffsBase : public EigenBase<Derived>
* *
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
* *
* \sa operator[](int), operator()(int,int) const, x() const, y() const, * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
* z() const, w() const * z() const, w() const
*/ */
EIGEN_STRONG_INLINE const CoeffReturnType EIGEN_STRONG_INLINE const CoeffReturnType
operator[](int index) const operator[](Index index) const
{ {
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
@ -134,16 +137,16 @@ class DenseCoeffsBase : public EigenBase<Derived>
/** \returns the coefficient at given index. /** \returns the coefficient at given index.
* *
* This is synonymous to operator[](int) const. * This is synonymous to operator[](Index) const.
* *
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
* *
* \sa operator[](int), operator()(int,int) const, x() const, y() const, * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
* z() const, w() const * z() const, w() const
*/ */
EIGEN_STRONG_INLINE const CoeffReturnType EIGEN_STRONG_INLINE const CoeffReturnType
operator()(int index) const operator()(Index index) const
{ {
ei_assert(index >= 0 && index < size()); ei_assert(index >= 0 && index < size());
return derived().coeff(index); return derived().coeff(index);
@ -180,7 +183,7 @@ class DenseCoeffsBase : public EigenBase<Derived>
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type
packet(int row, int col) const packet(Index row, Index col) const
{ {
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
@ -190,7 +193,7 @@ class DenseCoeffsBase : public EigenBase<Derived>
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type
packetByOuterInner(int outer, int inner) const packetByOuterInner(Index outer, Index inner) const
{ {
return packet<LoadMode>(rowIndexByOuterInner(outer, inner), return packet<LoadMode>(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner)); colIndexByOuterInner(outer, inner));
@ -207,7 +210,7 @@ class DenseCoeffsBase : public EigenBase<Derived>
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type EIGEN_STRONG_INLINE typename ei_packet_traits<Scalar>::type
packet(int index) const packet(Index index) const
{ {
ei_internal_assert(index >= 0 && index < size()); ei_internal_assert(index >= 0 && index < size());
return derived().template packet<LoadMode>(index); return derived().template packet<LoadMode>(index);
@ -240,8 +243,14 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
public: public:
typedef DenseCoeffsBase<Derived, false> Base; typedef DenseCoeffsBase<Derived, false> Base;
typedef typename ei_traits<Derived>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar; typedef typename ei_traits<Derived>::Scalar Scalar;
using typename Base::CoeffReturnType; typedef typename ei_packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename Base::CoeffReturnType CoeffReturnType;
using Base::coeff; using Base::coeff;
using Base::rows; using Base::rows;
using Base::cols; using Base::cols;
@ -257,20 +266,20 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
using Base::w; using Base::w;
/** Short version: don't use this function, use /** Short version: don't use this function, use
* \link operator()(int,int) \endlink instead. * \link operator()(Index,Index) \endlink instead.
* *
* Long version: this function is similar to * Long version: this function is similar to
* \link operator()(int,int) \endlink, but without the assertion. * \link operator()(Index,Index) \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing * Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the * repeated coefficient access. Only use this when it is guaranteed that the
* parameters \a row and \a col are in range. * parameters \a row and \a col are in range.
* *
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
* function equivalent to \link operator()(int,int) \endlink. * function equivalent to \link operator()(Index,Index) \endlink.
* *
* \sa operator()(int,int), coeff(int, int) const, coeffRef(int) * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
*/ */
EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col) EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
{ {
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
@ -278,7 +287,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
} }
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
coeffRefByOuterInner(int outer, int inner) coeffRefByOuterInner(Index outer, Index inner)
{ {
return coeffRef(rowIndexByOuterInner(outer, inner), return coeffRef(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner)); colIndexByOuterInner(outer, inner));
@ -286,11 +295,11 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
/** \returns a reference to the coefficient at the given row and column. /** \returns a reference to the coefficient at the given row and column.
* *
* \sa operator[](int) * \sa operator[](Index)
*/ */
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
operator()(int row, int col) operator()(Index row, Index col)
{ {
ei_assert(row >= 0 && row < rows() ei_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
@ -299,22 +308,22 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
/** Short version: don't use this function, use /** Short version: don't use this function, use
* \link operator[](int) \endlink instead. * \link operator[](Index) \endlink instead.
* *
* Long version: this function is similar to * Long version: this function is similar to
* \link operator[](int) \endlink, but without the assertion. * \link operator[](Index) \endlink, but without the assertion.
* Use this for limiting the performance cost of debugging code when doing * Use this for limiting the performance cost of debugging code when doing
* repeated coefficient access. Only use this when it is guaranteed that the * repeated coefficient access. Only use this when it is guaranteed that the
* parameters \a row and \a col are in range. * parameters \a row and \a col are in range.
* *
* If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
* function equivalent to \link operator[](int) \endlink. * function equivalent to \link operator[](Index) \endlink.
* *
* \sa operator[](int), coeff(int) const, coeffRef(int,int) * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
*/ */
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
coeffRef(int index) coeffRef(Index index)
{ {
ei_internal_assert(index >= 0 && index < size()); ei_internal_assert(index >= 0 && index < size());
return derived().coeffRef(index); return derived().coeffRef(index);
@ -324,11 +333,11 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
* *
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
* *
* \sa operator[](int) const, operator()(int,int), x(), y(), z(), w() * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
*/ */
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
operator[](int index) operator[](Index index)
{ {
EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
@ -338,15 +347,15 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
/** \returns a reference to the coefficient at given index. /** \returns a reference to the coefficient at given index.
* *
* This is synonymous to operator[](int). * This is synonymous to operator[](Index).
* *
* This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
* *
* \sa operator[](int) const, operator()(int,int), x(), y(), z(), w() * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
*/ */
EIGEN_STRONG_INLINE Scalar& EIGEN_STRONG_INLINE Scalar&
operator()(int index) operator()(Index index)
{ {
ei_assert(index >= 0 && index < size()); ei_assert(index >= 0 && index < size());
return derived().coeffRef(index); return derived().coeffRef(index);
@ -383,7 +392,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
template<int StoreMode> template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket EIGEN_STRONG_INLINE void writePacket
(int row, int col, const typename ei_packet_traits<Scalar>::type& x) (Index row, Index col, const typename ei_packet_traits<Scalar>::type& x)
{ {
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
@ -393,7 +402,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
template<int StoreMode> template<int StoreMode>
EIGEN_STRONG_INLINE void writePacketByOuterInner EIGEN_STRONG_INLINE void writePacketByOuterInner
(int outer, int inner, const typename ei_packet_traits<Scalar>::type& x) (Index outer, Index inner, const typename ei_packet_traits<Scalar>::type& x)
{ {
writePacket<StoreMode>(rowIndexByOuterInner(outer, inner), writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner),
@ -411,7 +420,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
template<int StoreMode> template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket EIGEN_STRONG_INLINE void writePacket
(int index, const typename ei_packet_traits<Scalar>::type& x) (Index index, const typename ei_packet_traits<Scalar>::type& x)
{ {
ei_internal_assert(index >= 0 && index < size()); ei_internal_assert(index >= 0 && index < size());
derived().template writePacket<StoreMode>(index,x); derived().template writePacket<StoreMode>(index,x);
@ -428,7 +437,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
*/ */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_STRONG_INLINE void copyCoeff(int row, int col, const DenseBase<OtherDerived>& other) EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{ {
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
@ -444,7 +453,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
*/ */
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_STRONG_INLINE void copyCoeff(int index, const DenseBase<OtherDerived>& other) EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{ {
ei_internal_assert(index >= 0 && index < size()); ei_internal_assert(index >= 0 && index < size());
derived().coeffRef(index) = other.derived().coeff(index); derived().coeffRef(index) = other.derived().coeff(index);
@ -452,10 +461,10 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
template<typename OtherDerived> template<typename OtherDerived>
EIGEN_STRONG_INLINE void copyCoeffByOuterInner(int outer, int inner, const DenseBase<OtherDerived>& other) EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
{ {
const int row = rowIndexByOuterInner(outer,inner); const Index row = rowIndexByOuterInner(outer,inner);
const int col = colIndexByOuterInner(outer,inner); const Index col = colIndexByOuterInner(outer,inner);
// derived() is important here: copyCoeff() may be reimplemented in Derived! // derived() is important here: copyCoeff() may be reimplemented in Derived!
derived().copyCoeff(row, col, other); derived().copyCoeff(row, col, other);
} }
@ -469,7 +478,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
*/ */
template<typename OtherDerived, int StoreMode, int LoadMode> template<typename OtherDerived, int StoreMode, int LoadMode>
EIGEN_STRONG_INLINE void copyPacket(int row, int col, const DenseBase<OtherDerived>& other) EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{ {
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols()); && col >= 0 && col < cols());
@ -486,7 +495,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
*/ */
template<typename OtherDerived, int StoreMode, int LoadMode> template<typename OtherDerived, int StoreMode, int LoadMode>
EIGEN_STRONG_INLINE void copyPacket(int index, const DenseBase<OtherDerived>& other) EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{ {
ei_internal_assert(index >= 0 && index < size()); ei_internal_assert(index >= 0 && index < size());
derived().template writePacket<StoreMode>(index, derived().template writePacket<StoreMode>(index,
@ -494,10 +503,10 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
} }
template<typename OtherDerived, int StoreMode, int LoadMode> template<typename OtherDerived, int StoreMode, int LoadMode>
EIGEN_STRONG_INLINE void copyPacketByOuterInner(int outer, int inner, const DenseBase<OtherDerived>& other) EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
{ {
const int row = rowIndexByOuterInner(outer,inner); const Index row = rowIndexByOuterInner(outer,inner);
const int col = colIndexByOuterInner(outer,inner); const Index col = colIndexByOuterInner(outer,inner);
// derived() is important here: copyCoeff() may be reimplemented in Derived! // derived() is important here: copyCoeff() may be reimplemented in Derived!
derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other); derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other);
} }
@ -507,7 +516,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
* *
* \sa outerStride(), rowStride(), colStride() * \sa outerStride(), rowStride(), colStride()
*/ */
inline int innerStride() const inline Index innerStride() const
{ {
return derived().innerStride(); return derived().innerStride();
} }
@ -517,12 +526,12 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
* *
* \sa innerStride(), rowStride(), colStride() * \sa innerStride(), rowStride(), colStride()
*/ */
inline int outerStride() const inline Index outerStride() const
{ {
return derived().outerStride(); return derived().outerStride();
} }
inline int stride() const inline Index stride() const
{ {
return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
} }
@ -531,7 +540,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
* *
* \sa innerStride(), outerStride(), colStride() * \sa innerStride(), outerStride(), colStride()
*/ */
inline int rowStride() const inline Index rowStride() const
{ {
return Derived::IsRowMajor ? outerStride() : innerStride(); return Derived::IsRowMajor ? outerStride() : innerStride();
} }
@ -540,7 +549,7 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
* *
* \sa innerStride(), outerStride(), rowStride() * \sa innerStride(), outerStride(), rowStride()
*/ */
inline int colStride() const inline Index colStride() const
{ {
return Derived::IsRowMajor ? innerStride() : outerStride(); return Derived::IsRowMajor ? innerStride() : outerStride();
} }
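A sketch (not in the diff) of the stride queries above on a plain column-major matrix without gaps; all four now return Index:

#include <Eigen/Dense>

void strideQueries(const Eigen::MatrixXf& m)
{
  typedef Eigen::MatrixXf::Index Index;
  Index is = m.innerStride();   // 1: consecutive coefficients within a column
  Index os = m.outerStride();   // m.rows(): distance between the starts of two columns
  Index rs = m.rowStride();     // equals innerStride() for a column-major matrix
  Index cs = m.colStride();     // equals outerStride() for a column-major matrix
  (void)is; (void)os; (void)rs; (void)cs;
}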
@ -549,14 +558,14 @@ class DenseCoeffsBase<Derived, true> : public DenseCoeffsBase<Derived, false>
template<typename Derived, bool JustReturnZero> template<typename Derived, bool JustReturnZero>
struct ei_first_aligned_impl struct ei_first_aligned_impl
{ {
inline static int run(const Derived&) inline static typename Derived::Index run(const Derived&)
{ return 0; } { return 0; }
}; };
template<typename Derived> template<typename Derived>
struct ei_first_aligned_impl<Derived, false> struct ei_first_aligned_impl<Derived, false>
{ {
inline static int run(const Derived& m) inline static typename Derived::Index run(const Derived& m)
{ {
return ei_first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); return ei_first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size());
} }
@ -568,7 +577,7 @@ struct ei_first_aligned_impl<Derived, false>
* documentation. * documentation.
*/ */
template<typename Derived> template<typename Derived>
inline static int ei_first_aligned(const Derived& m) inline static typename Derived::Index ei_first_aligned(const Derived& m)
{ {
return ei_first_aligned_impl return ei_first_aligned_impl
<Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)> <Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)>
@ -44,9 +44,13 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
public: public:
enum { Options = ei_traits<Derived>::Options }; enum { Options = ei_traits<Derived>::Options };
typedef typename ei_dense_xpr_base<Derived>::type Base; typedef typename ei_dense_xpr_base<Derived>::type Base;
typedef typename Base::PlainObject PlainObject;
typedef typename Base::Scalar Scalar; typedef typename ei_traits<Derived>::StorageKind StorageKind;
typedef typename Base::PacketScalar PacketScalar; typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
using Base::RowsAtCompileTime; using Base::RowsAtCompileTime;
using Base::ColsAtCompileTime; using Base::ColsAtCompileTime;
using Base::SizeAtCompileTime; using Base::SizeAtCompileTime;
@ -72,10 +76,10 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
Base& base() { return *static_cast<Base*>(this); } Base& base() { return *static_cast<Base*>(this); }
const Base& base() const { return *static_cast<const Base*>(this); } const Base& base() const { return *static_cast<const Base*>(this); }
EIGEN_STRONG_INLINE int rows() const { return m_storage.rows(); } EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_storage.cols(); } EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
EIGEN_STRONG_INLINE const Scalar& coeff(int row, int col) const EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const
{ {
if(Flags & RowMajorBit) if(Flags & RowMajorBit)
return m_storage.data()[col + row * m_storage.cols()]; return m_storage.data()[col + row * m_storage.cols()];
@ -83,12 +87,12 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
return m_storage.data()[row + col * m_storage.rows()]; return m_storage.data()[row + col * m_storage.rows()];
} }
EIGEN_STRONG_INLINE const Scalar& coeff(int index) const EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
{ {
return m_storage.data()[index]; return m_storage.data()[index];
} }
EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col) EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
{ {
if(Flags & RowMajorBit) if(Flags & RowMajorBit)
return m_storage.data()[col + row * m_storage.cols()]; return m_storage.data()[col + row * m_storage.cols()];
@ -96,13 +100,13 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
return m_storage.data()[row + col * m_storage.rows()]; return m_storage.data()[row + col * m_storage.rows()];
} }
EIGEN_STRONG_INLINE Scalar& coeffRef(int index) EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{ {
return m_storage.data()[index]; return m_storage.data()[index];
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{ {
return ei_ploadt<Scalar, LoadMode> return ei_ploadt<Scalar, LoadMode>
(m_storage.data() + (Flags & RowMajorBit (m_storage.data() + (Flags & RowMajorBit
@ -111,13 +115,13 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int index) const EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{ {
return ei_ploadt<Scalar, LoadMode>(m_storage.data() + index); return ei_ploadt<Scalar, LoadMode>(m_storage.data() + index);
} }
template<int StoreMode> template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket(int row, int col, const PacketScalar& x) EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x)
{ {
ei_pstoret<Scalar, PacketScalar, StoreMode> ei_pstoret<Scalar, PacketScalar, StoreMode>
(m_storage.data() + (Flags & RowMajorBit (m_storage.data() + (Flags & RowMajorBit
@ -126,7 +130,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
} }
template<int StoreMode> template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket(int index, const PacketScalar& x) EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x)
{ {
ei_pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x); ei_pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
} }
@ -143,7 +147,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
* *
* This method is intended for dynamic-size matrices, although it is legal to call it on any * This method is intended for dynamic-size matrices, although it is legal to call it on any
* matrix as long as fixed dimensions are left unchanged. If you only want to change the number * matrix as long as fixed dimensions are left unchanged. If you only want to change the number
* of rows and/or of columns, you can use resize(NoChange_t, int), resize(int, NoChange_t). * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).
* *
* If the current number of coefficients of \c *this exactly matches the * If the current number of coefficients of \c *this exactly matches the
* product \a rows * \a cols, then no memory allocation is performed and * product \a rows * \a cols, then no memory allocation is performed and
@ -153,12 +157,12 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
* Example: \include Matrix_resize_int_int.cpp * Example: \include Matrix_resize_int_int.cpp
* Output: \verbinclude Matrix_resize_int_int.out * Output: \verbinclude Matrix_resize_int_int.out
* *
* \sa resize(int) for vectors, resize(NoChange_t, int), resize(int, NoChange_t) * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
*/ */
inline void resize(int rows, int cols) inline void resize(Index rows, Index cols)
{ {
#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
int size = rows*cols; Index size = rows*cols;
bool size_changed = size != this->size(); bool size_changed = size != this->size();
m_storage.resize(size, rows, cols); m_storage.resize(size, rows, cols);
if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
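A small usage sketch of the resize() overload documented above (illustrative only, not part of the patch):

#include <Eigen/Dense>

void resize_sketch()
{
  Eigen::MatrixXd m(2, 3);
  m.setOnes();
  m.resize(4, 5);   // 2*3 != 4*5: reallocates, old coefficient values are lost
  m.resize(5, 4);   // 4*5 == 5*4: no reallocation, the buffer is merely reinterpreted
}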
@ -176,9 +180,9 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
* Example: \include Matrix_resize_int.cpp * Example: \include Matrix_resize_int.cpp
* Output: \verbinclude Matrix_resize_int.out * Output: \verbinclude Matrix_resize_int.out
* *
* \sa resize(int,int), resize(NoChange_t, int), resize(int, NoChange_t) * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)
*/ */
inline void resize(int size) inline void resize(Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(DenseStorageBase) EIGEN_STATIC_ASSERT_VECTOR_ONLY(DenseStorageBase)
ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
@ -200,9 +204,9 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
* Example: \include Matrix_resize_NoChange_int.cpp * Example: \include Matrix_resize_NoChange_int.cpp
* Output: \verbinclude Matrix_resize_NoChange_int.out * Output: \verbinclude Matrix_resize_NoChange_int.out
* *
* \sa resize(int,int) * \sa resize(Index,Index)
*/ */
inline void resize(NoChange_t, int cols) inline void resize(NoChange_t, Index cols)
{ {
resize(rows(), cols); resize(rows(), cols);
} }
@ -213,9 +217,9 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
* Example: \include Matrix_resize_int_NoChange.cpp * Example: \include Matrix_resize_int_NoChange.cpp
* Output: \verbinclude Matrix_resize_int_NoChange.out * Output: \verbinclude Matrix_resize_int_NoChange.out
* *
* \sa resize(int,int) * \sa resize(Index,Index)
*/ */
inline void resize(int rows, NoChange_t) inline void resize(Index rows, NoChange_t)
{ {
resize(rows, cols()); resize(rows, cols());
} }
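And a sketch of the NoChange_t variants declared above, which touch only one dimension (illustrative only):

#include <Eigen/Dense>

void resize_one_dimension_sketch()
{
  Eigen::MatrixXd m(3, 4);
  m.resize(Eigen::NoChange, 6);   // keep 3 rows, now 6 columns
  m.resize(5, Eigen::NoChange);   // now 5 rows, still 6 columns
}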
@ -231,7 +235,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other) EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
{ {
const OtherDerived& other = _other.derived(); const OtherDerived& other = _other.derived();
const int othersize = other.rows()*other.cols(); const Index othersize = other.rows()*other.cols();
if(RowsAtCompileTime == 1) if(RowsAtCompileTime == 1)
{ {
ei_assert(other.rows() == 1 || other.cols() == 1); ei_assert(other.rows() == 1 || other.cols() == 1);
@ -248,26 +252,26 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
/** Resizes \c *this to a \a rows x \a cols matrix while leaving old values of \c *this untouched. /** Resizes \c *this to a \a rows x \a cols matrix while leaving old values of \c *this untouched.
* *
* This method is intended for dynamic-size matrices. If you only want to change the number * This method is intended for dynamic-size matrices. If you only want to change the number
* of rows and/or of columns, you can use conservativeResize(NoChange_t, int), * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index),
* conservativeResize(int, NoChange_t). * conservativeResize(Index, NoChange_t).
* *
* The top-left part of the resized matrix will be the same as the overlapping top-left corner * The top-left part of the resized matrix will be the same as the overlapping top-left corner
* of \c *this. In case values need to be appended to the matrix they will be uninitialized. * of \c *this. In case values need to be appended to the matrix they will be uninitialized.
*/ */
EIGEN_STRONG_INLINE void conservativeResize(int rows, int cols) EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)
{ {
ei_conservative_resize_like_impl<Derived>::run(*this, rows, cols); ei_conservative_resize_like_impl<Derived>::run(*this, rows, cols);
} }
EIGEN_STRONG_INLINE void conservativeResize(int rows, NoChange_t) EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t)
{ {
// Note: see the comment in conservativeResize(int,int) // Note: see the comment in conservativeResize(Index,Index)
conservativeResize(rows, cols()); conservativeResize(rows, cols());
} }
EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, int cols) EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols)
{ {
// Note: see the comment in conservativeResize(int,int) // Note: see the comment in conservativeResize(Index,Index)
conservativeResize(rows(), cols); conservativeResize(rows(), cols);
} }
@ -279,7 +283,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
* *
* When values are appended, they will be uninitialized. * When values are appended, they will be uninitialized.
*/ */
EIGEN_STRONG_INLINE void conservativeResize(int size) EIGEN_STRONG_INLINE void conservativeResize(Index size)
{ {
ei_conservative_resize_like_impl<Derived>::run(*this, size); ei_conservative_resize_like_impl<Derived>::run(*this, size);
} }
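A usage sketch contrasting conservativeResize() with plain resize(): the overlapping top-left block is kept and appended entries are left uninitialized (illustrative only):

#include <Eigen/Dense>

void conservative_resize_sketch()
{
  Eigen::MatrixXd m(2, 2);
  m << 1, 2,
       3, 4;
  m.conservativeResize(3, 3);                // old 2x2 block preserved, new row/column uninitialized
  m.conservativeResize(Eigen::NoChange, 2);  // keep 3 rows, drop back to 2 columns
}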
@ -329,7 +333,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
} }
#endif #endif
EIGEN_STRONG_INLINE DenseStorageBase(int size, int rows, int cols) EIGEN_STRONG_INLINE DenseStorageBase(Index size, Index rows, Index cols)
: m_storage(size, rows, cols) : m_storage(size, rows, cols)
{ {
// _check_template_params(); // _check_template_params();
@ -370,44 +374,44 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
{ return UnalignedMapType(data); } { return UnalignedMapType(data); }
inline static UnalignedMapType Map(Scalar* data) inline static UnalignedMapType Map(Scalar* data)
{ return UnalignedMapType(data); } { return UnalignedMapType(data); }
inline static const UnalignedMapType Map(const Scalar* data, int size) inline static const UnalignedMapType Map(const Scalar* data, Index size)
{ return UnalignedMapType(data, size); } { return UnalignedMapType(data, size); }
inline static UnalignedMapType Map(Scalar* data, int size) inline static UnalignedMapType Map(Scalar* data, Index size)
{ return UnalignedMapType(data, size); } { return UnalignedMapType(data, size); }
inline static const UnalignedMapType Map(const Scalar* data, int rows, int cols) inline static const UnalignedMapType Map(const Scalar* data, Index rows, Index cols)
{ return UnalignedMapType(data, rows, cols); } { return UnalignedMapType(data, rows, cols); }
inline static UnalignedMapType Map(Scalar* data, int rows, int cols) inline static UnalignedMapType Map(Scalar* data, Index rows, Index cols)
{ return UnalignedMapType(data, rows, cols); } { return UnalignedMapType(data, rows, cols); }
inline static const AlignedMapType MapAligned(const Scalar* data) inline static const AlignedMapType MapAligned(const Scalar* data)
{ return AlignedMapType(data); } { return AlignedMapType(data); }
inline static AlignedMapType MapAligned(Scalar* data) inline static AlignedMapType MapAligned(Scalar* data)
{ return AlignedMapType(data); } { return AlignedMapType(data); }
inline static const AlignedMapType MapAligned(const Scalar* data, int size) inline static const AlignedMapType MapAligned(const Scalar* data, Index size)
{ return AlignedMapType(data, size); } { return AlignedMapType(data, size); }
inline static AlignedMapType MapAligned(Scalar* data, int size) inline static AlignedMapType MapAligned(Scalar* data, Index size)
{ return AlignedMapType(data, size); } { return AlignedMapType(data, size); }
inline static const AlignedMapType MapAligned(const Scalar* data, int rows, int cols) inline static const AlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)
{ return AlignedMapType(data, rows, cols); } { return AlignedMapType(data, rows, cols); }
inline static AlignedMapType MapAligned(Scalar* data, int rows, int cols) inline static AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)
{ return AlignedMapType(data, rows, cols); } { return AlignedMapType(data, rows, cols); }
//@} //@}
using Base::setConstant; using Base::setConstant;
Derived& setConstant(int size, const Scalar& value); Derived& setConstant(Index size, const Scalar& value);
Derived& setConstant(int rows, int cols, const Scalar& value); Derived& setConstant(Index rows, Index cols, const Scalar& value);
using Base::setZero; using Base::setZero;
Derived& setZero(int size); Derived& setZero(Index size);
Derived& setZero(int rows, int cols); Derived& setZero(Index rows, Index cols);
using Base::setOnes; using Base::setOnes;
Derived& setOnes(int size); Derived& setOnes(Index size);
Derived& setOnes(int rows, int cols); Derived& setOnes(Index rows, Index cols);
using Base::setRandom; using Base::setRandom;
Derived& setRandom(int size); Derived& setRandom(Index size);
Derived& setRandom(int rows, int cols); Derived& setRandom(Index rows, Index cols);
#ifdef EIGEN_DENSESTORAGEBASE_PLUGIN #ifdef EIGEN_DENSESTORAGEBASE_PLUGIN
#include EIGEN_DENSESTORAGEBASE_PLUGIN #include EIGEN_DENSESTORAGEBASE_PLUGIN
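A brief usage sketch of the static Map() helpers and the sized set*() overloads declared in this hunk (the MapAligned variants would additionally assume a 16-byte aligned buffer; illustrative only):

#include <Eigen/Dense>

void map_and_set_sketch()
{
  float raw[6] = {1, 2, 3, 4, 5, 6};
  // interpret the raw buffer as a 2x3 matrix without copying, then copy it
  Eigen::MatrixXf copy = Eigen::MatrixXf::Map(raw, 2, 3);

  Eigen::MatrixXd m;
  m.setConstant(2, 3, 0.5);   // resize to 2x3 and fill with 0.5
  m.setZero(4, 4);            // resize to 4x4 and fill with zeros
  Eigen::VectorXd v;
  v.setOnes(5);               // resize to 5 and fill with ones
  v.setRandom(7);             // resize to 7 and fill with random values
}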
@ -474,7 +478,7 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
} }
template<typename T0, typename T1> template<typename T0, typename T1>
EIGEN_STRONG_INLINE void _init2(int rows, int cols, typename ei_enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0) EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename ei_enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
{ {
ei_assert(rows > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) ei_assert(rows > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
&& cols > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); && cols > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
@ -526,7 +530,8 @@ class DenseStorageBase : public ei_dense_xpr_base<Derived>::type
template <typename Derived, typename OtherDerived, bool IsVector> template <typename Derived, typename OtherDerived, bool IsVector>
struct ei_conservative_resize_like_impl struct ei_conservative_resize_like_impl
{ {
static void run(DenseBase<Derived>& _this, int rows, int cols) typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index rows, Index cols)
{ {
if (_this.rows() == rows && _this.cols() == cols) return; if (_this.rows() == rows && _this.cols() == cols) return;
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
@ -540,8 +545,8 @@ struct ei_conservative_resize_like_impl
{ {
// The storage order does not allow us to use reallocation. // The storage order does not allow us to use reallocation.
typename Derived::PlainObject tmp(rows,cols); typename Derived::PlainObject tmp(rows,cols);
const int common_rows = std::min(rows, _this.rows()); const Index common_rows = std::min(rows, _this.rows());
const int common_cols = std::min(cols, _this.cols()); const Index common_cols = std::min(cols, _this.cols());
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
_this.derived().swap(tmp); _this.derived().swap(tmp);
} }
@ -551,10 +556,10 @@ struct ei_conservative_resize_like_impl
{ {
if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
      // Note: there is room for improvement here. Basically, for conservativeResize(int,int),       // Note: there is room for improvement here. Basically, for conservativeResize(Index,Index),
      // neither RowsAtCompileTime nor ColsAtCompileTime has to be Dynamic. If only one of the       // neither RowsAtCompileTime nor ColsAtCompileTime has to be Dynamic. If only one of the
// dimensions is dynamic, one could use either conservativeResize(int rows, NoChange_t) or // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
// conservativeResize(NoChange_t, int cols). For these methods new static asserts like // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like
// EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
@ -562,8 +567,8 @@ struct ei_conservative_resize_like_impl
if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
(!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns
{ {
const int new_rows = other.rows() - _this.rows(); const Index new_rows = other.rows() - _this.rows();
const int new_cols = other.cols() - _this.cols(); const Index new_cols = other.cols() - _this.cols();
_this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols()); _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());
if (new_rows>0) if (new_rows>0)
_this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
@ -574,8 +579,8 @@ struct ei_conservative_resize_like_impl
{ {
// The storage order does not allow us to use reallocation. // The storage order does not allow us to use reallocation.
typename Derived::PlainObject tmp(other); typename Derived::PlainObject tmp(other);
const int common_rows = std::min(tmp.rows(), _this.rows()); const Index common_rows = std::min(tmp.rows(), _this.rows());
const int common_cols = std::min(tmp.cols(), _this.cols()); const Index common_cols = std::min(tmp.cols(), _this.cols());
tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
_this.derived().swap(tmp); _this.derived().swap(tmp);
} }
@ -585,10 +590,11 @@ struct ei_conservative_resize_like_impl
template <typename Derived, typename OtherDerived> template <typename Derived, typename OtherDerived>
struct ei_conservative_resize_like_impl<Derived,OtherDerived,true> struct ei_conservative_resize_like_impl<Derived,OtherDerived,true>
{ {
static void run(DenseBase<Derived>& _this, int size) typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index size)
{ {
const int new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
const int new_cols = Derived::RowsAtCompileTime==1 ? size : 1; const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
_this.derived().m_storage.conservativeResize(size,new_rows,new_cols); _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
} }
@ -596,10 +602,10 @@ struct ei_conservative_resize_like_impl<Derived,OtherDerived,true>
{ {
if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
const int num_new_elements = other.size() - _this.size(); const Index num_new_elements = other.size() - _this.size();
const int new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
const int new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1; const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
_this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols); _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
if (num_new_elements > 0) if (num_new_elements > 0)
View File
@ -30,33 +30,34 @@
* \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
* *
* \param MatrixType the type of the object in which we are taking a sub/main/super diagonal * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal
* \param Index the index of the sub/super diagonal. The default is 0 and it means the main diagonal. * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
* A positive value means a superdiagonal, a negative value means a subdiagonal. * A positive value means a superdiagonal, a negative value means a subdiagonal.
* You can also use Dynamic so the index can be set at runtime. * You can also use Dynamic so the index can be set at runtime.
* *
* The matrix is not required to be square. * The matrix is not required to be square.
* *
* This class represents an expression of the main diagonal, or any sub/super diagonal * This class represents an expression of the main diagonal, or any sub/super diagonal
 * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(int) and most of the * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the
* time this is the only way it is used. * time this is the only way it is used.
* *
* \sa MatrixBase::diagonal(), MatrixBase::diagonal(int) * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
*/ */
template<typename MatrixType, int Index> template<typename MatrixType, int DiagIndex>
struct ei_traits<Diagonal<MatrixType,Index> > struct ei_traits<Diagonal<MatrixType,DiagIndex> >
: ei_traits<MatrixType> : ei_traits<MatrixType>
{ {
typedef typename ei_nested<MatrixType>::type MatrixTypeNested; typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested; typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
typedef typename MatrixType::StorageKind StorageKind;
enum { enum {
AbsIndex = Index<0 ? -Index : Index, // only used if Index != Dynamic AbsDiagIndex = DiagIndex<0 ? -DiagIndex : DiagIndex, // only used if DiagIndex != Dynamic
RowsAtCompileTime = (int(Index) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
: (EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime, : (EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime,
MatrixType::ColsAtCompileTime) - AbsIndex), MatrixType::ColsAtCompileTime) - AbsDiagIndex),
ColsAtCompileTime = 1, ColsAtCompileTime = 1,
MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
: Index == Dynamic ? EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) : DiagIndex == Dynamic ? EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime)
: (EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsIndex), : (EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsDiagIndex),
MaxColsAtCompileTime = 1, MaxColsAtCompileTime = 1,
Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit) & ~RowMajorBit, Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit) & ~RowMajorBit,
CoeffReadCost = _MatrixTypeNested::CoeffReadCost, CoeffReadCost = _MatrixTypeNested::CoeffReadCost,
@ -66,61 +67,62 @@ struct ei_traits<Diagonal<MatrixType,Index> >
}; };
}; };
template<typename MatrixType, int Index> class Diagonal template<typename MatrixType, int DiagIndex> class Diagonal
: public ei_dense_xpr_base< Diagonal<MatrixType,Index> >::type : public ei_dense_xpr_base< Diagonal<MatrixType,DiagIndex> >::type
{ {
// some compilers may fail to optimize std::max etc in case of compile-time constants...
EIGEN_STRONG_INLINE int absIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
EIGEN_STRONG_INLINE int rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
EIGEN_STRONG_INLINE int colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
public: public:
typedef typename ei_dense_xpr_base<Diagonal>::type Base; typedef typename ei_dense_xpr_base<Diagonal>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
inline Diagonal(const MatrixType& matrix, int index = Index) : m_matrix(matrix), m_index(index) {} inline Diagonal(const MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
inline int rows() const inline Index rows() const
{ return m_index.value()<0 ? std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); } { return m_index.value()<0 ? std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
inline int cols() const { return 1; } inline Index cols() const { return 1; }
inline int innerStride() const inline Index innerStride() const
{ {
return m_matrix.outerStride() + 1; return m_matrix.outerStride() + 1;
} }
inline int outerStride() const inline Index outerStride() const
{ {
return 0; return 0;
} }
inline Scalar& coeffRef(int row, int) inline Scalar& coeffRef(Index row, Index)
{ {
return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
} }
inline CoeffReturnType coeff(int row, int) const inline CoeffReturnType coeff(Index row, Index) const
{ {
return m_matrix.coeff(row+rowOffset(), row+colOffset()); return m_matrix.coeff(row+rowOffset(), row+colOffset());
} }
inline Scalar& coeffRef(int index) inline Scalar& coeffRef(Index index)
{ {
return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
} }
inline CoeffReturnType coeff(int index) const inline CoeffReturnType coeff(Index index) const
{ {
return m_matrix.coeff(index+rowOffset(), index+colOffset()); return m_matrix.coeff(index+rowOffset(), index+colOffset());
} }
protected: protected:
const typename MatrixType::Nested m_matrix; const typename MatrixType::Nested m_matrix;
const ei_int_if_dynamic<Index> m_index; const ei_variable_if_dynamic<Index, DiagIndex> m_index;
private:
// some compilers may fail to optimize std::max etc in case of compile-time constants...
EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
}; };
/** \returns an expression of the main diagonal of the matrix \c *this /** \returns an expression of the main diagonal of the matrix \c *this
@ -146,12 +148,12 @@ MatrixBase<Derived>::diagonal() const
return Diagonal<Derived, 0>(derived()); return Diagonal<Derived, 0>(derived());
} }
/** \returns an expression of the \a Index-th sub or super diagonal of the matrix \c *this /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
* *
* \c *this is not required to be square. * \c *this is not required to be square.
* *
 * The template parameter \a Index represents a super diagonal if \a Index > 0 * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
* and a sub diagonal otherwise. \a Index == 0 is equivalent to the main diagonal. * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
* *
* Example: \include MatrixBase_diagonal_int.cpp * Example: \include MatrixBase_diagonal_int.cpp
* Output: \verbinclude MatrixBase_diagonal_int.out * Output: \verbinclude MatrixBase_diagonal_int.out
@ -159,45 +161,45 @@ MatrixBase<Derived>::diagonal() const
* \sa MatrixBase::diagonal(), class Diagonal */ * \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived> template<typename Derived>
inline Diagonal<Derived, Dynamic> inline Diagonal<Derived, Dynamic>
MatrixBase<Derived>::diagonal(int index) MatrixBase<Derived>::diagonal(Index index)
{ {
return Diagonal<Derived, Dynamic>(derived(), index); return Diagonal<Derived, Dynamic>(derived(), index);
} }
/** This is the const version of diagonal(int). */ /** This is the const version of diagonal(Index). */
template<typename Derived> template<typename Derived>
inline const Diagonal<Derived, Dynamic> inline const Diagonal<Derived, Dynamic>
MatrixBase<Derived>::diagonal(int index) const MatrixBase<Derived>::diagonal(Index index) const
{ {
return Diagonal<Derived, Dynamic>(derived(), index); return Diagonal<Derived, Dynamic>(derived(), index);
} }
/** \returns an expression of the \a Index-th sub or super diagonal of the matrix \c *this /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
* *
* \c *this is not required to be square. * \c *this is not required to be square.
* *
 * The template parameter \a Index represents a super diagonal if \a Index > 0 * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
* and a sub diagonal otherwise. \a Index == 0 is equivalent to the main diagonal. * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal.
* *
* Example: \include MatrixBase_diagonal_template_int.cpp * Example: \include MatrixBase_diagonal_template_int.cpp
* Output: \verbinclude MatrixBase_diagonal_template_int.out * Output: \verbinclude MatrixBase_diagonal_template_int.out
* *
* \sa MatrixBase::diagonal(), class Diagonal */ * \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived> template<typename Derived>
template<int Index> template<int DiagIndex>
inline Diagonal<Derived,Index> inline Diagonal<Derived,DiagIndex>
MatrixBase<Derived>::diagonal() MatrixBase<Derived>::diagonal()
{ {
return Diagonal<Derived,Index>(derived()); return Diagonal<Derived,DiagIndex>(derived());
} }
/** This is the const version of diagonal<int>(). */ /** This is the const version of diagonal<int>(). */
template<typename Derived> template<typename Derived>
template<int Index> template<int DiagIndex>
inline const Diagonal<Derived,Index> inline const Diagonal<Derived,DiagIndex>
MatrixBase<Derived>::diagonal() const MatrixBase<Derived>::diagonal() const
{ {
return Diagonal<Derived,Index>(derived()); return Diagonal<Derived,DiagIndex>(derived());
} }
#endif // EIGEN_DIAGONAL_H #endif // EIGEN_DIAGONAL_H
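To illustrate the renamed template parameter in use, here is a small sketch of the three diagonal() flavours shown above: main diagonal, runtime index, and compile-time index (illustrative only):

#include <Eigen/Dense>

void diagonal_sketch()
{
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  Eigen::Vector3d d   = m.diagonal();      // main diagonal: 1, 5, 9
  Eigen::VectorXd s   = m.diagonal(1);     // runtime index, first super-diagonal: 2, 6
  Eigen::Vector2d sub = m.diagonal<-1>();  // compile-time index, first sub-diagonal: 4, 8
}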
View File
@ -33,6 +33,8 @@ class DiagonalBase : public EigenBase<Derived>
public: public:
typedef typename ei_traits<Derived>::DiagonalVectorType DiagonalVectorType; typedef typename ei_traits<Derived>::DiagonalVectorType DiagonalVectorType;
typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::Scalar Scalar;
typedef typename ei_traits<Derived>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
enum { enum {
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
@ -61,8 +63,8 @@ class DiagonalBase : public EigenBase<Derived>
inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
inline DiagonalVectorType& diagonal() { return derived().diagonal(); } inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
inline int rows() const { return diagonal().size(); } inline Index rows() const { return diagonal().size(); }
inline int cols() const { return diagonal().size(); } inline Index cols() const { return diagonal().size(); }
template<typename MatrixDerived> template<typename MatrixDerived>
const DiagonalProduct<MatrixDerived, Derived, OnTheLeft> const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
@ -100,6 +102,7 @@ struct ei_traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime>
: ei_traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> > : ei_traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
{ {
typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
typedef Dense StorageKind;
}; };
template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime> template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
@ -111,6 +114,8 @@ class DiagonalMatrix
typedef typename ei_traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType; typedef typename ei_traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
typedef const DiagonalMatrix& Nested; typedef const DiagonalMatrix& Nested;
typedef _Scalar Scalar; typedef _Scalar Scalar;
typedef typename ei_traits<DiagonalMatrix>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
#endif #endif
protected: protected:
@ -128,7 +133,7 @@ class DiagonalMatrix
inline DiagonalMatrix() {} inline DiagonalMatrix() {}
/** Constructs a diagonal matrix with given dimension */ /** Constructs a diagonal matrix with given dimension */
inline DiagonalMatrix(int dim) : m_diagonal(dim) {} inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
/** 2D constructor. */ /** 2D constructor. */
inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {} inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}
@ -170,15 +175,15 @@ class DiagonalMatrix
#endif #endif
/** Resizes to given size. */ /** Resizes to given size. */
inline void resize(int size) { m_diagonal.resize(size); } inline void resize(Index size) { m_diagonal.resize(size); }
/** Sets all coefficients to zero. */ /** Sets all coefficients to zero. */
inline void setZero() { m_diagonal.setZero(); } inline void setZero() { m_diagonal.setZero(); }
/** Resizes and sets all coefficients to zero. */ /** Resizes and sets all coefficients to zero. */
inline void setZero(int size) { m_diagonal.setZero(size); } inline void setZero(Index size) { m_diagonal.setZero(size); }
/** Sets this matrix to be the identity matrix of the current size. */ /** Sets this matrix to be the identity matrix of the current size. */
inline void setIdentity() { m_diagonal.setOnes(); } inline void setIdentity() { m_diagonal.setOnes(); }
/** Sets this matrix to be the identity matrix of the given size. */ /** Sets this matrix to be the identity matrix of the given size. */
inline void setIdentity(int size) { m_diagonal.setOnes(size); } inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
}; };
/** \class DiagonalWrapper /** \class DiagonalWrapper
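A short sketch of the DiagonalMatrix interface touched above: construction with a runtime dimension, setIdentity(), and the implicit product with a dense matrix (illustrative only):

#include <Eigen/Dense>

void diagonal_matrix_sketch()
{
  Eigen::DiagonalMatrix<double, Eigen::Dynamic> d(3);  // 3x3 diagonal, coefficients uninitialized
  d.setIdentity();                                     // diagonal becomes (1, 1, 1)
  d.diagonal() << 2, 3, 4;                             // overwrite the stored diagonal
  Eigen::MatrixXd m = Eigen::MatrixXd::Ones(3, 3);
  Eigen::MatrixXd p = d * m;                           // row i of m is scaled by d.diagonal()(i)
}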
@ -198,6 +203,7 @@ struct ei_traits<DiagonalWrapper<_DiagonalVectorType> >
{ {
typedef _DiagonalVectorType DiagonalVectorType; typedef _DiagonalVectorType DiagonalVectorType;
typedef typename DiagonalVectorType::Scalar Scalar; typedef typename DiagonalVectorType::Scalar Scalar;
typedef typename DiagonalVectorType::StorageKind StorageKind;
enum { enum {
RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
@ -257,13 +263,13 @@ bool MatrixBase<Derived>::isDiagonal
{ {
if(cols() != rows()) return false; if(cols() != rows()) return false;
RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1); RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
{ {
RealScalar absOnDiagonal = ei_abs(coeff(j,j)); RealScalar absOnDiagonal = ei_abs(coeff(j,j));
if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
} }
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
for(int i = 0; i < j; ++i) for(Index i = 0; i < j; ++i)
{ {
if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
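Usage sketch for isDiagonal(): off-diagonal entries are compared against the largest absolute diagonal entry at the given (or default) precision (illustrative only):

#include <Eigen/Dense>

bool is_diagonal_sketch()
{
  Eigen::Matrix2d a;
  a << 3.0,   0.0,
       1e-15, 4.0;
  return a.isDiagonal();   // true: 1e-15 is much smaller than the largest diagonal entry
}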
View File
@ -57,23 +57,23 @@ class DiagonalProduct : ei_no_assignment_operator,
ei_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols())); ei_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
} }
inline int rows() const { return m_matrix.rows(); } inline Index rows() const { return m_matrix.rows(); }
inline int cols() const { return m_matrix.cols(); } inline Index cols() const { return m_matrix.cols(); }
const Scalar coeff(int row, int col) const const Scalar coeff(Index row, Index col) const
{ {
return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col); return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{ {
enum { enum {
StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor, StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor,
InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
}; };
const int indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col; const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
if((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) if((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)) ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight))
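The DiagonalProduct expression above is what the user-facing asDiagonal() products expand to; a brief sketch (illustrative only):

#include <Eigen/Dense>

void diagonal_product_sketch()
{
  Eigen::Vector3d w(1, 2, 3);
  Eigen::MatrixXd m = Eigen::MatrixXd::Ones(3, 3);
  Eigen::MatrixXd left  = w.asDiagonal() * m;   // OnTheLeft: row i of m scaled by w(i)
  Eigen::MatrixXd right = m * w.asDiagonal();   // OnTheRight: column j of m scaled by w(j)
}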
View File
@ -159,11 +159,11 @@ template<typename Derived>
bool MatrixBase<Derived>::isUnitary(RealScalar prec) const bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
{ {
typename Derived::Nested nested(derived()); typename Derived::Nested nested(derived());
for(int i = 0; i < cols(); ++i) for(Index i = 0; i < cols(); ++i)
{ {
if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec)) if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))
return false; return false;
for(int j = 0; j < i; ++j) for(Index j = 0; j < i; ++j)
if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec)) if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
return false; return false;
} }
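Usage sketch for isUnitary(): a 2D rotation has orthonormal columns, so the check succeeds at the default precision (illustrative only):

#include <Eigen/Dense>
#include <cmath>

bool is_unitary_sketch()
{
  const double t = 0.3;
  Eigen::Matrix2d r;
  r << std::cos(t), -std::sin(t),
       std::sin(t),  std::cos(t);
  return r.isUnitary();   // true: columns have unit norm and are mutually orthogonal
}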
View File
@ -39,6 +39,9 @@ template<typename Derived> struct EigenBase
{ {
// typedef typename ei_plain_matrix_type<Derived>::type PlainObject; // typedef typename ei_plain_matrix_type<Derived>::type PlainObject;
typedef typename ei_traits<Derived>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
/** \returns a reference to the derived object */ /** \returns a reference to the derived object */
Derived& derived() { return *static_cast<Derived*>(this); } Derived& derived() { return *static_cast<Derived*>(this); }
/** \returns a const reference to the derived object */ /** \returns a const reference to the derived object */
@ -48,12 +51,12 @@ template<typename Derived> struct EigenBase
{ return *static_cast<Derived*>(const_cast<EigenBase*>(this)); } { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */ /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
inline int rows() const { return derived().rows(); } inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
inline int cols() const { return derived().cols(); } inline Index cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is rows()*cols(). /** \returns the number of coefficients, which is rows()*cols().
* \sa rows(), cols(), SizeAtCompileTime. */ * \sa rows(), cols(), SizeAtCompileTime. */
inline int size() const { return rows() * cols(); } inline Index size() const { return rows() * cols(); }
/** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
template<typename Dest> inline void evalTo(Dest& dst) const template<typename Dest> inline void evalTo(Dest& dst) const
View File
@ -58,51 +58,51 @@ template<typename ExpressionType, unsigned int Added, unsigned int Removed> clas
inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {} inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {}
inline int rows() const { return m_matrix.rows(); } inline Index rows() const { return m_matrix.rows(); }
inline int cols() const { return m_matrix.cols(); } inline Index cols() const { return m_matrix.cols(); }
inline int outerStride() const { return m_matrix.outerStride(); } inline Index outerStride() const { return m_matrix.outerStride(); }
inline int innerStride() const { return m_matrix.innerStride(); } inline Index innerStride() const { return m_matrix.innerStride(); }
inline const Scalar coeff(int row, int col) const inline const Scalar coeff(Index row, Index col) const
{ {
return m_matrix.coeff(row, col); return m_matrix.coeff(row, col);
} }
inline Scalar& coeffRef(int row, int col) inline Scalar& coeffRef(Index row, Index col)
{ {
return m_matrix.const_cast_derived().coeffRef(row, col); return m_matrix.const_cast_derived().coeffRef(row, col);
} }
inline const Scalar coeff(int index) const inline const Scalar coeff(Index index) const
{ {
return m_matrix.coeff(index); return m_matrix.coeff(index);
} }
inline Scalar& coeffRef(int index) inline Scalar& coeffRef(Index index)
{ {
return m_matrix.const_cast_derived().coeffRef(index); return m_matrix.const_cast_derived().coeffRef(index);
} }
template<int LoadMode> template<int LoadMode>
inline const PacketScalar packet(int row, int col) const inline const PacketScalar packet(Index row, Index col) const
{ {
return m_matrix.template packet<LoadMode>(row, col); return m_matrix.template packet<LoadMode>(row, col);
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int row, int col, const PacketScalar& x) inline void writePacket(Index row, Index col, const PacketScalar& x)
{ {
m_matrix.const_cast_derived().template writePacket<LoadMode>(row, col, x); m_matrix.const_cast_derived().template writePacket<LoadMode>(row, col, x);
} }
template<int LoadMode> template<int LoadMode>
inline const PacketScalar packet(int index) const inline const PacketScalar packet(Index index) const
{ {
return m_matrix.template packet<LoadMode>(index); return m_matrix.template packet<LoadMode>(index);
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int index, const PacketScalar& x) inline void writePacket(Index index, const PacketScalar& x)
{ {
m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x); m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x);
} }
View File
@ -50,51 +50,51 @@ template<typename ExpressionType> class ForceAlignedAccess
inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
inline int rows() const { return m_expression.rows(); } inline Index rows() const { return m_expression.rows(); }
inline int cols() const { return m_expression.cols(); } inline Index cols() const { return m_expression.cols(); }
inline int outerStride() const { return m_expression.outerStride(); } inline Index outerStride() const { return m_expression.outerStride(); }
inline int innerStride() const { return m_expression.innerStride(); } inline Index innerStride() const { return m_expression.innerStride(); }
inline const CoeffReturnType coeff(int row, int col) const inline const CoeffReturnType coeff(Index row, Index col) const
{ {
return m_expression.coeff(row, col); return m_expression.coeff(row, col);
} }
inline Scalar& coeffRef(int row, int col) inline Scalar& coeffRef(Index row, Index col)
{ {
return m_expression.const_cast_derived().coeffRef(row, col); return m_expression.const_cast_derived().coeffRef(row, col);
} }
inline const CoeffReturnType coeff(int index) const inline const CoeffReturnType coeff(Index index) const
{ {
return m_expression.coeff(index); return m_expression.coeff(index);
} }
inline Scalar& coeffRef(int index) inline Scalar& coeffRef(Index index)
{ {
return m_expression.const_cast_derived().coeffRef(index); return m_expression.const_cast_derived().coeffRef(index);
} }
template<int LoadMode> template<int LoadMode>
inline const PacketScalar packet(int row, int col) const inline const PacketScalar packet(Index row, Index col) const
{ {
return m_expression.template packet<Aligned>(row, col); return m_expression.template packet<Aligned>(row, col);
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int row, int col, const PacketScalar& x) inline void writePacket(Index row, Index col, const PacketScalar& x)
{ {
m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x); m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x);
} }
template<int LoadMode> template<int LoadMode>
inline const PacketScalar packet(int index) const inline const PacketScalar packet(Index index) const
{ {
return m_expression.template packet<Aligned>(index); return m_expression.template packet<Aligned>(index);
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int index, const PacketScalar& x) inline void writePacket(Index index, const PacketScalar& x)
{ {
m_expression.const_cast_derived().template writePacket<Aligned>(index, x); m_expression.const_cast_derived().template writePacket<Aligned>(index, x);
} }
View File
@ -464,8 +464,10 @@ struct ei_scalar_constant_op {
typedef typename ei_packet_traits<Scalar>::type PacketScalar; typedef typename ei_packet_traits<Scalar>::type PacketScalar;
EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { } EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { }
EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { } EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { }
EIGEN_STRONG_INLINE const Scalar operator() (int, int = 0) const { return m_other; } template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(int, int = 0) const { return ei_pset1(m_other); } EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; }
template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(Index, Index = 0) const { return ei_pset1(m_other); }
const Scalar m_other; const Scalar m_other;
}; };
template<typename Scalar> template<typename Scalar>
@ -474,7 +476,8 @@ struct ei_functor_traits<ei_scalar_constant_op<Scalar> >
template<typename Scalar> struct ei_scalar_identity_op { template<typename Scalar> struct ei_scalar_identity_op {
EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_identity_op) EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_identity_op)
EIGEN_STRONG_INLINE const Scalar operator() (int row, int col) const { return row==col ? Scalar(1) : Scalar(0); } template<typename Index>
EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? Scalar(1) : Scalar(0); }
}; };
template<typename Scalar> template<typename Scalar>
struct ei_functor_traits<ei_scalar_identity_op<Scalar> > struct ei_functor_traits<ei_scalar_identity_op<Scalar> >
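These nullary functors back the user-facing Constant and Identity expressions; a quick sketch of how they are typically reached (illustrative only):

#include <Eigen/Dense>

void nullary_sketch()
{
  // ei_scalar_constant_op: every coefficient evaluates to the stored value
  Eigen::MatrixXd c = Eigen::MatrixXd::Constant(2, 3, 7.5);
  // ei_scalar_identity_op: 1 where row == col, 0 elsewhere
  Eigen::MatrixXd i = Eigen::MatrixXd::Identity(3, 3);
}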
@ -497,8 +500,10 @@ struct ei_linspaced_op_impl<Scalar,false>
m_packetStep(ei_pset1(ei_packet_traits<Scalar>::size*step)), m_packetStep(ei_pset1(ei_packet_traits<Scalar>::size*step)),
m_base(ei_padd(ei_pset1(low),ei_pmul(ei_pset1(step),ei_plset<Scalar>(-ei_packet_traits<Scalar>::size)))) {} m_base(ei_padd(ei_pset1(low),ei_pmul(ei_pset1(step),ei_plset<Scalar>(-ei_packet_traits<Scalar>::size)))) {}
EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; } template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(int) const { return m_base = ei_padd(m_base,m_packetStep); } EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(Index) const { return m_base = ei_padd(m_base,m_packetStep); }
const Scalar m_low; const Scalar m_low;
const Scalar m_step; const Scalar m_step;
@ -518,8 +523,10 @@ struct ei_linspaced_op_impl<Scalar,true>
m_low(low), m_step(step), m_low(low), m_step(step),
m_lowPacket(ei_pset1(m_low)), m_stepPacket(ei_pset1(m_step)), m_interPacket(ei_plset<Scalar>(0)) {} m_lowPacket(ei_pset1(m_low)), m_stepPacket(ei_pset1(m_step)), m_interPacket(ei_plset<Scalar>(0)) {}
EIGEN_STRONG_INLINE const Scalar operator() (int i) const { return m_low+i*m_step; } template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(int i) const EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(Index i) const
{ return ei_padd(m_lowPacket, ei_pmul(m_stepPacket, ei_padd(ei_pset1<Scalar>(i),m_interPacket))); } { return ei_padd(m_lowPacket, ei_pmul(m_stepPacket, ei_padd(ei_pset1<Scalar>(i),m_interPacket))); }
const Scalar m_low; const Scalar m_low;
@ -541,8 +548,10 @@ template <typename Scalar, bool RandomAccess> struct ei_linspaced_op
{ {
typedef typename ei_packet_traits<Scalar>::type PacketScalar; typedef typename ei_packet_traits<Scalar>::type PacketScalar;
ei_linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {} ei_linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {}
EIGEN_STRONG_INLINE const Scalar operator() (int i, int = 0) const { return impl(i); } template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(int i, int = 0) const { return impl.packetOp(i); } EIGEN_STRONG_INLINE const Scalar operator() (Index i, Index = 0) const { return impl(i); }
template<typename Index>
EIGEN_STRONG_INLINE const PacketScalar packetOp(Index i, Index = 0) const { return impl.packetOp(i); }
// This proxy object handles the actual required temporaries, the different // This proxy object handles the actual required temporaries, the different
    // implementations (random vs. sequential access) as well as the     // implementations (random vs. sequential access) as well as the
// correct piping to size 2/4 packet operations. // correct piping to size 2/4 packet operations.
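The arithmetic implemented by the scalar path of ei_linspaced_op_impl is simply value(i) = low + i*step with step = (high - low) / (num_steps - 1). A standalone sketch of that sequential branch, ignoring the packet machinery (not the library's code):

#include <vector>

std::vector<double> linspaced_sketch(double low, double high, int num_steps)
{
  std::vector<double> v(num_steps);
  const double step = (high - low) / (num_steps - 1);   // num_steps is assumed > 1
  for (int i = 0; i < num_steps; ++i)
    v[i] = low + i * step;                              // matches operator()(Index i) above
  return v;
}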
View File
@ -201,13 +201,14 @@ template<typename Derived, typename OtherDerived>
struct ei_fuzzy_selector<Derived,OtherDerived,false> struct ei_fuzzy_selector<Derived,OtherDerived,false>
{ {
typedef typename Derived::RealScalar RealScalar; typedef typename Derived::RealScalar RealScalar;
typedef typename Derived::Index Index;
static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec) static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec)
{ {
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
typename Derived::Nested nested(self); typename Derived::Nested nested(self);
typename OtherDerived::Nested otherNested(other); typename OtherDerived::Nested otherNested(other);
for(int i = 0; i < self.cols(); ++i) for(Index i = 0; i < self.cols(); ++i)
if((nested.col(i) - otherNested.col(i)).squaredNorm() if((nested.col(i) - otherNested.col(i)).squaredNorm()
> std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec) > std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec)
return false; return false;
@ -216,7 +217,7 @@ struct ei_fuzzy_selector<Derived,OtherDerived,false>
static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec) static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec)
{ {
typename Derived::Nested nested(self); typename Derived::Nested nested(self);
for(int i = 0; i < self.cols(); ++i) for(Index i = 0; i < self.cols(); ++i)
if(nested.col(i).squaredNorm() > ei_abs2(other * prec)) if(nested.col(i).squaredNorm() > ei_abs2(other * prec))
return false; return false;
return true; return true;
@ -227,7 +228,7 @@ struct ei_fuzzy_selector<Derived,OtherDerived,false>
ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
typename Derived::Nested nested(self); typename Derived::Nested nested(self);
typename OtherDerived::Nested otherNested(other); typename OtherDerived::Nested otherNested(other);
for(int i = 0; i < self.cols(); ++i) for(Index i = 0; i < self.cols(); ++i)
if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec) if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec)
return false; return false;
return true; return true;
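A usage sketch of the fuzzy comparisons whose column-wise fallback is shown above (default precision assumed; illustrative only):

#include <Eigen/Dense>

bool fuzzy_sketch()
{
  Eigen::Vector3d a(1.0, 2.0, 3.0);
  Eigen::Vector3d b = a + Eigen::Vector3d::Constant(1e-13);
  bool close = a.isApprox(b);                  // true: the difference is tiny relative to a and b
  bool small = (a - b).isMuchSmallerThan(a);   // true as well
  return close && small;
}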
View File
@ -157,8 +157,9 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm
{ {
const typename Derived::Nested m = _m; const typename Derived::Nested m = _m;
typedef typename Derived::Scalar Scalar; typedef typename Derived::Scalar Scalar;
typedef typename Derived::Index Index;
int width = 0; Index width = 0;
std::streamsize explicit_precision; std::streamsize explicit_precision;
if(fmt.precision == StreamPrecision) if(fmt.precision == StreamPrecision)
@ -185,26 +186,26 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm
if(align_cols) if(align_cols)
{ {
// compute the largest width // compute the largest width
for(int j = 1; j < m.cols(); ++j) for(Index j = 1; j < m.cols(); ++j)
for(int i = 0; i < m.rows(); ++i) for(Index i = 0; i < m.rows(); ++i)
{ {
std::stringstream sstr; std::stringstream sstr;
if(explicit_precision) sstr.precision(explicit_precision); if(explicit_precision) sstr.precision(explicit_precision);
sstr << m.coeff(i,j); sstr << m.coeff(i,j);
width = std::max<int>(width, int(sstr.str().length())); width = std::max<Index>(width, Index(sstr.str().length()));
} }
} }
std::streamsize old_precision = 0; std::streamsize old_precision = 0;
if(explicit_precision) old_precision = s.precision(explicit_precision); if(explicit_precision) old_precision = s.precision(explicit_precision);
s << fmt.matPrefix; s << fmt.matPrefix;
for(int i = 0; i < m.rows(); ++i) for(Index i = 0; i < m.rows(); ++i)
{ {
if (i) if (i)
s << fmt.rowSpacer; s << fmt.rowSpacer;
s << fmt.rowPrefix; s << fmt.rowPrefix;
if(width) s.width(width); if(width) s.width(width);
s << m.coeff(i, 0); s << m.coeff(i, 0);
for(int j = 1; j < m.cols(); ++j) for(Index j = 1; j < m.cols(); ++j)
{ {
s << fmt.coeffSeparator; s << fmt.coeffSeparator;
if (width) s.width(width); if (width) s.width(width);
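A usage sketch of the formatting path above, assuming the usual IOFormat constructor order (precision, flags, coefficient separator, row separator, row prefix/suffix, matrix prefix/suffix); illustrative only:

#include <Eigen/Dense>
#include <iostream>

void io_sketch()
{
  Eigen::IOFormat fmt(4, 0, ", ", "\n", "[", "]");
  Eigen::Matrix2d m;
  m << 1.23456, 2,
       3,       4;
  std::cout << m.format(fmt) << std::endl;   // per-row brackets, aligned columns, 4 digits
}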
View File
@@ -109,12 +109,12 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
 EIGEN_DENSE_PUBLIC_INTERFACE(Map)
-inline int innerStride() const
+inline Index innerStride() const
 {
 return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
 }
-inline int outerStride() const
+inline Index outerStride() const
 {
 return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
 : IsVectorAtCompileTime ? this->size()
@@ -139,7 +139,7 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
 * \param size the size of the vector expression
 * \param stride optional Stride object, passing the strides.
 */
-inline Map(const Scalar* data, int size, const StrideType& stride = StrideType())
+inline Map(const Scalar* data, Index size, const StrideType& stride = StrideType())
 : Base(data, size), m_stride(stride)
 {
 PlainObjectType::Base::_check_template_params();
@@ -152,7 +152,7 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
 * \param cols the number of columns of the matrix expression
 * \param stride optional Stride object, passing the strides.
 */
-inline Map(const Scalar* data, int rows, int cols, const StrideType& stride = StrideType())
+inline Map(const Scalar* data, Index rows, Index cols, const StrideType& stride = StrideType())
 : Base(data, rows, cols), m_stride(stride)
 {
 PlainObjectType::Base::_check_template_params();
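These are the constructors used when mapping raw buffers; with a non-default StrideType the outer stride can differ from the inner size. A sketch mapping a column-major buffer whose leading dimension exceeds its row count (buffer contents are made up):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // 2x3 matrix stored column-major inside a buffer with leading dimension 4.
      double buf[12] = { 1, 2, -1, -1,   3, 4, -1, -1,   5, 6, -1, -1 };

      Eigen::Map<const Eigen::MatrixXd, Eigen::Unaligned, Eigen::OuterStride<> >
          m(buf, 2, 3, Eigen::OuterStride<>(4));

      std::cout << m << "\n";   // rows: 1 3 5  /  2 4 6
    }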
@@ -44,8 +44,13 @@ template<typename Derived> class MapBase
 SizeAtCompileTime = Base::SizeAtCompileTime
 };
+typedef typename ei_traits<Derived>::StorageKind StorageKind;
+typedef typename ei_index<StorageKind>::type Index;
 typedef typename ei_traits<Derived>::Scalar Scalar;
-typedef typename Base::PacketScalar PacketScalar;
+typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+typedef typename NumTraits<Scalar>::Real RealScalar;
 using Base::derived;
 // using Base::RowsAtCompileTime;
 // using Base::ColsAtCompileTime;
@@ -82,8 +87,8 @@ template<typename Derived> class MapBase
 typedef typename Base::CoeffReturnType CoeffReturnType;
-inline int rows() const { return m_rows.value(); }
+inline Index rows() const { return m_rows.value(); }
-inline int cols() const { return m_cols.value(); }
+inline Index cols() const { return m_cols.value(); }
 /** Returns a pointer to the first coefficient of the matrix or vector.
 *
@@ -93,50 +98,50 @@
 */
 inline const Scalar* data() const { return m_data; }
-inline const Scalar& coeff(int row, int col) const
+inline const Scalar& coeff(Index row, Index col) const
 {
 return m_data[col * colStride() + row * rowStride()];
 }
-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 return const_cast<Scalar*>(m_data)[col * colStride() + row * rowStride()];
 }
-inline const Scalar& coeff(int index) const
+inline const Scalar& coeff(Index index) const
 {
 ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
 return m_data[index * innerStride()];
 }
-inline Scalar& coeffRef(int index)
+inline Scalar& coeffRef(Index index)
 {
 ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
 return const_cast<Scalar*>(m_data)[index * innerStride()];
 }
 template<int LoadMode>
-inline PacketScalar packet(int row, int col) const
+inline PacketScalar packet(Index row, Index col) const
 {
 return ei_ploadt<Scalar, LoadMode>
 (m_data + (col * colStride() + row * rowStride()));
 }
 template<int LoadMode>
-inline PacketScalar packet(int index) const
+inline PacketScalar packet(Index index) const
 {
 return ei_ploadt<Scalar, LoadMode>(m_data + index * innerStride());
 }
 template<int StoreMode>
-inline void writePacket(int row, int col, const PacketScalar& x)
+inline void writePacket(Index row, Index col, const PacketScalar& x)
 {
 ei_pstoret<Scalar, PacketScalar, StoreMode>
 (const_cast<Scalar*>(m_data) + (col * colStride() + row * rowStride()), x);
 }
 template<int StoreMode>
-inline void writePacket(int index, const PacketScalar& x)
+inline void writePacket(Index index, const PacketScalar& x)
 {
 ei_pstoret<Scalar, PacketScalar, StoreMode>
 (const_cast<Scalar*>(m_data) + index * innerStride(), x);
@@ -148,10 +153,10 @@ template<typename Derived> class MapBase
 checkSanity();
 }
-inline MapBase(const Scalar* data, int size)
+inline MapBase(const Scalar* data, Index size)
 : m_data(data),
-m_rows(RowsAtCompileTime == Dynamic ? size : RowsAtCompileTime),
+m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)),
-m_cols(ColsAtCompileTime == Dynamic ? size : ColsAtCompileTime)
+m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime))
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 ei_assert(size >= 0);
@@ -159,7 +164,7 @@ template<typename Derived> class MapBase
 checkSanity();
 }
-inline MapBase(const Scalar* data, int rows, int cols)
+inline MapBase(const Scalar* data, Index rows, Index cols)
 : m_data(data), m_rows(rows), m_cols(cols)
 {
 ei_assert( (data == 0)
@@ -187,8 +192,8 @@ template<typename Derived> class MapBase
 }
 const Scalar* EIGEN_RESTRICT m_data;
-const ei_int_if_dynamic<RowsAtCompileTime> m_rows;
+const ei_variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
-const ei_int_if_dynamic<ColsAtCompileTime> m_cols;
+const ei_variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
 };
 #endif // EIGEN_MAPBASE_H
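coeff() and packet() above reduce every access to `col * colStride() + row * rowStride()`. The same arithmetic, written out as a hypothetical standalone helper (not part of Eigen) for a column-major buffer with leading dimension `ld`:

    #include <cstddef>

    // Column-major addressing: rowStride() == 1, colStride() == ld.
    inline double coeff_at(const double* data, std::ptrdiff_t ld,
                           std::ptrdiff_t row, std::ptrdiff_t col)
    {
      return data[col * ld + row];
    }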
@@ -657,7 +657,7 @@ struct ei_pow_default_impl<Scalar, true>
 {
 static inline Scalar run(Scalar x, Scalar y)
 {
-int res = 1;
+Scalar res = 1;
 ei_assert(!NumTraits<Scalar>::IsSigned || y >= 0);
 if(y & 1) res *= x;
 y >>= 1;
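The integer specialization patched above (now accumulating into a Scalar instead of an int) is plain square-and-multiply exponentiation. A self-contained sketch of the same algorithm, not the Eigen implementation itself:

    // Binary (square-and-multiply) exponentiation for a non-negative
    // integer exponent: O(log y) multiplications.
    template <typename Scalar>
    Scalar int_pow(Scalar x, Scalar y)
    {
      Scalar res = 1;
      while (y) {
        if (y & 1) res *= x;   // current bit set: fold the base into the result
        x *= x;                // square the base for the next bit
        y >>= 1;
      }
      return res;
    }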
@@ -206,7 +206,7 @@ class Matrix
 * is called a null matrix. This constructor is the unique way to create null matrices: resizing
 * a matrix to 0 is not supported.
 *
-* \sa resize(int,int)
+* \sa resize(Index,Index)
 */
 EIGEN_STRONG_INLINE explicit Matrix() : Base()
 {
@@ -225,7 +225,7 @@ class Matrix
 * it is redundant to pass the dimension here, so it makes more sense to use the default
 * constructor Matrix() instead.
 */
-EIGEN_STRONG_INLINE explicit Matrix(int dim)
+EIGEN_STRONG_INLINE explicit Matrix(Index dim)
 : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
 {
 Base::_check_template_params();
@@ -248,7 +248,7 @@ class Matrix
 * This is useful for dynamic-size matrices. For fixed-size matrices,
 * it is redundant to pass these parameters, so one should use the default constructor
 * Matrix() instead. */
-Matrix(int rows, int cols);
+Matrix(Index rows, Index cols);
 /** \brief Constructs an initialized 2D vector with given coefficients */
 Matrix(const Scalar& x, const Scalar& y);
 #endif
@@ -321,8 +321,8 @@ class Matrix
 void swap(MatrixBase<OtherDerived> EIGEN_REF_TO_TEMPORARY other)
 { this->_swap(other.derived()); }
-inline int innerStride() const { return 1; }
+inline Index innerStride() const { return 1; }
-inline int outerStride() const { return this->innerSize(); }
+inline Index outerStride() const { return this->innerSize(); }
 /////////// Geometry module ///////////
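With the constructor signatures switched to Index, user code is unchanged: sizes are simply passed as (possibly wider) integers. For illustration:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m(3, 4);        // dynamic-size: rows, cols
      Eigen::VectorXd v(7);           // dynamic-size vector: one dimension
      Eigen::Vector2d p(0.5, -1.0);   // fixed-size 2D: the two-coefficient constructor
      m.setZero();
      m.resize(5, 2);                 // the resize(Index,Index) referenced above
      return 0;
    }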
@@ -56,14 +56,14 @@ template<typename Derived> class MatrixBase
 {
 public:
 #ifndef EIGEN_PARSED_BY_DOXYGEN
-/** The base class for a given storage type. */
 typedef MatrixBase StorageBaseType;
+typedef typename ei_traits<Derived>::StorageKind StorageKind;
+typedef typename ei_index<StorageKind>::type Index;
 typedef typename ei_traits<Derived>::Scalar Scalar;
 typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+typedef typename NumTraits<Scalar>::Real RealScalar;
 typedef DenseBase<Derived> Base;
 using Base::RowsAtCompileTime;
 using Base::ColsAtCompileTime;
 using Base::SizeAtCompileTime;
@@ -97,14 +97,6 @@ template<typename Derived> class MatrixBase
 #ifndef EIGEN_PARSED_BY_DOXYGEN
-/** This is the "real scalar" type; if the \a Scalar type is already real numbers
- * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
- * \a Scalar is \a std::complex<T> then RealScalar is \a T.
- *
- * \sa class NumTraits
- */
-typedef typename NumTraits<Scalar>::Real RealScalar;
 /** type of the equivalent square matrix */
 typedef Matrix<Scalar,EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime),
 EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
@@ -112,7 +104,7 @@ template<typename Derived> class MatrixBase
 /** \returns the size of the main diagonal, which is min(rows(),cols()).
 * \sa rows(), cols(), SizeAtCompileTime. */
-inline int diagonalSize() const { return std::min(rows(),cols()); }
+inline Index diagonalSize() const { return std::min(rows(),cols()); }
 /** \brief The plain matrix type corresponding to this expression.
 *
@@ -211,8 +203,8 @@ template<typename Derived> class MatrixBase
 template<int Index> Diagonal<Derived,Index> diagonal();
 template<int Index> const Diagonal<Derived,Index> diagonal() const;
-Diagonal<Derived, Dynamic> diagonal(int index);
+Diagonal<Derived, Dynamic> diagonal(Index index);
-const Diagonal<Derived, Dynamic> diagonal(int index) const;
+const Diagonal<Derived, Dynamic> diagonal(Index index) const;
 template<unsigned int Mode> TriangularView<Derived, Mode> part();
 template<unsigned int Mode> const TriangularView<Derived, Mode> part() const;
@@ -224,9 +216,9 @@ template<typename Derived> class MatrixBase
 template<unsigned int UpLo> const SelfAdjointView<Derived, UpLo> selfadjointView() const;
 static const IdentityReturnType Identity();
-static const IdentityReturnType Identity(int rows, int cols);
+static const IdentityReturnType Identity(Index rows, Index cols);
-static const BasisReturnType Unit(int size, int i);
+static const BasisReturnType Unit(Index size, Index i);
-static const BasisReturnType Unit(int i);
+static const BasisReturnType Unit(Index i);
 static const BasisReturnType UnitX();
 static const BasisReturnType UnitY();
 static const BasisReturnType UnitZ();
@@ -235,7 +227,7 @@ template<typename Derived> class MatrixBase
 const DiagonalWrapper<Derived> asDiagonal() const;
 Derived& setIdentity();
-Derived& setIdentity(int rows, int cols);
+Derived& setIdentity(Index rows, Index cols);
 bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
 bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
@@ -329,7 +321,7 @@ template<typename Derived> class MatrixBase
 template<typename OtherDerived>
 PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
 PlainObject unitOrthogonal(void) const;
-Matrix<Scalar,3,1> eulerAngles(int a0, int a1, int a2) const;
+Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const;
 const ScalarMultipleReturnType operator*(const UniformScaling<Scalar>& s) const;
 enum {
 SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1
@@ -362,9 +354,9 @@ template<typename Derived> class MatrixBase
 ///////// Jacobi module /////////
 template<typename OtherScalar>
-void applyOnTheLeft(int p, int q, const PlanarRotation<OtherScalar>& j);
+void applyOnTheLeft(Index p, Index q, const PlanarRotation<OtherScalar>& j);
 template<typename OtherScalar>
-void applyOnTheRight(int p, int q, const PlanarRotation<OtherScalar>& j);
+void applyOnTheRight(Index p, Index q, const PlanarRotation<OtherScalar>& j);
 ///////// MatrixFunctions module /////////
@@ -398,17 +390,17 @@ template<typename Derived> class MatrixBase
 inline const Cwise<Derived> cwise() const;
 inline Cwise<Derived> cwise();
-VectorBlock<Derived> start(int size);
+VectorBlock<Derived> start(Index size);
-const VectorBlock<Derived> start(int size) const;
+const VectorBlock<Derived> start(Index size) const;
-VectorBlock<Derived> end(int size);
+VectorBlock<Derived> end(Index size);
-const VectorBlock<Derived> end(int size) const;
+const VectorBlock<Derived> end(Index size) const;
 template<int Size> VectorBlock<Derived,Size> start();
 template<int Size> const VectorBlock<Derived,Size> start() const;
 template<int Size> VectorBlock<Derived,Size> end();
 template<int Size> const VectorBlock<Derived,Size> end() const;
-Minor<Derived> minor(int row, int col);
+Minor<Derived> minor(Index row, Index col);
-const Minor<Derived> minor(int row, int col) const;
+const Minor<Derived> minor(Index row, Index col) const;
 #endif
 protected:
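Several of the members whose signatures change above are everyday API; the calls look identical, only the index parameter type is wider. A usage sketch with illustrative sizes:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd I  = Eigen::MatrixXd::Identity(4, 3);   // Identity(Index, Index)
      Eigen::VectorXd e2 = Eigen::VectorXd::Unit(5, 2);       // Unit(Index size, Index i)
      Eigen::MatrixXd A  = Eigen::MatrixXd::Random(4, 4);

      std::cout << A.diagonal(1).transpose() << "\n";         // diagonal(Index)
      std::cout << I.col(0).dot(e2.head(4)) << "\n";
    }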
@@ -97,12 +97,12 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class ei_matr
 inline explicit ei_matrix_storage() {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
 : m_data(ei_constructor_without_unaligned_array_assert()) {}
-inline ei_matrix_storage(int,int,int) {}
+inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {}
 inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); }
-inline static int rows(void) {return _Rows;}
+inline static DenseIndex rows(void) {return _Rows;}
-inline static int cols(void) {return _Cols;}
+inline static DenseIndex cols(void) {return _Cols;}
-inline void conservativeResize(int,int,int) {}
+inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-inline void resize(int,int,int) {}
+inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
 inline const T *data() const { return m_data.array; }
 inline T *data() { return m_data.array; }
 };
@@ -113,12 +113,12 @@ template<typename T, int _Rows, int _Cols, int _Options> class ei_matrix_storage
 public:
 inline explicit ei_matrix_storage() {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) {}
-inline ei_matrix_storage(int,int,int) {}
+inline ei_matrix_storage(DenseIndex,DenseIndex,DenseIndex) {}
 inline void swap(ei_matrix_storage& ) {}
-inline static int rows(void) {return _Rows;}
+inline static DenseIndex rows(void) {return _Rows;}
-inline static int cols(void) {return _Cols;}
+inline static DenseIndex cols(void) {return _Cols;}
-inline void conservativeResize(int,int,int) {}
+inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-inline void resize(int,int,int) {}
+inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
 inline const T *data() const { return 0; }
 inline T *data() { return 0; }
 };
@@ -127,19 +127,19 @@ template<typename T, int _Rows, int _Cols, int _Options> class ei_matrix_storage
 template<typename T, int Size, int _Options> class ei_matrix_storage<T, Size, Dynamic, Dynamic, _Options>
 {
 ei_matrix_array<T,Size,_Options> m_data;
-int m_rows;
+DenseIndex m_rows;
-int m_cols;
+DenseIndex m_cols;
 public:
 inline explicit ei_matrix_storage() : m_rows(0), m_cols(0) {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
 : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
-inline ei_matrix_storage(int, int rows, int cols) : m_rows(rows), m_cols(cols) {}
+inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {}
 inline void swap(ei_matrix_storage& other)
 { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-inline int rows(void) const {return m_rows;}
+inline DenseIndex rows(void) const {return m_rows;}
-inline int cols(void) const {return m_cols;}
+inline DenseIndex cols(void) const {return m_cols;}
-inline void conservativeResize(int, int rows, int cols) { m_rows = rows; m_cols = cols; }
+inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
-inline void resize(int, int rows, int cols) { m_rows = rows; m_cols = cols; }
+inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
 inline const T *data() const { return m_data.array; }
 inline T *data() { return m_data.array; }
 };
@@ -148,17 +148,17 @@ template<typename T, int Size, int _Options> class ei_matrix_storage<T, Size, Dy
 template<typename T, int Size, int _Cols, int _Options> class ei_matrix_storage<T, Size, Dynamic, _Cols, _Options>
 {
 ei_matrix_array<T,Size,_Options> m_data;
-int m_rows;
+DenseIndex m_rows;
 public:
 inline explicit ei_matrix_storage() : m_rows(0) {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
 : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0) {}
-inline ei_matrix_storage(int, int rows, int) : m_rows(rows) {}
+inline ei_matrix_storage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {}
 inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-inline int rows(void) const {return m_rows;}
+inline DenseIndex rows(void) const {return m_rows;}
-inline int cols(void) const {return _Cols;}
+inline DenseIndex cols(void) const {return _Cols;}
-inline void conservativeResize(int, int rows, int) { m_rows = rows; }
+inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
-inline void resize(int, int rows, int) { m_rows = rows; }
+inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
 inline const T *data() const { return m_data.array; }
 inline T *data() { return m_data.array; }
 };
@@ -167,17 +167,17 @@ template<typename T, int Size, int _Cols, int _Options> class ei_matrix_storage<
 template<typename T, int Size, int _Rows, int _Options> class ei_matrix_storage<T, Size, _Rows, Dynamic, _Options>
 {
 ei_matrix_array<T,Size,_Options> m_data;
-int m_cols;
+DenseIndex m_cols;
 public:
 inline explicit ei_matrix_storage() : m_cols(0) {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
 : m_data(ei_constructor_without_unaligned_array_assert()), m_cols(0) {}
-inline ei_matrix_storage(int, int, int cols) : m_cols(cols) {}
+inline ei_matrix_storage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {}
 inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-inline int rows(void) const {return _Rows;}
+inline DenseIndex rows(void) const {return _Rows;}
-inline int cols(void) const {return m_cols;}
+inline DenseIndex cols(void) const {return m_cols;}
-inline void conservativeResize(int, int, int cols) { m_cols = cols; }
+inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
-inline void resize(int, int, int cols) { m_cols = cols; }
+inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
 inline const T *data() const { return m_data.array; }
 inline T *data() { return m_data.array; }
 };
@@ -186,27 +186,27 @@ template<typename T, int Size, int _Rows, int _Options> class ei_matrix_storage<
 template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic, _Options>
 {
 T *m_data;
-int m_rows;
+DenseIndex m_rows;
-int m_cols;
+DenseIndex m_cols;
 public:
 inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
 : m_data(0), m_rows(0), m_cols(0) {}
-inline ei_matrix_storage(int size, int rows, int cols)
+inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex cols)
 : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)
 { EIGEN_INT_DEBUG_MATRIX_CTOR }
 inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
 inline void swap(ei_matrix_storage& other)
 { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-inline int rows(void) const {return m_rows;}
+inline DenseIndex rows(void) const {return m_rows;}
-inline int cols(void) const {return m_cols;}
+inline DenseIndex cols(void) const {return m_cols;}
-inline void conservativeResize(int size, int rows, int cols)
+inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols)
 {
 m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
 m_rows = rows;
 m_cols = cols;
 }
-void resize(int size, int rows, int cols)
+void resize(DenseIndex size, DenseIndex rows, DenseIndex cols)
 {
 if(size != m_rows*m_cols)
 {
@@ -228,22 +228,22 @@ template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic,
 template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic, _Rows, Dynamic, _Options>
 {
 T *m_data;
-int m_cols;
+DenseIndex m_cols;
 public:
 inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
-inline ei_matrix_storage(int size, int, int cols) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
+inline ei_matrix_storage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
 { EIGEN_INT_DEBUG_MATRIX_CTOR }
 inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
 inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-inline static int rows(void) {return _Rows;}
+inline static DenseIndex rows(void) {return _Rows;}
-inline int cols(void) const {return m_cols;}
+inline DenseIndex cols(void) const {return m_cols;}
-inline void conservativeResize(int size, int, int cols)
+inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols)
 {
 m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
 m_cols = cols;
 }
-void resize(int size, int, int cols)
+void resize(DenseIndex size, DenseIndex, DenseIndex cols)
 {
 if(size != _Rows*m_cols)
 {
@@ -264,22 +264,22 @@ template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic
 template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, _Cols, _Options>
 {
 T *m_data;
-int m_rows;
+DenseIndex m_rows;
 public:
 inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {}
 inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
-inline ei_matrix_storage(int size, int rows, int) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
+inline ei_matrix_storage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(ei_conditional_aligned_new<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
 { EIGEN_INT_DEBUG_MATRIX_CTOR }
 inline ~ei_matrix_storage() { ei_conditional_aligned_delete<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
 inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-inline int rows(void) const {return m_rows;}
+inline DenseIndex rows(void) const {return m_rows;}
-inline static int cols(void) {return _Cols;}
+inline static DenseIndex cols(void) {return _Cols;}
-inline void conservativeResize(int size, int rows, int)
+inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex)
 {
 m_data = ei_conditional_aligned_realloc_new<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
 m_rows = rows;
 }
-void resize(int size, int rows, int)
+void resize(DenseIndex size, DenseIndex rows, DenseIndex)
 {
 if(size != m_rows*_Cols)
 {
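All the ei_matrix_storage specializations follow one pattern: fixed dimensions are compile-time constants, dynamic ones are stored members, and after this commit every run-time size is a DenseIndex. A stripped-down sketch of the dynamic-rows / fixed-cols case (hypothetical names, simplified ownership, plain new[]; this is not the Eigen code):

    #include <cstddef>

    typedef std::ptrdiff_t DenseIndex;   // assumption: a signed, pointer-sized index type

    // Dynamic rows, fixed number of columns.
    template <typename T, int Cols>
    class dyn_rows_storage
    {
      T* m_data;
      DenseIndex m_rows;
    public:
      dyn_rows_storage() : m_data(0), m_rows(0) {}
      dyn_rows_storage(DenseIndex size, DenseIndex rows, DenseIndex)
        : m_data(new T[size]), m_rows(rows) {}
      ~dyn_rows_storage() { delete[] m_data; }          // copy/assignment omitted for brevity
      DenseIndex rows() const { return m_rows; }        // stored at run time
      static DenseIndex cols() { return Cols; }         // known at compile time
      const T* data() const { return m_data; }
      T* data() { return m_data; }
    };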
@@ -51,51 +51,51 @@ template<typename ExpressionType> class NestByValue
 inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
-inline int rows() const { return m_expression.rows(); }
+inline Index rows() const { return m_expression.rows(); }
-inline int cols() const { return m_expression.cols(); }
+inline Index cols() const { return m_expression.cols(); }
-inline int outerStride() const { return m_expression.outerStride(); }
+inline Index outerStride() const { return m_expression.outerStride(); }
-inline int innerStride() const { return m_expression.innerStride(); }
+inline Index innerStride() const { return m_expression.innerStride(); }
-inline const CoeffReturnType coeff(int row, int col) const
+inline const CoeffReturnType coeff(Index row, Index col) const
 {
 return m_expression.coeff(row, col);
 }
-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 return m_expression.const_cast_derived().coeffRef(row, col);
 }
-inline const CoeffReturnType coeff(int index) const
+inline const CoeffReturnType coeff(Index index) const
 {
 return m_expression.coeff(index);
 }
-inline Scalar& coeffRef(int index)
+inline Scalar& coeffRef(Index index)
 {
 return m_expression.const_cast_derived().coeffRef(index);
 }
 template<int LoadMode>
-inline const PacketScalar packet(int row, int col) const
+inline const PacketScalar packet(Index row, Index col) const
 {
 return m_expression.template packet<LoadMode>(row, col);
 }
 template<int LoadMode>
-inline void writePacket(int row, int col, const PacketScalar& x)
+inline void writePacket(Index row, Index col, const PacketScalar& x)
 {
 m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
 }
 template<int LoadMode>
-inline const PacketScalar packet(int index) const
+inline const PacketScalar packet(Index index) const
 {
 return m_expression.template packet<LoadMode>(index);
 }
 template<int LoadMode>
-inline void writePacket(int index, const PacketScalar& x)
+inline void writePacket(Index index, const PacketScalar& x)
 {
 m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
 }
@@ -216,10 +216,11 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
 template<> struct ei_outer_product_selector<ColMajor> {
 template<typename ProductType, typename Dest>
 EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+typedef typename Dest::Index Index;
 // FIXME make sure lhs is sequentially stored
 // FIXME not very good if rhs is real and lhs complex while alpha is real too
-const int cols = dest.cols();
+const Index cols = dest.cols();
-for (int j=0; j<cols; ++j)
+for (Index j=0; j<cols; ++j)
 dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
 }
 };
@@ -227,10 +228,11 @@ template<> struct ei_outer_product_selector<ColMajor> {
 template<> struct ei_outer_product_selector<RowMajor> {
 template<typename ProductType, typename Dest>
 EIGEN_DONT_INLINE static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+typedef typename Dest::Index Index;
 // FIXME make sure rhs is sequentially stored
 // FIXME not very good if lhs is real and rhs complex while alpha is real too
-const int rows = dest.rows();
+const Index rows = dest.rows();
-for (int i=0; i<rows; ++i)
+for (Index i=0; i<rows; ++i)
 dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
 }
 };
@@ -383,9 +385,10 @@ template<> struct ei_gemv_selector<OnTheRight,ColMajor,false>
 template<typename ProductType, typename Dest>
 static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
 {
+typedef typename Dest::Index Index;
 // TODO makes sure dest is sequentially stored in memory, otherwise use a temp
-const int size = prod.rhs().rows();
+const Index size = prod.rhs().rows();
-for(int k=0; k<size; ++k)
+for(Index k=0; k<size; ++k)
 dest += (alpha*prod.rhs().coeff(k)) * prod.lhs().col(k);
 }
 };
@@ -395,9 +398,10 @@ template<> struct ei_gemv_selector<OnTheRight,RowMajor,false>
 template<typename ProductType, typename Dest>
 static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
 {
+typedef typename Dest::Index Index;
 // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp
-const int rows = prod.rows();
+const Index rows = prod.rows();
-for(int i=0; i<rows; ++i)
+for(Index i=0; i<rows; ++i)
 dest.coeffRef(i) += alpha * (prod.lhs().row(i).cwiseProduct(prod.rhs().transpose())).sum();
 }
 };
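The column-major outer-product selector above updates the destination one column at a time: dest.col(j) += (alpha * rhs(j)) * lhs. The same update written against the public API (Eigen::Index is the modern spelling of the index type; any signed integer works here):

    #include <Eigen/Dense>

    // dest += alpha * u * v^T, one column per iteration, as in the
    // column-major selector above. Requires dest.rows() == u.size(),
    // dest.cols() == v.size().
    void outer_update(Eigen::MatrixXd& dest, const Eigen::VectorXd& u,
                      const Eigen::VectorXd& v, double alpha)
    {
      for (Eigen::Index j = 0; j < dest.cols(); ++j)
        dest.col(j) += (alpha * v(j)) * u;
    }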
@@ -100,8 +100,8 @@ class ProductBase : public MatrixBase<Derived>
 && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
 }
-inline int rows() const { return m_lhs.rows(); }
+inline Index rows() const { return m_lhs.rows(); }
-inline int cols() const { return m_rhs.cols(); }
+inline Index cols() const { return m_rhs.cols(); }
 template<typename Dest>
 inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); }
@@ -133,7 +133,7 @@ class ProductBase : public MatrixBase<Derived>
 const Diagonal<FullyLazyCoeffBaseProductType,Index> diagonal() const
 { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
-const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(int index) const
+const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(Index index) const
 { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); }
 protected:
@@ -146,10 +146,10 @@ class ProductBase : public MatrixBase<Derived>
 private:
 // discard coeff methods
-void coeff(int,int) const;
+void coeff(Index,Index) const;
-void coeffRef(int,int);
+void coeffRef(Index,Index);
-void coeff(int) const;
+void coeff(Index) const;
-void coeffRef(int);
+void coeffRef(Index);
 };
 // here we need to overload the nested rule for products
@@ -176,15 +176,16 @@ template<typename Func, typename Derived>
 struct ei_redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
 {
 typedef typename Derived::Scalar Scalar;
+typedef typename Derived::Index Index;
 static Scalar run(const Derived& mat, const Func& func)
 {
 ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix");
 Scalar res;
 res = mat.coeffByOuterInner(0, 0);
-for(int i = 1; i < mat.innerSize(); ++i)
+for(Index i = 1; i < mat.innerSize(); ++i)
 res = func(res, mat.coeffByOuterInner(0, i));
-for(int i = 1; i < mat.outerSize(); ++i)
+for(Index i = 1; i < mat.outerSize(); ++i)
-for(int j = 0; j < mat.innerSize(); ++j)
+for(Index j = 0; j < mat.innerSize(); ++j)
 res = func(res, mat.coeffByOuterInner(i, j));
 return res;
 }
@@ -200,37 +201,38 @@ struct ei_redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
 {
 typedef typename Derived::Scalar Scalar;
 typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+typedef typename Derived::Index Index;
 static Scalar run(const Derived& mat, const Func& func)
 {
-const int size = mat.size();
+const Index size = mat.size();
-const int packetSize = ei_packet_traits<Scalar>::size;
+const Index packetSize = ei_packet_traits<Scalar>::size;
-const int alignedStart = ei_first_aligned(mat);
+const Index alignedStart = ei_first_aligned(mat);
 enum {
 alignment = (Derived::Flags & DirectAccessBit) || (Derived::Flags & AlignedBit)
 ? Aligned : Unaligned
 };
-const int alignedSize = ((size-alignedStart)/packetSize)*packetSize;
+const Index alignedSize = ((size-alignedStart)/packetSize)*packetSize;
-const int alignedEnd = alignedStart + alignedSize;
+const Index alignedEnd = alignedStart + alignedSize;
 Scalar res;
 if(alignedSize)
 {
 PacketScalar packet_res = mat.template packet<alignment>(alignedStart);
-for(int index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
+for(Index index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
 packet_res = func.packetOp(packet_res, mat.template packet<alignment>(index));
 res = func.predux(packet_res);
-for(int index = 0; index < alignedStart; ++index)
+for(Index index = 0; index < alignedStart; ++index)
 res = func(res,mat.coeff(index));
-for(int index = alignedEnd; index < size; ++index)
+for(Index index = alignedEnd; index < size; ++index)
 res = func(res,mat.coeff(index));
 }
 else // too small to vectorize anything.
 // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
 {
 res = mat.coeff(0);
-for(int index = 1; index < size; ++index)
+for(Index index = 1; index < size; ++index)
 res = func(res,mat.coeff(index));
 }
@@ -243,26 +245,27 @@ struct ei_redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
 {
 typedef typename Derived::Scalar Scalar;
 typedef typename ei_packet_traits<Scalar>::type PacketScalar;
+typedef typename Derived::Index Index;
 static Scalar run(const Derived& mat, const Func& func)
 {
-const int innerSize = mat.innerSize();
+const Index innerSize = mat.innerSize();
-const int outerSize = mat.outerSize();
+const Index outerSize = mat.outerSize();
 enum {
 packetSize = ei_packet_traits<Scalar>::size
 };
-const int packetedInnerSize = ((innerSize)/packetSize)*packetSize;
+const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
 Scalar res;
 if(packetedInnerSize)
 {
 PacketScalar packet_res = mat.template packet<Unaligned>(0,0);
-for(int j=0; j<outerSize; ++j)
+for(Index j=0; j<outerSize; ++j)
-for(int i=(j==0?packetSize:0); i<packetedInnerSize; i+=int(packetSize))
+for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
 packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned>(j,i));
 res = func.predux(packet_res);
-for(int j=0; j<outerSize; ++j)
+for(Index j=0; j<outerSize; ++j)
-for(int i=packetedInnerSize; i<innerSize; ++i)
+for(Index i=packetedInnerSize; i<innerSize; ++i)
 res = func(res, mat.coeffByOuterInner(j,i));
 }
 else // too small to vectorize anything.
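The linear vectorized path above splits the range into an unaligned head, an aligned packet body and a scalar tail; the three loop bounds are exactly alignedStart, alignedEnd = alignedStart + alignedSize, and size. A scalar-only sketch of that peeling (packet width 4 is an assumption; the middle loop stands in for the packet operations):

    #include <cstddef>

    float sum_peeled(const float* data, std::ptrdiff_t size, std::ptrdiff_t alignedStart)
    {
      const std::ptrdiff_t packetSize  = 4;   // e.g. 4 floats per SSE register
      const std::ptrdiff_t alignedSize = ((size - alignedStart) / packetSize) * packetSize;
      const std::ptrdiff_t alignedEnd  = alignedStart + alignedSize;

      float res = 0.f;
      for (std::ptrdiff_t i = 0; i < alignedStart; ++i)           res += data[i]; // head
      for (std::ptrdiff_t i = alignedStart; i < alignedEnd; ++i)  res += data[i]; // packet body
      for (std::ptrdiff_t i = alignedEnd; i < size; ++i)          res += data[i]; // tail
      return res;
    }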
@@ -57,14 +57,15 @@ template<typename Derived> class ReturnByValue
 {
 public:
 typedef typename ei_traits<Derived>::ReturnType ReturnType;
 typedef typename ei_dense_xpr_base<ReturnByValue>::type Base;
 EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)
 template<typename Dest>
 inline void evalTo(Dest& dst) const
 { static_cast<const Derived* const>(this)->evalTo(dst); }
-inline int rows() const { return static_cast<const Derived* const>(this)->rows(); }
+inline Index rows() const { return static_cast<const Derived* const>(this)->rows(); }
-inline int cols() const { return static_cast<const Derived* const>(this)->cols(); }
+inline Index cols() const { return static_cast<const Derived* const>(this)->cols(); }
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
@@ -72,10 +73,10 @@ template<typename Derived> class ReturnByValue
 Unusable(const Unusable&) {}
 Unusable& operator=(const Unusable&) {return *this;}
 };
-const Unusable& coeff(int) const { return *reinterpret_cast<const Unusable*>(this); }
+const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); }
-const Unusable& coeff(int,int) const { return *reinterpret_cast<const Unusable*>(this); }
+const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
-Unusable& coeffRef(int) { return *reinterpret_cast<Unusable*>(this); }
+Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
-Unusable& coeffRef(int,int) { return *reinterpret_cast<Unusable*>(this); }
+Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
 #endif
 };
@@ -65,6 +65,8 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
 typedef TriangularBase<SelfAdjointView> Base;
 typedef typename ei_traits<SelfAdjointView>::Scalar Scalar;
+typedef typename MatrixType::Index Index;
 enum {
 Mode = ei_traits<SelfAdjointView>::Mode
 };
@@ -73,15 +75,15 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
 inline SelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
 { ei_assert(ei_are_flags_consistent<Mode>::ret); }
-inline int rows() const { return m_matrix.rows(); }
+inline Index rows() const { return m_matrix.rows(); }
-inline int cols() const { return m_matrix.cols(); }
+inline Index cols() const { return m_matrix.cols(); }
-inline int outerStride() const { return m_matrix.outerStride(); }
+inline Index outerStride() const { return m_matrix.outerStride(); }
-inline int innerStride() const { return m_matrix.innerStride(); }
+inline Index innerStride() const { return m_matrix.innerStride(); }
 /** \sa MatrixBase::coeff()
 * \warning the coordinates must fit into the referenced triangular part
 */
-inline Scalar coeff(int row, int col) const
+inline Scalar coeff(Index row, Index col) const
 {
 Base::check_coordinates_internal(row, col);
 return m_matrix.coeff(row, col);
@@ -90,7 +92,7 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
 /** \sa MatrixBase::coeffRef()
 * \warning the coordinates must fit into the referenced triangular part
 */
-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 Base::check_coordinates_internal(row, col);
 return m_matrix.const_cast_derived().coeffRef(row, col);
@@ -230,11 +232,12 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower,
 template<typename Derived1, typename Derived2, bool ClearOpposite>
 struct ei_triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dynamic, ClearOpposite>
 {
+typedef typename Derived1::Index Index;
 inline static void run(Derived1 &dst, const Derived2 &src)
 {
-for(int j = 0; j < dst.cols(); ++j)
+for(Index j = 0; j < dst.cols(); ++j)
 {
-for(int i = 0; i < j; ++i)
+for(Index i = 0; i < j; ++i)
 {
 dst.copyCoeff(i, j, src);
 dst.coeffRef(j,i) = ei_conj(dst.coeff(i,j));
@@ -249,9 +252,10 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower,
 {
 inline static void run(Derived1 &dst, const Derived2 &src)
 {
-for(int i = 0; i < dst.rows(); ++i)
+typedef typename Derived1::Index Index;
+for(Index i = 0; i < dst.rows(); ++i)
 {
-for(int j = 0; j < i; ++j)
+for(Index j = 0; j < i; ++j)
 {
 dst.copyCoeff(i, j, src);
 dst.coeffRef(j,i) = ei_conj(dst.coeff(i,j));
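The assignment selectors above are what fire when a selfadjoint view is copied into a plain matrix: only one triangle is read, the other is filled with conjugated transposes. For example:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);

      // Reads only the upper triangle of A and mirrors it into the lower one.
      Eigen::MatrixXd S = A.selfadjointView<Eigen::Upper>();

      std::cout << (S - S.transpose()).norm() << "\n";   // ~0: S is symmetric
    }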
@@ -55,28 +55,28 @@ template<typename BinaryOp, typename MatrixType> class SelfCwiseBinaryOp
 inline SelfCwiseBinaryOp(MatrixType& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {}
-inline int rows() const { return m_matrix.rows(); }
+inline Index rows() const { return m_matrix.rows(); }
-inline int cols() const { return m_matrix.cols(); }
+inline Index cols() const { return m_matrix.cols(); }
-inline int outerStride() const { return m_matrix.outerStride(); }
+inline Index outerStride() const { return m_matrix.outerStride(); }
-inline int innerStride() const { return m_matrix.innerStride(); }
+inline Index innerStride() const { return m_matrix.innerStride(); }
 inline const Scalar* data() const { return m_matrix.data(); }
 // note that this function is needed by assign to correctly align loads/stores
 // TODO make Assign use .data()
-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 return m_matrix.const_cast_derived().coeffRef(row, col);
 }
 // note that this function is needed by assign to correctly align loads/stores
 // TODO make Assign use .data()
-inline Scalar& coeffRef(int index)
+inline Scalar& coeffRef(Index index)
 {
 return m_matrix.const_cast_derived().coeffRef(index);
 }
 template<typename OtherDerived>
-void copyCoeff(int row, int col, const DenseBase<OtherDerived>& other)
+void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
 {
 OtherDerived& _other = other.const_cast_derived();
 ei_internal_assert(row >= 0 && row < rows()
@@ -86,7 +86,7 @@ template<typename BinaryOp, typename MatrixType> class SelfCwiseBinaryOp
 }
 template<typename OtherDerived>
-void copyCoeff(int index, const DenseBase<OtherDerived>& other)
+void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
 {
 OtherDerived& _other = other.const_cast_derived();
 ei_internal_assert(index >= 0 && index < m_matrix.size());
@@ -95,7 +95,7 @@ template<typename BinaryOp, typename MatrixType> class SelfCwiseBinaryOp
 }
 template<typename OtherDerived, int StoreMode, int LoadMode>
-void copyPacket(int row, int col, const DenseBase<OtherDerived>& other)
+void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
 {
 OtherDerived& _other = other.const_cast_derived();
 ei_internal_assert(row >= 0 && row < rows()
@@ -105,7 +105,7 @@ template<typename BinaryOp, typename MatrixType> class SelfCwiseBinaryOp
 }
 template<typename OtherDerived, int StoreMode, int LoadMode>
-void copyPacket(int index, const DenseBase<OtherDerived>& other)
+void copyPacket(Index index, const DenseBase<OtherDerived>& other)
 {
 OtherDerived& _other = other.const_cast_derived();
 ei_internal_assert(index >= 0 && index < m_matrix.size());
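SelfCwiseBinaryOp backs in-place coefficient-wise updates such as scaling a matrix by a scalar; its coeffRef/copyCoeff/copyPacket members now take Index like every other expression. Typical user-level triggers, for illustration:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 2, 3.0);
      m *= 2.0;            // in-place scalar multiply
      m /= 4.0;            // in-place scalar divide
      m.array() += 1.0;    // in-place coefficient-wise add
      return 0;
    }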
@ -56,29 +56,30 @@ struct ei_triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,NoUnrolling,RowMajor
typedef typename Rhs::Scalar Scalar; typedef typename Rhs::Scalar Scalar;
typedef ei_blas_traits<Lhs> LhsProductTraits; typedef ei_blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::ExtractType ActualLhsType; typedef typename LhsProductTraits::ExtractType ActualLhsType;
typedef typename Lhs::Index Index;
enum { enum {
IsLower = ((Mode&Lower)==Lower) IsLower = ((Mode&Lower)==Lower)
}; };
static void run(const Lhs& lhs, Rhs& other) static void run(const Lhs& lhs, Rhs& other)
{ {
static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
ActualLhsType actualLhs = LhsProductTraits::extract(lhs); ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
const int size = lhs.cols(); const Index size = lhs.cols();
for(int pi=IsLower ? 0 : size; for(Index pi=IsLower ? 0 : size;
IsLower ? pi<size : pi>0; IsLower ? pi<size : pi>0;
IsLower ? pi+=PanelWidth : pi-=PanelWidth) IsLower ? pi+=PanelWidth : pi-=PanelWidth)
{ {
int actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
int r = IsLower ? pi : size - pi; // remaining size Index r = IsLower ? pi : size - pi; // remaining size
if (r > 0) if (r > 0)
{ {
// let's directly call the low level product function because: // let's directly call the low level product function because:
// 1 - it is faster to compile // 1 - it is faster to compile
// 2 - it is slightly faster at runtime // 2 - it is slightly faster at runtime
int startRow = IsLower ? pi : pi-actualPanelWidth; Index startRow = IsLower ? pi : pi-actualPanelWidth;
int startCol = IsLower ? 0 : pi; Index startCol = IsLower ? 0 : pi;
VectorBlock<Rhs,Dynamic> target(other,startRow,actualPanelWidth); VectorBlock<Rhs,Dynamic> target(other,startRow,actualPanelWidth);
ei_cache_friendly_product_rowmajor_times_vector<LhsProductTraits::NeedToConjugate,false>( ei_cache_friendly_product_rowmajor_times_vector<LhsProductTraits::NeedToConjugate,false>(
@ -87,10 +88,10 @@ struct ei_triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,NoUnrolling,RowMajor
target, Scalar(-1)); target, Scalar(-1));
} }
for(int k=0; k<actualPanelWidth; ++k) for(Index k=0; k<actualPanelWidth; ++k)
{ {
int i = IsLower ? pi+k : pi-k-1; Index i = IsLower ? pi+k : pi-k-1;
int s = IsLower ? pi : i+1; Index s = IsLower ? pi : i+1;
if (k>0) if (k>0)
other.coeffRef(i) -= (lhs.row(i).segment(s,k).transpose().cwiseProduct(other.segment(s,k))).sum(); other.coeffRef(i) -= (lhs.row(i).segment(s,k).transpose().cwiseProduct(other.segment(s,k))).sum();
@ -109,6 +110,7 @@ struct ei_triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,NoUnrolling,ColMajor
typedef typename ei_packet_traits<Scalar>::type Packet; typedef typename ei_packet_traits<Scalar>::type Packet;
typedef ei_blas_traits<Lhs> LhsProductTraits; typedef ei_blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::ExtractType ActualLhsType; typedef typename LhsProductTraits::ExtractType ActualLhsType;
typedef typename Lhs::Index Index;
enum { enum {
PacketSize = ei_packet_traits<Scalar>::size, PacketSize = ei_packet_traits<Scalar>::size,
IsLower = ((Mode&Lower)==Lower) IsLower = ((Mode&Lower)==Lower)
@ -116,30 +118,30 @@ struct ei_triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,NoUnrolling,ColMajor
static void run(const Lhs& lhs, Rhs& other) static void run(const Lhs& lhs, Rhs& other)
{ {
static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
ActualLhsType actualLhs = LhsProductTraits::extract(lhs); ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
const int size = lhs.cols(); const Index size = lhs.cols();
for(int pi=IsLower ? 0 : size; for(Index pi=IsLower ? 0 : size;
IsLower ? pi<size : pi>0; IsLower ? pi<size : pi>0;
IsLower ? pi+=PanelWidth : pi-=PanelWidth) IsLower ? pi+=PanelWidth : pi-=PanelWidth)
{ {
int actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth); Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
int startBlock = IsLower ? pi : pi-actualPanelWidth; Index startBlock = IsLower ? pi : pi-actualPanelWidth;
int endBlock = IsLower ? pi + actualPanelWidth : 0; Index endBlock = IsLower ? pi + actualPanelWidth : 0;
for(int k=0; k<actualPanelWidth; ++k) for(Index k=0; k<actualPanelWidth; ++k)
{ {
int i = IsLower ? pi+k : pi-k-1; Index i = IsLower ? pi+k : pi-k-1;
if(!(Mode & UnitDiag)) if(!(Mode & UnitDiag))
other.coeffRef(i) /= lhs.coeff(i,i); other.coeffRef(i) /= lhs.coeff(i,i);
int r = actualPanelWidth - k - 1; // remaining size Index r = actualPanelWidth - k - 1; // remaining size
int s = IsLower ? i+1 : i-r; Index s = IsLower ? i+1 : i-r;
if (r>0) if (r>0)
other.segment(s,r) -= other.coeffRef(i) * Block<Lhs,Dynamic,1>(lhs, s, i, r, 1); other.segment(s,r) -= other.coeffRef(i) * Block<Lhs,Dynamic,1>(lhs, s, i, r, 1);
} }
int r = IsLower ? size - endBlock : startBlock; // remaining size Index r = IsLower ? size - endBlock : startBlock; // remaining size
if (r > 0) if (r > 0)
{ {
// let's directly call the low level product function because: // let's directly call the low level product function because:
@ -168,7 +170,7 @@ struct ei_triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,Unrolling,StorageOr
} }
}; };
template <typename Scalar, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder> template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
struct ei_triangular_solve_matrix; struct ei_triangular_solve_matrix;
// the rhs is a matrix // the rhs is a matrix
@ -176,12 +178,13 @@ template<typename Lhs, typename Rhs, int Side, int Mode, int StorageOrder>
struct ei_triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,StorageOrder,Dynamic> struct ei_triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,StorageOrder,Dynamic>
{ {
typedef typename Rhs::Scalar Scalar; typedef typename Rhs::Scalar Scalar;
typedef typename Rhs::Index Index;
typedef ei_blas_traits<Lhs> LhsProductTraits; typedef ei_blas_traits<Lhs> LhsProductTraits;
typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType; typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
static void run(const Lhs& lhs, Rhs& rhs) static void run(const Lhs& lhs, Rhs& rhs)
{ {
const ActualLhsType actualLhs = LhsProductTraits::extract(lhs); const ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
ei_triangular_solve_matrix<Scalar,Side,Mode,LhsProductTraits::NeedToConjugate,StorageOrder, ei_triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,StorageOrder,
(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor> (Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor>
::run(lhs.rows(), Side==OnTheLeft? rhs.cols() : rhs.rows(), &actualLhs.coeff(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride()); ::run(lhs.rows(), Side==OnTheLeft? rhs.cols() : rhs.rows(), &actualLhs.coeff(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride());
} }
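This selector is what a user-level triangular solve dispatches to; with this patch its loop counters and strides use the Lhs/Rhs Index type. A hedged sketch of the calling side, assuming the TriangularView::solve interface shown elsewhere in the library:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  typedef Eigen::MatrixXd::Index Index;            // the Index typedef introduced here
  const Index n = 4;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  A += 4.0 * Eigen::MatrixXd::Identity(n, n);      // keep the diagonal well conditioned
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);

  // Solve L x = b using only the lower triangular part of A.
  Eigen::VectorXd x = A.triangularView<Eigen::Lower>().solve(b);
  std::cout << (A.triangularView<Eigen::Lower>() * x - b).norm() << std::endl;
}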


@ -54,15 +54,15 @@ template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::stableNorm() const MatrixBase<Derived>::stableNorm() const
{ {
const int blockSize = 4096; const Index blockSize = 4096;
RealScalar scale = 0; RealScalar scale = 0;
RealScalar invScale = 1; RealScalar invScale = 1;
RealScalar ssq = 0; // sum of square RealScalar ssq = 0; // sum of square
enum { enum {
Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0 Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
}; };
int n = size(); Index n = size();
int bi = ei_first_aligned(derived()); Index bi = ei_first_aligned(derived());
if (bi>0) if (bi>0)
ei_stable_norm_kernel(this->head(bi), ssq, scale, invScale); ei_stable_norm_kernel(this->head(bi), ssq, scale, invScale);
for (; bi<n; bi+=blockSize) for (; bi<n; bi+=blockSize)
@ -83,11 +83,11 @@ template<typename Derived>
inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
MatrixBase<Derived>::blueNorm() const MatrixBase<Derived>::blueNorm() const
{ {
static int nmax = -1; static Index nmax = -1;
static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr; static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
if(nmax <= 0) if(nmax <= 0)
{ {
int nbig, ibeta, it, iemin, iemax, iexp; Index nbig, ibeta, it, iemin, iemax, iexp;
RealScalar abig, eps; RealScalar abig, eps;
// This program calculates the machine-dependent constants // This program calculates the machine-dependent constants
// bl, b2, slm, s2m, relerr overfl, nmax // bl, b2, slm, s2m, relerr overfl, nmax
@ -97,7 +97,7 @@ MatrixBase<Derived>::blueNorm() const
// For portability, the PORT subprograms "ilmaeh" and "rlmach" // For portability, the PORT subprograms "ilmaeh" and "rlmach"
// are used. For any specific computer, each of the assignment // are used. For any specific computer, each of the assignment
// statements can be replaced // statements can be replaced
nbig = std::numeric_limits<int>::max(); // largest integer nbig = std::numeric_limits<Index>::max(); // largest integer
ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
@ -121,12 +121,12 @@ MatrixBase<Derived>::blueNorm() const
if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n
else nmax = nbig; else nmax = nbig;
} }
int n = size(); Index n = size();
RealScalar ab2 = b2 / RealScalar(n); RealScalar ab2 = b2 / RealScalar(n);
RealScalar asml = RealScalar(0); RealScalar asml = RealScalar(0);
RealScalar amed = RealScalar(0); RealScalar amed = RealScalar(0);
RealScalar abig = RealScalar(0); RealScalar abig = RealScalar(0);
for(int j=0; j<n; ++j) for(Index j=0; j<n; ++j)
{ {
RealScalar ax = ei_abs(coeff(j)); RealScalar ax = ei_abs(coeff(j));
if(ax > ab2) abig += ei_abs2(ax*s2m); if(ax > ab2) abig += ei_abs2(ax*s2m);
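The point of stableNorm()/blueNorm() over norm() is robustness against overflow and underflow of the intermediate sum of squares; only the index and loop-counter types change here. A small illustration, assuming double precision:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // 1e155 squared overflows double, so the naive sum of squares blows up
  // while the scaled algorithms return the correct value, about 1e156.
  Eigen::VectorXd v = Eigen::VectorXd::Constant(100, 1e155);
  std::cout << "norm():       " << v.norm()       << std::endl; // inf
  std::cout << "stableNorm(): " << v.stableNorm() << std::endl; // ~1e156
  std::cout << "blueNorm():   " << v.blueNorm()   << std::endl; // ~1e156
}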


@ -86,8 +86,8 @@ class Stride
inline int inner() const { return m_inner.value(); } inline int inner() const { return m_inner.value(); }
protected: protected:
ei_int_if_dynamic<OuterStrideAtCompileTime> m_outer; ei_variable_if_dynamic<int, OuterStrideAtCompileTime> m_outer;
ei_int_if_dynamic<InnerStrideAtCompileTime> m_inner; ei_variable_if_dynamic<int, InnerStrideAtCompileTime> m_inner;
}; };
/** \brief Convenience specialization of Stride to specify only an inner stride */ /** \brief Convenience specialization of Stride to specify only an inner stride */
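Stride's members now go through ei_variable_if_dynamic; in practice the class is consumed through Map. A short sketch of mapping strided external data, assuming the Map/InnerStride interface declared in this same header:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  double data[12];
  for (int i = 0; i < 12; ++i) data[i] = i;

  // View every second entry of 'data' as a 6-element vector; the inner
  // stride is fixed at compile time, so no per-object storage is needed.
  Eigen::Map<Eigen::VectorXd, 0, Eigen::InnerStride<2> > v(data, 6);
  std::cout << v.transpose() << std::endl;   // 0 2 4 6 8 10
}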


@ -45,23 +45,23 @@ template<typename ExpressionType> class SwapWrapper
inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {} inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
inline int rows() const { return m_expression.rows(); } inline Index rows() const { return m_expression.rows(); }
inline int cols() const { return m_expression.cols(); } inline Index cols() const { return m_expression.cols(); }
inline int outerStride() const { return m_expression.outerStride(); } inline Index outerStride() const { return m_expression.outerStride(); }
inline int innerStride() const { return m_expression.innerStride(); } inline Index innerStride() const { return m_expression.innerStride(); }
inline Scalar& coeffRef(int row, int col) inline Scalar& coeffRef(Index row, Index col)
{ {
return m_expression.const_cast_derived().coeffRef(row, col); return m_expression.const_cast_derived().coeffRef(row, col);
} }
inline Scalar& coeffRef(int index) inline Scalar& coeffRef(Index index)
{ {
return m_expression.const_cast_derived().coeffRef(index); return m_expression.const_cast_derived().coeffRef(index);
} }
template<typename OtherDerived> template<typename OtherDerived>
void copyCoeff(int row, int col, const DenseBase<OtherDerived>& other) void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{ {
OtherDerived& _other = other.const_cast_derived(); OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
@ -72,7 +72,7 @@ template<typename ExpressionType> class SwapWrapper
} }
template<typename OtherDerived> template<typename OtherDerived>
void copyCoeff(int index, const DenseBase<OtherDerived>& other) void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{ {
OtherDerived& _other = other.const_cast_derived(); OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(index >= 0 && index < m_expression.size()); ei_internal_assert(index >= 0 && index < m_expression.size());
@ -82,7 +82,7 @@ template<typename ExpressionType> class SwapWrapper
} }
template<typename OtherDerived, int StoreMode, int LoadMode> template<typename OtherDerived, int StoreMode, int LoadMode>
void copyPacket(int row, int col, const DenseBase<OtherDerived>& other) void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{ {
OtherDerived& _other = other.const_cast_derived(); OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(row >= 0 && row < rows() ei_internal_assert(row >= 0 && row < rows()
@ -95,7 +95,7 @@ template<typename ExpressionType> class SwapWrapper
} }
template<typename OtherDerived, int StoreMode, int LoadMode> template<typename OtherDerived, int StoreMode, int LoadMode>
void copyPacket(int index, const DenseBase<OtherDerived>& other) void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{ {
OtherDerived& _other = other.const_cast_derived(); OtherDerived& _other = other.const_cast_derived();
ei_internal_assert(index >= 0 && index < m_expression.size()); ei_internal_assert(index >= 0 && index < m_expression.size());
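SwapWrapper is the expression behind DenseBase::swap() when the operands are general expressions rather than plain resizable matrices; its coefficient and packet hooks above now take Index. A hedged user-level sketch; swapping two block expressions is the case that exercises this path:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf a = Eigen::MatrixXf::Zero(3, 3);
  Eigen::MatrixXf b = Eigen::MatrixXf::Ones(3, 3);
  // Swapping two column expressions exchanges their coefficients in place,
  // without allocating a temporary.
  a.col(0).swap(b.col(2));
  std::cout << a << "\n---\n" << b << std::endl;
}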


@ -72,8 +72,8 @@ template<typename MatrixType> class Transpose
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
inline int rows() const { return m_matrix.cols(); } inline Index rows() const { return m_matrix.cols(); }
inline int cols() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.rows(); }
/** \returns the nested expression */ /** \returns the nested expression */
const typename ei_cleantype<typename MatrixType::Nested>::type& const typename ei_cleantype<typename MatrixType::Nested>::type&
@ -107,51 +107,51 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
typedef typename ei_TransposeImpl_base<MatrixType>::type Base; typedef typename ei_TransposeImpl_base<MatrixType>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>) EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
inline int innerStride() const { return derived().nestedExpression().innerStride(); } inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
inline int outerStride() const { return derived().nestedExpression().outerStride(); } inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
inline Scalar* data() { return derived().nestedExpression().data(); } inline Scalar* data() { return derived().nestedExpression().data(); }
inline const Scalar* data() const { return derived().nestedExpression().data(); } inline const Scalar* data() const { return derived().nestedExpression().data(); }
inline Scalar& coeffRef(int row, int col) inline Scalar& coeffRef(Index row, Index col)
{ {
return const_cast_derived().nestedExpression().coeffRef(col, row); return const_cast_derived().nestedExpression().coeffRef(col, row);
} }
inline Scalar& coeffRef(int index) inline Scalar& coeffRef(Index index)
{ {
return const_cast_derived().nestedExpression().coeffRef(index); return const_cast_derived().nestedExpression().coeffRef(index);
} }
inline const CoeffReturnType coeff(int row, int col) const inline const CoeffReturnType coeff(Index row, Index col) const
{ {
return derived().nestedExpression().coeff(col, row); return derived().nestedExpression().coeff(col, row);
} }
inline const CoeffReturnType coeff(int index) const inline const CoeffReturnType coeff(Index index) const
{ {
return derived().nestedExpression().coeff(index); return derived().nestedExpression().coeff(index);
} }
template<int LoadMode> template<int LoadMode>
inline const PacketScalar packet(int row, int col) const inline const PacketScalar packet(Index row, Index col) const
{ {
return derived().nestedExpression().template packet<LoadMode>(col, row); return derived().nestedExpression().template packet<LoadMode>(col, row);
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int row, int col, const PacketScalar& x) inline void writePacket(Index row, Index col, const PacketScalar& x)
{ {
const_cast_derived().nestedExpression().template writePacket<LoadMode>(col, row, x); const_cast_derived().nestedExpression().template writePacket<LoadMode>(col, row, x);
} }
template<int LoadMode> template<int LoadMode>
inline const PacketScalar packet(int index) const inline const PacketScalar packet(Index index) const
{ {
return derived().nestedExpression().template packet<LoadMode>(index); return derived().nestedExpression().template packet<LoadMode>(index);
} }
template<int LoadMode> template<int LoadMode>
inline void writePacket(int index, const PacketScalar& x) inline void writePacket(Index index, const PacketScalar& x)
{ {
const_cast_derived().nestedExpression().template writePacket<LoadMode>(index, x); const_cast_derived().nestedExpression().template writePacket<LoadMode>(index, x);
} }


@ -45,31 +45,33 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
MaxColsAtCompileTime = ei_traits<Derived>::MaxColsAtCompileTime MaxColsAtCompileTime = ei_traits<Derived>::MaxColsAtCompileTime
}; };
typedef typename ei_traits<Derived>::Scalar Scalar; typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_traits<Derived>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
inline TriangularBase() { ei_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } inline TriangularBase() { ei_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
inline int rows() const { return derived().rows(); } inline Index rows() const { return derived().rows(); }
inline int cols() const { return derived().cols(); } inline Index cols() const { return derived().cols(); }
inline int outerStride() const { return derived().outerStride(); } inline Index outerStride() const { return derived().outerStride(); }
inline int innerStride() const { return derived().innerStride(); } inline Index innerStride() const { return derived().innerStride(); }
inline Scalar coeff(int row, int col) const { return derived().coeff(row,col); } inline Scalar coeff(Index row, Index col) const { return derived().coeff(row,col); }
inline Scalar& coeffRef(int row, int col) { return derived().coeffRef(row,col); } inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }
/** \see MatrixBase::copyCoeff(row,col) /** \see MatrixBase::copyCoeff(row,col)
*/ */
template<typename Other> template<typename Other>
EIGEN_STRONG_INLINE void copyCoeff(int row, int col, Other& other) EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)
{ {
derived().coeffRef(row, col) = other.coeff(row, col); derived().coeffRef(row, col) = other.coeff(row, col);
} }
inline Scalar operator()(int row, int col) const inline Scalar operator()(Index row, Index col) const
{ {
check_coordinates(row, col); check_coordinates(row, col);
return coeff(row,col); return coeff(row,col);
} }
inline Scalar& operator()(int row, int col) inline Scalar& operator()(Index row, Index col)
{ {
check_coordinates(row, col); check_coordinates(row, col);
return coeffRef(row,col); return coeffRef(row,col);
@ -87,7 +89,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
protected: protected:
void check_coordinates(int row, int col) void check_coordinates(Index row, Index col)
{ {
EIGEN_ONLY_USED_FOR_DEBUG(row); EIGEN_ONLY_USED_FOR_DEBUG(row);
EIGEN_ONLY_USED_FOR_DEBUG(col); EIGEN_ONLY_USED_FOR_DEBUG(col);
@ -99,12 +101,12 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
} }
#ifdef EIGEN_INTERNAL_DEBUGGING #ifdef EIGEN_INTERNAL_DEBUGGING
void check_coordinates_internal(int row, int col) void check_coordinates_internal(Index row, Index col)
{ {
check_coordinates(row, col); check_coordinates(row, col);
} }
#else #else
void check_coordinates_internal(int , int ) {} void check_coordinates_internal(Index , Index ) {}
#endif #endif
}; };
@ -156,6 +158,9 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
typedef typename ei_cleantype<MatrixTypeNested>::type _MatrixTypeNested; typedef typename ei_cleantype<MatrixTypeNested>::type _MatrixTypeNested;
using TriangularBase<TriangularView<_MatrixType, _Mode> >::evalToLazy; using TriangularBase<TriangularView<_MatrixType, _Mode> >::evalToLazy;
typedef typename ei_traits<TriangularView>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
enum { enum {
Mode = _Mode, Mode = _Mode,
TransposeMode = (Mode & Upper ? Lower : 0) TransposeMode = (Mode & Upper ? Lower : 0)
@ -167,10 +172,10 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
inline TriangularView(const MatrixType& matrix) : m_matrix(matrix) inline TriangularView(const MatrixType& matrix) : m_matrix(matrix)
{ ei_assert(ei_are_flags_consistent<Mode>::ret); } { ei_assert(ei_are_flags_consistent<Mode>::ret); }
inline int rows() const { return m_matrix.rows(); } inline Index rows() const { return m_matrix.rows(); }
inline int cols() const { return m_matrix.cols(); } inline Index cols() const { return m_matrix.cols(); }
inline int outerStride() const { return m_matrix.outerStride(); } inline Index outerStride() const { return m_matrix.outerStride(); }
inline int innerStride() const { return m_matrix.innerStride(); } inline Index innerStride() const { return m_matrix.innerStride(); }
/** \sa MatrixBase::operator+=() */ /** \sa MatrixBase::operator+=() */
template<typename Other> TriangularView& operator+=(const Other& other) { return *this = m_matrix + other; } template<typename Other> TriangularView& operator+=(const Other& other) { return *this = m_matrix + other; }
@ -194,7 +199,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
/** \sa MatrixBase::coeff() /** \sa MatrixBase::coeff()
* \warning the coordinates must fit into the referenced triangular part * \warning the coordinates must fit into the referenced triangular part
*/ */
inline Scalar coeff(int row, int col) const inline Scalar coeff(Index row, Index col) const
{ {
Base::check_coordinates_internal(row, col); Base::check_coordinates_internal(row, col);
return m_matrix.coeff(row, col); return m_matrix.coeff(row, col);
@ -203,7 +208,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
/** \sa MatrixBase::coeffRef() /** \sa MatrixBase::coeffRef()
* \warning the coordinates must fit into the referenced triangular part * \warning the coordinates must fit into the referenced triangular part
*/ */
inline Scalar& coeffRef(int row, int col) inline Scalar& coeffRef(Index row, Index col)
{ {
Base::check_coordinates_internal(row, col); Base::check_coordinates_internal(row, col);
return m_matrix.const_cast_derived().coeffRef(row, col); return m_matrix.const_cast_derived().coeffRef(row, col);
@ -371,15 +376,16 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, Mode, 0, ClearOppos
template<typename Derived1, typename Derived2, bool ClearOpposite> template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite> struct ei_triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite>
{ {
typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src) inline static void run(Derived1 &dst, const Derived2 &src)
{ {
for(int j = 0; j < dst.cols(); ++j) for(Index j = 0; j < dst.cols(); ++j)
{ {
int maxi = std::min(j, dst.rows()-1); Index maxi = std::min(j, dst.rows()-1);
for(int i = 0; i <= maxi; ++i) for(Index i = 0; i <= maxi; ++i)
dst.copyCoeff(i, j, src); dst.copyCoeff(i, j, src);
if (ClearOpposite) if (ClearOpposite)
for(int i = maxi+1; i < dst.rows(); ++i) for(Index i = maxi+1; i < dst.rows(); ++i)
dst.coeffRef(i, j) = 0; dst.coeffRef(i, j) = 0;
} }
} }
@ -388,15 +394,16 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, Cle
template<typename Derived1, typename Derived2, bool ClearOpposite> template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite> struct ei_triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite>
{ {
typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src) inline static void run(Derived1 &dst, const Derived2 &src)
{ {
for(int j = 0; j < dst.cols(); ++j) for(Index j = 0; j < dst.cols(); ++j)
{ {
for(int i = j; i < dst.rows(); ++i) for(Index i = j; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src); dst.copyCoeff(i, j, src);
int maxi = std::min(j, dst.rows()); Index maxi = std::min(j, dst.rows());
if (ClearOpposite) if (ClearOpposite)
for(int i = 0; i < maxi; ++i) for(Index i = 0; i < maxi; ++i)
dst.coeffRef(i, j) = 0; dst.coeffRef(i, j) = 0;
} }
} }
@ -405,15 +412,16 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, Cle
template<typename Derived1, typename Derived2, bool ClearOpposite> template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite> struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
{ {
typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src) inline static void run(Derived1 &dst, const Derived2 &src)
{ {
for(int j = 0; j < dst.cols(); ++j) for(Index j = 0; j < dst.cols(); ++j)
{ {
int maxi = std::min(j, dst.rows()); Index maxi = std::min(j, dst.rows());
for(int i = 0; i < maxi; ++i) for(Index i = 0; i < maxi; ++i)
dst.copyCoeff(i, j, src); dst.copyCoeff(i, j, src);
if (ClearOpposite) if (ClearOpposite)
for(int i = maxi; i < dst.rows(); ++i) for(Index i = maxi; i < dst.rows(); ++i)
dst.coeffRef(i, j) = 0; dst.coeffRef(i, j) = 0;
} }
} }
@ -422,15 +430,16 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dyna
template<typename Derived1, typename Derived2, bool ClearOpposite> template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite> struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite>
{ {
typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src) inline static void run(Derived1 &dst, const Derived2 &src)
{ {
for(int j = 0; j < dst.cols(); ++j) for(Index j = 0; j < dst.cols(); ++j)
{ {
for(int i = j+1; i < dst.rows(); ++i) for(Index i = j+1; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src); dst.copyCoeff(i, j, src);
int maxi = std::min(j, dst.rows()-1); Index maxi = std::min(j, dst.rows()-1);
if (ClearOpposite) if (ClearOpposite)
for(int i = 0; i <= maxi; ++i) for(Index i = 0; i <= maxi; ++i)
dst.coeffRef(i, j) = 0; dst.coeffRef(i, j) = 0;
} }
} }
@ -439,16 +448,17 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dyna
template<typename Derived1, typename Derived2, bool ClearOpposite> template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite> struct ei_triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite>
{ {
typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src) inline static void run(Derived1 &dst, const Derived2 &src)
{ {
for(int j = 0; j < dst.cols(); ++j) for(Index j = 0; j < dst.cols(); ++j)
{ {
int maxi = std::min(j, dst.rows()); Index maxi = std::min(j, dst.rows());
for(int i = 0; i < maxi; ++i) for(Index i = 0; i < maxi; ++i)
dst.copyCoeff(i, j, src); dst.copyCoeff(i, j, src);
if (ClearOpposite) if (ClearOpposite)
{ {
for(int i = maxi+1; i < dst.rows(); ++i) for(Index i = maxi+1; i < dst.rows(); ++i)
dst.coeffRef(i, j) = 0; dst.coeffRef(i, j) = 0;
} }
} }
@ -458,16 +468,17 @@ struct ei_triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic,
template<typename Derived1, typename Derived2, bool ClearOpposite> template<typename Derived1, typename Derived2, bool ClearOpposite>
struct ei_triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite> struct ei_triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite>
{ {
typedef typename Derived1::Index Index;
inline static void run(Derived1 &dst, const Derived2 &src) inline static void run(Derived1 &dst, const Derived2 &src)
{ {
for(int j = 0; j < dst.cols(); ++j) for(Index j = 0; j < dst.cols(); ++j)
{ {
int maxi = std::min(j, dst.rows()); Index maxi = std::min(j, dst.rows());
for(int i = maxi+1; i < dst.rows(); ++i) for(Index i = maxi+1; i < dst.rows(); ++i)
dst.copyCoeff(i, j, src); dst.copyCoeff(i, j, src);
if (ClearOpposite) if (ClearOpposite)
{ {
for(int i = 0; i < maxi; ++i) for(Index i = 0; i < maxi; ++i)
dst.coeffRef(i, j) = 0; dst.coeffRef(i, j) = 0;
} }
} }
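These assignment selectors copy only the referenced triangle and, when ClearOpposite is set, zero out the other one, now iterating with Derived1::Index. In user code this is what runs behind an assignment to a triangular view; a hedged sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
  // Only the upper triangular coefficients of m are written; the strictly
  // lower part is left untouched (here: zero).
  m.triangularView<Eigen::Upper>() = Eigen::Matrix3d::Constant(1.0);
  std::cout << m << std::endl;
}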
@ -638,18 +649,18 @@ template<typename Derived>
bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
{ {
RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1); RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
{ {
int maxi = std::min(j, rows()-1); Index maxi = std::min(j, rows()-1);
for(int i = 0; i <= maxi; ++i) for(Index i = 0; i <= maxi; ++i)
{ {
RealScalar absValue = ei_abs(coeff(i,j)); RealScalar absValue = ei_abs(coeff(i,j));
if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue; if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
} }
} }
RealScalar threshold = maxAbsOnUpperPart * prec; RealScalar threshold = maxAbsOnUpperPart * prec;
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
for(int i = j+1; i < rows(); ++i) for(Index i = j+1; i < rows(); ++i)
if(ei_abs(coeff(i, j)) > threshold) return false; if(ei_abs(coeff(i, j)) > threshold) return false;
return true; return true;
} }
@ -663,17 +674,17 @@ template<typename Derived>
bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
{ {
RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1); RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
for(int j = 0; j < cols(); ++j) for(Index j = 0; j < cols(); ++j)
for(int i = j; i < rows(); ++i) for(Index i = j; i < rows(); ++i)
{ {
RealScalar absValue = ei_abs(coeff(i,j)); RealScalar absValue = ei_abs(coeff(i,j));
if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue; if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
} }
RealScalar threshold = maxAbsOnLowerPart * prec; RealScalar threshold = maxAbsOnLowerPart * prec;
for(int j = 1; j < cols(); ++j) for(Index j = 1; j < cols(); ++j)
{ {
int maxi = std::min(j, rows()-1); Index maxi = std::min(j, rows()-1);
for(int i = 0; i < maxi; ++i) for(Index i = 0; i < maxi; ++i)
if(ei_abs(coeff(i, j)) > threshold) return false; if(ei_abs(coeff(i, j)) > threshold) return false;
} }
return true; return true;
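Both predicates first find the largest absolute value on the referenced triangle, then check the opposite triangle against prec times that value; only the loop indices change. For example, assuming the default precision:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d u = Eigen::Matrix3d::Zero();
  u.triangularView<Eigen::Upper>() = Eigen::Matrix3d::Random();
  std::cout << std::boolalpha
            << u.isUpperTriangular() << ' '    // true: the lower part is exactly 0
            << u.isLowerTriangular() << '\n';  // false in general
}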


@ -34,7 +34,7 @@
* \param Size size of the sub-vector we are taking at compile time (optional) * \param Size size of the sub-vector we are taking at compile time (optional)
* *
* This class represents an expression of either a fixed-size or dynamic-size sub-vector. * This class represents an expression of either a fixed-size or dynamic-size sub-vector.
* It is the return type of DenseBase::segment(int,int) and DenseBase::segment<int>(int) and * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
* most of the time this is the only way it is used. * most of the time this is the only way it is used.
* *
* However, if you want to directly manipulate sub-vector expressions, * However, if you want to directly manipulate sub-vector expressions,
@ -53,7 +53,7 @@
* \include class_FixedVectorBlock.cpp * \include class_FixedVectorBlock.cpp
* Output: \verbinclude class_FixedVectorBlock.out * Output: \verbinclude class_FixedVectorBlock.out
* *
* \sa class Block, DenseBase::segment(int,int,int,int), DenseBase::segment(int,int) * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)
*/ */
template<typename VectorType, int Size> template<typename VectorType, int Size>
struct ei_traits<VectorBlock<VectorType, Size> > struct ei_traits<VectorBlock<VectorType, Size> >
@ -81,7 +81,7 @@ template<typename VectorType, int Size> class VectorBlock
/** Dynamic-size constructor /** Dynamic-size constructor
*/ */
inline VectorBlock(const VectorType& vector, int start, int size) inline VectorBlock(const VectorType& vector, Index start, Index size)
: Base(vector, : Base(vector,
IsColVector ? start : 0, IsColVector ? 0 : start, IsColVector ? start : 0, IsColVector ? 0 : start,
IsColVector ? size : 1, IsColVector ? 1 : size) IsColVector ? size : 1, IsColVector ? 1 : size)
@ -91,7 +91,7 @@ template<typename VectorType, int Size> class VectorBlock
/** Fixed-size constructor /** Fixed-size constructor
*/ */
inline VectorBlock(const VectorType& vector, int start) inline VectorBlock(const VectorType& vector, Index start)
: Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
@ -113,20 +113,20 @@ template<typename VectorType, int Size> class VectorBlock
* when it is applied to a fixed-size vector, it inherits a fixed maximal size, * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation. * which means that evaluating it does not cause a dynamic memory allocation.
* *
* \sa class Block, segment(int) * \sa class Block, segment(Index)
*/ */
template<typename Derived> template<typename Derived>
inline VectorBlock<Derived> DenseBase<Derived> inline VectorBlock<Derived> DenseBase<Derived>
::segment(int start, int size) ::segment(Index start, Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), start, size); return VectorBlock<Derived>(derived(), start, size);
} }
/** This is the const version of segment(int,int).*/ /** This is the const version of segment(Index,Index).*/
template<typename Derived> template<typename Derived>
inline const VectorBlock<Derived> inline const VectorBlock<Derived>
DenseBase<Derived>::segment(int start, int size) const DenseBase<Derived>::segment(Index start, Index size) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), start, size); return VectorBlock<Derived>(derived(), start, size);
@ -145,20 +145,20 @@ DenseBase<Derived>::segment(int start, int size) const
* when it is applied to a fixed-size vector, it inherits a fixed maximal size, * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation. * which means that evaluating it does not cause a dynamic memory allocation.
* *
* \sa class Block, block(int,int) * \sa class Block, block(Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline VectorBlock<Derived> inline VectorBlock<Derived>
DenseBase<Derived>::head(int size) DenseBase<Derived>::head(Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size); return VectorBlock<Derived>(derived(), 0, size);
} }
/** This is the const version of head(int).*/ /** This is the const version of head(Index).*/
template<typename Derived> template<typename Derived>
inline const VectorBlock<Derived> inline const VectorBlock<Derived>
DenseBase<Derived>::head(int size) const DenseBase<Derived>::head(Index size) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size); return VectorBlock<Derived>(derived(), 0, size);
@ -177,20 +177,20 @@ DenseBase<Derived>::head(int size) const
* when it is applied to a fixed-size vector, it inherits a fixed maximal size, * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation. * which means that evaluating it does not cause a dynamic memory allocation.
* *
* \sa class Block, block(int,int) * \sa class Block, block(Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline VectorBlock<Derived> inline VectorBlock<Derived>
DenseBase<Derived>::tail(int size) DenseBase<Derived>::tail(Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size); return VectorBlock<Derived>(derived(), this->size() - size, size);
} }
/** This is the const version of tail(int).*/ /** This is the const version of tail(Index).*/
template<typename Derived> template<typename Derived>
inline const VectorBlock<Derived> inline const VectorBlock<Derived>
DenseBase<Derived>::tail(int size) const DenseBase<Derived>::tail(Index size) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size); return VectorBlock<Derived>(derived(), this->size() - size, size);
@ -212,17 +212,17 @@ DenseBase<Derived>::tail(int size) const
template<typename Derived> template<typename Derived>
template<int Size> template<int Size>
inline VectorBlock<Derived,Size> inline VectorBlock<Derived,Size>
DenseBase<Derived>::segment(int start) DenseBase<Derived>::segment(Index start)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived,Size>(derived(), start); return VectorBlock<Derived,Size>(derived(), start);
} }
/** This is the const version of segment<int>(int).*/ /** This is the const version of segment<int>(Index).*/
template<typename Derived> template<typename Derived>
template<int Size> template<int Size>
inline const VectorBlock<Derived,Size> inline const VectorBlock<Derived,Size>
DenseBase<Derived>::segment(int start) const DenseBase<Derived>::segment(Index start) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived,Size>(derived(), start); return VectorBlock<Derived,Size>(derived(), start);
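All of these return a VectorBlock view into the original vector, so they can be read from and written to; only the start/size arguments switch from int to Index. A short usage sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v(6);
  v << 0, 1, 2, 3, 4, 5;
  std::cout << v.head(2).transpose()       << std::endl; // 0 1
  std::cout << v.segment(2, 3).transpose() << std::endl; // 2 3 4
  std::cout << v.tail(2).transpose()       << std::endl; // 4 5
  std::cout << v.segment<3>(1).transpose() << std::endl; // 1 2 3 (fixed size)

  v.segment(2, 2).setZero();                 // blocks are writable views
  std::cout << v.transpose() << std::endl;   // 0 1 0 0 4 5
}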


@ -52,13 +52,14 @@ struct ei_visitor_impl<Visitor, Derived, 1>
template<typename Visitor, typename Derived> template<typename Visitor, typename Derived>
struct ei_visitor_impl<Visitor, Derived, Dynamic> struct ei_visitor_impl<Visitor, Derived, Dynamic>
{ {
typedef typename Derived::Index Index;
inline static void run(const Derived& mat, Visitor& visitor) inline static void run(const Derived& mat, Visitor& visitor)
{ {
visitor.init(mat.coeff(0,0), 0, 0); visitor.init(mat.coeff(0,0), 0, 0);
for(int i = 1; i < mat.rows(); ++i) for(Index i = 1; i < mat.rows(); ++i)
visitor(mat.coeff(i, 0), i, 0); visitor(mat.coeff(i, 0), i, 0);
for(int j = 1; j < mat.cols(); ++j) for(Index j = 1; j < mat.cols(); ++j)
for(int i = 0; i < mat.rows(); ++i) for(Index i = 0; i < mat.rows(); ++i)
visitor(mat.coeff(i, j), i, j); visitor(mat.coeff(i, j), i, j);
} }
}; };
@ -70,16 +71,16 @@ struct ei_visitor_impl<Visitor, Derived, Dynamic>
* \code * \code
* struct MyVisitor { * struct MyVisitor {
* // called for the first coefficient * // called for the first coefficient
* void init(const Scalar& value, int i, int j); * void init(const Scalar& value, Index i, Index j);
* // called for all other coefficients * // called for all other coefficients
* void operator() (const Scalar& value, int i, int j); * void operator() (const Scalar& value, Index i, Index j);
* }; * };
* \endcode * \endcode
* *
* \note compared to one or two \em for \em loops, visitors offer automatic * \note compared to one or two \em for \em loops, visitors offer automatic
* unrolling for small fixed size matrix. * unrolling for small fixed size matrix.
* *
* \sa minCoeff(int*,int*), maxCoeff(int*,int*), DenseBase::redux() * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
*/ */
template<typename Derived> template<typename Derived>
template<typename Visitor> template<typename Visitor>
@ -96,12 +97,14 @@ void DenseBase<Derived>::visit(Visitor& visitor) const
/** \internal /** \internal
* \brief Base class to implement min and max visitors * \brief Base class to implement min and max visitors
*/ */
template <typename Scalar> template <typename Derived>
struct ei_coeff_visitor struct ei_coeff_visitor
{ {
int row, col; typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
Index row, col;
Scalar res; Scalar res;
inline void init(const Scalar& value, int i, int j) inline void init(const Scalar& value, Index i, Index j)
{ {
res = value; res = value;
row = i; row = i;
@ -112,12 +115,14 @@ struct ei_coeff_visitor
/** \internal /** \internal
* \brief Visitor computing the min coefficient with its value and coordinates * \brief Visitor computing the min coefficient with its value and coordinates
* *
* \sa DenseBase::minCoeff(int*, int*) * \sa DenseBase::minCoeff(Index*, Index*)
*/ */
template <typename Scalar> template <typename Derived>
struct ei_min_coeff_visitor : ei_coeff_visitor<Scalar> struct ei_min_coeff_visitor : ei_coeff_visitor<Derived>
{ {
void operator() (const Scalar& value, int i, int j) typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
void operator() (const Scalar& value, Index i, Index j)
{ {
if(value < this->res) if(value < this->res)
{ {
@ -138,12 +143,14 @@ struct ei_functor_traits<ei_min_coeff_visitor<Scalar> > {
/** \internal /** \internal
* \brief Visitor computing the max coefficient with its value and coordinates * \brief Visitor computing the max coefficient with its value and coordinates
* *
* \sa DenseBase::maxCoeff(int*, int*) * \sa DenseBase::maxCoeff(Index*, Index*)
*/ */
template <typename Scalar> template <typename Derived>
struct ei_max_coeff_visitor : ei_coeff_visitor<Scalar> struct ei_max_coeff_visitor : ei_coeff_visitor<Derived>
{ {
void operator() (const Scalar& value, int i, int j) typedef typename Derived::Index Index;
typedef typename Derived::Scalar Scalar;
void operator() (const Scalar& value, Index i, Index j)
{ {
if(value > this->res) if(value > this->res)
{ {
@ -164,13 +171,13 @@ struct ei_functor_traits<ei_max_coeff_visitor<Scalar> > {
/** \returns the minimum of all coefficients of *this /** \returns the minimum of all coefficients of *this
* and puts in *row and *col its location. * and puts in *row and *col its location.
* *
* \sa DenseBase::minCoeff(int*), DenseBase::maxCoeff(int*,int*), DenseBase::visitor(), DenseBase::minCoeff() * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
*/ */
template<typename Derived> template<typename Derived>
typename ei_traits<Derived>::Scalar typename ei_traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(int* row, int* col) const DenseBase<Derived>::minCoeff(Index* row, Index* col) const
{ {
ei_min_coeff_visitor<Scalar> minVisitor; ei_min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor); this->visit(minVisitor);
*row = minVisitor.row; *row = minVisitor.row;
if (col) *col = minVisitor.col; if (col) *col = minVisitor.col;
@ -180,14 +187,14 @@ DenseBase<Derived>::minCoeff(int* row, int* col) const
/** \returns the minimum of all coefficients of *this /** \returns the minimum of all coefficients of *this
* and puts in *index its location. * and puts in *index its location.
* *
* \sa DenseBase::minCoeff(int*,int*), DenseBase::maxCoeff(int*,int*), DenseBase::visitor(), DenseBase::minCoeff() * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
*/ */
template<typename Derived> template<typename Derived>
typename ei_traits<Derived>::Scalar typename ei_traits<Derived>::Scalar
DenseBase<Derived>::minCoeff(int* index) const DenseBase<Derived>::minCoeff(Index* index) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
ei_min_coeff_visitor<Scalar> minVisitor; ei_min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor); this->visit(minVisitor);
*index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row; *index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
return minVisitor.res; return minVisitor.res;
@ -196,13 +203,13 @@ DenseBase<Derived>::minCoeff(int* index) const
/** \returns the maximum of all coefficients of *this /** \returns the maximum of all coefficients of *this
* and puts in *row and *col its location. * and puts in *row and *col its location.
* *
* \sa DenseBase::minCoeff(int*,int*), DenseBase::visitor(), DenseBase::maxCoeff() * \sa DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff()
*/ */
template<typename Derived> template<typename Derived>
typename ei_traits<Derived>::Scalar typename ei_traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(int* row, int* col) const DenseBase<Derived>::maxCoeff(Index* row, Index* col) const
{ {
ei_max_coeff_visitor<Scalar> maxVisitor; ei_max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor); this->visit(maxVisitor);
*row = maxVisitor.row; *row = maxVisitor.row;
if (col) *col = maxVisitor.col; if (col) *col = maxVisitor.col;
@ -212,14 +219,14 @@ DenseBase<Derived>::maxCoeff(int* row, int* col) const
/** \returns the maximum of all coefficients of *this /** \returns the maximum of all coefficients of *this
* and puts in *index its location. * and puts in *index its location.
* *
* \sa DenseBase::maxCoeff(int*,int*), DenseBase::minCoeff(int*,int*), DenseBase::visitor(), DenseBase::maxCoeff() * \sa DenseBase::maxCoeff(Index*,Index*), DenseBase::minCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::maxCoeff()
*/ */
template<typename Derived> template<typename Derived>
typename ei_traits<Derived>::Scalar typename ei_traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff(int* index) const DenseBase<Derived>::maxCoeff(Index* index) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
ei_max_coeff_visitor<Scalar> maxVisitor; ei_max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor); this->visit(maxVisitor);
*index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row; *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;
return maxVisitor.res; return maxVisitor.res;
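The visitor-based minCoeff/maxCoeff overloads now report the location through Index pointers instead of int pointers, which is the only user-visible change in this file. A minimal sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf m(2, 2);
  m << 3, -1,
       7,  2;
  Eigen::MatrixXf::Index i, j;               // was int before this change
  float mn = m.minCoeff(&i, &j);
  std::cout << "min " << mn << " at (" << i << ',' << j << ")\n"; // -1 at (0,1)
  float mx = m.maxCoeff(&i, &j);
  std::cout << "max " << mx << " at (" << i << ',' << j << ")\n"; // 7 at (1,0)
}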


@ -39,10 +39,10 @@
* Note that here the inner-loops should always be unrolled. * Note that here the inner-loops should always be unrolled.
*/ */
template<int Traversal, int Index, typename Lhs, typename Rhs, typename RetScalar> template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl; struct ei_product_coeff_impl;
template<int StorageOrder, int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode> template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl; struct ei_product_packet_impl;
template<typename LhsNested, typename RhsNested, int NestingFlags> template<typename LhsNested, typename RhsNested, int NestingFlags>
@ -159,10 +159,10 @@ class CoeffBasedProduct
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions"); && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
} }
EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); } EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); } EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{ {
Scalar res; Scalar res;
ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
@ -172,17 +172,17 @@ class CoeffBasedProduct
/* Allow index-based non-packet access. It is impossible though to allow index-based packed access, /* Allow index-based non-packet access. It is impossible though to allow index-based packed access,
* which is why we don't set the LinearAccessBit. * which is why we don't set the LinearAccessBit.
*/ */
EIGEN_STRONG_INLINE const Scalar coeff(int index) const EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{ {
Scalar res; Scalar res;
const int row = RowsAtCompileTime == 1 ? 0 : index; const Index row = RowsAtCompileTime == 1 ? 0 : index;
const int col = RowsAtCompileTime == 1 ? index : 0; const Index col = RowsAtCompileTime == 1 ? index : 0;
ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
return res; return res;
} }
template<int LoadMode> template<int LoadMode>
EIGEN_STRONG_INLINE const PacketScalar packet(int row, int col) const EIGEN_STRONG_INLINE const PacketScalar packet(Index row, Index col) const
{ {
PacketScalar res; PacketScalar res;
ei_product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor, ei_product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
@ -205,11 +205,11 @@ class CoeffBasedProduct
const Diagonal<LazyCoeffBasedProductType,0> diagonal() const const Diagonal<LazyCoeffBasedProductType,0> diagonal() const
{ return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); } { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
template<int Index> template<int DiagonalIndex>
const Diagonal<LazyCoeffBasedProductType,Index> diagonal() const const Diagonal<LazyCoeffBasedProductType,DiagonalIndex> diagonal() const
{ return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); } { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
const Diagonal<LazyCoeffBasedProductType,Dynamic> diagonal(int index) const const Diagonal<LazyCoeffBasedProductType,Dynamic> diagonal(Index index) const
{ return reinterpret_cast<const LazyCoeffBasedProductType&>(*this).diagonal(index); } { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this).diagonal(index); }
protected: protected:
@ -235,20 +235,22 @@ struct ei_nested<CoeffBasedProduct<Lhs,Rhs,EvalBeforeNestingBit|EvalBeforeAssign
*** Scalar path - no vectorization *** *** Scalar path - no vectorization ***
**************************************/ **************************************/
template<int Index, typename Lhs, typename Rhs, typename RetScalar> template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<DefaultTraversal, Index, Lhs, Rhs, RetScalar> struct ei_product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{ {
ei_product_coeff_impl<DefaultTraversal, Index-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res); ei_product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
res += lhs.coeff(row, Index) * rhs.coeff(Index, col); res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
} }
}; };
template<typename Lhs, typename Rhs, typename RetScalar> template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar> struct ei_product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{ {
res = lhs.coeff(row, 0) * rhs.coeff(0, col); res = lhs.coeff(row, 0) * rhs.coeff(0, col);
} }
@ -257,11 +259,12 @@ struct ei_product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
template<typename Lhs, typename Rhs, typename RetScalar> template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar> struct ei_product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
{ {
ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); ei_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = lhs.coeff(row, 0) * rhs.coeff(0, col); res = lhs.coeff(row, 0) * rhs.coeff(0, col);
for(int i = 1; i < lhs.cols(); ++i) for(Index i = 1; i < lhs.cols(); ++i)
res += lhs.coeff(row, i) * rhs.coeff(i, col); res += lhs.coeff(row, i) * rhs.coeff(i, col);
} }
}; };
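Renaming the template parameter from Index to UnrollingIndex avoids a clash with the new Index typedef; the unrollers still compute the classic dot product c(i,j) = sum_k lhs(i,k) * rhs(k,j), fully unrolled for small fixed sizes. A tiny worked example:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d a, b;
  a << 1, 2,
       3, 4;
  b << 5, 6,
       7, 8;
  // Each coefficient of the product is an unrolled dot product, e.g.
  // c(1,0) = 3*5 + 4*7 = 43.
  Eigen::Matrix2d c = a * b;
  std::cout << c << std::endl;               // 19 22
                                             // 43 50
}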
@ -270,43 +273,47 @@ struct ei_product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
template<typename Lhs, typename Rhs, typename RetScalar> template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<DefaultTraversal, -1, Lhs, Rhs, RetScalar> struct ei_product_coeff_impl<DefaultTraversal, -1, Lhs, Rhs, RetScalar>
{ {
EIGEN_STRONG_INLINE static void run(int, int, const Lhs&, const Rhs&, RetScalar&) {} typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index, Index, const Lhs&, const Rhs&, RetScalar&) {}
}; };
/******************************************* /*******************************************
*** Scalar path with inner vectorization *** *** Scalar path with inner vectorization ***
*******************************************/ *******************************************/
template<int Index, typename Lhs, typename Rhs, typename PacketScalar> template<int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar>
struct ei_product_coeff_vectorized_unroller struct ei_product_coeff_vectorized_unroller
{ {
typedef typename Lhs::Index Index;
enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size }; enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
{ {
ei_product_coeff_vectorized_unroller<Index-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres); ei_product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
pres = ei_padd(pres, ei_pmul( lhs.template packet<Aligned>(row, Index) , rhs.template packet<Aligned>(Index, col) )); pres = ei_padd(pres, ei_pmul( lhs.template packet<Aligned>(row, UnrollingIndex) , rhs.template packet<Aligned>(UnrollingIndex, col) ));
} }
}; };
template<typename Lhs, typename Rhs, typename PacketScalar> template<typename Lhs, typename Rhs, typename PacketScalar>
struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, PacketScalar> struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, PacketScalar>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
{ {
pres = ei_pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col)); pres = ei_pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
} }
}; };
template<int Index, typename Lhs, typename Rhs, typename RetScalar> template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<InnerVectorizedTraversal, Index, Lhs, Rhs, RetScalar> struct ei_product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{ {
typedef typename Lhs::PacketScalar PacketScalar; typedef typename Lhs::PacketScalar PacketScalar;
typedef typename Lhs::Index Index;
enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size }; enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{ {
PacketScalar pres; PacketScalar pres;
ei_product_coeff_vectorized_unroller<Index+1-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres); ei_product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
ei_product_coeff_impl<DefaultTraversal,Index,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res); ei_product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
res = ei_predux(pres); res = ei_predux(pres);
} }
}; };
@@ -314,7 +321,8 @@ struct ei_product_coeff_impl<InnerVectorizedTraversal, Index, Lhs, Rhs, RetScala
template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime> template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
struct ei_product_coeff_vectorized_dyn_selector struct ei_product_coeff_vectorized_dyn_selector
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{ {
res = lhs.row(row).cwiseProduct(rhs.col(col)).sum(); res = lhs.row(row).cwiseProduct(rhs.col(col)).sum();
} }
@@ -325,7 +333,8 @@ struct ei_product_coeff_vectorized_dyn_selector
template<typename Lhs, typename Rhs, int RhsCols> template<typename Lhs, typename Rhs, int RhsCols>
struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols> struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
{ {
EIGEN_STRONG_INLINE static void run(int /*row*/, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{ {
res = lhs.cwiseProduct(rhs.col(col)).sum(); res = lhs.cwiseProduct(rhs.col(col)).sum();
} }
@@ -334,7 +343,8 @@ struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
template<typename Lhs, typename Rhs, int LhsRows> template<typename Lhs, typename Rhs, int LhsRows>
struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1> struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
{ {
EIGEN_STRONG_INLINE static void run(int row, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{ {
res = lhs.row(row).cwiseProduct(rhs).sum(); res = lhs.row(row).cwiseProduct(rhs).sum();
} }
@@ -343,7 +353,8 @@ struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
template<typename Lhs, typename Rhs> template<typename Lhs, typename Rhs>
struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1> struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
{ {
EIGEN_STRONG_INLINE static void run(int /*row*/, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{ {
res = lhs.cwiseProduct(rhs).sum(); res = lhs.cwiseProduct(rhs).sum();
} }
@@ -352,7 +363,8 @@ struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
template<typename Lhs, typename Rhs, typename RetScalar> template<typename Lhs, typename Rhs, typename RetScalar>
struct ei_product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetScalar> struct ei_product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetScalar>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
{ {
ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, res); ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, res);
} }
@@ -362,30 +374,33 @@ struct ei_product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetSca
*** Packet path *** *** Packet path ***
*******************/ *******************/
template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode> template<int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<RowMajor, Index, Lhs, Rhs, PacketScalar, LoadMode> struct ei_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, PacketScalar, LoadMode>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{ {
ei_product_packet_impl<RowMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res); ei_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packet<LoadMode>(Index, col), res); res = ei_pmadd(ei_pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
} }
}; };
template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode> template<int UnrollingIndex, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<ColMajor, Index, Lhs, Rhs, PacketScalar, LoadMode> struct ei_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, PacketScalar, LoadMode>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{ {
ei_product_packet_impl<ColMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res); ei_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
res = ei_pmadd(lhs.template packet<LoadMode>(row, Index), ei_pset1(rhs.coeff(Index, col)), res); res = ei_pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), ei_pset1(rhs.coeff(UnrollingIndex, col)), res);
} }
}; };
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode> template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<RowMajor, 0, Lhs, Rhs, PacketScalar, LoadMode> struct ei_product_packet_impl<RowMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{ {
res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col)); res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
} }
@@ -394,7 +409,8 @@ struct ei_product_packet_impl<RowMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode> template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<ColMajor, 0, Lhs, Rhs, PacketScalar, LoadMode> struct ei_product_packet_impl<ColMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
{ {
res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col))); res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
} }
@@ -403,11 +419,12 @@ struct ei_product_packet_impl<ColMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode> template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode> struct ei_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
{ {
ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); ei_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col)); res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
for(int i = 1; i < lhs.cols(); ++i) for(Index i = 1; i < lhs.cols(); ++i)
res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res); res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
} }
}; };
@@ -415,11 +432,12 @@ struct ei_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMod
template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode> template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
struct ei_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode> struct ei_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
{ {
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) typedef typename Lhs::Index Index;
EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
{ {
ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); ei_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col))); res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
for(int i = 1; i < lhs.cols(); ++i) for(Index i = 1; i < lhs.cols(); ++i)
res = ei_pmadd(lhs.template packet<LoadMode>(row, i), ei_pset1(rhs.coeff(i, col)), res); res = ei_pmadd(lhs.template packet<LoadMode>(row, i), ei_pset1(rhs.coeff(i, col)), res);
} }
}; };
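The pattern applied throughout the hunks above is uniform: the compile-time unrolling counter stays an int template parameter, renamed UnrollingIndex, while every run-time row/column argument and loop counter switches to the nested Index type exposed by the expression. A minimal sketch of that pattern follows; the names (dot_unroller and friends) are made up for illustration and the types are heavily simplified compared to the real Eigen templates.

    // Hypothetical, stripped-down illustration of the int -> Index migration:
    // the recursion depth stays a compile-time int, run-time coefficients use
    // whatever Index type the expression exposes (e.g. std::ptrdiff_t).
    template<int UnrollingIndex, typename Lhs, typename Rhs, typename Scalar>
    struct dot_unroller
    {
      typedef typename Lhs::Index Index;
      static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Scalar& res)
      {
        dot_unroller<UnrollingIndex-1, Lhs, Rhs, Scalar>::run(row, col, lhs, rhs, res);
        res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
      }
    };
    template<typename Lhs, typename Rhs, typename Scalar>
    struct dot_unroller<0, Lhs, Rhs, Scalar>
    {
      typedef typename Lhs::Index Index;
      static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Scalar& res)
      { res = lhs.coeff(row, 0) * rhs.coeff(0, col); }
    };

Keeping the compile-time counter and the run-time index as two distinct types is what lets Index later be switched to a wider or differently signed integer without touching the unrolling machinery.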

View File

@@ -35,40 +35,40 @@
#endif #endif
// optimized GEneral packed Block * packed Panel product kernel // optimized GEneral packed Block * packed Panel product kernel
template<typename Scalar, int mr, int nr, typename Conj> template<typename Scalar, typename Index, int mr, int nr, typename Conj>
struct ei_gebp_kernel struct ei_gebp_kernel
{ {
void operator()(Scalar* res, int resStride, const Scalar* blockA, const Scalar* blockB, int rows, int depth, int cols, void operator()(Scalar* res, Index resStride, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols,
int strideA=-1, int strideB=-1, int offsetA=0, int offsetB=0, Scalar* unpackedB = 0) Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, Scalar* unpackedB = 0)
{ {
typedef typename ei_packet_traits<Scalar>::type PacketType; typedef typename ei_packet_traits<Scalar>::type PacketType;
enum { PacketSize = ei_packet_traits<Scalar>::size }; enum { PacketSize = ei_packet_traits<Scalar>::size };
if(strideA==-1) strideA = depth; if(strideA==-1) strideA = depth;
if(strideB==-1) strideB = depth; if(strideB==-1) strideB = depth;
Conj cj; Conj cj;
int packet_cols = (cols/nr) * nr; Index packet_cols = (cols/nr) * nr;
const int peeled_mc = (rows/mr)*mr; const Index peeled_mc = (rows/mr)*mr;
const int peeled_mc2 = peeled_mc + (rows-peeled_mc >= PacketSize ? PacketSize : 0); const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= PacketSize ? PacketSize : 0);
const int peeled_kc = (depth/4)*4; const Index peeled_kc = (depth/4)*4;
if(unpackedB==0) if(unpackedB==0)
unpackedB = const_cast<Scalar*>(blockB - strideB * nr * PacketSize); unpackedB = const_cast<Scalar*>(blockB - strideB * nr * PacketSize);
// loops on each micro vertical panel of rhs (depth x nr) // loops on each micro vertical panel of rhs (depth x nr)
for(int j2=0; j2<packet_cols; j2+=nr) for(Index j2=0; j2<packet_cols; j2+=nr)
{ {
// unpack B // unpack B
{ {
const Scalar* blB = &blockB[j2*strideB+offsetB*nr]; const Scalar* blB = &blockB[j2*strideB+offsetB*nr];
int n = depth*nr; Index n = depth*nr;
for(int k=0; k<n; k++) for(Index k=0; k<n; k++)
ei_pstore(&unpackedB[k*PacketSize], ei_pset1(blB[k])); ei_pstore(&unpackedB[k*PacketSize], ei_pset1(blB[k]));
/*Scalar* dest = unpackedB; /*Scalar* dest = unpackedB;
for(int k=0; k<n; k+=4*PacketSize) for(Index k=0; k<n; k+=4*PacketSize)
{ {
#ifdef EIGEN_VECTORIZE_SSE #ifdef EIGEN_VECTORIZE_SSE
const int S = 128; const Index S = 128;
const int G = 16; const Index G = 16;
_mm_prefetch((const char*)(&blB[S/2+0]), _MM_HINT_T0); _mm_prefetch((const char*)(&blB[S/2+0]), _MM_HINT_T0);
_mm_prefetch((const char*)(&dest[S+0*G]), _MM_HINT_T0); _mm_prefetch((const char*)(&dest[S+0*G]), _MM_HINT_T0);
_mm_prefetch((const char*)(&dest[S+1*G]), _MM_HINT_T0); _mm_prefetch((const char*)(&dest[S+1*G]), _MM_HINT_T0);
@@ -114,7 +114,7 @@ struct ei_gebp_kernel
// loops on each micro horizontal panel of lhs (mr x depth) // loops on each micro horizontal panel of lhs (mr x depth)
// => we select a mr x nr micro block of res which is entirely // => we select a mr x nr micro block of res which is entirely
// stored into mr/packet_size x nr registers. // stored into mr/packet_size x nr registers.
for(int i=0; i<peeled_mc; i+=mr) for(Index i=0; i<peeled_mc; i+=mr)
{ {
const Scalar* blA = &blockA[i*strideA+offsetA*mr]; const Scalar* blA = &blockA[i*strideA+offsetA*mr];
ei_prefetch(&blA[0]); ei_prefetch(&blA[0]);
@@ -146,7 +146,7 @@ struct ei_gebp_kernel
// TODO let's check whether the following peeled loop could not be // TODO let's check whether the following peeled loop could not be
// optimized via optimal prefetching from one loop to the other // optimized via optimal prefetching from one loop to the other
const Scalar* blB = unpackedB; const Scalar* blB = unpackedB;
for(int k=0; k<peeled_kc; k+=4) for(Index k=0; k<peeled_kc; k+=4)
{ {
if(nr==2) if(nr==2)
{ {
@@ -257,7 +257,7 @@ struct ei_gebp_kernel
blA += 4*mr; blA += 4*mr;
} }
// process remaining peeled loop // process remaining peeled loop
for(int k=peeled_kc; k<depth; k++) for(Index k=peeled_kc; k<depth; k++)
{ {
if(nr==2) if(nr==2)
{ {
@@ -328,7 +328,7 @@ struct ei_gebp_kernel
} }
if(rows-peeled_mc>=PacketSize) if(rows-peeled_mc>=PacketSize)
{ {
int i = peeled_mc; Index i = peeled_mc;
const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize]; const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize];
ei_prefetch(&blA[0]); ei_prefetch(&blA[0]);
@@ -341,7 +341,7 @@ struct ei_gebp_kernel
// performs "inner" product // performs "inner" product
const Scalar* blB = unpackedB; const Scalar* blB = unpackedB;
for(int k=0; k<peeled_kc; k+=4) for(Index k=0; k<peeled_kc; k+=4)
{ {
if(nr==2) if(nr==2)
{ {
@@ -417,7 +417,7 @@ struct ei_gebp_kernel
blA += 4*PacketSize; blA += 4*PacketSize;
} }
// process remaining peeled loop // process remaining peeled loop
for(int k=peeled_kc; k<depth; k++) for(Index k=peeled_kc; k<depth; k++)
{ {
if(nr==2) if(nr==2)
{ {
@@ -455,7 +455,7 @@ struct ei_gebp_kernel
if(nr==4) ei_pstoreu(&res[(j2+2)*resStride + i], C2); if(nr==4) ei_pstoreu(&res[(j2+2)*resStride + i], C2);
if(nr==4) ei_pstoreu(&res[(j2+3)*resStride + i], C3); if(nr==4) ei_pstoreu(&res[(j2+3)*resStride + i], C3);
} }
for(int i=peeled_mc2; i<rows; i++) for(Index i=peeled_mc2; i<rows; i++)
{ {
const Scalar* blA = &blockA[i*strideA+offsetA]; const Scalar* blA = &blockA[i*strideA+offsetA];
ei_prefetch(&blA[0]); ei_prefetch(&blA[0]);
@@ -464,7 +464,7 @@ struct ei_gebp_kernel
Scalar C0(0), C1(0), C2(0), C3(0); Scalar C0(0), C1(0), C2(0), C3(0);
// TODO directly use blockB ??? // TODO directly use blockB ???
const Scalar* blB = unpackedB;//&blockB[j2*strideB+offsetB*nr]; const Scalar* blB = unpackedB;//&blockB[j2*strideB+offsetB*nr];
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
if(nr==2) if(nr==2)
{ {
@@ -504,16 +504,16 @@ struct ei_gebp_kernel
// process remaining rhs/res columns one at a time // process remaining rhs/res columns one at a time
// => do the same but with nr==1 // => do the same but with nr==1
for(int j2=packet_cols; j2<cols; j2++) for(Index j2=packet_cols; j2<cols; j2++)
{ {
// unpack B // unpack B
{ {
const Scalar* blB = &blockB[j2*strideB+offsetB]; const Scalar* blB = &blockB[j2*strideB+offsetB];
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
ei_pstore(&unpackedB[k*PacketSize], ei_pset1(blB[k])); ei_pstore(&unpackedB[k*PacketSize], ei_pset1(blB[k]));
} }
for(int i=0; i<peeled_mc; i+=mr) for(Index i=0; i<peeled_mc; i+=mr)
{ {
const Scalar* blA = &blockA[i*strideA+offsetA*mr]; const Scalar* blA = &blockA[i*strideA+offsetA*mr];
ei_prefetch(&blA[0]); ei_prefetch(&blA[0]);
@@ -526,7 +526,7 @@ struct ei_gebp_kernel
C4 = ei_ploadu(&res[(j2+0)*resStride + i + PacketSize]); C4 = ei_ploadu(&res[(j2+0)*resStride + i + PacketSize]);
const Scalar* blB = unpackedB; const Scalar* blB = unpackedB;
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
PacketType B0, A0, A1, T0, T1; PacketType B0, A0, A1, T0, T1;
@@ -545,14 +545,14 @@ struct ei_gebp_kernel
} }
if(rows-peeled_mc>=PacketSize) if(rows-peeled_mc>=PacketSize)
{ {
int i = peeled_mc; Index i = peeled_mc;
const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize]; const Scalar* blA = &blockA[i*strideA+offsetA*PacketSize];
ei_prefetch(&blA[0]); ei_prefetch(&blA[0]);
PacketType C0 = ei_ploadu(&res[(j2+0)*resStride + i]); PacketType C0 = ei_ploadu(&res[(j2+0)*resStride + i]);
const Scalar* blB = unpackedB; const Scalar* blB = unpackedB;
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
C0 = cj.pmadd(ei_pload(blA), ei_pload(blB), C0); C0 = cj.pmadd(ei_pload(blA), ei_pload(blB), C0);
blB += PacketSize; blB += PacketSize;
@@ -561,7 +561,7 @@ struct ei_gebp_kernel
ei_pstoreu(&res[(j2+0)*resStride + i], C0); ei_pstoreu(&res[(j2+0)*resStride + i], C0);
} }
for(int i=peeled_mc2; i<rows; i++) for(Index i=peeled_mc2; i<rows; i++)
{ {
const Scalar* blA = &blockA[i*strideA+offsetA]; const Scalar* blA = &blockA[i*strideA+offsetA];
ei_prefetch(&blA[0]); ei_prefetch(&blA[0]);
@@ -570,7 +570,7 @@ struct ei_gebp_kernel
Scalar C0(0); Scalar C0(0);
// FIXME directly use blockB ?? // FIXME directly use blockB ??
const Scalar* blB = unpackedB; const Scalar* blB = unpackedB;
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
C0 = cj.pmadd(blA[k], blB[k*PacketSize], C0); C0 = cj.pmadd(blA[k], blB[k*PacketSize], C0);
res[(j2+0)*resStride + i] += C0; res[(j2+0)*resStride + i] += C0;
} }
@@ -594,39 +594,39 @@ struct ei_gebp_kernel
// //
// 32 33 34 35 ... // 32 33 34 35 ...
// 36 37 38 39 ... // 36 37 38 39 ...
template<typename Scalar, int mr, int StorageOrder, bool Conjugate, bool PanelMode> template<typename Scalar, typename Index, int mr, int StorageOrder, bool Conjugate, bool PanelMode>
struct ei_gemm_pack_lhs struct ei_gemm_pack_lhs
{ {
void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, int lhsStride, int depth, int rows, void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows,
int stride=0, int offset=0) Index stride=0, Index offset=0)
{ {
enum { PacketSize = ei_packet_traits<Scalar>::size }; enum { PacketSize = ei_packet_traits<Scalar>::size };
ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
ei_conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj; ei_conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
ei_const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride); ei_const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
int count = 0; Index count = 0;
int peeled_mc = (rows/mr)*mr; Index peeled_mc = (rows/mr)*mr;
for(int i=0; i<peeled_mc; i+=mr) for(Index i=0; i<peeled_mc; i+=mr)
{ {
if(PanelMode) count += mr * offset; if(PanelMode) count += mr * offset;
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
for(int w=0; w<mr; w++) for(Index w=0; w<mr; w++)
blockA[count++] = cj(lhs(i+w, k)); blockA[count++] = cj(lhs(i+w, k));
if(PanelMode) count += mr * (stride-offset-depth); if(PanelMode) count += mr * (stride-offset-depth);
} }
if(rows-peeled_mc>=PacketSize) if(rows-peeled_mc>=PacketSize)
{ {
if(PanelMode) count += PacketSize*offset; if(PanelMode) count += PacketSize*offset;
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
for(int w=0; w<PacketSize; w++) for(Index w=0; w<PacketSize; w++)
blockA[count++] = cj(lhs(peeled_mc+w, k)); blockA[count++] = cj(lhs(peeled_mc+w, k));
if(PanelMode) count += PacketSize * (stride-offset-depth); if(PanelMode) count += PacketSize * (stride-offset-depth);
peeled_mc += PacketSize; peeled_mc += PacketSize;
} }
for(int i=peeled_mc; i<rows; i++) for(Index i=peeled_mc; i<rows; i++)
{ {
if(PanelMode) count += offset; if(PanelMode) count += offset;
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
blockA[count++] = cj(lhs(i, k)); blockA[count++] = cj(lhs(i, k));
if(PanelMode) count += (stride-offset-depth); if(PanelMode) count += (stride-offset-depth);
} }
@@ -640,19 +640,19 @@ struct ei_gemm_pack_lhs
// 4 5 6 7 16 17 18 19 25 28 // 4 5 6 7 16 17 18 19 25 28
// 8 9 10 11 20 21 22 23 26 29 // 8 9 10 11 20 21 22 23 26 29
// . . . . . . . . . . // . . . . . . . . . .
template<typename Scalar, int nr, bool PanelMode> template<typename Scalar, typename Index, int nr, bool PanelMode>
struct ei_gemm_pack_rhs<Scalar, nr, ColMajor, PanelMode> struct ei_gemm_pack_rhs<Scalar, Index, nr, ColMajor, PanelMode>
{ {
typedef typename ei_packet_traits<Scalar>::type Packet; typedef typename ei_packet_traits<Scalar>::type Packet;
enum { PacketSize = ei_packet_traits<Scalar>::size }; enum { PacketSize = ei_packet_traits<Scalar>::size };
void operator()(Scalar* blockB, const Scalar* rhs, int rhsStride, Scalar alpha, int depth, int cols, void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Scalar alpha, Index depth, Index cols,
int stride=0, int offset=0) Index stride=0, Index offset=0)
{ {
ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
bool hasAlpha = alpha != Scalar(1); bool hasAlpha = alpha != Scalar(1);
int packet_cols = (cols/nr) * nr; Index packet_cols = (cols/nr) * nr;
int count = 0; Index count = 0;
for(int j2=0; j2<packet_cols; j2+=nr) for(Index j2=0; j2<packet_cols; j2+=nr)
{ {
// skip what we have before // skip what we have before
if(PanelMode) count += nr * offset; if(PanelMode) count += nr * offset;
@@ -661,7 +661,7 @@ struct ei_gemm_pack_rhs<Scalar, nr, ColMajor, PanelMode>
const Scalar* b2 = &rhs[(j2+2)*rhsStride]; const Scalar* b2 = &rhs[(j2+2)*rhsStride];
const Scalar* b3 = &rhs[(j2+3)*rhsStride]; const Scalar* b3 = &rhs[(j2+3)*rhsStride];
if (hasAlpha) if (hasAlpha)
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
blockB[count+0] = alpha*b0[k]; blockB[count+0] = alpha*b0[k];
blockB[count+1] = alpha*b1[k]; blockB[count+1] = alpha*b1[k];
@@ -670,7 +670,7 @@ struct ei_gemm_pack_rhs<Scalar, nr, ColMajor, PanelMode>
count += nr; count += nr;
} }
else else
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
blockB[count+0] = b0[k]; blockB[count+0] = b0[k];
blockB[count+1] = b1[k]; blockB[count+1] = b1[k];
@@ -683,18 +683,18 @@ struct ei_gemm_pack_rhs<Scalar, nr, ColMajor, PanelMode>
} }
// copy the remaining columns one at a time (nr==1) // copy the remaining columns one at a time (nr==1)
for(int j2=packet_cols; j2<cols; ++j2) for(Index j2=packet_cols; j2<cols; ++j2)
{ {
if(PanelMode) count += offset; if(PanelMode) count += offset;
const Scalar* b0 = &rhs[(j2+0)*rhsStride]; const Scalar* b0 = &rhs[(j2+0)*rhsStride];
if (hasAlpha) if (hasAlpha)
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
blockB[count] = alpha*b0[k]; blockB[count] = alpha*b0[k];
count += 1; count += 1;
} }
else else
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
blockB[count] = b0[k]; blockB[count] = b0[k];
count += 1; count += 1;
@@ -705,24 +705,24 @@ struct ei_gemm_pack_rhs<Scalar, nr, ColMajor, PanelMode>
}; };
// this version is optimized for row major matrices // this version is optimized for row major matrices
template<typename Scalar, int nr, bool PanelMode> template<typename Scalar, typename Index, int nr, bool PanelMode>
struct ei_gemm_pack_rhs<Scalar, nr, RowMajor, PanelMode> struct ei_gemm_pack_rhs<Scalar, Index, nr, RowMajor, PanelMode>
{ {
enum { PacketSize = ei_packet_traits<Scalar>::size }; enum { PacketSize = ei_packet_traits<Scalar>::size };
void operator()(Scalar* blockB, const Scalar* rhs, int rhsStride, Scalar alpha, int depth, int cols, void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Scalar alpha, Index depth, Index cols,
int stride=0, int offset=0) Index stride=0, Index offset=0)
{ {
ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); ei_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
bool hasAlpha = alpha != Scalar(1); bool hasAlpha = alpha != Scalar(1);
int packet_cols = (cols/nr) * nr; Index packet_cols = (cols/nr) * nr;
int count = 0; Index count = 0;
for(int j2=0; j2<packet_cols; j2+=nr) for(Index j2=0; j2<packet_cols; j2+=nr)
{ {
// skip what we have before // skip what we have before
if(PanelMode) count += nr * offset; if(PanelMode) count += nr * offset;
if (hasAlpha) if (hasAlpha)
{ {
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
const Scalar* b0 = &rhs[k*rhsStride + j2]; const Scalar* b0 = &rhs[k*rhsStride + j2];
blockB[count+0] = alpha*b0[0]; blockB[count+0] = alpha*b0[0];
@@ -734,7 +734,7 @@ struct ei_gemm_pack_rhs<Scalar, nr, RowMajor, PanelMode>
} }
else else
{ {
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
const Scalar* b0 = &rhs[k*rhsStride + j2]; const Scalar* b0 = &rhs[k*rhsStride + j2];
blockB[count+0] = b0[0]; blockB[count+0] = b0[0];
@@ -748,11 +748,11 @@ struct ei_gemm_pack_rhs<Scalar, nr, RowMajor, PanelMode>
if(PanelMode) count += nr * (stride-offset-depth); if(PanelMode) count += nr * (stride-offset-depth);
} }
// copy the remaining columns one at a time (nr==1) // copy the remaining columns one at a time (nr==1)
for(int j2=packet_cols; j2<cols; ++j2) for(Index j2=packet_cols; j2<cols; ++j2)
{ {
if(PanelMode) count += offset; if(PanelMode) count += offset;
const Scalar* b0 = &rhs[j2]; const Scalar* b0 = &rhs[j2];
for(int k=0; k<depth; k++) for(Index k=0; k<depth; k++)
{ {
blockB[count] = alpha*b0[k*rhsStride]; blockB[count] = alpha*b0[k*rhsStride];
count += 1; count += 1;
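The packing routines above interleave nr columns of the rhs, optionally scaled by alpha, so the GEBP kernel can stream them linearly. Stripped of the PanelMode/offset bookkeeping and the hasAlpha branch, and with nr fixed to 4, the column-major variant reduces to the sketch below (pack_rhs_colmajor_sketch is a hypothetical name, not the real ei_gemm_pack_rhs):

    // Simplified column-major rhs packing: nr columns are interleaved per k,
    // leftover columns are copied one at a time (always scaling by alpha here).
    template<typename Scalar, typename Index>
    void pack_rhs_colmajor_sketch(Scalar* blockB, const Scalar* rhs, Index rhsStride,
                                  Scalar alpha, Index depth, Index cols)
    {
      const Index nr = 4;
      const Index packet_cols = (cols/nr)*nr;
      Index count = 0;
      for(Index j2=0; j2<packet_cols; j2+=nr)
      {
        const Scalar* b0 = &rhs[(j2+0)*rhsStride];
        const Scalar* b1 = &rhs[(j2+1)*rhsStride];
        const Scalar* b2 = &rhs[(j2+2)*rhsStride];
        const Scalar* b3 = &rhs[(j2+3)*rhsStride];
        for(Index k=0; k<depth; k++)
        {
          blockB[count+0] = alpha*b0[k];
          blockB[count+1] = alpha*b1[k];
          blockB[count+2] = alpha*b2[k];
          blockB[count+3] = alpha*b3[k];
          count += nr;
        }
      }
      for(Index j2=packet_cols; j2<cols; ++j2)   // remaining columns, one at a time
      {
        const Scalar* b0 = &rhs[j2*rhsStride];
        for(Index k=0; k<depth; k++)
          blockB[count++] = alpha*b0[k];
      }
    }

Threading Index through as a template parameter keeps count and the strides in the same integer type as the callers' sizes.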

View File

@@ -29,21 +29,21 @@
/* Specialization for a row-major destination matrix => simple transposition of the product */ /* Specialization for a row-major destination matrix => simple transposition of the product */
template< template<
typename Scalar, typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs> int RhsStorageOrder, bool ConjugateRhs>
struct ei_general_matrix_matrix_product<Scalar,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,RowMajor> struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,RowMajor>
{ {
static EIGEN_STRONG_INLINE void run( static EIGEN_STRONG_INLINE void run(
int rows, int cols, int depth, Index rows, Index cols, Index depth,
const Scalar* lhs, int lhsStride, const Scalar* lhs, Index lhsStride,
const Scalar* rhs, int rhsStride, const Scalar* rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha, Scalar alpha,
GemmParallelInfo<Scalar>* info = 0) GemmParallelInfo<Scalar, Index>* info = 0)
{ {
// transpose the product such that the result is column major // transpose the product such that the result is column major
ei_general_matrix_matrix_product<Scalar, ei_general_matrix_matrix_product<Scalar, Index,
RhsStorageOrder==RowMajor ? ColMajor : RowMajor, RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
ConjugateRhs, ConjugateRhs,
LhsStorageOrder==RowMajor ? ColMajor : RowMajor, LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
@@ -56,20 +56,20 @@ struct ei_general_matrix_matrix_product<Scalar,LhsStorageOrder,ConjugateLhs,RhsS
/* Specialization for a col-major destination matrix /* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */ * => Blocking algorithm following Goto's paper */
template< template<
typename Scalar, typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs> int RhsStorageOrder, bool ConjugateRhs>
struct ei_general_matrix_matrix_product<Scalar,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> struct ei_general_matrix_matrix_product<Scalar,Index,LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor>
{ {
static void run(int rows, int cols, int depth, static void run(Index rows, Index cols, Index depth,
const Scalar* _lhs, int lhsStride, const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, int rhsStride, const Scalar* _rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha, Scalar alpha,
GemmParallelInfo<Scalar>* info = 0) GemmParallelInfo<Scalar,Index>* info = 0)
{ {
ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride); ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride); ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs) if (ConjugateRhs)
alpha = ei_conj(alpha); alpha = ei_conj(alpha);
@@ -77,19 +77,19 @@ static void run(int rows, int cols, int depth,
typedef typename ei_packet_traits<Scalar>::type PacketType; typedef typename ei_packet_traits<Scalar>::type PacketType;
typedef ei_product_blocking_traits<Scalar> Blocking; typedef ei_product_blocking_traits<Scalar> Blocking;
int kc = std::min<int>(Blocking::Max_kc,depth); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc,depth); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
ei_gemm_pack_rhs<Scalar, Blocking::nr, RhsStorageOrder> pack_rhs; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr, RhsStorageOrder> pack_rhs;
ei_gemm_pack_lhs<Scalar, Blocking::mr, LhsStorageOrder> pack_lhs; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr, LhsStorageOrder> pack_lhs;
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp;
#ifdef EIGEN_HAS_OPENMP #ifdef EIGEN_HAS_OPENMP
if(info) if(info)
{ {
// this is the parallel version! // this is the parallel version!
int tid = omp_get_thread_num(); Index tid = omp_get_thread_num();
int threads = omp_get_num_threads(); Index threads = omp_get_num_threads();
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr*8; std::size_t sizeW = kc*Blocking::PacketSize*Blocking::nr*8;
@@ -98,9 +98,9 @@ static void run(int rows, int cols, int depth,
// For each horizontal panel of the rhs, and corresponding panel of the lhs... // For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1) // (==GEMM_VAR1)
for(int k=0; k<depth; k+=kc) for(Index k=0; k<depth; k+=kc)
{ {
const int actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A' const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other, // In order to reduce the chance that a thread has to wait for the other,
// let's start by packing A'. // let's start by packing A'.
@@ -121,9 +121,9 @@ static void run(int rows, int cols, int depth,
info[tid].sync = k; info[tid].sync = k;
// Computes C_i += A' * B' per B'_j // Computes C_i += A' * B' per B'_j
for(int shift=0; shift<threads; ++shift) for(Index shift=0; shift<threads; ++shift)
{ {
int j = (tid+shift)%threads; Index j = (tid+shift)%threads;
// At this point we have to make sure that B'_j has been updated by the thread j, // At this point we have to make sure that B'_j has been updated by the thread j,
// we use testAndSetOrdered to mimic a volatile access. // we use testAndSetOrdered to mimic a volatile access.
@@ -135,9 +135,9 @@ static void run(int rows, int cols, int depth,
} }
// Then keep going as usual with the remaining A' // Then keep going as usual with the remaining A'
for(int i=mc; i<rows; i+=mc) for(Index i=mc; i<rows; i+=mc)
{ {
const int actual_mc = std::min(i+mc,rows)-i; const Index actual_mc = std::min(i+mc,rows)-i;
// pack A_i,k to A' // pack A_i,k to A'
pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc); pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
@@ -148,7 +148,7 @@ static void run(int rows, int cols, int depth,
// Release all the sub blocks B'_j of B' for the current thread, // Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1 // i.e., we simply decrement the number of users by 1
for(int j=0; j<threads; ++j) for(Index j=0; j<threads; ++j)
#pragma omp atomic #pragma omp atomic
--(info[j].users); --(info[j].users);
} }
@@ -168,9 +168,9 @@ static void run(int rows, int cols, int depth,
// For each horizontal panel of the rhs, and corresponding panel of the lhs... // For each horizontal panel of the rhs, and corresponding panel of the lhs...
// (==GEMM_VAR1) // (==GEMM_VAR1)
for(int k2=0; k2<depth; k2+=kc) for(Index k2=0; k2<depth; k2+=kc)
{ {
const int actual_kc = std::min(k2+kc,depth)-k2; const Index actual_kc = std::min(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack rhs's panel into a sequential chunk of memory (L2 caching) // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
@@ -181,9 +181,9 @@ static void run(int rows, int cols, int depth,
// For each mc x kc block of the lhs's vertical panel... // For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1) // (==GEPP_VAR1)
for(int i2=0; i2<rows; i2+=mc) for(Index i2=0; i2<rows; i2+=mc)
{ {
const int actual_mc = std::min(i2+mc,rows)-i2; const Index actual_mc = std::min(i2+mc,rows)-i2;
// We pack the lhs's block into a sequential chunk of memory (L1 caching) // We pack the lhs's block into a sequential chunk of memory (L1 caching)
// Note that this block will be read a very high number of times, which is equal to the number of // Note that this block will be read a very high number of times, which is equal to the number of
@@ -215,7 +215,7 @@ struct ei_traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
: ei_traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> > : ei_traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{}; {};
template<typename Scalar, typename Gemm, typename Lhs, typename Rhs, typename Dest> template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest>
struct ei_gemm_functor struct ei_gemm_functor
{ {
typedef typename Rhs::Scalar BlockBScalar; typedef typename Rhs::Scalar BlockBScalar;
@@ -224,7 +224,7 @@ struct ei_gemm_functor
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha)
{} {}
void operator() (int row, int rows, int col=0, int cols=-1, GemmParallelInfo<BlockBScalar>* info=0) const void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<BlockBScalar,Index>* info=0) const
{ {
if(cols==-1) if(cols==-1)
cols = m_rhs.cols(); cols = m_rhs.cols();
@@ -237,9 +237,9 @@ struct ei_gemm_functor
} }
int sharedBlockBSize() const Index sharedBlockBSize() const
{ {
return std::min<int>(ei_product_blocking_traits<Scalar>::Max_kc,m_rhs.rows()) * m_rhs.cols(); return std::min<Index>(ei_product_blocking_traits<Scalar>::Max_kc,m_rhs.rows()) * m_rhs.cols();
} }
protected: protected:
@@ -273,9 +273,9 @@ class GeneralProduct<Lhs, Rhs, GemmProduct>
* RhsBlasTraits::extractScalarFactor(m_rhs); * RhsBlasTraits::extractScalarFactor(m_rhs);
typedef ei_gemm_functor< typedef ei_gemm_functor<
Scalar, Scalar, Index,
ei_general_matrix_matrix_product< ei_general_matrix_matrix_product<
Scalar, Scalar, Index,
(_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
(_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
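The col-major specialization follows Goto's blocking scheme: the k dimension is sliced into panels of at most kc, each rhs panel is packed once for L2 reuse, and inside it the lhs is cut into mc x kc blocks that are packed for L1 reuse and handed to the gebp kernel. The loop structure, with the packing and the kernel replaced by a naive accumulation, looks roughly like the sketch below; it assumes column-major lhs, rhs and res, omits the alpha scaling and OpenMP path, and blocked_gemm_sketch is a made-up name.

    #include <algorithm>  // std::min

    // Skeleton of the blocked GEMM loop structure (no packing, no OpenMP, no alpha;
    // the gebp kernel is replaced by a naive accumulation for illustration).
    template<typename Scalar, typename Index>
    void blocked_gemm_sketch(Index rows, Index cols, Index depth,
                             const Scalar* lhs, Index lhsStride,
                             const Scalar* rhs, Index rhsStride,
                             Scalar* res, Index resStride,
                             Index kc, Index mc)
    {
      for(Index k2=0; k2<depth; k2+=kc)                     // horizontal rhs panel B'
      {
        const Index actual_kc = std::min(k2+kc, depth)-k2;  // rows of B', cols of A'
        // the real code packs B' once here for L2 reuse
        for(Index i2=0; i2<rows; i2+=mc)                    // mc x kc lhs block A'
        {
          const Index actual_mc = std::min(i2+mc, rows)-i2;
          // the real code packs A' here for L1 reuse, then calls gebp(...)
          for(Index j=0; j<cols; ++j)
            for(Index i=0; i<actual_mc; ++i)
              for(Index k=0; k<actual_kc; ++k)
                res[j*resStride + i2+i] +=
                    lhs[(k2+k)*lhsStride + i2+i] * rhs[j*rhsStride + k2+k];
        }
      }
    }

Every extent and stride goes through the same Index type, which is what the extra template parameter added to ei_gebp_kernel, the packing helpers and GemmParallelInfo provides.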

View File

@@ -32,11 +32,11 @@
* same alignment pattern. * same alignment pattern.
* TODO: since rhs gets evaluated only once, no need to evaluate it * TODO: since rhs gets evaluated only once, no need to evaluate it
*/ */
template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename RhsType> template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename RhsType>
static EIGEN_DONT_INLINE static EIGEN_DONT_INLINE
void ei_cache_friendly_product_colmajor_times_vector( void ei_cache_friendly_product_colmajor_times_vector(
int size, Index size,
const Scalar* lhs, int lhsStride, const Scalar* lhs, Index lhsStride,
const RhsType& rhs, const RhsType& rhs,
Scalar* res, Scalar* res,
Scalar alpha) Scalar alpha)
@@ -59,30 +59,30 @@ void ei_cache_friendly_product_colmajor_times_vector(
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename ei_packet_traits<Scalar>::type Packet; typedef typename ei_packet_traits<Scalar>::type Packet;
const int PacketSize = sizeof(Packet)/sizeof(Scalar); const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned }; enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
const int columnsAtOnce = 4; const Index columnsAtOnce = 4;
const int peels = 2; const Index peels = 2;
const int PacketAlignedMask = PacketSize-1; const Index PacketAlignedMask = PacketSize-1;
const int PeelAlignedMask = PacketSize*peels-1; const Index PeelAlignedMask = PacketSize*peels-1;
// How many coeffs of the result do we have to skip to be aligned. // How many coeffs of the result do we have to skip to be aligned.
// Here we assume data are at least aligned on the base scalar type. // Here we assume data are at least aligned on the base scalar type.
int alignedStart = ei_first_aligned(res,size); Index alignedStart = ei_first_aligned(res,size);
int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; Index alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; const Index alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
int alignmentPattern = alignmentStep==0 ? AllAligned Index alignmentPattern = alignmentStep==0 ? AllAligned
: alignmentStep==(PacketSize/2) ? EvenAligned : alignmentStep==(PacketSize/2) ? EvenAligned
: FirstAligned; : FirstAligned;
// we cannot assume the first element is aligned because of sub-matrices // we cannot assume the first element is aligned because of sub-matrices
const int lhsAlignmentOffset = ei_first_aligned(lhs,size); const Index lhsAlignmentOffset = ei_first_aligned(lhs,size);
// find how many columns do we have to skip to be aligned with the result (if possible) // find how many columns do we have to skip to be aligned with the result (if possible)
int skipColumns = 0; Index skipColumns = 0;
// if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats) // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(res)%sizeof(RealScalar)) ) if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(res)%sizeof(RealScalar)) )
{ {
@@ -114,11 +114,11 @@ void ei_cache_friendly_product_colmajor_times_vector(
|| (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0); || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0);
} }
int offset1 = (FirstAligned && alignmentStep==1?3:1); Index offset1 = (FirstAligned && alignmentStep==1?3:1);
int offset3 = (FirstAligned && alignmentStep==1?1:3); Index offset3 = (FirstAligned && alignmentStep==1?1:3);
int columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns; Index columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
for (int i=skipColumns; i<columnBound; i+=columnsAtOnce) for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
{ {
Packet ptmp0 = ei_pset1(alpha*rhs[i]), ptmp1 = ei_pset1(alpha*rhs[i+offset1]), Packet ptmp0 = ei_pset1(alpha*rhs[i]), ptmp1 = ei_pset1(alpha*rhs[i+offset1]),
ptmp2 = ei_pset1(alpha*rhs[i+2]), ptmp3 = ei_pset1(alpha*rhs[i+offset3]); ptmp2 = ei_pset1(alpha*rhs[i+2]), ptmp3 = ei_pset1(alpha*rhs[i+offset3]);
@@ -131,7 +131,7 @@ void ei_cache_friendly_product_colmajor_times_vector(
{ {
/* explicit vectorization */ /* explicit vectorization */
// process initial unaligned coeffs // process initial unaligned coeffs
for (int j=0; j<alignedStart; ++j) for (Index j=0; j<alignedStart; ++j)
{ {
res[j] = cj.pmadd(lhs0[j], ei_pfirst(ptmp0), res[j]); res[j] = cj.pmadd(lhs0[j], ei_pfirst(ptmp0), res[j]);
res[j] = cj.pmadd(lhs1[j], ei_pfirst(ptmp1), res[j]); res[j] = cj.pmadd(lhs1[j], ei_pfirst(ptmp1), res[j]);
@@ -144,11 +144,11 @@ void ei_cache_friendly_product_colmajor_times_vector(
switch(alignmentPattern) switch(alignmentPattern)
{ {
case AllAligned: case AllAligned:
for (int j = alignedStart; j<alignedSize; j+=PacketSize) for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,d,d); _EIGEN_ACCUMULATE_PACKETS(d,d,d);
break; break;
case EvenAligned: case EvenAligned:
for (int j = alignedStart; j<alignedSize; j+=PacketSize) for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,d); _EIGEN_ACCUMULATE_PACKETS(d,du,d);
break; break;
case FirstAligned: case FirstAligned:
@@ -160,7 +160,7 @@ void ei_cache_friendly_product_colmajor_times_vector(
A02 = ei_pload(&lhs2[alignedStart-2]); A02 = ei_pload(&lhs2[alignedStart-2]);
A03 = ei_pload(&lhs3[alignedStart-3]); A03 = ei_pload(&lhs3[alignedStart-3]);
for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize) for (Index j = alignedStart; j<peeledSize; j+=peels*PacketSize)
{ {
A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11); A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11);
A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12); A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12);
@@ -184,11 +184,11 @@ void ei_cache_friendly_product_colmajor_times_vector(
ei_pstore(&res[j+PacketSize],A10); ei_pstore(&res[j+PacketSize],A10);
} }
} }
for (int j = peeledSize; j<alignedSize; j+=PacketSize) for (Index j = peeledSize; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,du); _EIGEN_ACCUMULATE_PACKETS(d,du,du);
break; break;
default: default:
for (int j = alignedStart; j<alignedSize; j+=PacketSize) for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(du,du,du); _EIGEN_ACCUMULATE_PACKETS(du,du,du);
break; break;
} }
@@ -196,7 +196,7 @@ void ei_cache_friendly_product_colmajor_times_vector(
} // end explicit vectorization } // end explicit vectorization
/* process remaining coeffs (or all if there is no explicit vectorization) */ /* process remaining coeffs (or all if there is no explicit vectorization) */
for (int j=alignedSize; j<size; ++j) for (Index j=alignedSize; j<size; ++j)
{ {
res[j] = cj.pmadd(lhs0[j], ei_pfirst(ptmp0), res[j]); res[j] = cj.pmadd(lhs0[j], ei_pfirst(ptmp0), res[j]);
res[j] = cj.pmadd(lhs1[j], ei_pfirst(ptmp1), res[j]); res[j] = cj.pmadd(lhs1[j], ei_pfirst(ptmp1), res[j]);
@@ -206,11 +206,11 @@ void ei_cache_friendly_product_colmajor_times_vector(
} }
// process remaining first and last columns (at most columnsAtOnce-1) // process remaining first and last columns (at most columnsAtOnce-1)
int end = rhs.size(); Index end = rhs.size();
int start = columnBound; Index start = columnBound;
do do
{ {
for (int i=start; i<end; ++i) for (Index i=start; i<end; ++i)
{ {
Packet ptmp0 = ei_pset1(alpha*rhs[i]); Packet ptmp0 = ei_pset1(alpha*rhs[i]);
const Scalar* lhs0 = lhs + i*lhsStride; const Scalar* lhs0 = lhs + i*lhsStride;
@@ -219,20 +219,20 @@ void ei_cache_friendly_product_colmajor_times_vector(
{ {
/* explicit vectorization */ /* explicit vectorization */
// process first unaligned result's coeffs // process first unaligned result's coeffs
for (int j=0; j<alignedStart; ++j) for (Index j=0; j<alignedStart; ++j)
res[j] += cj.pmul(lhs0[j], ei_pfirst(ptmp0)); res[j] += cj.pmul(lhs0[j], ei_pfirst(ptmp0));
// process aligned result's coeffs // process aligned result's coeffs
if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0) if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
for (int j = alignedStart;j<alignedSize;j+=PacketSize) for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ei_pstore(&res[j], cj.pmadd(ei_pload(&lhs0[j]), ptmp0, ei_pload(&res[j]))); ei_pstore(&res[j], cj.pmadd(ei_pload(&lhs0[j]), ptmp0, ei_pload(&res[j])));
else else
for (int j = alignedStart;j<alignedSize;j+=PacketSize) for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ei_pstore(&res[j], cj.pmadd(ei_ploadu(&lhs0[j]), ptmp0, ei_pload(&res[j]))); ei_pstore(&res[j], cj.pmadd(ei_ploadu(&lhs0[j]), ptmp0, ei_pload(&res[j])));
} }
// process remaining scalars (or all if no explicit vectorization) // process remaining scalars (or all if no explicit vectorization)
for (int j=alignedSize; j<size; ++j) for (Index j=alignedSize; j<size; ++j)
res[j] += cj.pmul(lhs0[j], ei_pfirst(ptmp0)); res[j] += cj.pmul(lhs0[j], ei_pfirst(ptmp0));
} }
if (skipColumns) if (skipColumns)
@@ -248,10 +248,10 @@ void ei_cache_friendly_product_colmajor_times_vector(
} }
// TODO add peeling to mask unaligned load/stores // TODO add peeling to mask unaligned load/stores
template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename ResType> template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename ResType>
static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
const Scalar* lhs, int lhsStride, const Scalar* lhs, Index lhsStride,
const Scalar* rhs, int rhsSize, const Scalar* rhs, Index rhsSize,
ResType& res, ResType& res,
Scalar alpha) Scalar alpha)
{ {
@@ -270,32 +270,32 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename ei_packet_traits<Scalar>::type Packet; typedef typename ei_packet_traits<Scalar>::type Packet;
const int PacketSize = sizeof(Packet)/sizeof(Scalar); const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 }; enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
const int rowsAtOnce = 4; const Index rowsAtOnce = 4;
const int peels = 2; const Index peels = 2;
const int PacketAlignedMask = PacketSize-1; const Index PacketAlignedMask = PacketSize-1;
const int PeelAlignedMask = PacketSize*peels-1; const Index PeelAlignedMask = PacketSize*peels-1;
const int size = rhsSize; const Index size = rhsSize;
// How many coeffs of the result do we have to skip to be aligned. // How many coeffs of the result do we have to skip to be aligned.
// Here we assume data are at least aligned on the base scalar type // Here we assume data are at least aligned on the base scalar type
// if that's not the case then vectorization is discarded, see below. // if that's not the case then vectorization is discarded, see below.
int alignedStart = ei_first_aligned(rhs, size); Index alignedStart = ei_first_aligned(rhs, size);
int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; Index alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; const Index alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
int alignmentPattern = alignmentStep==0 ? AllAligned Index alignmentPattern = alignmentStep==0 ? AllAligned
: alignmentStep==(PacketSize/2) ? EvenAligned : alignmentStep==(PacketSize/2) ? EvenAligned
: FirstAligned; : FirstAligned;
// we cannot assume the first element is aligned because of sub-matrices // we cannot assume the first element is aligned because of sub-matrices
const int lhsAlignmentOffset = ei_first_aligned(lhs,size); const Index lhsAlignmentOffset = ei_first_aligned(lhs,size);
// find how many rows do we have to skip to be aligned with rhs (if possible) // find how many rows do we have to skip to be aligned with rhs (if possible)
int skipRows = 0; Index skipRows = 0;
// if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats) // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(rhs)%sizeof(RealScalar)) ) if( (size_t(lhs)%sizeof(RealScalar)) || (size_t(rhs)%sizeof(RealScalar)) )
{ {
@@ -317,7 +317,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
} }
else else
{ {
skipRows = std::min(skipRows,res.size()); skipRows = std::min(skipRows,Index(res.size()));
// note that the skipped rows are processed later. // note that the skipped rows are processed later.
} }
ei_internal_assert( alignmentPattern==NoneAligned ei_internal_assert( alignmentPattern==NoneAligned
@@ -327,11 +327,11 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
|| (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0); || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0);
} }
int offset1 = (FirstAligned && alignmentStep==1?3:1); Index offset1 = (FirstAligned && alignmentStep==1?3:1);
int offset3 = (FirstAligned && alignmentStep==1?1:3); Index offset3 = (FirstAligned && alignmentStep==1?1:3);
int rowBound = ((res.size()-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows; Index rowBound = ((res.size()-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
for (int i=skipRows; i<rowBound; i+=rowsAtOnce) for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
{ {
Scalar tmp0 = Scalar(0), tmp1 = Scalar(0), tmp2 = Scalar(0), tmp3 = Scalar(0); Scalar tmp0 = Scalar(0), tmp1 = Scalar(0), tmp2 = Scalar(0), tmp3 = Scalar(0);
@@ -346,7 +346,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
// process initial unaligned coeffs // process initial unaligned coeffs
// FIXME this loop gets vectorized by the compiler ! // FIXME this loop gets vectorized by the compiler !
for (int j=0; j<alignedStart; ++j) for (Index j=0; j<alignedStart; ++j)
{ {
Scalar b = rhs[j]; Scalar b = rhs[j];
tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b); tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
@@ -358,11 +358,11 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
switch(alignmentPattern) switch(alignmentPattern)
{ {
case AllAligned: case AllAligned:
for (int j = alignedStart; j<alignedSize; j+=PacketSize) for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,d,d); _EIGEN_ACCUMULATE_PACKETS(d,d,d);
break; break;
case EvenAligned: case EvenAligned:
for (int j = alignedStart; j<alignedSize; j+=PacketSize) for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,d); _EIGEN_ACCUMULATE_PACKETS(d,du,d);
break; break;
case FirstAligned: case FirstAligned:
@@ -379,7 +379,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
A02 = ei_pload(&lhs2[alignedStart-2]); A02 = ei_pload(&lhs2[alignedStart-2]);
A03 = ei_pload(&lhs3[alignedStart-3]); A03 = ei_pload(&lhs3[alignedStart-3]);
for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize) for (Index j = alignedStart; j<peeledSize; j+=peels*PacketSize)
{ {
b = ei_pload(&rhs[j]); b = ei_pload(&rhs[j]);
A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11); A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11);
@@ -401,11 +401,11 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
ptmp3 = cj.pmadd(A13, b, ptmp3); ptmp3 = cj.pmadd(A13, b, ptmp3);
} }
} }
for (int j = peeledSize; j<alignedSize; j+=PacketSize) for (Index j = peeledSize; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,du); _EIGEN_ACCUMULATE_PACKETS(d,du,du);
break; break;
default: default:
for (int j = alignedStart; j<alignedSize; j+=PacketSize) for (Index j = alignedStart; j<alignedSize; j+=PacketSize)
_EIGEN_ACCUMULATE_PACKETS(du,du,du); _EIGEN_ACCUMULATE_PACKETS(du,du,du);
break; break;
} }
@@ -418,7 +418,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
// process remaining coeffs (or all if no explicit vectorization) // process remaining coeffs (or all if no explicit vectorization)
// FIXME this loop gets vectorized by the compiler ! // FIXME this loop gets vectorized by the compiler !
for (int j=alignedSize; j<size; ++j) for (Index j=alignedSize; j<size; ++j)
{ {
Scalar b = rhs[j]; Scalar b = rhs[j];
tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b); tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
@@ -428,35 +428,35 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
} }
// process remaining first and last rows (at most rowsAtOnce-1) // process remaining first and last rows (at most rowsAtOnce-1)
int end = res.size(); Index end = res.size();
int start = rowBound; Index start = rowBound;
do do
{ {
for (int i=start; i<end; ++i) for (Index i=start; i<end; ++i)
{ {
Scalar tmp0 = Scalar(0); Scalar tmp0 = Scalar(0);
Packet ptmp0 = ei_pset1(tmp0); Packet ptmp0 = ei_pset1(tmp0);
const Scalar* lhs0 = lhs + i*lhsStride; const Scalar* lhs0 = lhs + i*lhsStride;
// process first unaligned result's coeffs // process first unaligned result's coeffs
// FIXME this loop gets vectorized by the compiler ! // FIXME this loop gets vectorized by the compiler !
for (int j=0; j<alignedStart; ++j) for (Index j=0; j<alignedStart; ++j)
tmp0 += cj.pmul(lhs0[j], rhs[j]); tmp0 += cj.pmul(lhs0[j], rhs[j]);
if (alignedSize>alignedStart) if (alignedSize>alignedStart)
{ {
// process aligned rhs coeffs // process aligned rhs coeffs
if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0) if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
for (int j = alignedStart;j<alignedSize;j+=PacketSize) for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ptmp0 = cj.pmadd(ei_pload(&lhs0[j]), ei_pload(&rhs[j]), ptmp0); ptmp0 = cj.pmadd(ei_pload(&lhs0[j]), ei_pload(&rhs[j]), ptmp0);
else else
for (int j = alignedStart;j<alignedSize;j+=PacketSize) for (Index j = alignedStart;j<alignedSize;j+=PacketSize)
ptmp0 = cj.pmadd(ei_ploadu(&lhs0[j]), ei_pload(&rhs[j]), ptmp0); ptmp0 = cj.pmadd(ei_ploadu(&lhs0[j]), ei_pload(&rhs[j]), ptmp0);
tmp0 += ei_predux(ptmp0); tmp0 += ei_predux(ptmp0);
} }
// process remaining scalars // process remaining scalars
// FIXME this loop get vectorized by the compiler ! // FIXME this loop get vectorized by the compiler !
for (int j=alignedSize; j<size; ++j) for (Index j=alignedSize; j<size; ++j)
tmp0 += cj.pmul(lhs0[j], rhs[j]); tmp0 += cj.pmul(lhs0[j], rhs[j]);
res[i] += alpha*tmp0; res[i] += alpha*tmp0;
} }
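The hunks above follow the usual split between a packet-wide aligned main loop and a scalar tail. A standalone sketch of that split, with plain scalar arithmetic standing in for the ei_pload/ei_pmadd packet intrinsics (the helper below is illustrative only, not part of the patch):

#include <cstddef>

// Sketch: a dot product split into an aligned, packet-sized main loop and a
// scalar remainder, mirroring the alignedSize/size bounds used above.
float dot_with_scalar_tail(const float* a, const float* b,
                           std::size_t size, std::size_t packetSize)
{
  const std::size_t alignedSize = (size / packetSize) * packetSize;
  float acc = 0.f;
  for (std::size_t j = 0; j < alignedSize; j += packetSize)
    for (std::size_t k = 0; k < packetSize; ++k)   // stands in for one SIMD mul-add
      acc += a[j + k] * b[j + k];
  for (std::size_t j = alignedSize; j < size; ++j) // "process remaining coeffs"
    acc += a[j] * b[j];
  return acc;
}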

@ -25,20 +25,20 @@
#ifndef EIGEN_PARALLELIZER_H #ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H
template<typename BlockBScalar> struct GemmParallelInfo template<typename BlockBScalar, typename Index> struct GemmParallelInfo
{ {
GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0), blockB(0) {} GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0), blockB(0) {}
int volatile sync; int volatile sync;
int volatile users; int volatile users;
int rhs_start; Index rhs_start;
int rhs_length; Index rhs_length;
BlockBScalar* blockB; BlockBScalar* blockB;
}; };
template<bool Condition,typename Functor> template<bool Condition, typename Functor, typename Index>
void ei_parallelize_gemm(const Functor& func, int rows, int cols) void ei_parallelize_gemm(const Functor& func, Index rows, Index cols)
{ {
#ifndef EIGEN_HAS_OPENMP #ifndef EIGEN_HAS_OPENMP
func(0,rows, 0,cols); func(0,rows, 0,cols);
@ -57,16 +57,16 @@ void ei_parallelize_gemm(const Functor& func, int rows, int cols)
// 2- compute the maximal number of threads from the size of the product: // 2- compute the maximal number of threads from the size of the product:
// FIXME this has to be fine tuned // FIXME this has to be fine tuned
int max_threads = std::max(1,rows / 32); Index max_threads = std::max(1,rows / 32);
// 3 - compute the number of threads we are going to use // 3 - compute the number of threads we are going to use
int threads = std::min(omp_get_max_threads(), max_threads); Index threads = std::min<Index>(omp_get_max_threads(), max_threads);
if(threads==1) if(threads==1)
return func(0,rows, 0,cols); return func(0,rows, 0,cols);
int blockCols = (cols / threads) & ~0x3; Index blockCols = (cols / threads) & ~Index(0x3);
int blockRows = (rows / threads) & ~0x7; Index blockRows = (rows / threads) & ~Index(0x7);
typedef typename Functor::BlockBScalar BlockBScalar; typedef typename Functor::BlockBScalar BlockBScalar;
BlockBScalar* sharedBlockB = new BlockBScalar[func.sharedBlockBSize()]; BlockBScalar* sharedBlockB = new BlockBScalar[func.sharedBlockBSize()];
@ -74,13 +74,13 @@ void ei_parallelize_gemm(const Functor& func, int rows, int cols)
GemmParallelInfo<BlockBScalar>* info = new GemmParallelInfo<BlockBScalar>[threads]; GemmParallelInfo<BlockBScalar>* info = new GemmParallelInfo<BlockBScalar>[threads];
#pragma omp parallel for schedule(static,1) num_threads(threads) #pragma omp parallel for schedule(static,1) num_threads(threads)
for(int i=0; i<threads; ++i) for(Index i=0; i<threads; ++i)
{ {
int r0 = i*blockRows; Index r0 = i*blockRows;
int actualBlockRows = (i+1==threads) ? rows-r0 : blockRows; Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
int c0 = i*blockCols; Index c0 = i*blockCols;
int actualBlockCols = (i+1==threads) ? cols-c0 : blockCols; Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
info[i].rhs_start = c0; info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols; info[i].rhs_length = actualBlockCols;
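A standalone sketch of the partitioning arithmetic introduced above: each OpenMP worker receives a row/column block rounded down to a multiple of 8/4, and the last worker absorbs the remainder. The Block struct and partition() helper are illustrative names, not part of Eigen:

#include <vector>

struct Block { long r0, rows, c0, cols; }; // one thread's share of the product

std::vector<Block> partition(long rows, long cols, long threads)
{
  const long blockCols = (cols / threads) & ~long(0x3); // multiple of 4 columns
  const long blockRows = (rows / threads) & ~long(0x7); // multiple of 8 rows
  std::vector<Block> blocks;
  for (long i = 0; i < threads; ++i)
  {
    const long r0 = i * blockRows;
    const long c0 = i * blockCols;
    blocks.push_back(Block{ r0, (i + 1 == threads) ? rows - r0 : blockRows,
                            c0, (i + 1 == threads) ? cols - c0 : blockCols });
  }
  return blocks;
}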

@ -26,41 +26,41 @@
#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H #define EIGEN_SELFADJOINT_MATRIX_MATRIX_H
// pack a selfadjoint block diagonal for use with the gebp_kernel // pack a selfadjoint block diagonal for use with the gebp_kernel
template<typename Scalar, int mr, int StorageOrder> template<typename Scalar, typename Index, int mr, int StorageOrder>
struct ei_symm_pack_lhs struct ei_symm_pack_lhs
{ {
enum { PacketSize = ei_packet_traits<Scalar>::size }; enum { PacketSize = ei_packet_traits<Scalar>::size };
template<int BlockRows> inline template<int BlockRows> inline
void pack(Scalar* blockA, const ei_const_blas_data_mapper<Scalar,StorageOrder>& lhs, int cols, int i, int& count) void pack(Scalar* blockA, const ei_const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)
{ {
// normal copy // normal copy
for(int k=0; k<i; k++) for(Index k=0; k<i; k++)
for(int w=0; w<BlockRows; w++) for(Index w=0; w<BlockRows; w++)
blockA[count++] = lhs(i+w,k); // normal blockA[count++] = lhs(i+w,k); // normal
// symmetric copy // symmetric copy
int h = 0; Index h = 0;
for(int k=i; k<i+BlockRows; k++) for(Index k=i; k<i+BlockRows; k++)
{ {
for(int w=0; w<h; w++) for(Index w=0; w<h; w++)
blockA[count++] = ei_conj(lhs(k, i+w)); // transposed blockA[count++] = ei_conj(lhs(k, i+w)); // transposed
blockA[count++] = ei_real(lhs(k,k)); // real (diagonal) blockA[count++] = ei_real(lhs(k,k)); // real (diagonal)
for(int w=h+1; w<BlockRows; w++) for(Index w=h+1; w<BlockRows; w++)
blockA[count++] = lhs(i+w, k); // normal blockA[count++] = lhs(i+w, k); // normal
++h; ++h;
} }
// transposed copy // transposed copy
for(int k=i+BlockRows; k<cols; k++) for(Index k=i+BlockRows; k<cols; k++)
for(int w=0; w<BlockRows; w++) for(Index w=0; w<BlockRows; w++)
blockA[count++] = ei_conj(lhs(k, i+w)); // transposed blockA[count++] = ei_conj(lhs(k, i+w)); // transposed
} }
void operator()(Scalar* blockA, const Scalar* _lhs, int lhsStride, int cols, int rows) void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{ {
ei_const_blas_data_mapper<Scalar,StorageOrder> lhs(_lhs,lhsStride); ei_const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
int count = 0; Index count = 0;
int peeled_mc = (rows/mr)*mr; Index peeled_mc = (rows/mr)*mr;
for(int i=0; i<peeled_mc; i+=mr) for(Index i=0; i<peeled_mc; i+=mr)
{ {
pack<mr>(blockA, lhs, cols, i, count); pack<mr>(blockA, lhs, cols, i, count);
} }
@ -72,34 +72,34 @@ struct ei_symm_pack_lhs
} }
// do the same with mr==1 // do the same with mr==1
for(int i=peeled_mc; i<rows; i++) for(Index i=peeled_mc; i<rows; i++)
{ {
for(int k=0; k<i; k++) for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal blockA[count++] = lhs(i, k); // normal
blockA[count++] = ei_real(lhs(i, i)); // real (diagonal) blockA[count++] = ei_real(lhs(i, i)); // real (diagonal)
for(int k=i+1; k<cols; k++) for(Index k=i+1; k<cols; k++)
blockA[count++] = ei_conj(lhs(k, i)); // transposed blockA[count++] = ei_conj(lhs(k, i)); // transposed
} }
} }
}; };
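The packing above builds a dense panel out of a matrix of which only one triangle is actually stored, mirroring (and conjugating) coefficients across the diagonal. A hypothetical helper, not Eigen code, illustrating that read pattern for a lower-stored, column-major selfadjoint matrix:

#include <complex>

typedef std::complex<double> C;

// lowerStorage(i,j) is meaningful only for i >= j; an upper coefficient is the
// conjugate of its mirror image, which is what the "transposed copy" reads.
inline C selfadjoint_coeff(const C* lowerStorage, long stride, long row, long col)
{
  return row >= col ? lowerStorage[row + col * stride]
                    : std::conj(lowerStorage[col + row * stride]);
}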
template<typename Scalar, int nr, int StorageOrder> template<typename Scalar, typename Index, int nr, int StorageOrder>
struct ei_symm_pack_rhs struct ei_symm_pack_rhs
{ {
enum { PacketSize = ei_packet_traits<Scalar>::size }; enum { PacketSize = ei_packet_traits<Scalar>::size };
void operator()(Scalar* blockB, const Scalar* _rhs, int rhsStride, Scalar alpha, int rows, int cols, int k2) void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Scalar alpha, Index rows, Index cols, Index k2)
{ {
int end_k = k2 + rows; Index end_k = k2 + rows;
int count = 0; Index count = 0;
ei_const_blas_data_mapper<Scalar,StorageOrder> rhs(_rhs,rhsStride); ei_const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
int packet_cols = (cols/nr)*nr; Index packet_cols = (cols/nr)*nr;
// first part: normal case // first part: normal case
for(int j2=0; j2<k2; j2+=nr) for(Index j2=0; j2<k2; j2+=nr)
{ {
for(int k=k2; k<end_k; k++) for(Index k=k2; k<end_k; k++)
{ {
blockB[count+0] = alpha*rhs(k,j2+0); blockB[count+0] = alpha*rhs(k,j2+0);
blockB[count+1] = alpha*rhs(k,j2+1); blockB[count+1] = alpha*rhs(k,j2+1);
@ -113,11 +113,11 @@ struct ei_symm_pack_rhs
} }
// second part: diagonal block // second part: diagonal block
for(int j2=k2; j2<std::min(k2+rows,packet_cols); j2+=nr) for(Index j2=k2; j2<std::min(k2+rows,packet_cols); j2+=nr)
{ {
// again we can split vertically in three different parts (transpose, symmetric, normal) // again we can split vertically in three different parts (transpose, symmetric, normal)
// transpose // transpose
for(int k=k2; k<j2; k++) for(Index k=k2; k<j2; k++)
{ {
blockB[count+0] = alpha*ei_conj(rhs(j2+0,k)); blockB[count+0] = alpha*ei_conj(rhs(j2+0,k));
blockB[count+1] = alpha*ei_conj(rhs(j2+1,k)); blockB[count+1] = alpha*ei_conj(rhs(j2+1,k));
@ -129,23 +129,23 @@ struct ei_symm_pack_rhs
count += nr; count += nr;
} }
// symmetric // symmetric
int h = 0; Index h = 0;
for(int k=j2; k<j2+nr; k++) for(Index k=j2; k<j2+nr; k++)
{ {
// normal // normal
for (int w=0 ; w<h; ++w) for (Index w=0 ; w<h; ++w)
blockB[count+w] = alpha*rhs(k,j2+w); blockB[count+w] = alpha*rhs(k,j2+w);
blockB[count+h] = alpha*rhs(k,k); blockB[count+h] = alpha*rhs(k,k);
// transpose // transpose
for (int w=h+1 ; w<nr; ++w) for (Index w=h+1 ; w<nr; ++w)
blockB[count+w] = alpha*ei_conj(rhs(j2+w,k)); blockB[count+w] = alpha*ei_conj(rhs(j2+w,k));
count += nr; count += nr;
++h; ++h;
} }
// normal // normal
for(int k=j2+nr; k<end_k; k++) for(Index k=j2+nr; k<end_k; k++)
{ {
blockB[count+0] = alpha*rhs(k,j2+0); blockB[count+0] = alpha*rhs(k,j2+0);
blockB[count+1] = alpha*rhs(k,j2+1); blockB[count+1] = alpha*rhs(k,j2+1);
@ -159,9 +159,9 @@ struct ei_symm_pack_rhs
} }
// third part: transposed // third part: transposed
for(int j2=k2+rows; j2<packet_cols; j2+=nr) for(Index j2=k2+rows; j2<packet_cols; j2+=nr)
{ {
for(int k=k2; k<end_k; k++) for(Index k=k2; k<end_k; k++)
{ {
blockB[count+0] = alpha*ei_conj(rhs(j2+0,k)); blockB[count+0] = alpha*ei_conj(rhs(j2+0,k));
blockB[count+1] = alpha*ei_conj(rhs(j2+1,k)); blockB[count+1] = alpha*ei_conj(rhs(j2+1,k));
@ -175,11 +175,11 @@ struct ei_symm_pack_rhs
} }
// copy the remaining columns one at a time (=> the same with nr==1) // copy the remaining columns one at a time (=> the same with nr==1)
for(int j2=packet_cols; j2<cols; ++j2) for(Index j2=packet_cols; j2<cols; ++j2)
{ {
// transpose // transpose
int half = std::min(end_k,j2); Index half = std::min(end_k,j2);
for(int k=k2; k<half; k++) for(Index k=k2; k<half; k++)
{ {
blockB[count] = alpha*ei_conj(rhs(j2,k)); blockB[count] = alpha*ei_conj(rhs(j2,k));
count += 1; count += 1;
@ -194,7 +194,7 @@ struct ei_symm_pack_rhs
half--; half--;
// normal // normal
for(int k=half+1; k<k2+rows; k++) for(Index k=half+1; k<k2+rows; k++)
{ {
blockB[count] = alpha*rhs(k,j2); blockB[count] = alpha*rhs(k,j2);
count += 1; count += 1;
@ -206,26 +206,26 @@ struct ei_symm_pack_rhs
/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of /* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
* the general matrix matrix product. * the general matrix matrix product.
*/ */
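For orientation, a minimal user-level product that is expected to be routed into this specialization; it assumes the selfadjointView API as it stood around this commit:

#include <Eigen/Dense>
using namespace Eigen;

MatrixXd symm_example()
{
  MatrixXd A = MatrixXd::Random(100, 100);
  MatrixXd B = MatrixXd::Random(100, 20);
  // only the lower triangle of A is read; A is treated as selfadjoint
  return A.selfadjointView<Lower>() * B;
}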
template <typename Scalar, template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs, int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs, int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
int ResStorageOrder> int ResStorageOrder>
struct ei_product_selfadjoint_matrix; struct ei_product_selfadjoint_matrix;
template <typename Scalar, template <typename Scalar, typename Index,
int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs, int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs> int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>
struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor> struct ei_product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
{ {
static EIGEN_STRONG_INLINE void run( static EIGEN_STRONG_INLINE void run(
int rows, int cols, Index rows, Index cols,
const Scalar* lhs, int lhsStride, const Scalar* lhs, Index lhsStride,
const Scalar* rhs, int rhsStride, const Scalar* rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha) Scalar alpha)
{ {
ei_product_selfadjoint_matrix<Scalar, ei_product_selfadjoint_matrix<Scalar, Index,
EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor, EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs), RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor, EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
@ -235,45 +235,45 @@ struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,LhsSelfAdjoint,Conju
} }
}; };
template <typename Scalar, template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs> int RhsStorageOrder, bool ConjugateRhs>
struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor> struct ei_product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int rows, int cols, Index rows, Index cols,
const Scalar* _lhs, int lhsStride, const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, int rhsStride, const Scalar* _rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha) Scalar alpha)
{ {
int size = rows; Index size = rows;
ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride); ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride); ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs) if (ConjugateRhs)
alpha = ei_conj(alpha); alpha = ei_conj(alpha);
typedef ei_product_blocking_traits<Scalar> Blocking; typedef ei_product_blocking_traits<Scalar> Blocking;
int kc = std::min<int>(Blocking::Max_kc,size); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc,size); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB); Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB);
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr; Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
ei_symm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs; ei_symm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
for(int k2=0; k2<size; k2+=kc) for(Index k2=0; k2<size; k2+=kc)
{ {
const int actual_kc = std::min(k2+kc,size)-k2; const Index actual_kc = std::min(k2+kc,size)-k2;
// we have selected one row panel of rhs and one column panel of lhs // we have selected one row panel of rhs and one column panel of lhs
// pack rhs's panel into a sequential chunk of memory // pack rhs's panel into a sequential chunk of memory
@ -284,9 +284,9 @@ struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,true,ConjugateLhs, R
// 1 - the transposed panel above the diagonal block => transposed packed copy // 1 - the transposed panel above the diagonal block => transposed packed copy
// 2 - the diagonal block => special packed copy // 2 - the diagonal block => special packed copy
// 3 - the panel below the diagonal block => generic packed copy // 3 - the panel below the diagonal block => generic packed copy
for(int i2=0; i2<k2; i2+=mc) for(Index i2=0; i2<k2; i2+=mc)
{ {
const int actual_mc = std::min(i2+mc,k2)-i2; const Index actual_mc = std::min(i2+mc,k2)-i2;
// transposed packed copy // transposed packed copy
pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc); pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc);
@ -294,17 +294,17 @@ struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,true,ConjugateLhs, R
} }
// the block diagonal // the block diagonal
{ {
const int actual_mc = std::min(k2+kc,size)-k2; const Index actual_mc = std::min(k2+kc,size)-k2;
// symmetric packed copy // symmetric packed copy
pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc); pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+k2, resStride, blockA, blockB, actual_mc, actual_kc, cols); gebp_kernel(res+k2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
} }
for(int i2=k2+kc; i2<size; i2+=mc) for(Index i2=k2+kc; i2<size; i2+=mc)
{ {
const int actual_mc = std::min(i2+mc,size)-i2; const Index actual_mc = std::min(i2+mc,size)-i2;
ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder,false>() ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder,false>()
(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc); (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols); gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
@ -317,50 +317,50 @@ struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,true,ConjugateLhs, R
}; };
// matrix * selfadjoint product // matrix * selfadjoint product
template <typename Scalar, template <typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs> int RhsStorageOrder, bool ConjugateRhs>
struct ei_product_selfadjoint_matrix<Scalar,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor> struct ei_product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int rows, int cols, Index rows, Index cols,
const Scalar* _lhs, int lhsStride, const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, int rhsStride, const Scalar* _rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha) Scalar alpha)
{ {
int size = cols; Index size = cols;
ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride); ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
if (ConjugateRhs) if (ConjugateRhs)
alpha = ei_conj(alpha); alpha = ei_conj(alpha);
typedef ei_product_blocking_traits<Scalar> Blocking; typedef ei_product_blocking_traits<Scalar> Blocking;
int kc = std::min<int>(Blocking::Max_kc,size); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc,size); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB); Scalar* allocatedBlockB = ei_aligned_stack_new(Scalar, sizeB);
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr; Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
ei_symm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs; ei_symm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
for(int k2=0; k2<size; k2+=kc) for(Index k2=0; k2<size; k2+=kc)
{ {
const int actual_kc = std::min(k2+kc,size)-k2; const Index actual_kc = std::min(k2+kc,size)-k2;
pack_rhs(blockB, _rhs, rhsStride, alpha, actual_kc, cols, k2); pack_rhs(blockB, _rhs, rhsStride, alpha, actual_kc, cols, k2);
// => GEPP // => GEPP
for(int i2=0; i2<rows; i2+=mc) for(Index i2=0; i2<rows; i2+=mc)
{ {
const int actual_mc = std::min(i2+mc,rows)-i2; const Index actual_mc = std::min(i2+mc,rows)-i2;
pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc); pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols); gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
@ -406,7 +406,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs); * RhsBlasTraits::extractScalarFactor(m_rhs);
ei_product_selfadjoint_matrix<Scalar, ei_product_selfadjoint_matrix<Scalar, Index,
EIGEN_LOGICAL_XOR(LhsIsUpper, EIGEN_LOGICAL_XOR(LhsIsUpper,
ei_traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint, ei_traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)), NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),

@ -30,15 +30,15 @@
* the number of load/stores of the result by a factor 2 and to reduce * the number of load/stores of the result by a factor 2 and to reduce
* the instruction dependency. * the instruction dependency.
*/ */
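The truncated comment above refers to processing two columns per iteration so that each result coefficient is loaded and stored once per pair. A scalar sketch of that idea for a lower-stored symmetric matrix (illustrative only, not the vectorized Eigen kernel):

#include <cstddef>

// res += alpha * A * x, with A symmetric and only its lower triangle stored
// column-major; two columns are consumed per pass of the outer loop.
void symv_lower_two_columns(std::size_t n, const double* A, std::size_t lda,
                            const double* x, double* res, double alpha)
{
  std::size_t j = 0;
  for (; j + 1 < n; j += 2)
  {
    const double* A0 = A + j * lda;       // column j
    const double* A1 = A + (j + 1) * lda; // column j+1
    const double t0 = alpha * x[j], t1 = alpha * x[j + 1];
    double s0 = 0, s1 = 0;                // symmetric (row) contributions
    res[j]     += t0 * A0[j];                      // diagonal (j,j)
    res[j + 1] += t0 * A0[j + 1] + t1 * A1[j + 1]; // A(j+1,j) and diagonal (j+1,j+1)
    s0 += A0[j + 1] * x[j + 1];
    for (std::size_t i = j + 2; i < n; ++i)
    {
      res[i] += t0 * A0[i] + t1 * A1[i];           // below the diagonal
      s0 += A0[i] * x[i];
      s1 += A1[i] * x[i];
    }
    res[j]     += alpha * s0;
    res[j + 1] += alpha * s1;
  }
  if (j < n) // odd size: only the diagonal of the last column remains
    res[j] += alpha * x[j] * A[j * lda + j];
}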
template<typename Scalar, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector( static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector(
int size, Index size,
const Scalar* lhs, int lhsStride, const Scalar* lhs, Index lhsStride,
const Scalar* _rhs, int rhsIncr, const Scalar* _rhs, Index rhsIncr,
Scalar* res, Scalar alpha) Scalar* res, Scalar alpha)
{ {
typedef typename ei_packet_traits<Scalar>::type Packet; typedef typename ei_packet_traits<Scalar>::type Packet;
const int PacketSize = sizeof(Packet)/sizeof(Scalar); const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum { enum {
IsRowMajor = StorageOrder==RowMajor ? 1 : 0, IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
@ -58,16 +58,16 @@ static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector(
{ {
Scalar* r = ei_aligned_stack_new(Scalar, size); Scalar* r = ei_aligned_stack_new(Scalar, size);
const Scalar* it = _rhs; const Scalar* it = _rhs;
for (int i=0; i<size; ++i, it+=rhsIncr) for (Index i=0; i<size; ++i, it+=rhsIncr)
r[i] = *it; r[i] = *it;
rhs = r; rhs = r;
} }
int bound = std::max(0,size-8) & 0xfffffffE; Index bound = std::max(Index(0),size-8) & 0xfffffffe;
if (FirstTriangular) if (FirstTriangular)
bound = size - bound; bound = size - bound;
for (int j=FirstTriangular ? bound : 0; for (Index j=FirstTriangular ? bound : 0;
j<(FirstTriangular ? size : bound);j+=2) j<(FirstTriangular ? size : bound);j+=2)
{ {
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride; register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
@ -136,14 +136,14 @@ static EIGEN_DONT_INLINE void ei_product_selfadjoint_vector(
res[j] += alpha * (t2 + ei_predux(ptmp2)); res[j] += alpha * (t2 + ei_predux(ptmp2));
res[j+1] += alpha * (t3 + ei_predux(ptmp3)); res[j+1] += alpha * (t3 + ei_predux(ptmp3));
} }
for (int j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++) for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
{ {
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride; register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
Scalar t1 = cjAlpha * rhs[j]; Scalar t1 = cjAlpha * rhs[j];
Scalar t2 = 0; Scalar t2 = 0;
res[j] += cj0.pmul(A0[j],t1); res[j] += cj0.pmul(A0[j],t1);
for (int i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++) { for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++) {
res[i] += cj0.pmul(A0[i], t1); res[i] += cj0.pmul(A0[i], t1);
t2 += cj1.pmul(A0[i], rhs[i]); t2 += cj1.pmul(A0[i], rhs[i]);
} }
@ -187,7 +187,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
ei_assert(dst.innerStride()==1 && "not implemented yet"); ei_assert(dst.innerStride()==1 && "not implemented yet");
ei_product_selfadjoint_vector<Scalar, (ei_traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)> ei_product_selfadjoint_vector<Scalar, Index, (ei_traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>
( (
lhs.rows(), // size lhs.rows(), // size
&lhs.coeff(0,0), lhs.outerStride(), // lhs info &lhs.coeff(0,0), lhs.outerStride(), // lhs info

@ -26,52 +26,52 @@
#define EIGEN_SELFADJOINT_PRODUCT_H #define EIGEN_SELFADJOINT_PRODUCT_H
/********************************************************************** /**********************************************************************
* This file implement a self adjoint product: C += A A^T updating only * This file implements a self adjoint product: C += A A^T updating only
* an half of the selfadjoint matrix C. * half of the selfadjoint matrix C.
* It corresponds to the level 3 SYRK Blas routine. * It corresponds to the level 3 SYRK Blas routine.
**********************************************************************/ **********************************************************************/
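For orientation, the user-level rank update that is expected to reach this path; it assumes the SelfAdjointView::rankUpdate API as it stood around this commit:

#include <Eigen/Dense>
using namespace Eigen;

MatrixXd syrk_example()
{
  MatrixXd C = MatrixXd::Zero(50, 50);
  MatrixXd A = MatrixXd::Random(50, 10);
  // updates only the lower half of C with C += 2 * A * A^T
  C.selfadjointView<Lower>().rankUpdate(A, 2.0);
  return C;
}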
// forward declarations (defined at the end of this file) // forward declarations (defined at the end of this file)
template<typename Scalar, int mr, int nr, typename Conj, int UpLo> template<typename Scalar, typename Index, int mr, int nr, typename Conj, int UpLo>
struct ei_sybb_kernel; struct ei_sybb_kernel;
/* Optimized selfadjoint product (_SYRK) */ /* Optimized selfadjoint product (_SYRK) */
template <typename Scalar, template <typename Scalar, typename Index,
int RhsStorageOrder, int RhsStorageOrder,
int ResStorageOrder, bool AAT, int UpLo> int ResStorageOrder, bool AAT, int UpLo>
struct ei_selfadjoint_product; struct ei_selfadjoint_product;
// as usual if the result is row major => we transpose the product // as usual if the result is row major => we transpose the product
template <typename Scalar, int MatStorageOrder, bool AAT, int UpLo> template <typename Scalar, typename Index, int MatStorageOrder, bool AAT, int UpLo>
struct ei_selfadjoint_product<Scalar,MatStorageOrder, RowMajor, AAT, UpLo> struct ei_selfadjoint_product<Scalar, Index, MatStorageOrder, RowMajor, AAT, UpLo>
{ {
static EIGEN_STRONG_INLINE void run(int size, int depth, const Scalar* mat, int matStride, Scalar* res, int resStride, Scalar alpha) static EIGEN_STRONG_INLINE void run(Index size, Index depth, const Scalar* mat, Index matStride, Scalar* res, Index resStride, Scalar alpha)
{ {
ei_selfadjoint_product<Scalar, MatStorageOrder, ColMajor, !AAT, UpLo==Lower?Upper:Lower> ei_selfadjoint_product<Scalar, Index, MatStorageOrder, ColMajor, !AAT, UpLo==Lower?Upper:Lower>
::run(size, depth, mat, matStride, res, resStride, alpha); ::run(size, depth, mat, matStride, res, resStride, alpha);
} }
}; };
template <typename Scalar, template <typename Scalar, typename Index,
int MatStorageOrder, bool AAT, int UpLo> int MatStorageOrder, bool AAT, int UpLo>
struct ei_selfadjoint_product<Scalar,MatStorageOrder, ColMajor, AAT, UpLo> struct ei_selfadjoint_product<Scalar, Index, MatStorageOrder, ColMajor, AAT, UpLo>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int size, int depth, Index size, Index depth,
const Scalar* _mat, int matStride, const Scalar* _mat, Index matStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha) Scalar alpha)
{ {
ei_const_blas_data_mapper<Scalar, MatStorageOrder> mat(_mat,matStride); ei_const_blas_data_mapper<Scalar, Index, MatStorageOrder> mat(_mat,matStride);
if(AAT) if(AAT)
alpha = ei_conj(alpha); alpha = ei_conj(alpha);
typedef ei_product_blocking_traits<Scalar> Blocking; typedef ei_product_blocking_traits<Scalar> Blocking;
int kc = std::min<int>(Blocking::Max_kc,depth); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc,depth); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,size); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,size); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size; std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size;
@ -81,21 +81,21 @@ struct ei_selfadjoint_product<Scalar,MatStorageOrder, ColMajor, AAT, UpLo>
// note that the actual rhs is the transpose/adjoint of mat // note that the actual rhs is the transpose/adjoint of mat
typedef ei_conj_helper<NumTraits<Scalar>::IsComplex && !AAT, NumTraits<Scalar>::IsComplex && AAT> Conj; typedef ei_conj_helper<NumTraits<Scalar>::IsComplex && !AAT, NumTraits<Scalar>::IsComplex && AAT> Conj;
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, Conj> gebp_kernel; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, Conj> gebp_kernel;
ei_gemm_pack_rhs<Scalar,Blocking::nr,MatStorageOrder==RowMajor ? ColMajor : RowMajor> pack_rhs; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,MatStorageOrder==RowMajor ? ColMajor : RowMajor> pack_rhs;
ei_gemm_pack_lhs<Scalar,Blocking::mr,MatStorageOrder, false> pack_lhs; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,MatStorageOrder, false> pack_lhs;
ei_sybb_kernel<Scalar, Blocking::mr, Blocking::nr, Conj, UpLo> sybb; ei_sybb_kernel<Scalar, Index, Blocking::mr, Blocking::nr, Conj, UpLo> sybb;
for(int k2=0; k2<depth; k2+=kc) for(Index k2=0; k2<depth; k2+=kc)
{ {
const int actual_kc = std::min(k2+kc,depth)-k2; const Index actual_kc = std::min(k2+kc,depth)-k2;
// note that the actual rhs is the transpose/adjoint of mat // note that the actual rhs is the transpose/adjoint of mat
pack_rhs(blockB, &mat(0,k2), matStride, alpha, actual_kc, size); pack_rhs(blockB, &mat(0,k2), matStride, alpha, actual_kc, size);
for(int i2=0; i2<size; i2+=mc) for(Index i2=0; i2<size; i2+=mc)
{ {
const int actual_mc = std::min(i2+mc,size)-i2; const Index actual_mc = std::min(i2+mc,size)-i2;
pack_lhs(blockA, &mat(i2, k2), matStride, actual_kc, actual_mc); pack_lhs(blockA, &mat(i2, k2), matStride, actual_kc, actual_mc);
@ -111,8 +111,8 @@ struct ei_selfadjoint_product<Scalar,MatStorageOrder, ColMajor, AAT, UpLo>
if (UpLo==Upper) if (UpLo==Upper)
{ {
int j2 = i2+actual_mc; Index j2 = i2+actual_mc;
gebp_kernel(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(0,size-j2), gebp_kernel(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0),size-j2),
-1, -1, 0, 0, allocatedBlockB); -1, -1, 0, 0, allocatedBlockB);
} }
} }
@ -138,7 +138,7 @@ SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
enum { IsRowMajor = (ei_traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 }; enum { IsRowMajor = (ei_traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
ei_selfadjoint_product<Scalar, ei_selfadjoint_product<Scalar, Index,
_ActualUType::Flags&RowMajorBit ? RowMajor : ColMajor, _ActualUType::Flags&RowMajorBit ? RowMajor : ColMajor,
ei_traits<MatrixType>::Flags&RowMajorBit ? RowMajor : ColMajor, ei_traits<MatrixType>::Flags&RowMajorBit ? RowMajor : ColMajor,
!UBlasTraits::NeedToConjugate, UpLo> !UBlasTraits::NeedToConjugate, UpLo>
@ -158,23 +158,23 @@ SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
// while the selfadjoint block overlapping the diagonal is evaluated into a // while the selfadjoint block overlapping the diagonal is evaluated into a
// small temporary buffer which is then accumulated into the result using a // small temporary buffer which is then accumulated into the result using a
// triangular traversal. // triangular traversal.
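A minimal sketch of that triangular accumulation: only the requested half of the small dense buffer is added into the destination block (illustrative only; the actual kernel is ei_sybb_kernel below):

// Add the lower half of a bs x bs column-major buffer into res.
void accumulate_lower_triangle(double* res, long resStride,
                               const double* buf, long bs)
{
  for (long j = 0; j < bs; ++j)
    for (long i = j; i < bs; ++i) // skip the strictly upper part
      res[i + j * resStride] += buf[i + j * bs];
}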
template<typename Scalar, int mr, int nr, typename Conj, int UpLo> template<typename Scalar, typename Index, int mr, int nr, typename Conj, int UpLo>
struct ei_sybb_kernel struct ei_sybb_kernel
{ {
enum { enum {
PacketSize = ei_packet_traits<Scalar>::size, PacketSize = ei_packet_traits<Scalar>::size,
BlockSize = EIGEN_ENUM_MAX(mr,nr) BlockSize = EIGEN_ENUM_MAX(mr,nr)
}; };
void operator()(Scalar* res, int resStride, const Scalar* blockA, const Scalar* blockB, int size, int depth, Scalar* workspace) void operator()(Scalar* res, Index resStride, const Scalar* blockA, const Scalar* blockB, Index size, Index depth, Scalar* workspace)
{ {
ei_gebp_kernel<Scalar, mr, nr, Conj> gebp_kernel; ei_gebp_kernel<Scalar, Index, mr, nr, Conj> gebp_kernel;
Matrix<Scalar,BlockSize,BlockSize,ColMajor> buffer; Matrix<Scalar,BlockSize,BlockSize,ColMajor> buffer;
// let's process the block per panel of actual_mc x BlockSize, // let's process the block per panel of actual_mc x BlockSize,
// again, each is split into three parts, etc. // again, each is split into three parts, etc.
for (int j=0; j<size; j+=BlockSize) for (Index j=0; j<size; j+=BlockSize)
{ {
int actualBlockSize = std::min<int>(BlockSize,size - j); Index actualBlockSize = std::min<Index>(BlockSize,size - j);
const Scalar* actual_b = blockB+j*depth; const Scalar* actual_b = blockB+j*depth;
if(UpLo==Upper) if(UpLo==Upper)
@ -182,16 +182,16 @@ struct ei_sybb_kernel
// selfadjoint micro block // selfadjoint micro block
{ {
int i = j; Index i = j;
buffer.setZero(); buffer.setZero();
// 1 - apply the kernel on the temporary buffer // 1 - apply the kernel on the temporary buffer
gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize,
-1, -1, 0, 0, workspace); -1, -1, 0, 0, workspace);
// 2 - triangular accumulation // 2 - triangular accumulation
for(int j1=0; j1<actualBlockSize; ++j1) for(Index j1=0; j1<actualBlockSize; ++j1)
{ {
Scalar* r = res + (j+j1)*resStride + i; Scalar* r = res + (j+j1)*resStride + i;
for(int i1=UpLo==Lower ? j1 : 0; for(Index i1=UpLo==Lower ? j1 : 0;
UpLo==Lower ? i1<actualBlockSize : i1<=j1; ++i1) UpLo==Lower ? i1<actualBlockSize : i1<=j1; ++i1)
r[i1] += buffer(i1,j1); r[i1] += buffer(i1,j1);
} }
@ -199,7 +199,7 @@ struct ei_sybb_kernel
if(UpLo==Lower) if(UpLo==Lower)
{ {
int i = j+actualBlockSize; Index i = j+actualBlockSize;
gebp_kernel(res+j*resStride+i, resStride, blockA+depth*i, actual_b, size-i, depth, actualBlockSize, gebp_kernel(res+j*resStride+i, resStride, blockA+depth*i, actual_b, size-i, depth, actualBlockSize,
-1, -1, 0, 0, workspace); -1, -1, 0, 0, workspace);
} }

@ -29,16 +29,16 @@
* It corresponds to the Level2 syr2 BLAS routine * It corresponds to the Level2 syr2 BLAS routine
*/ */
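For orientation, the user-level rank-2 update this selector serves; it assumes the rankUpdate(u, v, alpha) overload as it stood around this commit:

#include <Eigen/Dense>
using namespace Eigen;

MatrixXd syr2_example()
{
  MatrixXd C = MatrixXd::Zero(50, 50);
  VectorXd u = VectorXd::Random(50), v = VectorXd::Random(50);
  // updates only the upper half of C with C += alpha*(u*v^* + v*u^*)
  C.selfadjointView<Upper>().rankUpdate(u, v, 0.5);
  return C;
}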
template<typename Scalar, typename UType, typename VType, int UpLo> template<typename Scalar, typename Index, typename UType, typename VType, int UpLo>
struct ei_selfadjoint_rank2_update_selector; struct ei_selfadjoint_rank2_update_selector;
template<typename Scalar, typename UType, typename VType> template<typename Scalar, typename Index, typename UType, typename VType>
struct ei_selfadjoint_rank2_update_selector<Scalar,UType,VType,Lower> struct ei_selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
{ {
static void run(Scalar* mat, int stride, const UType& u, const VType& v, Scalar alpha) static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
{ {
const int size = u.size(); const Index size = u.size();
for (int i=0; i<size; ++i) for (Index i=0; i<size; ++i)
{ {
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) += Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
(alpha * ei_conj(u.coeff(i))) * v.tail(size-i) (alpha * ei_conj(u.coeff(i))) * v.tail(size-i)
@ -47,13 +47,13 @@ struct ei_selfadjoint_rank2_update_selector<Scalar,UType,VType,Lower>
} }
}; };
template<typename Scalar, typename UType, typename VType> template<typename Scalar, typename Index, typename UType, typename VType>
struct ei_selfadjoint_rank2_update_selector<Scalar,UType,VType,Upper> struct ei_selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
{ {
static void run(Scalar* mat, int stride, const UType& u, const VType& v, Scalar alpha) static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
{ {
const int size = u.size(); const Index size = u.size();
for (int i=0; i<size; ++i) for (Index i=0; i<size; ++i)
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) += Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
(alpha * ei_conj(u.coeff(i))) * v.head(i+1) (alpha * ei_conj(u.coeff(i))) * v.head(i+1)
+ (alpha * ei_conj(v.coeff(i))) * u.head(i+1); + (alpha * ei_conj(v.coeff(i))) * u.head(i+1);
@ -84,7 +84,7 @@ SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
* VBlasTraits::extractScalarFactor(v.derived()); * VBlasTraits::extractScalarFactor(v.derived());
enum { IsRowMajor = (ei_traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 }; enum { IsRowMajor = (ei_traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
ei_selfadjoint_rank2_update_selector<Scalar, ei_selfadjoint_rank2_update_selector<Scalar, Index,
typename ei_cleantype<typename ei_conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::ret>::type, typename ei_cleantype<typename ei_conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::ret>::type,
typename ei_cleantype<typename ei_conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::ret>::type, typename ei_cleantype<typename ei_conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::ret>::type,
(IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)> (IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)>

@ -52,29 +52,29 @@
/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of /* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
* the general matrix matrix product. * the general matrix matrix product.
*/ */
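Despite the comment reusing the _SYMM wording, the declarations below implement the triangular (TRMM-like) matrix * matrix product. For orientation, a minimal user-level call expected to route here, assuming the triangularView API as it stood around this commit:

#include <Eigen/Dense>
using namespace Eigen;

MatrixXd trmm_example()
{
  MatrixXd A = MatrixXd::Random(40, 40);
  MatrixXd B = MatrixXd::Random(40, 8);
  // only the upper triangle of A is read
  return A.triangularView<Upper>() * B;
}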
template <typename Scalar, template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular, int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int RhsStorageOrder, bool ConjugateRhs,
int ResStorageOrder> int ResStorageOrder>
struct ei_product_triangular_matrix_matrix; struct ei_product_triangular_matrix_matrix;
template <typename Scalar, template <typename Scalar, typename Index,
int Mode, bool LhsIsTriangular, int Mode, bool LhsIsTriangular,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs> int RhsStorageOrder, bool ConjugateRhs>
struct ei_product_triangular_matrix_matrix<Scalar,Mode,LhsIsTriangular, struct ei_product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
LhsStorageOrder,ConjugateLhs, LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,RowMajor> RhsStorageOrder,ConjugateRhs,RowMajor>
{ {
static EIGEN_STRONG_INLINE void run( static EIGEN_STRONG_INLINE void run(
int size, int otherSize, Index size, Index otherSize,
const Scalar* lhs, int lhsStride, const Scalar* lhs, Index lhsStride,
const Scalar* rhs, int rhsStride, const Scalar* rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha) Scalar alpha)
{ {
ei_product_triangular_matrix_matrix<Scalar, ei_product_triangular_matrix_matrix<Scalar, Index,
(Mode&UnitDiag) | (Mode&Upper) ? Lower : Upper, (Mode&UnitDiag) | (Mode&Upper) ? Lower : Upper,
(!LhsIsTriangular), (!LhsIsTriangular),
RhsStorageOrder==RowMajor ? ColMajor : RowMajor, RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
@ -87,25 +87,25 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,LhsIsTriangular,
}; };
// implements col-major += alpha * op(triangular) * op(general) // implements col-major += alpha * op(triangular) * op(general)
template <typename Scalar, int Mode, template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs> int RhsStorageOrder, bool ConjugateRhs>
struct ei_product_triangular_matrix_matrix<Scalar,Mode,true, struct ei_product_triangular_matrix_matrix<Scalar,Index,Mode,true,
LhsStorageOrder,ConjugateLhs, LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor> RhsStorageOrder,ConjugateRhs,ColMajor>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int size, int cols, Index size, Index cols,
const Scalar* _lhs, int lhsStride, const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, int rhsStride, const Scalar* _rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha) Scalar alpha)
{ {
int rows = size; Index rows = size;
ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride); ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride); ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs) if (ConjugateRhs)
alpha = ei_conj(alpha); alpha = ei_conj(alpha);
@ -116,8 +116,8 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,true,
IsLower = (Mode&Lower) == Lower IsLower = (Mode&Lower) == Lower
}; };
int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
@ -129,16 +129,16 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,true,
triangularBuffer.setZero(); triangularBuffer.setZero();
triangularBuffer.diagonal().setOnes(); triangularBuffer.diagonal().setOnes();
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
for(int k2=IsLower ? size : 0; for(Index k2=IsLower ? size : 0;
IsLower ? k2>0 : k2<size; IsLower ? k2>0 : k2<size;
IsLower ? k2-=kc : k2+=kc) IsLower ? k2-=kc : k2+=kc)
{ {
const int actual_kc = std::min(IsLower ? k2 : size-k2, kc); const Index actual_kc = std::min(IsLower ? k2 : size-k2, kc);
int actual_k2 = IsLower ? k2-actual_kc : k2; Index actual_k2 = IsLower ? k2-actual_kc : k2;
pack_rhs(blockB, &rhs(actual_k2,0), rhsStride, alpha, actual_kc, cols); pack_rhs(blockB, &rhs(actual_k2,0), rhsStride, alpha, actual_kc, cols);
@ -149,21 +149,21 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,true,
// the block diagonal // the block diagonal
{ {
// for each small vertical panels of lhs // for each small vertical panels of lhs
for (int k1=0; k1<actual_kc; k1+=SmallPanelWidth) for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
{ {
int actualPanelWidth = std::min<int>(actual_kc-k1, SmallPanelWidth); Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
int lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1; Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
int startBlock = actual_k2+k1; Index startBlock = actual_k2+k1;
int blockBOffset = k1; Index blockBOffset = k1;
// => GEBP with the micro triangular block // => GEBP with the micro triangular block
// The trick is to pack this micro block while filling the opposite triangular part with zeros. // The trick is to pack this micro block while filling the opposite triangular part with zeros.
// To this end we do an extra triangular copy to a small temporary buffer // To this end we do an extra triangular copy to a small temporary buffer
for (int k=0;k<actualPanelWidth;++k) for (Index k=0;k<actualPanelWidth;++k)
{ {
if (!(Mode&UnitDiag)) if (!(Mode&UnitDiag))
triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k); triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
for (int i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i) for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k); triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
} }
pack_lhs(blockA, triangularBuffer.data(), triangularBuffer.outerStride(), actualPanelWidth, actualPanelWidth); pack_lhs(blockA, triangularBuffer.data(), triangularBuffer.outerStride(), actualPanelWidth, actualPanelWidth);
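A hypothetical standalone version of that triangular copy: the k x k micro block is written into a dense, column-major buffer with the opposite triangle zeroed (and the diagonal forced to one in the unit-diagonal case), so the general packing and GEBP kernels can consume it unchanged:

void copy_lower_block_zero_upper(const double* src, long srcStride,
                                 double* buf, long k, bool unitDiag)
{
  for (long j = 0; j < k; ++j)
    for (long i = 0; i < k; ++i)
      buf[i + j * k] = i > j  ? src[i + j * srcStride]                    // strictly lower: copy
                     : i == j ? (unitDiag ? 1.0 : src[i + j * srcStride]) // diagonal
                              : 0.0;                                      // upper: pad with zeros
}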
@ -174,7 +174,7 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,true,
// GEBP with remaining micro panel // GEBP with remaining micro panel
if (lengthTarget>0) if (lengthTarget>0)
{ {
int startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2; Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;
pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget); pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget);
@ -185,12 +185,12 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,true,
} }
// the part below the diagonal => GEPP // the part below the diagonal => GEPP
{ {
int start = IsLower ? k2 : 0; Index start = IsLower ? k2 : 0;
int end = IsLower ? size : actual_k2; Index end = IsLower ? size : actual_k2;
for(int i2=start; i2<end; i2+=mc) for(Index i2=start; i2<end; i2+=mc)
{ {
const int actual_mc = std::min(i2+mc,end)-i2; const Index actual_mc = std::min(i2+mc,end)-i2;
ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder,false>() ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder,false>()
(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc); (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols); gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols);
@ -205,25 +205,25 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,true,
}; };
// implements col-major += alpha * op(general) * op(triangular) // implements col-major += alpha * op(general) * op(triangular)
template <typename Scalar, int Mode, template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs> int RhsStorageOrder, bool ConjugateRhs>
struct ei_product_triangular_matrix_matrix<Scalar,Mode,false, struct ei_product_triangular_matrix_matrix<Scalar,Index,Mode,false,
LhsStorageOrder,ConjugateLhs, LhsStorageOrder,ConjugateLhs,
RhsStorageOrder,ConjugateRhs,ColMajor> RhsStorageOrder,ConjugateRhs,ColMajor>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int size, int rows, Index size, Index rows,
const Scalar* _lhs, int lhsStride, const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, int rhsStride, const Scalar* _rhs, Index rhsStride,
Scalar* res, int resStride, Scalar* res, Index resStride,
Scalar alpha) Scalar alpha)
{ {
int cols = size; Index cols = size;
ei_const_blas_data_mapper<Scalar, LhsStorageOrder> lhs(_lhs,lhsStride); ei_const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
ei_const_blas_data_mapper<Scalar, RhsStorageOrder> rhs(_rhs,rhsStride); ei_const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
if (ConjugateRhs) if (ConjugateRhs)
alpha = ei_conj(alpha); alpha = ei_conj(alpha);
@ -234,8 +234,8 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,false,
IsLower = (Mode&Lower) == Lower IsLower = (Mode&Lower) == Lower
}; };
int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,rows); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,rows); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
@ -246,30 +246,30 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,false,
triangularBuffer.setZero(); triangularBuffer.setZero();
triangularBuffer.diagonal().setOnes(); triangularBuffer.diagonal().setOnes();
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<ConjugateLhs,ConjugateRhs> > gebp_kernel;
ei_gemm_pack_lhs<Scalar,Blocking::mr,LhsStorageOrder> pack_lhs; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,LhsStorageOrder> pack_lhs;
ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder,true> pack_rhs_panel; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder,true> pack_rhs_panel;
for(int k2=IsLower ? 0 : size; for(Index k2=IsLower ? 0 : size;
IsLower ? k2<size : k2>0; IsLower ? k2<size : k2>0;
IsLower ? k2+=kc : k2-=kc) IsLower ? k2+=kc : k2-=kc)
{ {
const int actual_kc = std::min(IsLower ? size-k2 : k2, kc); const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc);
int actual_k2 = IsLower ? k2 : k2-actual_kc; Index actual_k2 = IsLower ? k2 : k2-actual_kc;
int rs = IsLower ? actual_k2 : size - k2; Index rs = IsLower ? actual_k2 : size - k2;
Scalar* geb = blockB+actual_kc*actual_kc; Scalar* geb = blockB+actual_kc*actual_kc;
pack_rhs(geb, &rhs(actual_k2,IsLower ? 0 : k2), rhsStride, alpha, actual_kc, rs); pack_rhs(geb, &rhs(actual_k2,IsLower ? 0 : k2), rhsStride, alpha, actual_kc, rs);
// pack the triangular part of the rhs padding the unrolled blocks with zeros // pack the triangular part of the rhs padding the unrolled blocks with zeros
{ {
for (int j2=0; j2<actual_kc; j2+=SmallPanelWidth) for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{ {
int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth); Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
int actual_j2 = actual_k2 + j2; Index actual_j2 = actual_k2 + j2;
int panelOffset = IsLower ? j2+actualPanelWidth : 0; Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
int panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
// general part // general part
pack_rhs_panel(blockB+j2*actual_kc, pack_rhs_panel(blockB+j2*actual_kc,
&rhs(actual_k2+panelOffset, actual_j2), rhsStride, alpha, &rhs(actual_k2+panelOffset, actual_j2), rhsStride, alpha,
@ -277,11 +277,11 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,false,
actual_kc, panelOffset); actual_kc, panelOffset);
// append the triangular part via a temporary buffer // append the triangular part via a temporary buffer
for (int j=0;j<actualPanelWidth;++j) for (Index j=0;j<actualPanelWidth;++j)
{ {
if (!(Mode&UnitDiag)) if (!(Mode&UnitDiag))
triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j); triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
for (int k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k) for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j); triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
} }
@ -292,18 +292,18 @@ struct ei_product_triangular_matrix_matrix<Scalar,Mode,false,
} }
} }
for (int i2=0; i2<rows; i2+=mc) for (Index i2=0; i2<rows; i2+=mc)
{ {
const int actual_mc = std::min(mc,rows-i2); const Index actual_mc = std::min(mc,rows-i2);
pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc); pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
// triangular kernel // triangular kernel
{ {
for (int j2=0; j2<actual_kc; j2+=SmallPanelWidth) for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{ {
int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth); Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
int panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth; Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
int blockOffset = IsLower ? j2 : 0; Index blockOffset = IsLower ? j2 : 0;
gebp_kernel(res+i2+(actual_k2+j2)*resStride, resStride, gebp_kernel(res+i2+(actual_k2+j2)*resStride, resStride,
blockA, blockB+j2*actual_kc, blockA, blockB+j2*actual_kc,
@ -349,7 +349,7 @@ struct TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
* RhsBlasTraits::extractScalarFactor(m_rhs); * RhsBlasTraits::extractScalarFactor(m_rhs);
ei_product_triangular_matrix_matrix<Scalar, ei_product_triangular_matrix_matrix<Scalar, Index,
Mode, LhsIsTriangular, Mode, LhsIsTriangular,
(ei_traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate, (ei_traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
(ei_traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate, (ei_traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,

@ -33,34 +33,35 @@ template<typename Lhs, typename Rhs, typename Result, int Mode, bool ConjLhs, bo
struct ei_product_triangular_vector_selector<Lhs,Rhs,Result,Mode,ConjLhs,ConjRhs,ColMajor> struct ei_product_triangular_vector_selector<Lhs,Rhs,Result,Mode,ConjLhs,ConjRhs,ColMajor>
{ {
typedef typename Rhs::Scalar Scalar; typedef typename Rhs::Scalar Scalar;
typedef typename Rhs::Index Index;
enum { enum {
IsLower = ((Mode&Lower)==Lower), IsLower = ((Mode&Lower)==Lower),
HasUnitDiag = (Mode & UnitDiag)==UnitDiag HasUnitDiag = (Mode & UnitDiag)==UnitDiag
}; };
static EIGEN_DONT_INLINE void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits<Lhs>::Scalar alpha) static EIGEN_DONT_INLINE void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits<Lhs>::Scalar alpha)
{ {
static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
typename ei_conj_expr_if<ConjLhs,Lhs>::ret cjLhs(lhs); typename ei_conj_expr_if<ConjLhs,Lhs>::ret cjLhs(lhs);
typename ei_conj_expr_if<ConjRhs,Rhs>::ret cjRhs(rhs); typename ei_conj_expr_if<ConjRhs,Rhs>::ret cjRhs(rhs);
int size = lhs.cols(); Index size = lhs.cols();
for (int pi=0; pi<size; pi+=PanelWidth) for (Index pi=0; pi<size; pi+=PanelWidth)
{ {
int actualPanelWidth = std::min(PanelWidth, size-pi); Index actualPanelWidth = std::min(PanelWidth, size-pi);
for (int k=0; k<actualPanelWidth; ++k) for (Index k=0; k<actualPanelWidth; ++k)
{ {
int i = pi + k; Index i = pi + k;
int s = IsLower ? (HasUnitDiag ? i+1 : i ) : pi; Index s = IsLower ? (HasUnitDiag ? i+1 : i ) : pi;
int r = IsLower ? actualPanelWidth-k : k+1; Index r = IsLower ? actualPanelWidth-k : k+1;
if ((!HasUnitDiag) || (--r)>0) if ((!HasUnitDiag) || (--r)>0)
res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r); res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r);
if (HasUnitDiag) if (HasUnitDiag)
res.coeffRef(i) += alpha * cjRhs.coeff(i); res.coeffRef(i) += alpha * cjRhs.coeff(i);
} }
int r = IsLower ? size - pi - actualPanelWidth : pi; Index r = IsLower ? size - pi - actualPanelWidth : pi;
if (r>0) if (r>0)
{ {
int s = IsLower ? pi+actualPanelWidth : 0; Index s = IsLower ? pi+actualPanelWidth : 0;
ei_cache_friendly_product_colmajor_times_vector<ConjLhs,ConjRhs>( ei_cache_friendly_product_colmajor_times_vector<ConjLhs,ConjRhs>(
r, r,
&(lhs.const_cast_derived().coeffRef(s,pi)), lhs.outerStride(), &(lhs.const_cast_derived().coeffRef(s,pi)), lhs.outerStride(),
@ -76,33 +77,34 @@ template<typename Lhs, typename Rhs, typename Result, int Mode, bool ConjLhs, bo
struct ei_product_triangular_vector_selector<Lhs,Rhs,Result,Mode,ConjLhs,ConjRhs,RowMajor> struct ei_product_triangular_vector_selector<Lhs,Rhs,Result,Mode,ConjLhs,ConjRhs,RowMajor>
{ {
typedef typename Rhs::Scalar Scalar; typedef typename Rhs::Scalar Scalar;
typedef typename Rhs::Index Index;
enum { enum {
IsLower = ((Mode&Lower)==Lower), IsLower = ((Mode&Lower)==Lower),
HasUnitDiag = (Mode & UnitDiag)==UnitDiag HasUnitDiag = (Mode & UnitDiag)==UnitDiag
}; };
static void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits<Lhs>::Scalar alpha) static void run(const Lhs& lhs, const Rhs& rhs, Result& res, typename ei_traits<Lhs>::Scalar alpha)
{ {
static const int PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
typename ei_conj_expr_if<ConjLhs,Lhs>::ret cjLhs(lhs); typename ei_conj_expr_if<ConjLhs,Lhs>::ret cjLhs(lhs);
typename ei_conj_expr_if<ConjRhs,Rhs>::ret cjRhs(rhs); typename ei_conj_expr_if<ConjRhs,Rhs>::ret cjRhs(rhs);
int size = lhs.cols(); Index size = lhs.cols();
for (int pi=0; pi<size; pi+=PanelWidth) for (Index pi=0; pi<size; pi+=PanelWidth)
{ {
int actualPanelWidth = std::min(PanelWidth, size-pi); Index actualPanelWidth = std::min(PanelWidth, size-pi);
for (int k=0; k<actualPanelWidth; ++k) for (Index k=0; k<actualPanelWidth; ++k)
{ {
int i = pi + k; Index i = pi + k;
int s = IsLower ? pi : (HasUnitDiag ? i+1 : i); Index s = IsLower ? pi : (HasUnitDiag ? i+1 : i);
int r = IsLower ? k+1 : actualPanelWidth-k; Index r = IsLower ? k+1 : actualPanelWidth-k;
if ((!HasUnitDiag) || (--r)>0) if ((!HasUnitDiag) || (--r)>0)
res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum(); res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum();
if (HasUnitDiag) if (HasUnitDiag)
res.coeffRef(i) += alpha * cjRhs.coeff(i); res.coeffRef(i) += alpha * cjRhs.coeff(i);
} }
int r = IsLower ? pi : size - pi - actualPanelWidth; Index r = IsLower ? pi : size - pi - actualPanelWidth;
if (r>0) if (r>0)
{ {
int s = IsLower ? 0 : pi + actualPanelWidth; Index s = IsLower ? 0 : pi + actualPanelWidth;
Block<Result,Dynamic,1> target(res,pi,0,actualPanelWidth,1); Block<Result,Dynamic,1> target(res,pi,0,actualPanelWidth,1);
ei_cache_friendly_product_rowmajor_times_vector<ConjLhs,ConjRhs>( ei_cache_friendly_product_rowmajor_times_vector<ConjLhs,ConjRhs>(
&(lhs.const_cast_derived().coeffRef(pi,s)), lhs.outerStride(), &(lhs.const_cast_derived().coeffRef(pi,s)), lhs.outerStride(),
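A similar hedged sketch for the triangular matrix * vector paths specialized above (column-major and row-major selectors); the include and typedefs are assumptions, not taken from the patch.

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  Eigen::VectorXd x = Eigen::VectorXd::Random(5);
  Eigen::VectorXd y = A.triangularView<Eigen::Upper>() * x;  // only A's upper triangle is read
  return int(y.size());
}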

View File

@ -26,16 +26,16 @@
#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H #define EIGEN_TRIANGULAR_SOLVER_MATRIX_H
// if the rhs is row major, let's transpose the product // if the rhs is row major, let's transpose the product
template <typename Scalar, int Side, int Mode, bool Conjugate, int TriStorageOrder> template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
struct ei_triangular_solve_matrix<Scalar,Side,Mode,Conjugate,TriStorageOrder,RowMajor> struct ei_triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int size, int cols, Index size, Index cols,
const Scalar* tri, int triStride, const Scalar* tri, Index triStride,
Scalar* _other, int otherStride) Scalar* _other, Index otherStride)
{ {
ei_triangular_solve_matrix< ei_triangular_solve_matrix<
Scalar, Side==OnTheLeft?OnTheRight:OnTheLeft, Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft,
(Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper), (Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper),
NumTraits<Scalar>::IsComplex && Conjugate, NumTraits<Scalar>::IsComplex && Conjugate,
TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor> TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor>
@ -45,17 +45,17 @@ struct ei_triangular_solve_matrix<Scalar,Side,Mode,Conjugate,TriStorageOrder,Row
/* Optimized triangular solver with multiple right hand sides and the triangular matrix on the left /* Optimized triangular solver with multiple right hand sides and the triangular matrix on the left
*/ */
template <typename Scalar, int Mode, bool Conjugate, int TriStorageOrder> template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor> struct ei_triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int size, int otherSize, Index size, Index otherSize,
const Scalar* _tri, int triStride, const Scalar* _tri, Index triStride,
Scalar* _other, int otherStride) Scalar* _other, Index otherStride)
{ {
int cols = otherSize; Index cols = otherSize;
ei_const_blas_data_mapper<Scalar, TriStorageOrder> tri(_tri,triStride); ei_const_blas_data_mapper<Scalar, Index, TriStorageOrder> tri(_tri,triStride);
ei_blas_data_mapper<Scalar, ColMajor> other(_other,otherStride); ei_blas_data_mapper<Scalar, Index, ColMajor> other(_other,otherStride);
typedef ei_product_blocking_traits<Scalar> Blocking; typedef ei_product_blocking_traits<Scalar> Blocking;
enum { enum {
@ -63,8 +63,8 @@ struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrde
IsLower = (Mode&Lower) == Lower IsLower = (Mode&Lower) == Lower
}; };
int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,size); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,size); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols; std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*cols;
@ -72,15 +72,15 @@ struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrde
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr; Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
ei_conj_if<Conjugate> conj; ei_conj_if<Conjugate> conj;
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<Conjugate,false> > gebp_kernel; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<Conjugate,false> > gebp_kernel;
ei_gemm_pack_lhs<Scalar,Blocking::mr,TriStorageOrder> pack_lhs; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr,TriStorageOrder> pack_lhs;
ei_gemm_pack_rhs<Scalar, Blocking::nr, ColMajor, true> pack_rhs; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr, ColMajor, true> pack_rhs;
for(int k2=IsLower ? 0 : size; for(Index k2=IsLower ? 0 : size;
IsLower ? k2<size : k2>0; IsLower ? k2<size : k2>0;
IsLower ? k2+=kc : k2-=kc) IsLower ? k2+=kc : k2-=kc)
{ {
const int actual_kc = std::min(IsLower ? size-k2 : k2, kc); const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc);
// We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel, // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
// and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
@ -97,45 +97,45 @@ struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrde
// and the remaining small part A2 which is processed using gebp with appropriate block strides // and the remaining small part A2 which is processed using gebp with appropriate block strides
{ {
// for each small vertical panels of lhs // for each small vertical panels of lhs
for (int k1=0; k1<actual_kc; k1+=SmallPanelWidth) for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
{ {
int actualPanelWidth = std::min<int>(actual_kc-k1, SmallPanelWidth); Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
// tr solve // tr solve
for (int k=0; k<actualPanelWidth; ++k) for (Index k=0; k<actualPanelWidth; ++k)
{ {
// TODO write a small kernel handling this (can be shared with trsv) // TODO write a small kernel handling this (can be shared with trsv)
int i = IsLower ? k2+k1+k : k2-k1-k-1; Index i = IsLower ? k2+k1+k : k2-k1-k-1;
int s = IsLower ? k2+k1 : i+1; Index s = IsLower ? k2+k1 : i+1;
int rs = actualPanelWidth - k - 1; // remaining size Index rs = actualPanelWidth - k - 1; // remaining size
Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i)); Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));
for (int j=0; j<cols; ++j) for (Index j=0; j<cols; ++j)
{ {
if (TriStorageOrder==RowMajor) if (TriStorageOrder==RowMajor)
{ {
Scalar b = 0; Scalar b = 0;
const Scalar* l = &tri(i,s); const Scalar* l = &tri(i,s);
Scalar* r = &other(s,j); Scalar* r = &other(s,j);
for (int i3=0; i3<k; ++i3) for (Index i3=0; i3<k; ++i3)
b += conj(l[i3]) * r[i3]; b += conj(l[i3]) * r[i3];
other(i,j) = (other(i,j) - b)*a; other(i,j) = (other(i,j) - b)*a;
} }
else else
{ {
int s = IsLower ? i+1 : i-rs; Index s = IsLower ? i+1 : i-rs;
Scalar b = (other(i,j) *= a); Scalar b = (other(i,j) *= a);
Scalar* r = &other(s,j); Scalar* r = &other(s,j);
const Scalar* l = &tri(s,i); const Scalar* l = &tri(s,i);
for (int i3=0;i3<rs;++i3) for (Index i3=0;i3<rs;++i3)
r[i3] -= b * conj(l[i3]); r[i3] -= b * conj(l[i3]);
} }
} }
} }
int lengthTarget = actual_kc-k1-actualPanelWidth; Index lengthTarget = actual_kc-k1-actualPanelWidth;
int startBlock = IsLower ? k2+k1 : k2-k1-actualPanelWidth; Index startBlock = IsLower ? k2+k1 : k2-k1-actualPanelWidth;
int blockBOffset = IsLower ? k1 : lengthTarget; Index blockBOffset = IsLower ? k1 : lengthTarget;
// update the respective rows of B from other // update the respective rows of B from other
pack_rhs(blockB, _other+startBlock, otherStride, -1, actualPanelWidth, cols, actual_kc, blockBOffset); pack_rhs(blockB, _other+startBlock, otherStride, -1, actualPanelWidth, cols, actual_kc, blockBOffset);
@ -143,7 +143,7 @@ struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrde
// GEBP // GEBP
if (lengthTarget>0) if (lengthTarget>0)
{ {
int startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc; Index startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc;
pack_lhs(blockA, &tri(startTarget,startBlock), triStride, actualPanelWidth, lengthTarget); pack_lhs(blockA, &tri(startTarget,startBlock), triStride, actualPanelWidth, lengthTarget);
@ -155,11 +155,11 @@ struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrde
// R2 = A2 * B => GEPP // R2 = A2 * B => GEPP
{ {
int start = IsLower ? k2+kc : 0; Index start = IsLower ? k2+kc : 0;
int end = IsLower ? size : k2-kc; Index end = IsLower ? size : k2-kc;
for(int i2=start; i2<end; i2+=mc) for(Index i2=start; i2<end; i2+=mc)
{ {
const int actual_mc = std::min(mc,end-i2); const Index actual_mc = std::min(mc,end-i2);
if (actual_mc>0) if (actual_mc>0)
{ {
pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc); pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);
@ -177,17 +177,17 @@ struct ei_triangular_solve_matrix<Scalar,OnTheLeft,Mode,Conjugate,TriStorageOrde
/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right /* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
*/ */
template <typename Scalar, int Mode, bool Conjugate, int TriStorageOrder> template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
struct ei_triangular_solve_matrix<Scalar,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor> struct ei_triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
{ {
static EIGEN_DONT_INLINE void run( static EIGEN_DONT_INLINE void run(
int size, int otherSize, Index size, Index otherSize,
const Scalar* _tri, int triStride, const Scalar* _tri, Index triStride,
Scalar* _other, int otherStride) Scalar* _other, Index otherStride)
{ {
int rows = otherSize; Index rows = otherSize;
ei_const_blas_data_mapper<Scalar, TriStorageOrder> rhs(_tri,triStride); ei_const_blas_data_mapper<Scalar, Index, TriStorageOrder> rhs(_tri,triStride);
ei_blas_data_mapper<Scalar, ColMajor> lhs(_other,otherStride); ei_blas_data_mapper<Scalar, Index, ColMajor> lhs(_other,otherStride);
typedef ei_product_blocking_traits<Scalar> Blocking; typedef ei_product_blocking_traits<Scalar> Blocking;
enum { enum {
@ -196,8 +196,8 @@ struct ei_triangular_solve_matrix<Scalar,OnTheRight,Mode,Conjugate,TriStorageOrd
IsLower = (Mode&Lower) == Lower IsLower = (Mode&Lower) == Lower
}; };
int kc = std::min<int>(Blocking::Max_kc/4,size); // cache block size along the K direction Index kc = std::min<Index>(Blocking::Max_kc/4,size); // cache block size along the K direction
int mc = std::min<int>(Blocking::Max_mc,size); // cache block size along the M direction Index mc = std::min<Index>(Blocking::Max_mc,size); // cache block size along the M direction
Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc); Scalar* blockA = ei_aligned_stack_new(Scalar, kc*mc);
std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size; std::size_t sizeB = kc*Blocking::PacketSize*Blocking::nr + kc*size;
@ -205,20 +205,20 @@ struct ei_triangular_solve_matrix<Scalar,OnTheRight,Mode,Conjugate,TriStorageOrd
Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr; Scalar* blockB = allocatedBlockB + kc*Blocking::PacketSize*Blocking::nr;
ei_conj_if<Conjugate> conj; ei_conj_if<Conjugate> conj;
ei_gebp_kernel<Scalar, Blocking::mr, Blocking::nr, ei_conj_helper<false,Conjugate> > gebp_kernel; ei_gebp_kernel<Scalar, Index, Blocking::mr, Blocking::nr, ei_conj_helper<false,Conjugate> > gebp_kernel;
ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder> pack_rhs; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder> pack_rhs;
ei_gemm_pack_rhs<Scalar,Blocking::nr,RhsStorageOrder,true> pack_rhs_panel; ei_gemm_pack_rhs<Scalar, Index, Blocking::nr,RhsStorageOrder,true> pack_rhs_panel;
ei_gemm_pack_lhs<Scalar, Blocking::mr, ColMajor, false, true> pack_lhs_panel; ei_gemm_pack_lhs<Scalar, Index, Blocking::mr, ColMajor, false, true> pack_lhs_panel;
for(int k2=IsLower ? size : 0; for(Index k2=IsLower ? size : 0;
IsLower ? k2>0 : k2<size; IsLower ? k2>0 : k2<size;
IsLower ? k2-=kc : k2+=kc) IsLower ? k2-=kc : k2+=kc)
{ {
const int actual_kc = std::min(IsLower ? k2 : size-k2, kc); const Index actual_kc = std::min(IsLower ? k2 : size-k2, kc);
int actual_k2 = IsLower ? k2-actual_kc : k2 ; Index actual_k2 = IsLower ? k2-actual_kc : k2 ;
int startPanel = IsLower ? 0 : k2+actual_kc; Index startPanel = IsLower ? 0 : k2+actual_kc;
int rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc; Index rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc;
Scalar* geb = blockB+actual_kc*actual_kc; Scalar* geb = blockB+actual_kc*actual_kc;
if (rs>0) pack_rhs(geb, &rhs(actual_k2,startPanel), triStride, -1, actual_kc, rs); if (rs>0) pack_rhs(geb, &rhs(actual_k2,startPanel), triStride, -1, actual_kc, rs);
@ -226,12 +226,12 @@ struct ei_triangular_solve_matrix<Scalar,OnTheRight,Mode,Conjugate,TriStorageOrd
// triangular packing (we only pack the panels off the diagonal, // triangular packing (we only pack the panels off the diagonal,
// neglecting the blocks overlapping the diagonal) // neglecting the blocks overlapping the diagonal)
{ {
for (int j2=0; j2<actual_kc; j2+=SmallPanelWidth) for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
{ {
int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth); Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
int actual_j2 = actual_k2 + j2; Index actual_j2 = actual_k2 + j2;
int panelOffset = IsLower ? j2+actualPanelWidth : 0; Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
int panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2; Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
if (panelLength>0) if (panelLength>0)
pack_rhs_panel(blockB+j2*actual_kc, pack_rhs_panel(blockB+j2*actual_kc,
@ -241,24 +241,24 @@ struct ei_triangular_solve_matrix<Scalar,OnTheRight,Mode,Conjugate,TriStorageOrd
} }
} }
for(int i2=0; i2<rows; i2+=mc) for(Index i2=0; i2<rows; i2+=mc)
{ {
const int actual_mc = std::min(mc,rows-i2); const Index actual_mc = std::min(mc,rows-i2);
// triangular solver kernel // triangular solver kernel
{ {
// for each small block of the diagonal (=> vertical panels of rhs) // for each small block of the diagonal (=> vertical panels of rhs)
for (int j2 = IsLower for (Index j2 = IsLower
? (actual_kc - ((actual_kc%SmallPanelWidth) ? (actual_kc%SmallPanelWidth) ? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth)
: SmallPanelWidth)) : Index(SmallPanelWidth)))
: 0; : 0;
IsLower ? j2>=0 : j2<actual_kc; IsLower ? j2>=0 : j2<actual_kc;
IsLower ? j2-=SmallPanelWidth : j2+=SmallPanelWidth) IsLower ? j2-=SmallPanelWidth : j2+=SmallPanelWidth)
{ {
int actualPanelWidth = std::min<int>(actual_kc-j2, SmallPanelWidth); Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
int absolute_j2 = actual_k2 + j2; Index absolute_j2 = actual_k2 + j2;
int panelOffset = IsLower ? j2+actualPanelWidth : 0; Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
int panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2; Index panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
// GEBP // GEBP
if(panelLength>0) if(panelLength>0)
@ -272,20 +272,20 @@ struct ei_triangular_solve_matrix<Scalar,OnTheRight,Mode,Conjugate,TriStorageOrd
} }
// unblocked triangular solve // unblocked triangular solve
for (int k=0; k<actualPanelWidth; ++k) for (Index k=0; k<actualPanelWidth; ++k)
{ {
int j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k; Index j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;
Scalar* r = &lhs(i2,j); Scalar* r = &lhs(i2,j);
for (int k3=0; k3<k; ++k3) for (Index k3=0; k3<k; ++k3)
{ {
Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j)); Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j));
Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3); Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3);
for (int i=0; i<actual_mc; ++i) for (Index i=0; i<actual_mc; ++i)
r[i] -= a[i] * b; r[i] -= a[i] * b;
} }
Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j)); Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
for (int i=0; i<actual_mc; ++i) for (Index i=0; i<actual_mc; ++i)
r[i] *= b; r[i] *= b;
} }
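As a rough usage sketch (an assumption for illustration, not taken from the patch), the blocked solvers above back the high-level solveInPlace() API on triangular views; the example assumes the random matrix has a usable nonzero diagonal.

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd L = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 2);
  L.triangularView<Eigen::Lower>().solveInPlace(B);  // B <- L^{-1} * B (the OnTheLeft case)
  return int(B.cols());
}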

View File

@ -29,29 +29,29 @@
// implement and control fast level 2 and level 3 BLAS-like routines. // implement and control fast level 2 and level 3 BLAS-like routines.
// forward declarations // forward declarations
template<typename Scalar, int mr, int nr, typename Conj> template<typename Scalar, typename Index, int mr, int nr, typename Conj>
struct ei_gebp_kernel; struct ei_gebp_kernel;
template<typename Scalar, int nr, int StorageOrder, bool PanelMode=false> template<typename Scalar, typename Index, int nr, int StorageOrder, bool PanelMode=false>
struct ei_gemm_pack_rhs; struct ei_gemm_pack_rhs;
template<typename Scalar, int mr, int StorageOrder, bool Conjugate = false, bool PanelMode = false> template<typename Scalar, typename Index, int mr, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct ei_gemm_pack_lhs; struct ei_gemm_pack_lhs;
template< template<
typename Scalar, typename Scalar, typename Index,
int LhsStorageOrder, bool ConjugateLhs, int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int RhsStorageOrder, bool ConjugateRhs,
int ResStorageOrder> int ResStorageOrder>
struct ei_general_matrix_matrix_product; struct ei_general_matrix_matrix_product;
template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename RhsType> template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename RhsType>
static void ei_cache_friendly_product_colmajor_times_vector( static void ei_cache_friendly_product_colmajor_times_vector(
int size, const Scalar* lhs, int lhsStride, const RhsType& rhs, Scalar* res, Scalar alpha); Index size, const Scalar* lhs, Index lhsStride, const RhsType& rhs, Scalar* res, Scalar alpha);
template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename ResType> template<bool ConjugateLhs, bool ConjugateRhs, typename Scalar, typename Index, typename ResType>
static void ei_cache_friendly_product_rowmajor_times_vector( static void ei_cache_friendly_product_rowmajor_times_vector(
const Scalar* lhs, int lhsStride, const Scalar* rhs, int rhsSize, ResType& res, Scalar alpha); const Scalar* lhs, Index lhsStride, const Scalar* rhs, Index rhsSize, ResType& res, Scalar alpha);
// Provides scalar/packet-wise product and product with accumulation // Provides scalar/packet-wise product and product with accumulation
// with optional conjugation of the arguments. // with optional conjugation of the arguments.
@ -98,29 +98,29 @@ template<> struct ei_conj_helper<true,true>
// Lightweight helper class to access matrix coefficients. // Lightweight helper class to access matrix coefficients.
// Yes, this is somewhat redundant with Map<>, but this version is much lighter, // Yes, this is somewhat redundant with Map<>, but this version is much lighter,
// and so hopefully better compilation performance (time and code quality). // and so hopefully better compilation performance (time and code quality).
template<typename Scalar, int StorageOrder> template<typename Scalar, typename Index, int StorageOrder>
class ei_blas_data_mapper class ei_blas_data_mapper
{ {
public: public:
ei_blas_data_mapper(Scalar* data, int stride) : m_data(data), m_stride(stride) {} ei_blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
EIGEN_STRONG_INLINE Scalar& operator()(int i, int j) EIGEN_STRONG_INLINE Scalar& operator()(Index i, Index j)
{ return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; } { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
protected: protected:
Scalar* EIGEN_RESTRICT m_data; Scalar* EIGEN_RESTRICT m_data;
int m_stride; Index m_stride;
}; };
// lightweight helper class to access matrix coefficients (const version) // lightweight helper class to access matrix coefficients (const version)
template<typename Scalar, int StorageOrder> template<typename Scalar, typename Index, int StorageOrder>
class ei_const_blas_data_mapper class ei_const_blas_data_mapper
{ {
public: public:
ei_const_blas_data_mapper(const Scalar* data, int stride) : m_data(data), m_stride(stride) {} ei_const_blas_data_mapper(const Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
EIGEN_STRONG_INLINE const Scalar& operator()(int i, int j) const EIGEN_STRONG_INLINE const Scalar& operator()(Index i, Index j) const
{ return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; } { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
protected: protected:
const Scalar* EIGEN_RESTRICT m_data; const Scalar* EIGEN_RESTRICT m_data;
int m_stride; Index m_stride;
}; };
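The addressing rule implemented by these mappers is compact enough to restate standalone; the following is a hypothetical re-sketch (names invented here, not Eigen code) of the same column/row-major indexing, now parameterized on an Index type.

#include <cstddef>

template<typename Scalar, typename Index, bool IsRowMajor>
struct mapper_sketch
{
  Scalar* data;
  Index   stride;
  Scalar& operator()(Index i, Index j)
  { return data[IsRowMajor ? j + i*stride : i + j*stride]; }
};

int main()
{
  double buf[6] = {0, 1, 2, 3, 4, 5};                    // a 2x3 column-major block
  mapper_sketch<double, std::ptrdiff_t, false> m = { buf, 2 };
  return int(m(1, 2));                                   // row 1, col 2 -> buf[1 + 2*2] == 5
}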
// Defines various constant controlling level 3 blocking // Defines various constant controlling level 3 blocking

View File

@ -94,6 +94,14 @@
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor #define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
#endif #endif
#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#endif
#ifndef EIGEN_DEFAULT_SPARSE_INDEX_TYPE
#define EIGEN_DEFAULT_SPARSE_INDEX_TYPE int
#endif
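Judging from the #ifndef guards above, the default dense index type can presumably be overridden per project by defining the macro before including any Eigen header; the following sketch is an assumption based on those guards, not something demonstrated by the patch.

#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#include <Eigen/Core>

int main()
{
  Eigen::MatrixXd m(2, 2);
  Eigen::MatrixXd::Index n = m.rows();  // Index is now int instead of std::ptrdiff_t
  return int(n);
}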
/** Allows one to disable some optimizations which might affect the accuracy of the result. /** Allows one to disable some optimizations which might affect the accuracy of the result.
* Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them. * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
* They currently include: * They currently include:
@ -266,6 +274,8 @@
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::ei_nested<Derived>::type Nested; \ typedef typename Eigen::ei_nested<Derived>::type Nested; \
typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::ei_index<StorageKind>::type Index; \
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \ enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \ ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
Flags = Eigen::ei_traits<Derived>::Flags, \ Flags = Eigen::ei_traits<Derived>::Flags, \
@ -281,6 +291,8 @@
typedef typename Base::PacketScalar PacketScalar; \ typedef typename Base::PacketScalar PacketScalar; \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::ei_nested<Derived>::type Nested; \ typedef typename Eigen::ei_nested<Derived>::type Nested; \
typedef typename Eigen::ei_traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::ei_index<StorageKind>::type Index; \
enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \ enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \ ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
MaxRowsAtCompileTime = Eigen::ei_traits<Derived>::MaxRowsAtCompileTime, \ MaxRowsAtCompileTime = Eigen::ei_traits<Derived>::MaxRowsAtCompileTime, \
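The practical effect of the added StorageKind/Index typedefs is that user code can write loop counters in the matrix's own index type; a small hedged sketch (assuming <Eigen/Core> and a plain MatrixXd):

#include <Eigen/Core>

void scale_diagonal(Eigen::MatrixXd& m, double s)
{
  typedef Eigen::MatrixXd::Index Index;        // std::ptrdiff_t with the default macro above
  for (Index i = 0; i < m.rows() && i < m.cols(); ++i)
    m(i, i) *= s;
}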

View File

@ -379,10 +379,10 @@ template<typename T, bool Align> inline T* ei_conditional_aligned_realloc_new(T*
* other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
* example with Scalar=double on certain 32-bit platforms, see bug #79. * example with Scalar=double on certain 32-bit platforms, see bug #79.
* *
* There is also the variant ei_first_aligned(const MatrixBase&, Integer) defined in Coeffs.h. * There is also the variant ei_first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
*/ */
template<typename Scalar, typename Integer> template<typename Scalar, typename Index>
inline static Integer ei_first_aligned(const Scalar* array, Integer size) inline static Index ei_first_aligned(const Scalar* array, Index size)
{ {
typedef typename ei_packet_traits<Scalar>::type Packet; typedef typename ei_packet_traits<Scalar>::type Packet;
enum { PacketSize = ei_packet_traits<Scalar>::size, enum { PacketSize = ei_packet_traits<Scalar>::size,
@ -403,7 +403,7 @@ inline static Integer ei_first_aligned(const Scalar* array, Integer size)
} }
else else
{ {
return std::min<Integer>( (PacketSize - (Integer((size_t(array)/sizeof(Scalar))) & PacketAlignedMask)) return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
& PacketAlignedMask, size); & PacketAlignedMask, size);
} }
} }
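A standalone re-sketch of the same alignment computation (hypothetical helper name, assuming 16-byte packets holding 4 floats) may make the bit arithmetic easier to follow:

#include <algorithm>
#include <cstddef>

template<typename Index>
Index first_aligned_sketch(const float* array, Index size)
{
  const Index PacketSize = 4, PacketAlignedMask = PacketSize - 1;
  return std::min<Index>((PacketSize - (Index(std::size_t(array) / sizeof(float))
                                        & PacketAlignedMask)) & PacketAlignedMask,
                         size);
}

int main()
{
  float data[16];
  // If data happens to be 16-byte aligned, data+1 needs 3 more floats to reach alignment.
  return int(first_aligned_sketch<std::ptrdiff_t>(data + 1, 15));
}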

View File

@ -42,27 +42,35 @@ class ei_no_assignment_operator
ei_no_assignment_operator& operator=(const ei_no_assignment_operator&); ei_no_assignment_operator& operator=(const ei_no_assignment_operator&);
}; };
/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around an int variable that template<typename StorageKind> struct ei_index {};
template<>
struct ei_index<Dense>
{ typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE type; };
typedef typename ei_index<Dense>::type DenseIndex;
/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
* can be accessed using value() and setValue(). * can be accessed using value() and setValue().
* Otherwise, this class is an empty structure and value() just returns the template parameter Value. * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
*/ */
template<int Value> class ei_int_if_dynamic template<typename T, int Value> class ei_variable_if_dynamic
{ {
public: public:
EIGEN_EMPTY_STRUCT_CTOR(ei_int_if_dynamic) EIGEN_EMPTY_STRUCT_CTOR(ei_variable_if_dynamic)
explicit ei_int_if_dynamic(int v) { EIGEN_ONLY_USED_FOR_DEBUG(v); ei_assert(v == Value); } explicit ei_variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); ei_assert(v == T(Value)); }
static int value() { return Value; } static T value() { return T(Value); }
void setValue(int) {} void setValue(T) {}
}; };
template<> class ei_int_if_dynamic<Dynamic> template<typename T> class ei_variable_if_dynamic<T, Dynamic>
{ {
int m_value; T m_value;
ei_int_if_dynamic() { ei_assert(false); } ei_variable_if_dynamic() { ei_assert(false); }
public: public:
explicit ei_int_if_dynamic(int value) : m_value(value) {} explicit ei_variable_if_dynamic(T value) : m_value(value) {}
int value() const { return m_value; } T value() const { return m_value; }
void setValue(int value) { m_value = value; } void setValue(T value) { m_value = value; }
}; };
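The idea behind ei_variable_if_dynamic generalizes the old int-only helper to any index type; a hypothetical standalone re-sketch (simplified, names invented here) of the same compressed-storage trick:

#include <cassert>
#include <cstddef>

enum { Dynamic = -1 };

template<typename T, int Value> struct value_if_dynamic
{
  explicit value_if_dynamic(T v) { assert(v == T(Value)); (void)v; }
  static T value() { return T(Value); }
};

template<typename T> struct value_if_dynamic<T, Dynamic>
{
  T m_value;
  explicit value_if_dynamic(T v) : m_value(v) {}
  T value() const { return m_value; }
};

int main()
{
  value_if_dynamic<std::ptrdiff_t, 3>       fixed(3);  // carries no runtime state
  value_if_dynamic<std::ptrdiff_t, Dynamic> dyn(7);    // stores the value
  return int(fixed.value() + dyn.value());             // 10
}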
template<typename T> struct ei_functor_traits template<typename T> struct ei_functor_traits

View File

@ -40,11 +40,11 @@
* when it is applied to a fixed-size matrix, it inherits a fixed maximal size, * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
* which means that evaluating it does not cause a dynamic memory allocation. * which means that evaluating it does not cause a dynamic memory allocation.
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
inline Block<Derived> DenseBase<Derived> inline Block<Derived> DenseBase<Derived>
::corner(CornerType type, int cRows, int cCols) ::corner(CornerType type, Index cRows, Index cCols)
{ {
switch(type) switch(type)
{ {
@ -61,10 +61,10 @@ inline Block<Derived> DenseBase<Derived>
} }
} }
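A hedged usage sketch for the dynamic-size corner() overloads above (assuming the CornerType enumerator Eigen::TopLeft and <Eigen/Dense>; equivalently one could use topLeftCorner(2,3)):

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(5, 5);
  Eigen::MatrixXd c = m.corner(Eigen::TopLeft, 2, 3);  // 2x3 block anchored at the top-left
  return int(c.cols());
}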
/** This is the const version of corner(CornerType, int, int).*/ /** This is the const version of corner(CornerType, Index, Index).*/
template<typename Derived> template<typename Derived>
inline const Block<Derived> inline const Block<Derived>
DenseBase<Derived>::corner(CornerType type, int cRows, int cCols) const DenseBase<Derived>::corner(CornerType type, Index cRows, Index cCols) const
{ {
switch(type) switch(type)
{ {
@ -91,7 +91,7 @@ DenseBase<Derived>::corner(CornerType type, int cRows, int cCols) const
* Example: \include MatrixBase_template_int_int_corner_enum.cpp * Example: \include MatrixBase_template_int_int_corner_enum.cpp
* Output: \verbinclude MatrixBase_template_int_int_corner_enum.out * Output: \verbinclude MatrixBase_template_int_int_corner_enum.out
* *
* \sa class Block, block(int,int,int,int) * \sa class Block, block(Index,Index,Index,Index)
*/ */
template<typename Derived> template<typename Derived>
template<int CRows, int CCols> template<int CRows, int CCols>

View File

@ -44,6 +44,7 @@ struct ei_traits<Minor<MatrixType> >
{ {
typedef typename ei_nested<MatrixType>::type MatrixTypeNested; typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested; typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
typedef typename MatrixType::StorageKind StorageKind;
enum { enum {
RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ? RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ?
int(MatrixType::RowsAtCompileTime) - 1 : Dynamic, int(MatrixType::RowsAtCompileTime) - 1 : Dynamic,
@ -68,7 +69,7 @@ template<typename MatrixType> class Minor
EIGEN_DENSE_PUBLIC_INTERFACE(Minor) EIGEN_DENSE_PUBLIC_INTERFACE(Minor)
inline Minor(const MatrixType& matrix, inline Minor(const MatrixType& matrix,
int row, int col) Index row, Index col)
: m_matrix(matrix), m_row(row), m_col(col) : m_matrix(matrix), m_row(row), m_col(col)
{ {
ei_assert(row >= 0 && row < matrix.rows() ei_assert(row >= 0 && row < matrix.rows()
@ -77,22 +78,22 @@ template<typename MatrixType> class Minor
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor)
inline int rows() const { return m_matrix.rows() - 1; } inline Index rows() const { return m_matrix.rows() - 1; }
inline int cols() const { return m_matrix.cols() - 1; } inline Index cols() const { return m_matrix.cols() - 1; }
inline Scalar& coeffRef(int row, int col) inline Scalar& coeffRef(Index row, Index col)
{ {
return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col)); return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col));
} }
inline const Scalar coeff(int row, int col) const inline const Scalar coeff(Index row, Index col) const
{ {
return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col)); return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col));
} }
protected: protected:
const typename MatrixType::Nested m_matrix; const typename MatrixType::Nested m_matrix;
const int m_row, m_col; const Index m_row, m_col;
}; };
/** \nonstableyet /** \nonstableyet
@ -107,7 +108,7 @@ template<typename MatrixType> class Minor
*/ */
template<typename Derived> template<typename Derived>
inline Minor<Derived> inline Minor<Derived>
MatrixBase<Derived>::minor(int row, int col) MatrixBase<Derived>::minor(Index row, Index col)
{ {
return Minor<Derived>(derived(), row, col); return Minor<Derived>(derived(), row, col);
} }
@ -116,7 +117,7 @@ MatrixBase<Derived>::minor(int row, int col)
* This is the const version of minor(). */ * This is the const version of minor(). */
template<typename Derived> template<typename Derived>
inline const Minor<Derived> inline const Minor<Derived>
MatrixBase<Derived>::minor(int row, int col) const MatrixBase<Derived>::minor(Index row, Index col) const
{ {
return Minor<Derived>(derived(), row, col); return Minor<Derived>(derived(), row, col);
} }

View File

@ -26,37 +26,37 @@
#ifndef EIGEN_VECTORBLOCK2_H #ifndef EIGEN_VECTORBLOCK2_H
#define EIGEN_VECTORBLOCK2_H #define EIGEN_VECTORBLOCK2_H
/** \deprecated use DenseBase::head(int) */ /** \deprecated use DenseBase::head(Index) */
template<typename Derived> template<typename Derived>
inline VectorBlock<Derived> inline VectorBlock<Derived>
MatrixBase<Derived>::start(int size) MatrixBase<Derived>::start(Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size); return VectorBlock<Derived>(derived(), 0, size);
} }
/** \deprecated use DenseBase::head(int) */ /** \deprecated use DenseBase::head(Index) */
template<typename Derived> template<typename Derived>
inline const VectorBlock<Derived> inline const VectorBlock<Derived>
MatrixBase<Derived>::start(int size) const MatrixBase<Derived>::start(Index size) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), 0, size); return VectorBlock<Derived>(derived(), 0, size);
} }
/** \deprecated use DenseBase::tail(int) */ /** \deprecated use DenseBase::tail(Index) */
template<typename Derived> template<typename Derived>
inline VectorBlock<Derived> inline VectorBlock<Derived>
MatrixBase<Derived>::end(int size) MatrixBase<Derived>::end(Index size)
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size); return VectorBlock<Derived>(derived(), this->size() - size, size);
} }
/** \deprecated use DenseBase::tail(int) */ /** \deprecated use DenseBase::tail(Index) */
template<typename Derived> template<typename Derived>
inline const VectorBlock<Derived> inline const VectorBlock<Derived>
MatrixBase<Derived>::end(int size) const MatrixBase<Derived>::end(Index size) const
{ {
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return VectorBlock<Derived>(derived(), this->size() - size, size); return VectorBlock<Derived>(derived(), this->size() - size, size);
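A migration sketch for the deprecated start()/end() API above (assumed typical usage, not taken from the patch): the replacements now take an Index as well.

#include <Eigen/Dense>

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::Random(10);
  v.head(3).setZero();  // preferred over the deprecated v.start(3)
  v.tail(3).setZero();  // preferred over the deprecated v.end(3)
  return int(v.size());
}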

View File

@ -68,6 +68,7 @@ template<typename _MatrixType> class ComplexEigenSolver
/** \brief Scalar type for matrices of type \p _MatrixType. */ /** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
/** \brief Complex scalar type for \p _MatrixType. /** \brief Complex scalar type for \p _MatrixType.
* *
@ -110,7 +111,7 @@ template<typename _MatrixType> class ComplexEigenSolver
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa ComplexEigenSolver() * \sa ComplexEigenSolver()
*/ */
ComplexEigenSolver(int size) ComplexEigenSolver(Index size)
: m_eivec(size, size), : m_eivec(size, size),
m_eivalues(size), m_eivalues(size),
m_schur(size), m_schur(size),
@ -216,7 +217,7 @@ void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
{ {
// this code is inspired from Jampack // this code is inspired from Jampack
assert(matrix.cols() == matrix.rows()); assert(matrix.cols() == matrix.rows());
const int n = matrix.cols(); const Index n = matrix.cols();
const RealScalar matrixnorm = matrix.norm(); const RealScalar matrixnorm = matrix.norm();
// Step 1: Do a complex Schur decomposition, A = U T U^* // Step 1: Do a complex Schur decomposition, A = U T U^*
@ -227,11 +228,11 @@ void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
// Step 2: Compute X such that T = X D X^(-1), where D is the diagonal of T. // Step 2: Compute X such that T = X D X^(-1), where D is the diagonal of T.
// The matrix X is unit triangular. // The matrix X is unit triangular.
m_matX = EigenvectorType::Zero(n, n); m_matX = EigenvectorType::Zero(n, n);
for(int k=n-1 ; k>=0 ; k--) for(Index k=n-1 ; k>=0 ; k--)
{ {
m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0); m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
// Compute X(i,k) using the (i,k) entry of the equation X T = D X // Compute X(i,k) using the (i,k) entry of the equation X T = D X
for(int i=k-1 ; i>=0 ; i--) for(Index i=k-1 ; i>=0 ; i--)
{ {
m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k); m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
if(k-i-1>0) if(k-i-1>0)
@ -250,16 +251,16 @@ void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
// Step 3: Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1) // Step 3: Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
m_eivec.noalias() = m_schur.matrixU() * m_matX; m_eivec.noalias() = m_schur.matrixU() * m_matX;
// .. and normalize the eigenvectors // .. and normalize the eigenvectors
for(int k=0 ; k<n ; k++) for(Index k=0 ; k<n ; k++)
{ {
m_eivec.col(k).normalize(); m_eivec.col(k).normalize();
} }
m_isInitialized = true; m_isInitialized = true;
// Step 4: Sort the eigenvalues // Step 4: Sort the eigenvalues
for (int i=0; i<n; i++) for (Index i=0; i<n; i++)
{ {
int k; Index k;
m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k); m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
if (k != 0) if (k != 0)
{ {
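A minimal usage sketch of the solver whose compute() steps are annotated above (the includes and stream output are assumptions for illustration):

#include <iostream>
#include <Eigen/Eigenvalues>

int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(4, 4);
  Eigen::ComplexEigenSolver<Eigen::MatrixXcd> ces;
  ces.compute(A);                            // Schur decomposition, back-substitution, sort
  std::cout << ces.eigenvalues()  << "\n";
  std::cout << ces.eigenvectors() << "\n";
  return 0;
}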

View File

@ -71,8 +71,8 @@ template<typename _MatrixType> class ComplexSchur
/** \brief Scalar type for matrices of type \p _MatrixType. */ /** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
/** \brief Complex scalar type for \p _MatrixType. /** \brief Complex scalar type for \p _MatrixType.
* *
@ -100,7 +100,7 @@ template<typename _MatrixType> class ComplexSchur
* *
* \sa compute() for an example. * \sa compute() for an example.
*/ */
ComplexSchur(int size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size,size), : m_matT(size,size),
m_matU(size,size), m_matU(size,size),
m_hess(size), m_hess(size),
@ -197,8 +197,8 @@ template<typename _MatrixType> class ComplexSchur
bool m_matUisUptodate; bool m_matUisUptodate;
private: private:
bool subdiagonalEntryIsNeglegible(int i); bool subdiagonalEntryIsNeglegible(Index i);
ComplexScalar computeShift(int iu, int iter); ComplexScalar computeShift(Index iu, Index iter);
void reduceToTriangularForm(bool skipU); void reduceToTriangularForm(bool skipU);
friend struct ei_complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>; friend struct ei_complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
}; };
@ -244,7 +244,7 @@ std::complex<RealScalar> ei_sqrt(const std::complex<RealScalar> &z)
* compared to m_matT(i,i) and m_matT(j,j), then set it to zero and * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and
* return true, else return false. */ * return true, else return false. */
template<typename MatrixType> template<typename MatrixType>
inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(int i) inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
{ {
RealScalar d = ei_norm1(m_matT.coeff(i,i)) + ei_norm1(m_matT.coeff(i+1,i+1)); RealScalar d = ei_norm1(m_matT.coeff(i,i)) + ei_norm1(m_matT.coeff(i+1,i+1));
RealScalar sd = ei_norm1(m_matT.coeff(i+1,i)); RealScalar sd = ei_norm1(m_matT.coeff(i+1,i));
@ -259,7 +259,7 @@ inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(int i)
/** Compute the shift in the current QR iteration. */ /** Compute the shift in the current QR iteration. */
template<typename MatrixType> template<typename MatrixType>
typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(int iu, int iter) typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
{ {
if (iter == 10 || iter == 20) if (iter == 10 || iter == 20)
{ {
@ -356,9 +356,9 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool skipU)
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
// Rows il,...,iu is the part we are working on (the active submatrix). // Rows il,...,iu is the part we are working on (the active submatrix).
// Rows iu+1,...,end are already brought in triangular form. // Rows iu+1,...,end are already brought in triangular form.
int iu = m_matT.cols() - 1; Index iu = m_matT.cols() - 1;
int il; Index il;
int iter = 0; // number of iterations we are working on the (iu,iu) element Index iter = 0; // number of iterations we are working on the (iu,iu) element
while(true) while(true)
{ {
@ -395,7 +395,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool skipU)
m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot); m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
if(!skipU) m_matU.applyOnTheRight(il, il+1, rot); if(!skipU) m_matU.applyOnTheRight(il, il+1, rot);
for(int i=il+1 ; i<iu ; i++) for(Index i=il+1 ; i<iu ; i++)
{ {
rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1)); rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
m_matT.coeffRef(i+1,i-1) = ComplexScalar(0); m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
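For reference, a minimal hedged usage sketch of ComplexSchur itself (assuming A = U T U^* with T upper triangular, as the comments above describe):

#include <Eigen/Eigenvalues>

int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(4, 4);
  Eigen::ComplexSchur<Eigen::MatrixXcd> schur;
  schur.compute(A);
  Eigen::MatrixXcd T = schur.matrixT();      // upper triangular factor
  Eigen::MatrixXcd U = schur.matrixU();      // unitary factor
  return int((U * T * U.adjoint() - A).norm() < 1e-9);
}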

View File

@ -90,6 +90,7 @@ template<typename _MatrixType> class EigenSolver
/** \brief Scalar type for matrices of type \p _MatrixType. */ /** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
/** \brief Complex scalar type for \p _MatrixType. /** \brief Complex scalar type for \p _MatrixType.
* *
@ -128,7 +129,7 @@ template<typename _MatrixType> class EigenSolver
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa EigenSolver() * \sa EigenSolver()
*/ */
EigenSolver(int size) EigenSolver(Index size)
: m_eivec(size, size), : m_eivec(size, size),
m_eivalues(size), m_eivalues(size),
m_isInitialized(false), m_isInitialized(false),
@ -285,9 +286,9 @@ template<typename MatrixType>
MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
{ {
ei_assert(m_isInitialized && "EigenSolver is not initialized."); ei_assert(m_isInitialized && "EigenSolver is not initialized.");
int n = m_eivec.cols(); Index n = m_eivec.cols();
MatrixType matD = MatrixType::Zero(n,n); MatrixType matD = MatrixType::Zero(n,n);
for (int i=0; i<n; ++i) for (Index i=0; i<n; ++i)
{ {
if (ei_isMuchSmallerThan(ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i)))) if (ei_isMuchSmallerThan(ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i))))
matD.coeffRef(i,i) = ei_real(m_eivalues.coeff(i)); matD.coeffRef(i,i) = ei_real(m_eivalues.coeff(i));
@ -305,9 +306,9 @@ template<typename MatrixType>
typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
{ {
ei_assert(m_isInitialized && "EigenSolver is not initialized."); ei_assert(m_isInitialized && "EigenSolver is not initialized.");
int n = m_eivec.cols(); Index n = m_eivec.cols();
EigenvectorsType matV(n,n); EigenvectorsType matV(n,n);
for (int j=0; j<n; ++j) for (Index j=0; j<n; ++j)
{ {
if (ei_isMuchSmallerThan(ei_imag(m_eivalues.coeff(j)), ei_real(m_eivalues.coeff(j)))) if (ei_isMuchSmallerThan(ei_imag(m_eivalues.coeff(j)), ei_real(m_eivalues.coeff(j))))
{ {
@ -317,7 +318,7 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
else else
{ {
// we have a pair of complex eigen values // we have a pair of complex eigen values
for (int i=0; i<n; ++i) for (Index i=0; i<n; ++i)
{ {
matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1)); matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1)); matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
@ -342,7 +343,7 @@ EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matr
// Compute eigenvalues from matT // Compute eigenvalues from matT
m_eivalues.resize(matrix.cols()); m_eivalues.resize(matrix.cols());
int i = 0; Index i = 0;
while (i < matrix.cols()) while (i < matrix.cols())
{ {
if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
@ -390,14 +391,14 @@ std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
template<typename MatrixType> template<typename MatrixType>
void EigenSolver<MatrixType>::computeEigenvectors() void EigenSolver<MatrixType>::computeEigenvectors()
{ {
const int size = m_eivec.cols(); const Index size = m_eivec.cols();
const Scalar eps = NumTraits<Scalar>::epsilon(); const Scalar eps = NumTraits<Scalar>::epsilon();
// inefficient! this is already computed in RealSchur // inefficient! this is already computed in RealSchur
Scalar norm = 0.0; Scalar norm = 0.0;
for (int j = 0; j < size; ++j) for (Index j = 0; j < size; ++j)
{ {
norm += m_matT.row(j).segment(std::max(j-1,0), size-std::max(j-1,0)).cwiseAbs().sum(); norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
} }
// Backsubstitute to find vectors of upper triangular form // Backsubstitute to find vectors of upper triangular form
@ -406,7 +407,7 @@ void EigenSolver<MatrixType>::computeEigenvectors()
return; return;
} }
for (int n = size-1; n >= 0; n--) for (Index n = size-1; n >= 0; n--)
{ {
Scalar p = m_eivalues.coeff(n).real(); Scalar p = m_eivalues.coeff(n).real();
Scalar q = m_eivalues.coeff(n).imag(); Scalar q = m_eivalues.coeff(n).imag();
@ -415,10 +416,10 @@ void EigenSolver<MatrixType>::computeEigenvectors()
if (q == 0) if (q == 0)
{ {
Scalar lastr=0, lastw=0; Scalar lastr=0, lastw=0;
int l = n; Index l = n;
m_matT.coeffRef(n,n) = 1.0; m_matT.coeffRef(n,n) = 1.0;
for (int i = n-1; i >= 0; i--) for (Index i = n-1; i >= 0; i--)
{ {
Scalar w = m_matT.coeff(i,i) - p; Scalar w = m_matT.coeff(i,i) - p;
Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
@ -461,7 +462,7 @@ void EigenSolver<MatrixType>::computeEigenvectors()
else if (q < 0) // Complex vector else if (q < 0) // Complex vector
{ {
Scalar lastra=0, lastsa=0, lastw=0; Scalar lastra=0, lastsa=0, lastw=0;
int l = n-1; Index l = n-1;
// Last vector component imaginary so matrix is triangular // Last vector component imaginary so matrix is triangular
if (ei_abs(m_matT.coeff(n,n-1)) > ei_abs(m_matT.coeff(n-1,n))) if (ei_abs(m_matT.coeff(n,n-1)) > ei_abs(m_matT.coeff(n-1,n)))
@ -477,7 +478,7 @@ void EigenSolver<MatrixType>::computeEigenvectors()
} }
m_matT.coeffRef(n,n-1) = 0.0; m_matT.coeffRef(n,n-1) = 0.0;
m_matT.coeffRef(n,n) = 1.0; m_matT.coeffRef(n,n) = 1.0;
for (int i = n-2; i >= 0; i--) for (Index i = n-2; i >= 0; i--)
{ {
Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1)); Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1)); Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
@ -535,7 +536,7 @@ void EigenSolver<MatrixType>::computeEigenvectors()
} }
// Back transformation to get eigenvectors of original matrix // Back transformation to get eigenvectors of original matrix
for (int j = size-1; j >= 0; j--) for (Index j = size-1; j >= 0; j--)
{ {
m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1); m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
m_eivec.col(j) = m_tmp; m_eivec.col(j) = m_tmp;
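A minimal usage sketch of EigenSolver for a real, possibly non-symmetric matrix (assumed includes; the compute() call drives the real Schur form plus the back-substitution shown above):

#include <iostream>
#include <Eigen/Eigenvalues>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::EigenSolver<Eigen::MatrixXd> es;
  es.compute(A);
  std::cout << es.eigenvalues()  << "\n";    // complex eigenvalues
  std::cout << es.eigenvectors() << "\n";    // complex eigenvectors
  return 0;
}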

View File

@ -81,6 +81,7 @@ template<typename _MatrixType> class HessenbergDecomposition
/** \brief Scalar type for matrices of type #MatrixType. */ /** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
/** \brief Type for vector of Householder coefficients. /** \brief Type for vector of Householder coefficients.
* *
@ -104,7 +105,7 @@ template<typename _MatrixType> class HessenbergDecomposition
* *
* \sa compute() for an example. * \sa compute() for an example.
*/ */
HessenbergDecomposition(int size = Size==Dynamic ? 2 : Size) HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size), : m_matrix(size,size),
m_temp(size) m_temp(size)
{ {
@ -276,12 +277,12 @@ template<typename MatrixType>
void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp) void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
{ {
assert(matA.rows()==matA.cols()); assert(matA.rows()==matA.cols());
int n = matA.rows(); Index n = matA.rows();
temp.resize(n); temp.resize(n);
for (int i = 0; i<n-1; ++i) for (Index i = 0; i<n-1; ++i)
{ {
// let's consider the vector v = i-th column starting at position i+1 // let's consider the vector v = i-th column starting at position i+1
int remainingSize = n-i-1; Index remainingSize = n-i-1;
RealScalar beta; RealScalar beta;
Scalar h; Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta); matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
@ -321,6 +322,7 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> > : public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >
{ {
typedef typename MatrixType::Index Index;
public: public:
/** \brief Constructor. /** \brief Constructor.
* *
@ -337,13 +339,13 @@ template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
inline void evalTo(ResultType& result) const inline void evalTo(ResultType& result) const
{ {
result = m_hess.packedMatrix(); result = m_hess.packedMatrix();
int n = result.rows(); Index n = result.rows();
if (n>2) if (n>2)
result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero(); result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero();
} }
int rows() const { return m_hess.packedMatrix().rows(); } Index rows() const { return m_hess.packedMatrix().rows(); }
int cols() const { return m_hess.packedMatrix().cols(); } Index cols() const { return m_hess.packedMatrix().cols(); }
protected: protected:
const HessenbergDecomposition<MatrixType>& m_hess; const HessenbergDecomposition<MatrixType>& m_hess;

View File

@ -77,6 +77,8 @@ template<typename _MatrixType> class RealSchur
}; };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar; typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef typename MatrixType::Index Index;
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType; typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType; typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
@ -91,7 +93,7 @@ template<typename _MatrixType> class RealSchur
* *
* \sa compute() for an example. * \sa compute() for an example.
*/ */
RealSchur(int size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size, size), : m_matT(size, size),
m_matU(size, size), m_matU(size, size),
m_workspaceVector(size), m_workspaceVector(size),
@ -177,11 +179,11 @@ template<typename _MatrixType> class RealSchur
typedef Matrix<Scalar,3,1> Vector3s; typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT(); Scalar computeNormOfT();
int findSmallSubdiagEntry(int iu, Scalar norm); Index findSmallSubdiagEntry(Index iu, Scalar norm);
void splitOffTwoRows(int iu, Scalar exshift); void splitOffTwoRows(Index iu, Scalar exshift);
void computeShift(int iu, int iter, Scalar& exshift, Vector3s& shiftInfo); void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(int il, int iu, const Vector3s& shiftInfo, int& im, Vector3s& firstHouseholderVector); void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
void performFrancisQRStep(int il, int im, int iu, const Vector3s& firstHouseholderVector, Scalar* workspace); void performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace);
}; };
@ -204,14 +206,14 @@ void RealSchur<MatrixType>::compute(const MatrixType& matrix)
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
// Rows il,...,iu is the part we are working on (the active window). // Rows il,...,iu is the part we are working on (the active window).
// Rows iu+1,...,end have already been brought into triangular form. // Rows iu+1,...,end have already been brought into triangular form.
int iu = m_matU.cols() - 1; Index iu = m_matU.cols() - 1;
int iter = 0; // iteration count Index iter = 0; // iteration count
Scalar exshift = 0.0; // sum of exceptional shifts Scalar exshift = 0.0; // sum of exceptional shifts
Scalar norm = computeNormOfT(); Scalar norm = computeNormOfT();
while (iu >= 0) while (iu >= 0)
{ {
int il = findSmallSubdiagEntry(iu, norm); Index il = findSmallSubdiagEntry(iu, norm);
// Check for convergence // Check for convergence
if (il == iu) // One root found if (il == iu) // One root found
@ -233,7 +235,7 @@ void RealSchur<MatrixType>::compute(const MatrixType& matrix)
Vector3s firstHouseholderVector, shiftInfo; Vector3s firstHouseholderVector, shiftInfo;
computeShift(iu, iter, exshift, shiftInfo); computeShift(iu, iter, exshift, shiftInfo);
iter = iter + 1; // (Could check iteration count here.) iter = iter + 1; // (Could check iteration count here.)
int im; Index im;
initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector); initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
performFrancisQRStep(il, im, iu, firstHouseholderVector, workspace); performFrancisQRStep(il, im, iu, firstHouseholderVector, workspace);
} }
@ -246,21 +248,21 @@ void RealSchur<MatrixType>::compute(const MatrixType& matrix)
template<typename MatrixType> template<typename MatrixType>
inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT() inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
{ {
const int size = m_matU.cols(); const Index size = m_matU.cols();
// FIXME to be efficient the following would require a triangular reduction code // FIXME to be efficient the following would require a triangular reduction code
// Scalar norm = m_matT.upper().cwiseAbs().sum() // Scalar norm = m_matT.upper().cwiseAbs().sum()
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum(); // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
Scalar norm = 0.0; Scalar norm = 0.0;
for (int j = 0; j < size; ++j) for (Index j = 0; j < size; ++j)
norm += m_matT.row(j).segment(std::max(j-1,0), size-std::max(j-1,0)).cwiseAbs().sum(); norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
return norm; return norm;
} }
/** \internal Looks for a single small sub-diagonal element and returns its index */ /** \internal Looks for a single small sub-diagonal element and returns its index */
template<typename MatrixType> template<typename MatrixType>
inline int RealSchur<MatrixType>::findSmallSubdiagEntry(int iu, Scalar norm) inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
{ {
int res = iu; Index res = iu;
while (res > 0) while (res > 0)
{ {
Scalar s = ei_abs(m_matT.coeff(res-1,res-1)) + ei_abs(m_matT.coeff(res,res)); Scalar s = ei_abs(m_matT.coeff(res-1,res-1)) + ei_abs(m_matT.coeff(res,res));
@ -275,9 +277,9 @@ inline int RealSchur<MatrixType>::findSmallSubdiagEntry(int iu, Scalar norm)
/** \internal Update T given that rows iu-1 and iu decouple from the rest. */ /** \internal Update T given that rows iu-1 and iu decouple from the rest. */
template<typename MatrixType> template<typename MatrixType>
inline void RealSchur<MatrixType>::splitOffTwoRows(int iu, Scalar exshift) inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, Scalar exshift)
{ {
const int size = m_matU.cols(); const Index size = m_matU.cols();
// The eigenvalues of the 2x2 matrix [a b; c d] are // The eigenvalues of the 2x2 matrix [a b; c d] are
// trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
@ -307,7 +309,7 @@ inline void RealSchur<MatrixType>::splitOffTwoRows(int iu, Scalar exshift)
/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */ /** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
template<typename MatrixType> template<typename MatrixType>
inline void RealSchur<MatrixType>::computeShift(int iu, int iter, Scalar& exshift, Vector3s& shiftInfo) inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
{ {
shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu); shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1); shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
@ -317,7 +319,7 @@ inline void RealSchur<MatrixType>::computeShift(int iu, int iter, Scalar& exshif
if (iter == 10) if (iter == 10)
{ {
exshift += shiftInfo.coeff(0); exshift += shiftInfo.coeff(0);
for (int i = 0; i <= iu; ++i) for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= shiftInfo.coeff(0); m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
Scalar s = ei_abs(m_matT.coeff(iu,iu-1)) + ei_abs(m_matT.coeff(iu-1,iu-2)); Scalar s = ei_abs(m_matT.coeff(iu,iu-1)) + ei_abs(m_matT.coeff(iu-1,iu-2));
shiftInfo.coeffRef(0) = Scalar(0.75) * s; shiftInfo.coeffRef(0) = Scalar(0.75) * s;
@ -338,7 +340,7 @@ inline void RealSchur<MatrixType>::computeShift(int iu, int iter, Scalar& exshif
s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0); s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s; s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
exshift += s; exshift += s;
for (int i = 0; i <= iu; ++i) for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= s; m_matT.coeffRef(i,i) -= s;
shiftInfo.setConstant(Scalar(0.964)); shiftInfo.setConstant(Scalar(0.964));
} }
@ -347,7 +349,7 @@ inline void RealSchur<MatrixType>::computeShift(int iu, int iter, Scalar& exshif
/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */ /** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
template<typename MatrixType> template<typename MatrixType>
inline void RealSchur<MatrixType>::initFrancisQRStep(int il, int iu, const Vector3s& shiftInfo, int& im, Vector3s& firstHouseholderVector) inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
{ {
Vector3s& v = firstHouseholderVector; // alias to save typing Vector3s& v = firstHouseholderVector; // alias to save typing
@ -373,14 +375,14 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(int il, int iu, const Vecto
/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */ /** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
template<typename MatrixType> template<typename MatrixType>
inline void RealSchur<MatrixType>::performFrancisQRStep(int il, int im, int iu, const Vector3s& firstHouseholderVector, Scalar* workspace) inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, const Vector3s& firstHouseholderVector, Scalar* workspace)
{ {
assert(im >= il); assert(im >= il);
assert(im <= iu-2); assert(im <= iu-2);
const int size = m_matU.cols(); const Index size = m_matU.cols();
for (int k = im; k <= iu-2; ++k) for (Index k = im; k <= iu-2; ++k)
{ {
bool firstIteration = (k == im); bool firstIteration = (k == im);
@ -422,7 +424,7 @@ inline void RealSchur<MatrixType>::performFrancisQRStep(int il, int im, int iu,
} }
// clean up pollution due to round-off errors // clean up pollution due to round-off errors
for (int i = im+2; i <= iu; ++i) for (Index i = im+2; i <= iu; ++i)
{ {
m_matT.coeffRef(i,i-2) = Scalar(0); m_matT.coeffRef(i,i-2) = Scalar(0);
if (i > im+2) if (i > im+2)
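
(Illustrative sketch, not from the patch: calling the solver whose internals switch from int to Index above. The Index-sized constructor and compute() are shown in this file; matrixT()/matrixU() and <Eigen/Dense> are assumed.)

#include <Eigen/Dense>

void real_schur_sketch(const Eigen::MatrixXd& A)
{
  Eigen::RealSchur<Eigen::MatrixXd> schur(A.rows()); // preallocate; size is an Index
  schur.compute(A);                                  // runs the Francis QR iteration above
  Eigen::MatrixXd U = schur.matrixU();               // assumed accessor for the orthogonal factor
  Eigen::MatrixXd T = schur.matrixT();               // assumed accessor; A = U * T * U^T up to round-off
  (void)U; (void)T;
}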

View File

@ -82,6 +82,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Scalar type for matrices of type \p _MatrixType. */ /** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
/** \brief Real scalar type for \p _MatrixType. /** \brief Real scalar type for \p _MatrixType.
* *
@ -105,7 +106,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
* perform decompositions via compute(const MatrixType&, bool) or * perform decompositions via compute(const MatrixType&, bool) or
* compute(const MatrixType&, const MatrixType&, bool). This constructor * compute(const MatrixType&, const MatrixType&, bool). This constructor
* can only be used if \p _MatrixType is a fixed-size matrix; use * can only be used if \p _MatrixType is a fixed-size matrix; use
* SelfAdjointEigenSolver(int) for dynamic-size matrices. * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
* *
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
@ -132,7 +133,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
* *
* \sa compute(const MatrixType&, bool) for an example * \sa compute(const MatrixType&, bool) for an example
*/ */
SelfAdjointEigenSolver(int size) SelfAdjointEigenSolver(Index size)
: m_eivec(size, size), : m_eivec(size, size),
m_eivalues(size), m_eivalues(size),
m_tridiag(size), m_tridiag(size),
@ -379,8 +380,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
* Implemented from Golub's "Matrix Computations", algorithm 8.3.2: * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
* "implicit symmetric QR step with Wilkinson shift" * "implicit symmetric QR step with Wilkinson shift"
*/ */
template<typename RealScalar, typename Scalar> template<typename RealScalar, typename Scalar, typename Index>
static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n); static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
template<typename MatrixType> template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors) SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
@ -389,7 +390,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(
m_eigenvectorsOk = computeEigenvectors; m_eigenvectorsOk = computeEigenvectors;
#endif #endif
assert(matrix.cols() == matrix.rows()); assert(matrix.cols() == matrix.rows());
int n = matrix.cols(); Index n = matrix.cols();
m_eivalues.resize(n,1); m_eivalues.resize(n,1);
m_eivec.resize(n,n); m_eivec.resize(n,n);
@ -407,11 +408,11 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(
if (computeEigenvectors) if (computeEigenvectors)
m_eivec = m_tridiag.matrixQ(); m_eivec = m_tridiag.matrixQ();
int end = n-1; Index end = n-1;
int start = 0; Index start = 0;
while (end>0) while (end>0)
{ {
for (int i = start; i<end; ++i) for (Index i = start; i<end; ++i)
if (ei_isMuchSmallerThan(ei_abs(m_subdiag[i]),(ei_abs(diag[i])+ei_abs(diag[i+1])))) if (ei_isMuchSmallerThan(ei_abs(m_subdiag[i]),(ei_abs(diag[i])+ei_abs(diag[i+1]))))
m_subdiag[i] = 0; m_subdiag[i] = 0;
@ -430,9 +431,9 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::compute(
// Sort eigenvalues and corresponding vectors. // Sort eigenvalues and corresponding vectors.
// TODO make the sort optional ? // TODO make the sort optional ?
// TODO use a better sort algorithm !! // TODO use a better sort algorithm !!
for (int i = 0; i < n-1; ++i) for (Index i = 0; i < n-1; ++i)
{ {
int k; Index k;
m_eivalues.segment(i,n-i).minCoeff(&k); m_eivalues.segment(i,n-i).minCoeff(&k);
if (k > 0) if (k > 0)
{ {
@ -473,7 +474,7 @@ compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors
{ {
// transform back the eigen vectors: evecs = inv(U) * evecs // transform back the eigen vectors: evecs = inv(U) * evecs
cholB.matrixU().solveInPlace(m_eivec); cholB.matrixU().solveInPlace(m_eivec);
for (int i=0; i<m_eivec.cols(); ++i) for (Index i=0; i<m_eivec.cols(); ++i)
m_eivec.col(i) = m_eivec.col(i).normalized(); m_eivec.col(i) = m_eivec.col(i).normalized();
} }
return *this; return *this;
@ -482,8 +483,8 @@ compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors
#endif // EIGEN_HIDE_HEAVY_CODE #endif // EIGEN_HIDE_HEAVY_CODE
#ifndef EIGEN_EXTERN_INSTANTIATIONS #ifndef EIGEN_EXTERN_INSTANTIATIONS
template<typename RealScalar, typename Scalar> template<typename RealScalar, typename Scalar, typename Index>
static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n) static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{ {
RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
RealScalar e2 = ei_abs2(subdiag[end-1]); RealScalar e2 = ei_abs2(subdiag[end-1]);
@ -491,7 +492,7 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int st
RealScalar x = diag[start] - mu; RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start]; RealScalar z = subdiag[start];
for (int k = start; k < end; ++k) for (Index k = start; k < end; ++k)
{ {
PlanarRotation<RealScalar> rot; PlanarRotation<RealScalar> rot;
rot.makeGivens(x, z); rot.makeGivens(x, z);
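
(A usage sketch for the class above: the Index constructor and compute(matrix, computeEigenvectors) are the overloads shown in these hunks; the standard eigenvalues()/eigenvectors() accessors and <Eigen/Dense> are assumed.)

#include <Eigen/Dense>

void selfadjoint_eig_sketch(const Eigen::MatrixXd& A) // A assumed symmetric
{
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A.rows()); // size is now an Index
  es.compute(A, true);                                         // true: also compute eigenvectors
  Eigen::VectorXd lambda = es.eigenvalues();                   // assumed accessor
  Eigen::MatrixXd V = es.eigenvectors();                       // assumed accessor
  (void)lambda; (void)V;
}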

View File

@ -67,6 +67,7 @@ template<typename _MatrixType> class Tridiagonalization
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
enum { enum {
Size = MatrixType::RowsAtCompileTime, Size = MatrixType::RowsAtCompileTime,
@ -107,7 +108,7 @@ template<typename _MatrixType> class Tridiagonalization
* *
* \sa compute() for an example. * \sa compute() for an example.
*/ */
Tridiagonalization(int size = Size==Dynamic ? 2 : Size) Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size), m_hCoeffs(size > 1 ? size-1 : 1) : m_matrix(size,size), m_hCoeffs(size > 1 ? size-1 : 1)
{} {}
@ -324,7 +325,7 @@ template<typename MatrixType>
const typename Tridiagonalization<MatrixType>::SubDiagonalReturnType const typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
Tridiagonalization<MatrixType>::subDiagonal() const Tridiagonalization<MatrixType>::subDiagonal() const
{ {
int n = m_matrix.rows(); Index n = m_matrix.rows();
return Block<MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1).diagonal(); return Block<MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1).diagonal();
} }
@ -334,7 +335,7 @@ Tridiagonalization<MatrixType>::matrixT() const
{ {
// FIXME should this function (and other similar ones) rather take a matrix as argument // FIXME should this function (and other similar ones) rather take a matrix as argument
// and fill it ? (to avoid temporaries) // and fill it ? (to avoid temporaries)
int n = m_matrix.rows(); Index n = m_matrix.rows();
MatrixType matT = m_matrix; MatrixType matT = m_matrix;
matT.topRightCorner(n-1, n-1).diagonal() = subDiagonal().template cast<Scalar>().conjugate(); matT.topRightCorner(n-1, n-1).diagonal() = subDiagonal().template cast<Scalar>().conjugate();
if (n>2) if (n>2)
@ -363,10 +364,10 @@ template<typename MatrixType>
void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs) void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs)
{ {
assert(matA.rows()==matA.cols()); assert(matA.rows()==matA.cols());
int n = matA.rows(); Index n = matA.rows();
for (int i = 0; i<n-1; ++i) for (Index i = 0; i<n-1; ++i)
{ {
int remainingSize = n-i-1; Index remainingSize = n-i-1;
RealScalar beta; RealScalar beta;
Scalar h; Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta); matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
@ -391,7 +392,7 @@ void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType&
template<typename MatrixType> template<typename MatrixType>
void Tridiagonalization<MatrixType>::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) void Tridiagonalization<MatrixType>::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{ {
int n = mat.rows(); Index n = mat.rows();
ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1); ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1);
if (n==3 && (!NumTraits<Scalar>::IsComplex) ) if (n==3 && (!NumTraits<Scalar>::IsComplex) )
{ {
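
(Sketch of the Tridiagonalization API touched above; the Index constructor, compute(), matrixT() and subDiagonal() appear in these hunks, while the helper name and <Eigen/Dense> are assumptions.)

#include <Eigen/Dense>

void tridiagonalization_sketch(const Eigen::MatrixXd& A) // A assumed symmetric
{
  Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A.rows()); // size is an Index
  tri.compute(A);
  Eigen::MatrixXd T = tri.matrixT();       // tridiagonal factor, accessor shown above
  Eigen::VectorXd sub = tri.subDiagonal(); // subdiagonal, accessor shown above
  (void)T; (void)sub;
}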

View File

@ -45,6 +45,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
enum { AmbientDimAtCompileTime = _AmbientDim }; enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar; typedef _Scalar Scalar;
typedef NumTraits<Scalar> ScalarTraits; typedef NumTraits<Scalar> ScalarTraits;
typedef DenseIndex Index;
typedef typename ScalarTraits::Real RealScalar; typedef typename ScalarTraits::Real RealScalar;
typedef typename ScalarTraits::NonInteger NonInteger; typedef typename ScalarTraits::NonInteger NonInteger;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType; typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
@ -72,7 +73,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
{ if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); } { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
/** Constructs a null box with \a _dim the dimension of the ambient space. */ /** Constructs a null box with \a _dim the dimension of the ambient space. */
inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim) inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
{ setEmpty(); } { setEmpty(); }
/** Constructs a box with extremities \a _min and \a _max. */ /** Constructs a box with extremities \a _min and \a _max. */
@ -91,7 +92,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
~AlignedBox() {} ~AlignedBox() {}
/** \returns the dimension in which the box holds */ /** \returns the dimension in which the box holds */
inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; } inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : Index(AmbientDimAtCompileTime); }
/** \deprecated use isEmpty */ /** \deprecated use isEmpty */
inline bool isNull() const { return isEmpty(); } inline bool isNull() const { return isEmpty(); }
@ -157,8 +158,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
VectorType res; VectorType res;
int mult = 1; Index mult = 1;
for(int d=0; d<dim(); ++d) for(Index d=0; d<dim(); ++d)
{ {
if( mult & corner ) res[d] = m_max[d]; if( mult & corner ) res[d] = m_max[d];
else res[d] = m_min[d]; else res[d] = m_min[d];
@ -172,7 +173,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
inline VectorType sample() const inline VectorType sample() const
{ {
VectorType r; VectorType r;
for(int d=0; d<dim(); ++d) for(Index d=0; d<dim(); ++d)
{ {
if(!ScalarTraits::IsInteger) if(!ScalarTraits::IsInteger)
{ {
@ -311,7 +312,7 @@ inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const Matri
const typename ei_nested<Derived,2*AmbientDim>::type p(a_p.derived()); const typename ei_nested<Derived,2*AmbientDim>::type p(a_p.derived());
Scalar dist2 = 0.; Scalar dist2 = 0.;
Scalar aux; Scalar aux;
for (int k=0; k<dim(); ++k) for (Index k=0; k<dim(); ++k)
{ {
if( m_min[k] > p[k] ) if( m_min[k] > p[k] )
{ {
@ -332,7 +333,7 @@ inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const Align
{ {
Scalar dist2 = 0.; Scalar dist2 = 0.;
Scalar aux; Scalar aux;
for (int k=0; k<dim(); ++k) for (Index k=0; k<dim(); ++k)
{ {
if( m_min[k] > b.m_max[k] ) if( m_min[k] > b.m_max[k] )
{ {
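
(Sketch of the dynamic-size AlignedBox with its Index-valued dimension; the constructor, dim() and isEmpty() are shown above, DenseIndex is the typedef this hunk relies on, and everything else is assumed.)

#include <Eigen/Dense>

void aligned_box_sketch()
{
  Eigen::AlignedBox<double, Eigen::Dynamic> box(3); // starts empty, per the constructor above
  Eigen::DenseIndex d = box.dim();                  // returns Index instead of int
  bool empty = box.isEmpty();
  (void)d; (void)empty;
}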

View File

@ -43,7 +43,7 @@
*/ */
template<typename Derived> template<typename Derived>
inline Matrix<typename MatrixBase<Derived>::Scalar,3,1> inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
MatrixBase<Derived>::eulerAngles(int a0, int a1, int a2) const MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
{ {
/* Implemented from Graphics Gems IV */ /* Implemented from Graphics Gems IV */
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
@ -52,10 +52,10 @@ MatrixBase<Derived>::eulerAngles(int a0, int a1, int a2) const
typedef Matrix<typename Derived::Scalar,2,1> Vector2; typedef Matrix<typename Derived::Scalar,2,1> Vector2;
const Scalar epsilon = NumTraits<Scalar>::dummy_precision(); const Scalar epsilon = NumTraits<Scalar>::dummy_precision();
const int odd = ((a0+1)%3 == a1) ? 0 : 1; const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
const int i = a0; const Index i = a0;
const int j = (a0 + 1 + odd)%3; const Index j = (a0 + 1 + odd)%3;
const int k = (a0 + 2 - odd)%3; const Index k = (a0 + 2 - odd)%3;
if (a0==a2) if (a0==a2)
{ {
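
(Sketch of the eulerAngles() overload above, whose axis arguments become Index; Matrix3d/Vector3d come from <Eigen/Dense>.)

#include <Eigen/Dense>

void euler_angles_sketch()
{
  Eigen::Matrix3d R = Eigen::Matrix3d::Identity();
  Eigen::Vector3d ea = R.eulerAngles(2, 0, 2); // Z-X-Z convention; the 2,0,2 are now Index arguments
  (void)ea;
}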

View File

@ -77,10 +77,10 @@ template<typename MatrixType,int _Direction> class Homogeneous
: m_matrix(matrix) : m_matrix(matrix)
{} {}
inline int rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); } inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
inline int cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); } inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
inline Scalar coeff(int row, int col) const inline Scalar coeff(Index row, Index col) const
{ {
if( (int(Direction)==Vertical && row==m_matrix.rows()) if( (int(Direction)==Vertical && row==m_matrix.rows())
|| (int(Direction)==Horizontal && col==m_matrix.cols())) || (int(Direction)==Horizontal && col==m_matrix.cols()))
@ -223,12 +223,13 @@ struct ei_homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>
: public ReturnByValue<ei_homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> > : public ReturnByValue<ei_homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
{ {
typedef typename ei_cleantype<typename Lhs::Nested>::type LhsNested; typedef typename ei_cleantype<typename Lhs::Nested>::type LhsNested;
typedef typename MatrixType::Index Index;
ei_homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs) ei_homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)
: m_lhs(lhs), m_rhs(rhs) : m_lhs(lhs), m_rhs(rhs)
{} {}
inline int rows() const { return m_lhs.rows(); } inline Index rows() const { return m_lhs.rows(); }
inline int cols() const { return m_rhs.cols(); } inline Index cols() const { return m_rhs.cols(); }
template<typename Dest> void evalTo(Dest& dst) const template<typename Dest> void evalTo(Dest& dst) const
{ {
@ -261,12 +262,13 @@ struct ei_homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>
: public ReturnByValue<ei_homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> > : public ReturnByValue<ei_homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
{ {
typedef typename ei_cleantype<typename Rhs::Nested>::type RhsNested; typedef typename ei_cleantype<typename Rhs::Nested>::type RhsNested;
typedef typename MatrixType::Index Index;
ei_homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) ei_homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)
: m_lhs(lhs), m_rhs(rhs) : m_lhs(lhs), m_rhs(rhs)
{} {}
inline int rows() const { return m_lhs.rows(); } inline Index rows() const { return m_lhs.rows(); }
inline int cols() const { return m_rhs.cols(); } inline Index cols() const { return m_rhs.cols(); }
template<typename Dest> void evalTo(Dest& dst) const template<typename Dest> void evalTo(Dest& dst) const
{ {
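
(Sketch of the Homogeneous expression whose rows()/cols() now return Index; MatrixBase::homogeneous() is assumed to be the usual entry point, it is not shown in this hunk.)

#include <Eigen/Dense>

void homogeneous_sketch()
{
  Eigen::Vector3d v(1.0, 2.0, 3.0);
  Eigen::Vector4d h = v.homogeneous(); // appends a 1, building the expression above
  (void)h;
}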

View File

@ -51,10 +51,11 @@ public:
enum { AmbientDimAtCompileTime = _AmbientDim }; enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar; typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseIndex Index;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType; typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
typedef Matrix<Scalar,int(AmbientDimAtCompileTime)==Dynamic typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
? Dynamic ? Dynamic
: int(AmbientDimAtCompileTime)+1,1> Coefficients; : Index(AmbientDimAtCompileTime)+1,1> Coefficients;
typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType; typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
/** Default constructor without initialization */ /** Default constructor without initialization */
@ -62,7 +63,7 @@ public:
/** Constructs a dynamic-size hyperplane with \a _dim the dimension /** Constructs a dynamic-size hyperplane with \a _dim the dimension
* of the ambient space */ * of the ambient space */
inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {} inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
/** Construct a plane from its normal \a n and a point \a e onto the plane. /** Construct a plane from its normal \a n and a point \a e onto the plane.
* \warning the vector normal is assumed to be normalized. * \warning the vector normal is assumed to be normalized.
@ -122,7 +123,7 @@ public:
~Hyperplane() {} ~Hyperplane() {}
/** \returns the dimension in which the plane holds */ /** \returns the dimension in which the plane holds */
inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : AmbientDimAtCompileTime; } inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
/** normalizes \c *this */ /** normalizes \c *this */
void normalize(void) void normalize(void)
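
(Sketch of the Hyperplane API above: the normal/point constructor, the Index-valued dim() and normalize() all appear in these hunks; the helper name is an assumption.)

#include <Eigen/Dense>

void hyperplane_sketch()
{
  Eigen::Vector3d n = Eigen::Vector3d::UnitZ();        // unit normal
  Eigen::Vector3d p(0.0, 0.0, 2.0);                    // a point on the plane
  Eigen::Hyperplane<double, 3> plane(n, p);
  Eigen::Hyperplane<double, 3>::Index d = plane.dim(); // Index, no longer int
  plane.normalize();
  (void)d;
}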

View File

@ -137,12 +137,13 @@ struct ei_unitOrthogonal_selector
typedef typename ei_plain_matrix_type<Derived>::type VectorType; typedef typename ei_plain_matrix_type<Derived>::type VectorType;
typedef typename ei_traits<Derived>::Scalar Scalar; typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename Derived::Index Index;
typedef Matrix<Scalar,2,1> Vector2; typedef Matrix<Scalar,2,1> Vector2;
inline static VectorType run(const Derived& src) inline static VectorType run(const Derived& src)
{ {
VectorType perp = VectorType::Zero(src.size()); VectorType perp = VectorType::Zero(src.size());
int maxi = 0; Index maxi = 0;
int sndi = 0; Index sndi = 0;
src.cwiseAbs().maxCoeff(&maxi); src.cwiseAbs().maxCoeff(&maxi);
if (maxi==0) if (maxi==0)
sndi = 1; sndi = 1;
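
(Sketch: the selector above is assumed to back MatrixBase::unitOrthogonal() for run-time-sized vectors; the entry point itself is not shown in this hunk.)

#include <Eigen/Dense>

void unit_orthogonal_sketch()
{
  Eigen::VectorXd v(4);
  v << 1.0, 2.0, 3.0, 4.0;
  Eigen::VectorXd w = v.unitOrthogonal(); // a unit vector orthogonal to v
  (void)w;
}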

View File

@ -47,6 +47,7 @@ public:
enum { AmbientDimAtCompileTime = _AmbientDim }; enum { AmbientDimAtCompileTime = _AmbientDim };
typedef _Scalar Scalar; typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseIndex Index;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType; typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
/** Default constructor without initialization */ /** Default constructor without initialization */
@ -54,7 +55,7 @@ public:
/** Constructs a dynamic-size line with \a _dim the dimension /** Constructs a dynamic-size line with \a _dim the dimension
* of the ambient space */ * of the ambient space */
inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {} inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}
/** Initializes a parametrized line of direction \a direction and origin \a origin. /** Initializes a parametrized line of direction \a direction and origin \a origin.
* \warning the vector direction is assumed to be normalized. * \warning the vector direction is assumed to be normalized.
@ -71,7 +72,7 @@ public:
~ParametrizedLine() {} ~ParametrizedLine() {}
/** \returns the dimension in which the line holds */ /** \returns the dimension in which the line holds */
inline int dim() const { return m_direction.size(); } inline Index dim() const { return m_direction.size(); }
const VectorType& origin() const { return m_origin; } const VectorType& origin() const { return m_origin; }
VectorType& origin() { return m_origin; } VectorType& origin() { return m_origin; }
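
(Sketch of the ParametrizedLine API above; the origin/direction constructor and the Index-valued dim() appear in these hunks, the helper name is an assumption.)

#include <Eigen/Dense>

void parametrized_line_sketch()
{
  Eigen::Vector3d o = Eigen::Vector3d::Zero();
  Eigen::Vector3d dir = Eigen::Vector3d::UnitX();           // assumed normalized, as required above
  Eigen::ParametrizedLine<double, 3> line(o, dir);
  Eigen::ParametrizedLine<double, 3>::Index d = line.dim(); // Index, no longer int
  (void)d;
}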

View File

@ -617,6 +617,7 @@ template<typename Other>
struct ei_quaternionbase_assign_impl<Other,3,3> struct ei_quaternionbase_assign_impl<Other,3,3>
{ {
typedef typename Other::Scalar Scalar; typedef typename Other::Scalar Scalar;
typedef DenseIndex Index;
template<class Derived> inline static void run(QuaternionBase<Derived>& q, const Other& mat) template<class Derived> inline static void run(QuaternionBase<Derived>& q, const Other& mat)
{ {
// This algorithm comes from "Quaternion Calculus and Fast Animation", // This algorithm comes from "Quaternion Calculus and Fast Animation",
@ -633,13 +634,13 @@ struct ei_quaternionbase_assign_impl<Other,3,3>
} }
else else
{ {
int i = 0; DenseIndex i = 0;
if (mat.coeff(1,1) > mat.coeff(0,0)) if (mat.coeff(1,1) > mat.coeff(0,0))
i = 1; i = 1;
if (mat.coeff(2,2) > mat.coeff(i,i)) if (mat.coeff(2,2) > mat.coeff(i,i))
i = 2; i = 2;
int j = (i+1)%3; DenseIndex j = (i+1)%3;
int k = (j+1)%3; DenseIndex k = (j+1)%3;
t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
q.coeffs().coeffRef(i) = Scalar(0.5) * t; q.coeffs().coeffRef(i) = Scalar(0.5) * t;
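
(Sketch of the matrix-to-quaternion path patched above; assigning a rotation matrix to a quaternion is assumed to dispatch to ei_quaternionbase_assign_impl, and AngleAxisd is only used to build a test rotation.)

#include <Eigen/Dense>

void quaternion_from_matrix_sketch()
{
  Eigen::Matrix3d R = Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitY()).toRotationMatrix();
  Eigen::Quaterniond q;
  q = R; // runs the branch selection above, now with DenseIndex i, j, k
  (void)q;
}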

View File

@ -174,6 +174,7 @@ public:
}; };
/** the scalar type of the coefficients */ /** the scalar type of the coefficients */
typedef _Scalar Scalar; typedef _Scalar Scalar;
typedef DenseIndex Index;
/** type of the matrix used to represent the transformation */ /** type of the matrix used to represent the transformation */
typedef Matrix<Scalar,Rows,HDim> MatrixType; typedef Matrix<Scalar,Rows,HDim> MatrixType;
/** type of the matrix used to represent the linear part of the transformation */ /** type of the matrix used to represent the linear part of the transformation */
@ -270,11 +271,11 @@ public:
#endif #endif
/** shortcut for m_matrix(row,col); /** shortcut for m_matrix(row,col);
* \sa MatrixBase::operator()(int,int) const */ * \sa MatrixBase::operator()(Index,Index) const */
inline Scalar operator() (int row, int col) const { return m_matrix(row,col); } inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }
/** shortcut for m_matrix(row,col); /** shortcut for m_matrix(row,col);
* \sa MatrixBase::operator()(int,int) */ * \sa MatrixBase::operator()(Index,Index) */
inline Scalar& operator() (int row, int col) { return m_matrix(row,col); } inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }
/** \returns a read-only expression of the transformation matrix */ /** \returns a read-only expression of the transformation matrix */
inline const MatrixType& matrix() const { return m_matrix; } inline const MatrixType& matrix() const { return m_matrix; }
@ -1141,7 +1142,7 @@ struct ei_transform_right_product_impl<Other,Mode, Dim,HDim, Dim,HDim>
static ResultType run(const TransformType& tr, const Other& other) static ResultType run(const TransformType& tr, const Other& other)
{ {
TransformType res; TransformType res;
const int Rows = Mode==Projective ? HDim : Dim; enum { Rows = Mode==Projective ? HDim : Dim };
res.matrix().template block<Rows,HDim>(0,0).noalias() = (tr.linearExt() * other); res.matrix().template block<Rows,HDim>(0,0).noalias() = (tr.linearExt() * other);
res.translationExt() += tr.translationExt(); res.translationExt() += tr.translationExt();
if(Mode!=Affine) if(Mode!=Affine)
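
(Sketch of the Transform accessors above; the Affine mode tag and setIdentity() are assumptions, while the operator()(Index,Index) shortcut is the one documented in this hunk.)

#include <Eigen/Dense>

void transform_sketch()
{
  Eigen::Transform<double, 3, Eigen::Affine> t; // 'Affine' mode assumed
  t.setIdentity();
  double tx = t(0, 3); // shortcut for t.matrix()(0,3); arguments are now Index
  (void)tx;
}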

View File

@ -109,6 +109,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
typedef typename ei_umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType; typedef typename ei_umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
typedef typename ei_traits<TransformationMatrixType>::Scalar Scalar; typedef typename ei_traits<TransformationMatrixType>::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef typename Derived::Index Index;
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename ei_traits<OtherDerived>::Scalar>::ret), EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename ei_traits<OtherDerived>::Scalar>::ret),
@ -120,8 +121,8 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
typedef Matrix<Scalar, Dimension, Dimension> MatrixType; typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
typedef typename ei_plain_matrix_type_row_major<Derived>::type RowMajorMatrixType; typedef typename ei_plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;
const int m = src.rows(); // dimension const Index m = src.rows(); // dimension
const int n = src.cols(); // number of measurements const Index n = src.cols(); // number of measurements
// required for demeaning ... // required for demeaning ...
const RealScalar one_over_n = 1 / static_cast<RealScalar>(n); const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
@ -151,7 +152,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
// Eq. (40) and (43) // Eq. (40) and (43)
const VectorType& d = svd.singularValues(); const VectorType& d = svd.singularValues();
int rank = 0; for (int i=0; i<m; ++i) if (!ei_isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank; Index rank = 0; for (Index i=0; i<m; ++i) if (!ei_isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
if (rank == m-1) { if (rank == m-1) {
if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) { if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose(); Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
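
(Sketch of calling umeyama(), whose internal counters become Index above; the point sets are placeholders, and the "destination" here is just a scaled copy so the call has matching data.)

#include <Eigen/Dense>

void umeyama_sketch()
{
  Eigen::MatrixXd src = Eigen::MatrixXd::Random(3, 20); // one point per column
  Eigen::MatrixXd dst = 2.0 * src;                      // e.g. a scaled copy
  Eigen::MatrixXd T = Eigen::umeyama(src, dst, true);   // with_scaling = true; 4x4 at run time here
  (void)T;
}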

View File

@ -53,6 +53,7 @@ template<typename VectorsType, typename CoeffsType, int Side>
struct ei_traits<HouseholderSequence<VectorsType,CoeffsType,Side> > struct ei_traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
{ {
typedef typename VectorsType::Scalar Scalar; typedef typename VectorsType::Scalar Scalar;
typedef typename VectorsType::StorageKind StorageKind;
enum { enum {
RowsAtCompileTime = Side==OnTheLeft ? ei_traits<VectorsType>::RowsAtCompileTime RowsAtCompileTime = Side==OnTheLeft ? ei_traits<VectorsType>::RowsAtCompileTime
: ei_traits<VectorsType>::ColsAtCompileTime, : ei_traits<VectorsType>::ColsAtCompileTime,
@ -69,9 +70,10 @@ struct ei_hseq_side_dependent_impl
{ {
typedef Block<VectorsType, Dynamic, 1> EssentialVectorType; typedef Block<VectorsType, Dynamic, 1> EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType; typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, int k) typedef typename VectorsType::Index Index;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{ {
const int start = k+1+h.m_shift; Index start = k+1+h.m_shift;
return Block<VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1); return Block<VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);
} }
}; };
@ -81,9 +83,10 @@ struct ei_hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight>
{ {
typedef Transpose<Block<VectorsType, 1, Dynamic> > EssentialVectorType; typedef Transpose<Block<VectorsType, 1, Dynamic> > EssentialVectorType;
typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType; typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, int k) typedef typename VectorsType::Index Index;
static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
{ {
const int start = k+1+h.m_shift; Index start = k+1+h.m_shift;
return Block<VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose(); return Block<VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose();
} }
}; };
@ -106,6 +109,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
MaxColsAtCompileTime = ei_traits<HouseholderSequence>::MaxColsAtCompileTime MaxColsAtCompileTime = ei_traits<HouseholderSequence>::MaxColsAtCompileTime
}; };
typedef typename ei_traits<HouseholderSequence>::Scalar Scalar; typedef typename ei_traits<HouseholderSequence>::Scalar Scalar;
typedef typename VectorsType::Index Index;
typedef typename ei_hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType typedef typename ei_hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType
EssentialVectorType; EssentialVectorType;
@ -126,15 +130,15 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
{ {
} }
HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift) HouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, Index actualVectors, Index shift)
: m_vectors(v), m_coeffs(h), m_trans(trans), m_actualVectors(actualVectors), m_shift(shift) : m_vectors(v), m_coeffs(h), m_trans(trans), m_actualVectors(actualVectors), m_shift(shift)
{ {
} }
int rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
int cols() const { return rows(); } Index cols() const { return rows(); }
const EssentialVectorType essentialVector(int k) const const EssentialVectorType essentialVector(Index k) const
{ {
ei_assert(k >= 0 && k < m_actualVectors); ei_assert(k >= 0 && k < m_actualVectors);
return ei_hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k); return ei_hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k);
@ -154,13 +158,13 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
/** \internal */ /** \internal */
template<typename DestType> void evalTo(DestType& dst) const template<typename DestType> void evalTo(DestType& dst) const
{ {
int vecs = m_actualVectors; Index vecs = m_actualVectors;
dst.setIdentity(rows(), rows()); dst.setIdentity(rows(), rows());
Matrix<Scalar, DestType::RowsAtCompileTime, 1, Matrix<Scalar, DestType::RowsAtCompileTime, 1,
AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> temp(rows()); AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> temp(rows());
for(int k = vecs-1; k >= 0; --k) for(Index k = vecs-1; k >= 0; --k)
{ {
int cornerSize = rows() - k - m_shift; Index cornerSize = rows() - k - m_shift;
if(m_trans) if(m_trans)
dst.bottomRightCorner(cornerSize, cornerSize) dst.bottomRightCorner(cornerSize, cornerSize)
.applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0)); .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
@ -174,9 +178,9 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const
{ {
Matrix<Scalar,1,Dest::RowsAtCompileTime> temp(dst.rows()); Matrix<Scalar,1,Dest::RowsAtCompileTime> temp(dst.rows());
for(int k = 0; k < m_actualVectors; ++k) for(Index k = 0; k < m_actualVectors; ++k)
{ {
int actual_k = m_trans ? m_actualVectors-k-1 : k; Index actual_k = m_trans ? m_actualVectors-k-1 : k;
dst.rightCols(rows()-m_shift-actual_k) dst.rightCols(rows()-m_shift-actual_k)
.applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
} }
@ -186,9 +190,9 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
{ {
Matrix<Scalar,1,Dest::ColsAtCompileTime> temp(dst.cols()); Matrix<Scalar,1,Dest::ColsAtCompileTime> temp(dst.cols());
for(int k = 0; k < m_actualVectors; ++k) for(Index k = 0; k < m_actualVectors; ++k)
{ {
int actual_k = m_trans ? k : m_actualVectors-k-1; Index actual_k = m_trans ? k : m_actualVectors-k-1;
dst.bottomRows(rows()-m_shift-actual_k) dst.bottomRows(rows()-m_shift-actual_k)
.applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0)); .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
} }
@ -218,8 +222,8 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
typename VectorsType::Nested m_vectors; typename VectorsType::Nested m_vectors;
typename CoeffsType::Nested m_coeffs; typename CoeffsType::Nested m_coeffs;
bool m_trans; bool m_trans;
int m_actualVectors; Index m_actualVectors;
int m_shift; Index m_shift;
}; };
template<typename VectorsType, typename CoeffsType> template<typename VectorsType, typename CoeffsType>
@ -229,7 +233,9 @@ HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsTyp
} }
template<typename VectorsType, typename CoeffsType> template<typename VectorsType, typename CoeffsType>
HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift) HouseholderSequence<VectorsType,CoeffsType> householderSequence
(const VectorsType& v, const CoeffsType& h,
bool trans, typename VectorsType::Index actualVectors, typename VectorsType::Index shift)
{ {
return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h, trans, actualVectors, shift); return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h, trans, actualVectors, shift);
} }
@ -241,7 +247,9 @@ HouseholderSequence<VectorsType,CoeffsType> rightHouseholderSequence(const Vecto
} }
template<typename VectorsType, typename CoeffsType> template<typename VectorsType, typename CoeffsType>
HouseholderSequence<VectorsType,CoeffsType> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h, bool trans, int actualVectors, int shift) HouseholderSequence<VectorsType,CoeffsType> rightHouseholderSequence
(const VectorsType& v, const CoeffsType& h, bool trans,
typename VectorsType::Index actualVectors, typename VectorsType::Index shift)
{ {
return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h, trans, actualVectors, shift); return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h, trans, actualVectors, shift);
} }
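
(Sketch of accumulating a HouseholderSequence densely; the two-argument householderSequence() and evalTo() appear above, while V, h and the function name are placeholders for data coming from some factorization.)

#include <Eigen/Dense>

Eigen::MatrixXd accumulate_q(const Eigen::MatrixXd& V, const Eigen::VectorXd& h)
{
  Eigen::HouseholderSequence<Eigen::MatrixXd, Eigen::VectorXd> seq = Eigen::householderSequence(V, h);
  Eigen::MatrixXd Q;
  seq.evalTo(Q); // evalTo() as defined above; its loop counters are now Index
  return Q;
}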

View File

@ -74,7 +74,7 @@ template<typename Scalar> class PlanarRotation
PlanarRotation adjoint() const { return PlanarRotation(ei_conj(m_c), -m_s); } PlanarRotation adjoint() const { return PlanarRotation(ei_conj(m_c), -m_s); }
template<typename Derived> template<typename Derived>
bool makeJacobi(const MatrixBase<Derived>&, int p, int q); bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
bool makeJacobi(RealScalar x, Scalar y, RealScalar z); bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0); void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
@ -89,7 +89,7 @@ template<typename Scalar> class PlanarRotation
/** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix /** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix
* \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$ * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$
* *
* \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, int, int), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/ */
template<typename Scalar> template<typename Scalar>
bool PlanarRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z) bool PlanarRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
@ -133,7 +133,7 @@ bool PlanarRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
*/ */
template<typename Scalar> template<typename Scalar>
template<typename Derived> template<typename Derived>
inline bool PlanarRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, int p, int q) inline bool PlanarRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
{ {
return makeJacobi(ei_real(m.coeff(p,p)), m.coeff(p,q), ei_real(m.coeff(q,q))); return makeJacobi(ei_real(m.coeff(p,p)), m.coeff(p,q), ei_real(m.coeff(q,q)));
} }
@ -277,7 +277,7 @@ void ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotati
*/ */
template<typename Derived> template<typename Derived>
template<typename OtherScalar> template<typename OtherScalar>
inline void MatrixBase<Derived>::applyOnTheLeft(int p, int q, const PlanarRotation<OtherScalar>& j) inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const PlanarRotation<OtherScalar>& j)
{ {
RowXpr x(this->row(p)); RowXpr x(this->row(p));
RowXpr y(this->row(q)); RowXpr y(this->row(q));
@ -292,7 +292,7 @@ inline void MatrixBase<Derived>::applyOnTheLeft(int p, int q, const PlanarRotati
*/ */
template<typename Derived> template<typename Derived>
template<typename OtherScalar> template<typename OtherScalar>
inline void MatrixBase<Derived>::applyOnTheRight(int p, int q, const PlanarRotation<OtherScalar>& j) inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const PlanarRotation<OtherScalar>& j)
{ {
ColXpr x(this->col(p)); ColXpr x(this->col(p));
ColXpr y(this->col(q)); ColXpr y(this->col(q));
@ -303,11 +303,12 @@ inline void MatrixBase<Derived>::applyOnTheRight(int p, int q, const PlanarRotat
template<typename VectorX, typename VectorY, typename OtherScalar> template<typename VectorX, typename VectorY, typename OtherScalar>
void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotation<OtherScalar>& j) void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const PlanarRotation<OtherScalar>& j)
{ {
typedef typename VectorX::Index Index;
typedef typename VectorX::Scalar Scalar; typedef typename VectorX::Scalar Scalar;
ei_assert(_x.size() == _y.size()); ei_assert(_x.size() == _y.size());
int size = _x.size(); Index size = _x.size();
int incrx = size ==1 ? 1 : &_x.coeffRef(1) - &_x.coeffRef(0); Index incrx = size ==1 ? 1 : &_x.coeffRef(1) - &_x.coeffRef(0);
int incry = size ==1 ? 1 : &_y.coeffRef(1) - &_y.coeffRef(0); Index incry = size ==1 ? 1 : &_y.coeffRef(1) - &_y.coeffRef(0);
Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0); Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0); Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
@ -318,14 +319,14 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY&
typedef typename ei_packet_traits<Scalar>::type Packet; typedef typename ei_packet_traits<Scalar>::type Packet;
enum { PacketSize = ei_packet_traits<Scalar>::size, Peeling = 2 }; enum { PacketSize = ei_packet_traits<Scalar>::size, Peeling = 2 };
int alignedStart = ei_first_aligned(y, size); Index alignedStart = ei_first_aligned(y, size);
int alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
const Packet pc = ei_pset1(Scalar(j.c())); const Packet pc = ei_pset1(Scalar(j.c()));
const Packet ps = ei_pset1(Scalar(j.s())); const Packet ps = ei_pset1(Scalar(j.s()));
ei_conj_helper<NumTraits<Scalar>::IsComplex,false> cj; ei_conj_helper<NumTraits<Scalar>::IsComplex,false> cj;
for(int i=0; i<alignedStart; ++i) for(Index i=0; i<alignedStart; ++i)
{ {
Scalar xi = x[i]; Scalar xi = x[i];
Scalar yi = y[i]; Scalar yi = y[i];
@ -338,7 +339,7 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY&
if(ei_first_aligned(x, size)==alignedStart) if(ei_first_aligned(x, size)==alignedStart)
{ {
for(int i=alignedStart; i<alignedEnd; i+=PacketSize) for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
{ {
Packet xi = ei_pload(px); Packet xi = ei_pload(px);
Packet yi = ei_pload(py); Packet yi = ei_pload(py);
@ -350,8 +351,8 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY&
} }
else else
{ {
int peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize); Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
for(int i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize) for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
{ {
Packet xi = ei_ploadu(px); Packet xi = ei_ploadu(px);
Packet xi1 = ei_ploadu(px+PacketSize); Packet xi1 = ei_ploadu(px+PacketSize);
@ -373,7 +374,7 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY&
} }
} }
for(int i=alignedEnd; i<size; ++i) for(Index i=alignedEnd; i<size; ++i)
{ {
Scalar xi = x[i]; Scalar xi = x[i];
Scalar yi = y[i]; Scalar yi = y[i];
@ -383,7 +384,7 @@ void /*EIGEN_DONT_INLINE*/ ei_apply_rotation_in_the_plane(VectorX& _x, VectorY&
} }
else else
{ {
for(int i=0; i<size; ++i) for(Index i=0; i<size; ++i)
{ {
Scalar xi = *x; Scalar xi = *x;
Scalar yi = *y; Scalar yi = *y;
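
(Sketch of one two-sided Jacobi rotation using the Index-based overloads above: makeJacobi(m,p,q), applyOnTheLeft() and applyOnTheRight() all appear in these hunks; the helper name is an assumption.)

#include <Eigen/Dense>

void jacobi_sketch(Eigen::MatrixXd& A) // A assumed symmetric, at least 2x2
{
  Eigen::PlanarRotation<double> J;
  if (J.makeJacobi(A, 0, 1))             // rotation chosen to annihilate A(0,1)
  {
    A.applyOnTheLeft(0, 1, J.adjoint()); // A <- J^* A
    A.applyOnTheRight(0, 1, J);          // A <- A J, zeroing A(0,1)
  }
}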

View File

@ -68,8 +68,10 @@ template<typename _MatrixType> class FullPivLU
}; };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename ei_plain_row_type<MatrixType, int>::type IntRowVectorType; typedef typename ei_traits<MatrixType>::StorageKind StorageKind;
typedef typename ei_plain_col_type<MatrixType, int>::type IntColVectorType; typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_plain_row_type<MatrixType, Index>::type IntRowVectorType;
typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType; typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType; typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;
@ -87,7 +89,7 @@ template<typename _MatrixType> class FullPivLU
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa FullPivLU() * \sa FullPivLU()
*/ */
FullPivLU(int rows, int cols); FullPivLU(Index rows, Index cols);
/** Constructor. /** Constructor.
* *
@ -124,7 +126,7 @@ template<typename _MatrixType> class FullPivLU
* *
* \sa rank() * \sa rank()
*/ */
inline int nonzeroPivots() const inline Index nonzeroPivots() const
{ {
ei_assert(m_isInitialized && "LU is not initialized."); ei_assert(m_isInitialized && "LU is not initialized.");
return m_nonzero_pivots; return m_nonzero_pivots;
@ -301,12 +303,12 @@ template<typename _MatrixType> class FullPivLU
* For that, it uses the threshold value that you can control by calling * For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&). * setThreshold(const RealScalar&).
*/ */
inline int rank() const inline Index rank() const
{ {
ei_assert(m_isInitialized && "LU is not initialized."); ei_assert(m_isInitialized && "LU is not initialized.");
RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold(); RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold();
int result = 0; Index result = 0;
for(int i = 0; i < m_nonzero_pivots; ++i) for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (ei_abs(m_lu.coeff(i,i)) > premultiplied_threshold); result += (ei_abs(m_lu.coeff(i,i)) > premultiplied_threshold);
return result; return result;
} }
@ -317,7 +319,7 @@ template<typename _MatrixType> class FullPivLU
* For that, it uses the threshold value that you can control by calling * For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&). * setThreshold(const RealScalar&).
*/ */
inline int dimensionOfKernel() const inline Index dimensionOfKernel() const
{ {
ei_assert(m_isInitialized && "LU is not initialized."); ei_assert(m_isInitialized && "LU is not initialized.");
return cols() - rank(); return cols() - rank();
@ -378,8 +380,8 @@ template<typename _MatrixType> class FullPivLU
MatrixType reconstructedMatrix() const; MatrixType reconstructedMatrix() const;
inline int rows() const { return m_lu.rows(); } inline Index rows() const { return m_lu.rows(); }
inline int cols() const { return m_lu.cols(); } inline Index cols() const { return m_lu.cols(); }
protected: protected:
MatrixType m_lu; MatrixType m_lu;
@ -387,7 +389,7 @@ template<typename _MatrixType> class FullPivLU
PermutationQType m_q; PermutationQType m_q;
IntColVectorType m_rowsTranspositions; IntColVectorType m_rowsTranspositions;
IntRowVectorType m_colsTranspositions; IntRowVectorType m_colsTranspositions;
int m_det_pq, m_nonzero_pivots; Index m_det_pq, m_nonzero_pivots;
RealScalar m_maxpivot, m_prescribedThreshold; RealScalar m_maxpivot, m_prescribedThreshold;
bool m_isInitialized, m_usePrescribedThreshold; bool m_isInitialized, m_usePrescribedThreshold;
}; };
@ -399,7 +401,7 @@ FullPivLU<MatrixType>::FullPivLU()
} }
template<typename MatrixType> template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU(int rows, int cols) FullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)
: m_lu(rows, cols), : m_lu(rows, cols),
m_p(rows), m_p(rows),
m_q(cols), m_q(cols),
@ -429,26 +431,26 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
m_isInitialized = true; m_isInitialized = true;
m_lu = matrix; m_lu = matrix;
const int size = matrix.diagonalSize(); const Index size = matrix.diagonalSize();
const int rows = matrix.rows(); const Index rows = matrix.rows();
const int cols = matrix.cols(); const Index cols = matrix.cols();
// will store the transpositions, before we accumulate them at the end. // will store the transpositions, before we accumulate them at the end.
// can't accumulate on-the-fly because that will be done in reverse order for the rows. // can't accumulate on-the-fly because that will be done in reverse order for the rows.
m_rowsTranspositions.resize(matrix.rows()); m_rowsTranspositions.resize(matrix.rows());
m_colsTranspositions.resize(matrix.cols()); m_colsTranspositions.resize(matrix.cols());
int number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0); m_maxpivot = RealScalar(0);
RealScalar cutoff(0); RealScalar cutoff(0);
for(int k = 0; k < size; ++k) for(Index k = 0; k < size; ++k)
{ {
// First, we need to find the pivot. // First, we need to find the pivot.
// biggest coefficient in the remaining bottom-right corner (starting at row k, col k) // biggest coefficient in the remaining bottom-right corner (starting at row k, col k)
int row_of_biggest_in_corner, col_of_biggest_in_corner; Index row_of_biggest_in_corner, col_of_biggest_in_corner;
RealScalar biggest_in_corner; RealScalar biggest_in_corner;
biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k) biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)
.cwiseAbs() .cwiseAbs()
@ -468,7 +470,7 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
// before exiting, make sure to initialize the still uninitialized transpositions // before exiting, make sure to initialize the still uninitialized transpositions
// in a sane state without destroying what we already have. // in a sane state without destroying what we already have.
m_nonzero_pivots = k; m_nonzero_pivots = k;
for(int i = k; i < size; ++i) for(Index i = k; i < size; ++i)
{ {
m_rowsTranspositions.coeffRef(i) = i; m_rowsTranspositions.coeffRef(i) = i;
m_colsTranspositions.coeffRef(i) = i; m_colsTranspositions.coeffRef(i) = i;
@ -505,11 +507,11 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
// permutations P and Q // permutations P and Q
m_p.setIdentity(rows); m_p.setIdentity(rows);
for(int k = size-1; k >= 0; --k) for(Index k = size-1; k >= 0; --k)
m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k)); m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
m_q.setIdentity(cols); m_q.setIdentity(cols);
for(int k = 0; k < size; ++k) for(Index k = 0; k < size; ++k)
m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k)); m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1; m_det_pq = (number_of_transpositions%2) ? -1 : 1;
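The m_det_pq bookkeeping relies on each nontrivial transposition flipping the sign of the permutation determinant, so only the parity of the count matters. A small standalone illustration (hypothetical transposition data, plain dense matrices):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  const int n = 4;
  int row_transpositions[n] = {2, 3, 3, 3};    // hypothetical: step k swaps row k with row_transpositions[k]
  Eigen::MatrixXd P = Eigen::MatrixXd::Identity(n, n);
  int number_of_transpositions = 0;
  for (int k = 0; k < n; ++k)
    if (row_transpositions[k] != k)
    {
      P.row(k).swap(P.row(row_transpositions[k]));
      ++number_of_transpositions;
    }
  int det_p = (number_of_transpositions % 2) ? -1 : 1;
  std::cout << det_p << " == " << P.determinant() << "\n";   // parity of the swaps gives det(P) = -1 here
}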
@ -531,7 +533,7 @@ template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{ {
ei_assert(m_isInitialized && "LU is not initialized."); ei_assert(m_isInitialized && "LU is not initialized.");
const int smalldim = std::min(m_lu.rows(), m_lu.cols()); const Index smalldim = std::min(m_lu.rows(), m_lu.cols());
// LU // LU
MatrixType res(m_lu.rows(),m_lu.cols()); MatrixType res(m_lu.rows(),m_lu.cols());
// FIXME the .toDenseMatrix() should not be needed... // FIXME the .toDenseMatrix() should not be needed...
@ -564,7 +566,7 @@ struct ei_kernel_retval<FullPivLU<_MatrixType> >
template<typename Dest> void evalTo(Dest& dst) const template<typename Dest> void evalTo(Dest& dst) const
{ {
const int cols = dec().matrixLU().cols(), dimker = cols - rank(); const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
if(dimker == 0) if(dimker == 0)
{ {
// The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
@ -590,10 +592,10 @@ struct ei_kernel_retval<FullPivLU<_MatrixType> >
* independent vectors in Ker U. * independent vectors in Ker U.
*/ */
Matrix<int, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank()); Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
int p = 0; Index p = 0;
for(int i = 0; i < dec().nonzeroPivots(); ++i) for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i; pivots.coeffRef(p++) = i;
ei_internal_assert(p == rank()); ei_internal_assert(p == rank());
@ -605,14 +607,14 @@ struct ei_kernel_retval<FullPivLU<_MatrixType> >
Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options, Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime> MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
m(dec().matrixLU().block(0, 0, rank(), cols)); m(dec().matrixLU().block(0, 0, rank(), cols));
for(int i = 0; i < rank(); ++i) for(Index i = 0; i < rank(); ++i)
{ {
if(i) m.row(i).head(i).setZero(); if(i) m.row(i).head(i).setZero();
m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i); m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
} }
m.block(0, 0, rank(), rank()); m.block(0, 0, rank(), rank());
m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero(); m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
for(int i = 0; i < rank(); ++i) for(Index i = 0; i < rank(); ++i)
m.col(i).swap(m.col(pivots.coeff(i))); m.col(i).swap(m.col(pivots.coeff(i)));
// ok, we have our trapezoid matrix, we can apply the triangular solver. // ok, we have our trapezoid matrix, we can apply the triangular solver.
@ -624,13 +626,13 @@ struct ei_kernel_retval<FullPivLU<_MatrixType> >
); );
// now we must undo the column permutation that we had applied! // now we must undo the column permutation that we had applied!
for(int i = rank()-1; i >= 0; --i) for(Index i = rank()-1; i >= 0; --i)
m.col(i).swap(m.col(pivots.coeff(i))); m.col(i).swap(m.col(pivots.coeff(i)));
// see the negative sign in the next line, that's what we were talking about above. // see the negative sign in the next line, that's what we were talking about above.
for(int i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker); for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
for(int i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero(); for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
for(int k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1); for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
} }
}; };
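The construction above is what backs FullPivLU::kernel(); a minimal check that the returned columns really span the null space (hypothetical rank-deficient matrix):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A(3,4);
  A << 1, 2, 3, 4,
       2, 4, 6, 8,                                    // row 1 = 2 * row 0, so rank(A) = 2
       1, 0, 1, 0;
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  Eigen::MatrixXd K = lu.kernel();                    // cols() - rank() = 2 basis vectors
  std::cout << "dim ker = " << K.cols() << "\n";
  std::cout << "|A*K| = " << (A * K).norm() << "\n";  // ~0 up to the pivot threshold
}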
@ -658,15 +660,15 @@ struct ei_image_retval<FullPivLU<_MatrixType> >
return; return;
} }
Matrix<int, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank()); Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold(); RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
int p = 0; Index p = 0;
for(int i = 0; i < dec().nonzeroPivots(); ++i) for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold) if(ei_abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i; pivots.coeffRef(p++) = i;
ei_internal_assert(p == rank()); ei_internal_assert(p == rank());
for(int i = 0; i < rank(); ++i) for(Index i = 0; i < rank(); ++i)
dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i))); dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));
} }
}; };
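Unlike the kernel, the image needs the original matrix again (the decomposition alone does not keep it), and the basis returned consists of actual columns of that matrix, selected by the pivoting. A short sketch (hypothetical data):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A(3,3);
  A << 2, 4, 6,
       1, 2, 3,                                   // row 0 = 2 * row 1, so rank(A) = 2
       0, 1, 1;
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  Eigen::MatrixXd im = lu.image(A);               // rank() = 2 columns picked from A itself
  std::cout << im.cols() << " basis column(s):\n" << im << "\n";
}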
@ -689,10 +691,10 @@ struct ei_solve_retval<FullPivLU<_MatrixType>, Rhs>
* Step 4: result = Q * c; * Step 4: result = Q * c;
*/ */
const int rows = dec().rows(), cols = dec().cols(), const Index rows = dec().rows(), cols = dec().cols(),
nonzero_pivots = dec().nonzeroPivots(); nonzero_pivots = dec().nonzeroPivots();
ei_assert(rhs().rows() == rows); ei_assert(rhs().rows() == rows);
const int smalldim = std::min(rows, cols); const Index smalldim = std::min(rows, cols);
if(nonzero_pivots == 0) if(nonzero_pivots == 0)
{ {
@ -724,9 +726,9 @@ struct ei_solve_retval<FullPivLU<_MatrixType>, Rhs>
.solveInPlace(c.topRows(nonzero_pivots)); .solveInPlace(c.topRows(nonzero_pivots));
// Step 4 // Step 4
for(int i = 0; i < nonzero_pivots; ++i) for(Index i = 0; i < nonzero_pivots; ++i)
dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i); dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i);
for(int i = nonzero_pivots; i < dec().matrixLU().cols(); ++i) for(Index i = nonzero_pivots; i < dec().matrixLU().cols(); ++i)
dst.row(dec().permutationQ().indices().coeff(i)).setZero(); dst.row(dec().permutationQ().indices().coeff(i)).setZero();
} }
}; };
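From the caller's side the four steps above are hidden behind solve(); a quick sketch on an invertible system (random data):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  Eigen::VectorXd x = lu.solve(b);                 // applies P, the L and U triangular solves, then Q
  std::cout << "residual = " << (A*x - b).norm() << "\n";
}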


@ -281,7 +281,8 @@ struct ei_traits<ei_inverse_impl<MatrixType> >
template<typename MatrixType> template<typename MatrixType>
struct ei_inverse_impl : public ReturnByValue<ei_inverse_impl<MatrixType> > struct ei_inverse_impl : public ReturnByValue<ei_inverse_impl<MatrixType> >
{ {
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Nested MatrixTypeNested; typedef typename ei_eval<MatrixType>::type MatrixTypeNested;
typedef typename ei_cleantype<MatrixTypeNested>::type MatrixTypeNestedCleaned; typedef typename ei_cleantype<MatrixTypeNested>::type MatrixTypeNestedCleaned;
const MatrixTypeNested m_matrix; const MatrixTypeNested m_matrix;
@ -290,8 +291,8 @@ struct ei_inverse_impl : public ReturnByValue<ei_inverse_impl<MatrixType> >
: m_matrix(matrix) : m_matrix(matrix)
{} {}
inline int rows() const { return m_matrix.rows(); } inline Index rows() const { return m_matrix.rows(); }
inline int cols() const { return m_matrix.cols(); } inline Index cols() const { return m_matrix.cols(); }
template<typename Dest> inline void evalTo(Dest& dst) const template<typename Dest> inline void evalTo(Dest& dst) const
{ {


@ -71,7 +71,9 @@ template<typename _MatrixType> class PartialPivLU
}; };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename ei_traits<MatrixType>::StorageKind StorageKind;
typedef typename ei_index<StorageKind>::type Index;
typedef typename ei_plain_col_type<MatrixType, int>::type PermutationVectorType; typedef typename ei_plain_col_type<MatrixType, Index>::type PermutationVectorType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType; typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
@ -89,7 +91,7 @@ template<typename _MatrixType> class PartialPivLU
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa PartialPivLU() * \sa PartialPivLU()
*/ */
PartialPivLU(int size); PartialPivLU(Index size);
/** Constructor. /** Constructor.
* *
@ -178,14 +180,14 @@ template<typename _MatrixType> class PartialPivLU
MatrixType reconstructedMatrix() const; MatrixType reconstructedMatrix() const;
inline int rows() const { return m_lu.rows(); } inline Index rows() const { return m_lu.rows(); }
inline int cols() const { return m_lu.cols(); } inline Index cols() const { return m_lu.cols(); }
protected: protected:
MatrixType m_lu; MatrixType m_lu;
PermutationType m_p; PermutationType m_p;
PermutationVectorType m_rowsTranspositions; PermutationVectorType m_rowsTranspositions;
int m_det_p; Index m_det_p;
bool m_isInitialized; bool m_isInitialized;
}; };
@ -200,7 +202,7 @@ PartialPivLU<MatrixType>::PartialPivLU()
} }
template<typename MatrixType> template<typename MatrixType>
PartialPivLU<MatrixType>::PartialPivLU(int size) PartialPivLU<MatrixType>::PartialPivLU(Index size)
: m_lu(size, size), : m_lu(size, size),
m_p(size), m_p(size),
m_rowsTranspositions(size), m_rowsTranspositions(size),
@ -233,6 +235,7 @@ struct ei_partial_lu_impl
typedef Block<MapLU, Dynamic, Dynamic> MatrixType; typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType; typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
/** \internal performs the LU decomposition in-place of the matrix \a lu /** \internal performs the LU decomposition in-place of the matrix \a lu
* using an unblocked algorithm. * using an unblocked algorithm.
@ -246,14 +249,14 @@ struct ei_partial_lu_impl
* undefined coefficients (to avoid generating inf/nan values). Returns true * undefined coefficients (to avoid generating inf/nan values). Returns true
* otherwise. * otherwise.
*/ */
static bool unblocked_lu(MatrixType& lu, int* row_transpositions, int& nb_transpositions) static bool unblocked_lu(MatrixType& lu, Index* row_transpositions, Index& nb_transpositions)
{ {
const int rows = lu.rows(); const Index rows = lu.rows();
const int size = std::min(lu.rows(),lu.cols()); const Index size = std::min(lu.rows(),lu.cols());
nb_transpositions = 0; nb_transpositions = 0;
for(int k = 0; k < size; ++k) for(Index k = 0; k < size; ++k)
{ {
int row_of_biggest_in_col; Index row_of_biggest_in_col;
RealScalar biggest_in_corner RealScalar biggest_in_corner
= lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col); = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col);
row_of_biggest_in_col += k; row_of_biggest_in_col += k;
@ -265,7 +268,7 @@ struct ei_partial_lu_impl
// the blocked_lu code can't guarantee the same. // the blocked_lu code can't guarantee the same.
// before exiting, make sure to initialize the still uninitialized row_transpositions // before exiting, make sure to initialize the still uninitialized row_transpositions
// in a sane state without destroying what we already have. // in a sane state without destroying what we already have.
for(int i = k; i < size; i++) for(Index i = k; i < size; i++)
row_transpositions[i] = i; row_transpositions[i] = i;
return false; return false;
} }
@ -280,8 +283,8 @@ struct ei_partial_lu_impl
if(k<rows-1) if(k<rows-1)
{ {
int rrows = rows-k-1; Index rrows = rows-k-1;
int rsize = size-k-1; Index rsize = size-k-1;
lu.col(k).tail(rrows) /= lu.coeff(k,k); lu.col(k).tail(rrows) /= lu.coeff(k,k);
lu.bottomRightCorner(rrows,rsize).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rsize); lu.bottomRightCorner(rrows,rsize).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rsize);
} }
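For reference, the same partial-pivoting recurrence written as a free function on a plain dynamic matrix. This is a simplified sketch of the idea only (no early exit on a structurally zero pivot column, and the trailing update spans all remaining columns), not the code of this patch:

#include <Eigen/Dense>
#include <algorithm>
#include <vector>
// In-place LU with partial (row) pivoting: on return, lu holds U on and above the
// diagonal and the multipliers of L below it; rowTranspositions[k] is the row
// swapped with row k at step k.
inline void unblockedLuSketch(Eigen::MatrixXd& lu, std::vector<int>& rowTranspositions)
{
  const int rows = lu.rows();
  const int size = std::min(lu.rows(), lu.cols());
  rowTranspositions.resize(size);
  for (int k = 0; k < size; ++k)
  {
    int rowOfBiggest;
    lu.col(k).tail(rows - k).cwiseAbs().maxCoeff(&rowOfBiggest);   // pivot search in column k
    rowOfBiggest += k;
    rowTranspositions[k] = rowOfBiggest;
    if (rowOfBiggest != k) lu.row(k).swap(lu.row(rowOfBiggest));
    if (k < rows - 1)
    {
      const int rrows = rows - k - 1;
      const int rcols = lu.cols() - k - 1;
      lu.col(k).tail(rrows) /= lu(k, k);                           // column of L multipliers
      lu.bottomRightCorner(rrows, rcols).noalias()
          -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);        // rank-1 update of the trailing block
    }
  }
}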
@ -306,12 +309,12 @@ struct ei_partial_lu_impl
* 1 - reduce the number of instantiations to the strict minimum * 1 - reduce the number of instantiations to the strict minimum
* 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > > * 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
*/ */
static bool blocked_lu(int rows, int cols, Scalar* lu_data, int luStride, int* row_transpositions, int& nb_transpositions, int maxBlockSize=256) static bool blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, Index* row_transpositions, Index& nb_transpositions, Index maxBlockSize=256)
{ {
MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols); MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
MatrixType lu(lu1,0,0,rows,cols); MatrixType lu(lu1,0,0,rows,cols);
const int size = std::min(rows,cols); const Index size = std::min(rows,cols);
// if the matrix is too small, no blocking: // if the matrix is too small, no blocking:
if(size<=16) if(size<=16)
@ -321,19 +324,19 @@ struct ei_partial_lu_impl
// automatically adjust the number of subdivisions to the size // automatically adjust the number of subdivisions to the size
// of the matrix so that there are enough sub-blocks: // of the matrix so that there are enough sub-blocks:
int blockSize; Index blockSize;
{ {
blockSize = size/8; blockSize = size/8;
blockSize = (blockSize/16)*16; blockSize = (blockSize/16)*16;
blockSize = std::min(std::max(blockSize,8), maxBlockSize); blockSize = std::min(std::max(blockSize,Index(8)), maxBlockSize);
} }
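The block-size heuristic above, in isolation: take roughly one eighth of the problem size, round down to a multiple of 16, and clamp the result to [8, maxBlockSize]. For instance:

#include <algorithm>
#include <initializer_list>
#include <iostream>
int main()
{
  const int maxBlockSize = 256;
  for (int size : {20, 100, 400, 5000})
  {
    int blockSize = ((size / 8) / 16) * 16;                        // ~size/8, rounded down to a multiple of 16
    blockSize = std::min(std::max(blockSize, 8), maxBlockSize);    // clamp to [8, maxBlockSize]
    std::cout << size << " -> " << blockSize << "\n";              // 20->8, 100->8, 400->48, 5000->256
  }
}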
nb_transpositions = 0; nb_transpositions = 0;
for(int k = 0; k < size; k+=blockSize) for(Index k = 0; k < size; k+=blockSize)
{ {
int bs = std::min(size-k,blockSize); // actual size of the block Index bs = std::min(size-k,blockSize); // actual size of the block
int trows = rows - k - bs; // trailing rows Index trows = rows - k - bs; // trailing rows
int tsize = size - k - bs; // trailing size Index tsize = size - k - bs; // trailing size
// partition the matrix: // partition the matrix:
// A00 | A01 | A02 // A00 | A01 | A02
@ -346,7 +349,7 @@ struct ei_partial_lu_impl
BlockType A21(lu,k+bs,k,trows,bs); BlockType A21(lu,k+bs,k,trows,bs);
BlockType A22(lu,k+bs,k+bs,trows,tsize); BlockType A22(lu,k+bs,k+bs,trows,tsize);
int nb_transpositions_in_panel; Index nb_transpositions_in_panel;
// recursively calls the blocked LU algorithm with a very small // recursively calls the blocked LU algorithm with a very small
// blocking size: // blocking size:
if(!blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride, if(!blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,
@ -355,23 +358,23 @@ struct ei_partial_lu_impl
// end quickly with undefined coefficients, just avoid generating inf/nan values. // end quickly with undefined coefficients, just avoid generating inf/nan values.
// before exiting, make sure to initialize the still uninitialized row_transpositions // before exiting, make sure to initialize the still uninitialized row_transpositions
// in a sane state without destroying what we already have. // in a sane state without destroying what we already have.
for(int i=k; i<size; ++i) for(Index i=k; i<size; ++i)
row_transpositions[i] = i; row_transpositions[i] = i;
return false; return false;
} }
nb_transpositions += nb_transpositions_in_panel; nb_transpositions += nb_transpositions_in_panel;
// update permutations and apply them to A10 // update permutations and apply them to A10
for(int i=k; i<k+bs; ++i) for(Index i=k; i<k+bs; ++i)
{ {
int piv = (row_transpositions[i] += k); Index piv = (row_transpositions[i] += k);
A_0.row(i).swap(A_0.row(piv)); A_0.row(i).swap(A_0.row(piv));
} }
if(trows) if(trows)
{ {
// apply permutations to A_2 // apply permutations to A_2
for(int i=k;i<k+bs; ++i) for(Index i=k;i<k+bs; ++i)
A_2.row(i).swap(A_2.row(row_transpositions[i])); A_2.row(i).swap(A_2.row(row_transpositions[i]));
// A12 = A11^-1 A12 // A12 = A11^-1 A12
@ -387,7 +390,7 @@ struct ei_partial_lu_impl
/** \internal performs the LU decomposition with partial pivoting in-place. /** \internal performs the LU decomposition with partial pivoting in-place.
*/ */
template<typename MatrixType, typename IntVector> template<typename MatrixType, typename IntVector>
void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, int& nb_transpositions) void ei_partial_lu_inplace(MatrixType& lu, IntVector& row_transpositions, typename MatrixType::Index& nb_transpositions)
{ {
ei_assert(lu.cols() == row_transpositions.size()); ei_assert(lu.cols() == row_transpositions.size());
ei_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1); ei_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
@ -403,16 +406,16 @@ PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& ma
m_lu = matrix; m_lu = matrix;
ei_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices"); ei_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
const int size = matrix.rows(); const Index size = matrix.rows();
m_rowsTranspositions.resize(size); m_rowsTranspositions.resize(size);
int nb_transpositions; Index nb_transpositions;
ei_partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions); ei_partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
m_det_p = (nb_transpositions%2) ? -1 : 1; m_det_p = (nb_transpositions%2) ? -1 : 1;
m_p.setIdentity(size); m_p.setIdentity(size);
for(int k = size-1; k >= 0; --k) for(Index k = size-1; k >= 0; --k)
m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k)); m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
m_isInitialized = true; m_isInitialized = true;
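A usage sketch of this partial-pivoting path (square, invertible input as required by the assertion above; random data):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);
  Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);      // factor once: P*A = L*U
  Eigen::VectorXd x = lu.solve(b);                 // then solve L*U*x = P*b
  std::cout << "residual = " << (A*x - b).norm() << "\n";
  std::cout << "det(A)   = " << lu.determinant() << "\n";   // uses the sign m_det_p computed above
}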


@ -56,10 +56,11 @@ template<typename _MatrixType> class ColPivHouseholderQR
}; };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType; typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType; typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType; typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
typedef typename ei_plain_row_type<MatrixType, int>::type IntRowVectorType; typedef typename ei_plain_row_type<MatrixType, Index>::type IntRowVectorType;
typedef typename ei_plain_row_type<MatrixType>::type RowVectorType; typedef typename ei_plain_row_type<MatrixType>::type RowVectorType;
typedef typename ei_plain_row_type<MatrixType, RealScalar>::type RealRowVectorType; typedef typename ei_plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;
typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType; typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
@ -85,7 +86,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa ColPivHouseholderQR() * \sa ColPivHouseholderQR()
*/ */
ColPivHouseholderQR(int rows, int cols) ColPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols), : m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)), m_hCoeffs(std::min(rows,cols)),
m_colsPermutation(cols), m_colsPermutation(cols),
@ -186,12 +187,12 @@ template<typename _MatrixType> class ColPivHouseholderQR
* For that, it uses the threshold value that you can control by calling * For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&). * setThreshold(const RealScalar&).
*/ */
inline int rank() const inline Index rank() const
{ {
ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold(); RealScalar premultiplied_threshold = ei_abs(m_maxpivot) * threshold();
int result = 0; Index result = 0;
for(int i = 0; i < m_nonzero_pivots; ++i) for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (ei_abs(m_qr.coeff(i,i)) > premultiplied_threshold); result += (ei_abs(m_qr.coeff(i,i)) > premultiplied_threshold);
return result; return result;
} }
@ -202,7 +203,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
* For that, it uses the threshold value that you can control by calling * For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&). * setThreshold(const RealScalar&).
*/ */
inline int dimensionOfKernel() const inline Index dimensionOfKernel() const
{ {
ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); ei_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return cols() - rank(); return cols() - rank();
@ -260,8 +261,8 @@ template<typename _MatrixType> class ColPivHouseholderQR
(*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
} }
inline int rows() const { return m_qr.rows(); } inline Index rows() const { return m_qr.rows(); }
inline int cols() const { return m_qr.cols(); } inline Index cols() const { return m_qr.cols(); }
const HCoeffsType& hCoeffs() const { return m_hCoeffs; } const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
/** Allows to prescribe a threshold to be used by certain methods, such as rank(), /** Allows to prescribe a threshold to be used by certain methods, such as rank(),
@ -320,7 +321,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
* *
* \sa rank() * \sa rank()
*/ */
inline int nonzeroPivots() const inline Index nonzeroPivots() const
{ {
ei_assert(m_isInitialized && "LU is not initialized."); ei_assert(m_isInitialized && "LU is not initialized.");
return m_nonzero_pivots; return m_nonzero_pivots;
@ -340,8 +341,8 @@ template<typename _MatrixType> class ColPivHouseholderQR
RealRowVectorType m_colSqNorms; RealRowVectorType m_colSqNorms;
bool m_isInitialized, m_usePrescribedThreshold; bool m_isInitialized, m_usePrescribedThreshold;
RealScalar m_prescribedThreshold, m_maxpivot; RealScalar m_prescribedThreshold, m_maxpivot;
int m_nonzero_pivots; Index m_nonzero_pivots;
int m_det_pq; Index m_det_pq;
}; };
#ifndef EIGEN_HIDE_HEAVY_CODE #ifndef EIGEN_HIDE_HEAVY_CODE
@ -365,9 +366,9 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
template<typename MatrixType> template<typename MatrixType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix) ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{ {
int rows = matrix.rows(); Index rows = matrix.rows();
int cols = matrix.cols(); Index cols = matrix.cols();
int size = matrix.diagonalSize(); Index size = matrix.diagonalSize();
m_qr = matrix; m_qr = matrix;
m_hCoeffs.resize(size); m_hCoeffs.resize(size);
@ -375,10 +376,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
m_temp.resize(cols); m_temp.resize(cols);
m_colsTranspositions.resize(matrix.cols()); m_colsTranspositions.resize(matrix.cols());
int number_of_transpositions = 0; Index number_of_transpositions = 0;
m_colSqNorms.resize(cols); m_colSqNorms.resize(cols);
for(int k = 0; k < cols; ++k) for(Index k = 0; k < cols; ++k)
m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm(); m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm();
RealScalar threshold_helper = m_colSqNorms.maxCoeff() * ei_abs2(NumTraits<Scalar>::epsilon()) / rows; RealScalar threshold_helper = m_colSqNorms.maxCoeff() * ei_abs2(NumTraits<Scalar>::epsilon()) / rows;
@ -386,10 +387,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0); m_maxpivot = RealScalar(0);
for(int k = 0; k < size; ++k) for(Index k = 0; k < size; ++k)
{ {
// first, we look up in our table m_colSqNorms which column has the biggest squared norm // first, we look up in our table m_colSqNorms which column has the biggest squared norm
int biggest_col_index; Index biggest_col_index;
RealScalar biggest_col_sq_norm = m_colSqNorms.tail(cols-k).maxCoeff(&biggest_col_index); RealScalar biggest_col_sq_norm = m_colSqNorms.tail(cols-k).maxCoeff(&biggest_col_index);
biggest_col_index += k; biggest_col_index += k;
@ -444,7 +445,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
} }
m_colsPermutation.setIdentity(cols); m_colsPermutation.setIdentity(cols);
for(int k = 0; k < m_nonzero_pivots; ++k) for(Index k = 0; k < m_nonzero_pivots; ++k)
m_colsPermutation.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k)); m_colsPermutation.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1; m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@ -461,10 +462,8 @@ struct ei_solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const template<typename Dest> void evalTo(Dest& dst) const
{ {
#ifndef EIGEN_NO_DEBUG
const int rows = dec().rows();
ei_assert(rhs().rows() == rows); ei_assert(rhs().rows() == dec().rows());
#endif
const int cols = dec().cols(), const int cols = dec().cols(),
nonzero_pivots = dec().nonzeroPivots(); nonzero_pivots = dec().nonzeroPivots();
@ -498,8 +497,8 @@ struct ei_solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
.template triangularView<Upper>() .template triangularView<Upper>()
* c.topRows(nonzero_pivots); * c.topRows(nonzero_pivots);
for(int i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i); for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
for(int i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero(); for(Index i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
} }
}; };
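The same solve() pattern seen from the caller, here on a tall, consistent system (random data; rank() uses the pivot threshold discussed above):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6,4);
  Eigen::VectorXd b = A * Eigen::VectorXd::Random(4);        // consistent right-hand side
  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
  std::cout << "rank = " << qr.rank() << "\n";               // 4 for a generic random A
  Eigen::VectorXd x = qr.solve(b);                           // Q^* b, triangular solve, undo the column pivoting
  std::cout << "residual = " << (A*x - b).norm() << "\n";
}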


@ -56,11 +56,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
}; };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType; typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType; typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<int, 1, ColsAtCompileTime, RowMajor, 1, MaxColsAtCompileTime> IntRowVectorType; typedef Matrix<Index, 1, ColsAtCompileTime, RowMajor, 1, MaxColsAtCompileTime> IntRowVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType; typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
typedef typename ei_plain_col_type<MatrixType, int>::type IntColVectorType; typedef typename ei_plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef typename ei_plain_row_type<MatrixType>::type RowVectorType; typedef typename ei_plain_row_type<MatrixType>::type RowVectorType;
typedef typename ei_plain_col_type<MatrixType>::type ColVectorType; typedef typename ei_plain_col_type<MatrixType>::type ColVectorType;
@ -84,7 +85,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa FullPivHouseholderQR() * \sa FullPivHouseholderQR()
*/ */
FullPivHouseholderQR(int rows, int cols) FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols), : m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)), m_hCoeffs(std::min(rows,cols)),
m_rows_transpositions(rows), m_rows_transpositions(rows),
@ -188,7 +189,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
* \note This is computed at the time of the construction of the QR decomposition. This * \note This is computed at the time of the construction of the QR decomposition. This
* method does not perform any further computation. * method does not perform any further computation.
*/ */
inline int rank() const inline Index rank() const
{ {
ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_rank; return m_rank;
@ -199,7 +200,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
* \note Since the rank is computed at the time of the construction of the QR decomposition, this * \note Since the rank is computed at the time of the construction of the QR decomposition, this
* method almost does not perform any further computation. * method almost does not perform any further computation.
*/ */
inline int dimensionOfKernel() const inline Index dimensionOfKernel() const
{ {
ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); ei_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_qr.cols() - m_rank; return m_qr.cols() - m_rank;
@ -253,8 +254,8 @@ template<typename _MatrixType> class FullPivHouseholderQR
(*this, MatrixType::Identity(m_qr.rows(), m_qr.cols())); (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
} }
inline int rows() const { return m_qr.rows(); } inline Index rows() const { return m_qr.rows(); }
inline int cols() const { return m_qr.cols(); } inline Index cols() const { return m_qr.cols(); }
const HCoeffsType& hCoeffs() const { return m_hCoeffs; } const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected: protected:
@ -266,8 +267,8 @@ template<typename _MatrixType> class FullPivHouseholderQR
RowVectorType m_temp; RowVectorType m_temp;
bool m_isInitialized; bool m_isInitialized;
RealScalar m_precision; RealScalar m_precision;
int m_rank; Index m_rank;
int m_det_pq; Index m_det_pq;
}; };
#ifndef EIGEN_HIDE_HEAVY_CODE #ifndef EIGEN_HIDE_HEAVY_CODE
@ -291,9 +292,9 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
template<typename MatrixType> template<typename MatrixType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix) FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{ {
int rows = matrix.rows(); Index rows = matrix.rows();
int cols = matrix.cols(); Index cols = matrix.cols();
int size = std::min(rows,cols); Index size = std::min(rows,cols);
m_rank = size; m_rank = size;
m_qr = matrix; m_qr = matrix;
@ -305,13 +306,13 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
m_rows_transpositions.resize(matrix.rows()); m_rows_transpositions.resize(matrix.rows());
m_cols_transpositions.resize(matrix.cols()); m_cols_transpositions.resize(matrix.cols());
int number_of_transpositions = 0; Index number_of_transpositions = 0;
RealScalar biggest(0); RealScalar biggest(0);
for (int k = 0; k < size; ++k) for (Index k = 0; k < size; ++k)
{ {
int row_of_biggest_in_corner, col_of_biggest_in_corner; Index row_of_biggest_in_corner, col_of_biggest_in_corner;
RealScalar biggest_in_corner; RealScalar biggest_in_corner;
biggest_in_corner = m_qr.bottomRightCorner(rows-k, cols-k) biggest_in_corner = m_qr.bottomRightCorner(rows-k, cols-k)
@ -325,7 +326,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
if(ei_isMuchSmallerThan(biggest_in_corner, biggest, m_precision)) if(ei_isMuchSmallerThan(biggest_in_corner, biggest, m_precision))
{ {
m_rank = k; m_rank = k;
for(int i = k; i < size; i++) for(Index i = k; i < size; i++)
{ {
m_rows_transpositions.coeffRef(i) = i; m_rows_transpositions.coeffRef(i) = i;
m_cols_transpositions.coeffRef(i) = i; m_cols_transpositions.coeffRef(i) = i;
@ -354,7 +355,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
} }
m_cols_permutation.setIdentity(cols); m_cols_permutation.setIdentity(cols);
for(int k = 0; k < size; ++k) for(Index k = 0; k < size; ++k)
m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k)); m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1; m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@ -371,7 +372,7 @@ struct ei_solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const template<typename Dest> void evalTo(Dest& dst) const
{ {
const int rows = dec().rows(), cols = dec().cols(); const Index rows = dec().rows(), cols = dec().cols();
ei_assert(rhs().rows() == rows); ei_assert(rhs().rows() == rows);
// FIXME introduce nonzeroPivots() and use it here. and more generally, // FIXME introduce nonzeroPivots() and use it here. and more generally,
@ -385,9 +386,9 @@ struct ei_solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
typename Rhs::PlainObject c(rhs()); typename Rhs::PlainObject c(rhs());
Matrix<Scalar,1,Rhs::ColsAtCompileTime> temp(rhs().cols()); Matrix<Scalar,1,Rhs::ColsAtCompileTime> temp(rhs().cols());
for (int k = 0; k < dec().rank(); ++k) for (Index k = 0; k < dec().rank(); ++k)
{ {
int remainingSize = rows-k; Index remainingSize = rows-k;
c.row(k).swap(c.row(dec().rowsTranspositions().coeff(k))); c.row(k).swap(c.row(dec().rowsTranspositions().coeff(k)));
c.bottomRightCorner(remainingSize, rhs().cols()) c.bottomRightCorner(remainingSize, rhs().cols())
.applyHouseholderOnTheLeft(dec().matrixQR().col(k).tail(remainingSize-1), .applyHouseholderOnTheLeft(dec().matrixQR().col(k).tail(remainingSize-1),
@ -409,8 +410,8 @@ struct ei_solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
.template triangularView<Upper>() .template triangularView<Upper>()
.solveInPlace(c.topRows(dec().rank())); .solveInPlace(c.topRows(dec().rank()));
for(int i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i); for(Index i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
for(int i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero(); for(Index i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
} }
}; };
@ -422,12 +423,12 @@ typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<Matr
// compute the product H'_0 H'_1 ... H'_n-1, // compute the product H'_0 H'_1 ... H'_n-1,
// where H_k is the k-th Householder transformation I - h_k v_k v_k' // where H_k is the k-th Householder transformation I - h_k v_k v_k'
// and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
int rows = m_qr.rows(); Index rows = m_qr.rows();
int cols = m_qr.cols(); Index cols = m_qr.cols();
int size = std::min(rows,cols); Index size = std::min(rows,cols);
MatrixQType res = MatrixQType::Identity(rows, rows); MatrixQType res = MatrixQType::Identity(rows, rows);
Matrix<Scalar,1,MatrixType::RowsAtCompileTime> temp(rows); Matrix<Scalar,1,MatrixType::RowsAtCompileTime> temp(rows);
for (int k = size-1; k >= 0; k--) for (Index k = size-1; k >= 0; k--)
{ {
res.block(k, k, rows-k, rows-k) res.block(k, k, rows-k, rows-k)
.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k)); .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k));
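The comment above describes Q as a product of reflectors H_k = I - h_k v_k v_k^*; Eigen never forms them as dense matrices, but the formula is easy to check on its own. A standalone sketch (not Eigen's compact storage, just the textbook reflector that zeroes a vector below its first entry):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>
int main()
{
  Eigen::VectorXd x = Eigen::VectorXd::Random(5);
  Eigen::VectorXd v = x;
  const double alpha = -std::copysign(x.norm(), x(0));   // sign chosen to avoid cancellation in v(0)
  v(0) -= alpha;
  const double tau = 2.0 / v.squaredNorm();              // plays the role of the coefficient h_k
  Eigen::MatrixXd H = Eigen::MatrixXd::Identity(5,5) - tau * v * v.transpose();
  std::cout << (H * x).transpose() << "\n";              // ~ [alpha, 0, 0, 0, 0]
}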


@ -60,6 +60,7 @@ template<typename _MatrixType> class HouseholderQR
}; };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, ei_traits<MatrixType>::Flags&RowMajorBit ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType; typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, ei_traits<MatrixType>::Flags&RowMajorBit ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType; typedef typename ei_plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename ei_plain_row_type<MatrixType>::type RowVectorType; typedef typename ei_plain_row_type<MatrixType>::type RowVectorType;
@ -79,7 +80,7 @@ template<typename _MatrixType> class HouseholderQR
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa HouseholderQR() * \sa HouseholderQR()
*/ */
HouseholderQR(int rows, int cols) HouseholderQR(Index rows, Index cols)
: m_qr(rows, cols), : m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)), m_hCoeffs(std::min(rows,cols)),
m_temp(cols), m_temp(cols),
@ -165,8 +166,8 @@ template<typename _MatrixType> class HouseholderQR
*/ */
typename MatrixType::RealScalar logAbsDeterminant() const; typename MatrixType::RealScalar logAbsDeterminant() const;
inline int rows() const { return m_qr.rows(); } inline Index rows() const { return m_qr.rows(); }
inline int cols() const { return m_qr.cols(); } inline Index cols() const { return m_qr.cols(); }
const HCoeffsType& hCoeffs() const { return m_hCoeffs; } const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected: protected:
@ -197,19 +198,19 @@ typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() c
template<typename MatrixType> template<typename MatrixType>
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix) HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{ {
int rows = matrix.rows(); Index rows = matrix.rows();
int cols = matrix.cols(); Index cols = matrix.cols();
int size = std::min(rows,cols); Index size = std::min(rows,cols);
m_qr = matrix; m_qr = matrix;
m_hCoeffs.resize(size); m_hCoeffs.resize(size);
m_temp.resize(cols); m_temp.resize(cols);
for(int k = 0; k < size; ++k) for(Index k = 0; k < size; ++k)
{ {
int remainingRows = rows - k; Index remainingRows = rows - k;
int remainingCols = cols - k - 1; Index remainingCols = cols - k - 1;
RealScalar beta; RealScalar beta;
m_qr.col(k).tail(remainingRows).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); m_qr.col(k).tail(remainingRows).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
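The loop above fills the compact storage that householderQ() and matrixQR() expose; recovering an explicit thin Q and R from a computed decomposition looks like this (random data; no pivoting here, so A = Q*R directly):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,3);
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);
  Eigen::MatrixXd thinQ = qr.householderQ() * Eigen::MatrixXd::Identity(5,3);    // first 3 columns of Q
  Eigen::MatrixXd R = qr.matrixQR().topRows(3).triangularView<Eigen::Upper>();   // 3x3 upper-triangular factor
  std::cout << "|A - Q*R| = " << (A - thinQ * R).norm() << "\n";                 // ~0
}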
@ -231,8 +232,8 @@ struct ei_solve_retval<HouseholderQR<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const template<typename Dest> void evalTo(Dest& dst) const
{ {
const int rows = dec().rows(), cols = dec().cols(); const Index rows = dec().rows(), cols = dec().cols();
const int rank = std::min(rows, cols); const Index rank = std::min(rows, cols);
ei_assert(rhs().rows() == rows); ei_assert(rhs().rows() == rows);
typename Rhs::PlainObject c(rhs()); typename Rhs::PlainObject c(rhs());

View File

@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library // This file is part of Eigen, a lightweight C++ template library
// for linear algebra. // for linear algebra.
// //
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// //
// Eigen is free software; you can redistribute it and/or // Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public // modify it under the terms of the GNU Lesser General Public
@ -63,6 +63,7 @@ template<typename MatrixType, unsigned int Options> class JacobiSVD
private: private:
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
enum { enum {
ComputeU = (Options & SkipU) == 0, ComputeU = (Options & SkipU) == 0,
ComputeV = (Options & SkipV) == 0, ComputeV = (Options & SkipV) == 0,
@ -107,7 +108,7 @@ template<typename MatrixType, unsigned int Options> class JacobiSVD
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa JacobiSVD() * \sa JacobiSVD()
*/ */
JacobiSVD(int rows, int cols) : m_matrixU(rows, rows), JacobiSVD(Index rows, Index cols) : m_matrixU(rows, rows),
m_matrixV(cols, cols), m_matrixV(cols, cols),
m_singularValues(std::min(rows, cols)), m_singularValues(std::min(rows, cols)),
m_workMatrix(rows, cols), m_workMatrix(rows, cols),
@ -119,7 +120,7 @@ template<typename MatrixType, unsigned int Options> class JacobiSVD
m_workMatrix(), m_workMatrix(),
m_isInitialized(false) m_isInitialized(false)
{ {
const int minSize = std::min(matrix.rows(), matrix.cols()); const Index minSize = std::min(matrix.rows(), matrix.cols());
m_singularValues.resize(minSize); m_singularValues.resize(minSize);
m_workMatrix.resize(minSize, minSize); m_workMatrix.resize(minSize, minSize);
compute(matrix); compute(matrix);
@ -164,7 +165,8 @@ template<typename MatrixType, unsigned int Options>
struct ei_svd_precondition_2x2_block_to_be_real<MatrixType, Options, false> struct ei_svd_precondition_2x2_block_to_be_real<MatrixType, Options, false>
{ {
typedef JacobiSVD<MatrixType, Options> SVD; typedef JacobiSVD<MatrixType, Options> SVD;
typedef typename SVD::Index Index;
static void run(typename SVD::WorkMatrixType&, JacobiSVD<MatrixType, Options>&, int, int) {} static void run(typename SVD::WorkMatrixType&, JacobiSVD<MatrixType, Options>&, Index, Index) {}
}; };
template<typename MatrixType, unsigned int Options> template<typename MatrixType, unsigned int Options>
@ -173,8 +175,9 @@ struct ei_svd_precondition_2x2_block_to_be_real<MatrixType, Options, true>
typedef JacobiSVD<MatrixType, Options> SVD; typedef JacobiSVD<MatrixType, Options> SVD;
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename SVD::Index Index;
enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV }; enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV };
static void run(typename SVD::WorkMatrixType& work_matrix, JacobiSVD<MatrixType, Options>& svd, int p, int q) static void run(typename SVD::WorkMatrixType& work_matrix, JacobiSVD<MatrixType, Options>& svd, Index p, Index q)
{ {
Scalar z; Scalar z;
PlanarRotation<Scalar> rot; PlanarRotation<Scalar> rot;
@ -210,8 +213,8 @@ struct ei_svd_precondition_2x2_block_to_be_real<MatrixType, Options, true>
} }
}; };
template<typename MatrixType, typename RealScalar> template<typename MatrixType, typename RealScalar, typename Index>
void ei_real_2x2_jacobi_svd(const MatrixType& matrix, int p, int q, void ei_real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
PlanarRotation<RealScalar> *j_left, PlanarRotation<RealScalar> *j_left,
PlanarRotation<RealScalar> *j_right) PlanarRotation<RealScalar> *j_right)
{ {
@ -250,12 +253,13 @@ struct ei_svd_precondition_if_more_rows_than_cols<MatrixType, Options, true>
typedef JacobiSVD<MatrixType, Options> SVD; typedef JacobiSVD<MatrixType, Options> SVD;
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV }; enum { ComputeU = SVD::ComputeU, ComputeV = SVD::ComputeV };
static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd) static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd)
{ {
int rows = matrix.rows(); Index rows = matrix.rows();
int cols = matrix.cols(); Index cols = matrix.cols();
int diagSize = cols; Index diagSize = cols;
if(rows > cols) if(rows > cols)
{ {
FullPivHouseholderQR<MatrixType> qr(matrix); FullPivHouseholderQR<MatrixType> qr(matrix);
@ -282,6 +286,7 @@ struct ei_svd_precondition_if_more_cols_than_rows<MatrixType, Options, true>
typedef JacobiSVD<MatrixType, Options> SVD; typedef JacobiSVD<MatrixType, Options> SVD;
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
enum { enum {
ComputeU = SVD::ComputeU, ComputeU = SVD::ComputeU,
ComputeV = SVD::ComputeV, ComputeV = SVD::ComputeV,
@ -294,9 +299,9 @@ struct ei_svd_precondition_if_more_cols_than_rows<MatrixType, Options, true>
static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd) static bool run(const MatrixType& matrix, typename SVD::WorkMatrixType& work_matrix, SVD& svd)
{ {
int rows = matrix.rows(); Index rows = matrix.rows();
int cols = matrix.cols(); Index cols = matrix.cols();
int diagSize = rows; Index diagSize = rows;
if(cols > rows) if(cols > rows)
{ {
typedef Matrix<Scalar,ColsAtCompileTime,RowsAtCompileTime, typedef Matrix<Scalar,ColsAtCompileTime,RowsAtCompileTime,
@ -315,9 +320,9 @@ struct ei_svd_precondition_if_more_cols_than_rows<MatrixType, Options, true>
template<typename MatrixType, unsigned int Options> template<typename MatrixType, unsigned int Options>
JacobiSVD<MatrixType, Options>& JacobiSVD<MatrixType, Options>::compute(const MatrixType& matrix) JacobiSVD<MatrixType, Options>& JacobiSVD<MatrixType, Options>::compute(const MatrixType& matrix)
{ {
int rows = matrix.rows(); Index rows = matrix.rows();
int cols = matrix.cols(); Index cols = matrix.cols();
int diagSize = std::min(rows, cols); Index diagSize = std::min(rows, cols);
m_singularValues.resize(diagSize); m_singularValues.resize(diagSize);
const RealScalar precision = 2 * NumTraits<Scalar>::epsilon(); const RealScalar precision = 2 * NumTraits<Scalar>::epsilon();
@ -333,9 +338,9 @@ JacobiSVD<MatrixType, Options>& JacobiSVD<MatrixType, Options>::compute(const Ma
while(!finished) while(!finished)
{ {
finished = true; finished = true;
for(int p = 1; p < diagSize; ++p) for(Index p = 1; p < diagSize; ++p)
{ {
for(int q = 0; q < p; ++q) for(Index q = 0; q < p; ++q)
{ {
if(std::max(ei_abs(m_workMatrix.coeff(p,q)),ei_abs(m_workMatrix.coeff(q,p))) if(std::max(ei_abs(m_workMatrix.coeff(p,q)),ei_abs(m_workMatrix.coeff(q,p)))
> std::max(ei_abs(m_workMatrix.coeff(p,p)),ei_abs(m_workMatrix.coeff(q,q)))*precision) > std::max(ei_abs(m_workMatrix.coeff(p,p)),ei_abs(m_workMatrix.coeff(q,q)))*precision)
@ -356,16 +361,16 @@ JacobiSVD<MatrixType, Options>& JacobiSVD<MatrixType, Options>::compute(const Ma
} }
} }
for(int i = 0; i < diagSize; ++i) for(Index i = 0; i < diagSize; ++i)
{ {
RealScalar a = ei_abs(m_workMatrix.coeff(i,i)); RealScalar a = ei_abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = a; m_singularValues.coeffRef(i) = a;
if(ComputeU && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a; if(ComputeU && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
} }
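Once the sweeps have converged, the loop above moves any remaining phase of the diagonal into U so that the singular values are nonnegative. Seen from the outside (a sketch against the version in this patch, whose default Options compute both U and V; current Eigen releases require requesting them explicitly):

#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,3);
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A);
  Eigen::MatrixXd S = Eigen::MatrixXd::Zero(4,3);
  S.diagonal() = svd.singularValues();                  // nonnegative, sorted by the loop just below
  std::cout << "|A - U*S*V^T| = "
            << (A - svd.matrixU() * S * svd.matrixV().transpose()).norm() << "\n";   // ~0
}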
for(int i = 0; i < diagSize; i++) for(Index i = 0; i < diagSize; i++)
{ {
int pos; Index pos;
m_singularValues.tail(diagSize-i).maxCoeff(&pos); m_singularValues.tail(diagSize-i).maxCoeff(&pos);
if(pos) if(pos)
{ {


@ -46,6 +46,7 @@ template<typename _MatrixType> class SVD
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Index Index;
enum { enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime, RowsAtCompileTime = MatrixType::RowsAtCompileTime,
@ -79,7 +80,7 @@ template<typename _MatrixType> class SVD
* according to the specified problem \a size. * according to the specified problem \a size.
* \sa JacobiSVD() * \sa JacobiSVD()
*/ */
SVD(int rows, int cols) : m_matU(rows, rows), SVD(Index rows, Index cols) : m_matU(rows, rows),
m_matV(cols,cols), m_matV(cols,cols),
m_sigma(std::min(rows, cols)), m_sigma(std::min(rows, cols)),
m_workMatrix(rows, cols), m_workMatrix(rows, cols),
@ -143,13 +144,13 @@ template<typename _MatrixType> class SVD
template<typename ScalingType, typename RotationType> template<typename ScalingType, typename RotationType>
void computeScalingRotation(ScalingType *positive, RotationType *unitary) const; void computeScalingRotation(ScalingType *positive, RotationType *unitary) const;
inline int rows() const inline Index rows() const
{ {
ei_assert(m_isInitialized && "SVD is not initialized."); ei_assert(m_isInitialized && "SVD is not initialized.");
return m_rows; return m_rows;
} }
inline int cols() const inline Index cols() const
{ {
ei_assert(m_isInitialized && "SVD is not initialized."); ei_assert(m_isInitialized && "SVD is not initialized.");
return m_cols; return m_cols;
@ -182,7 +183,7 @@ template<typename _MatrixType> class SVD
MatrixType m_workMatrix; MatrixType m_workMatrix;
RowVector m_rv1; RowVector m_rv1;
bool m_isInitialized; bool m_isInitialized;
int m_rows, m_cols; Index m_rows, m_cols;
}; };
/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix /** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix
@ -194,8 +195,8 @@ template<typename _MatrixType> class SVD
template<typename MatrixType> template<typename MatrixType>
SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix) SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
{ {
const int m = m_rows = matrix.rows(); const Index m = m_rows = matrix.rows();
const int n = m_cols = matrix.cols(); const Index n = m_cols = matrix.cols();
m_matU.resize(m, m); m_matU.resize(m, m);
m_matU.setZero(); m_matU.setZero();
@ -203,14 +204,14 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
m_matV.resize(n,n); m_matV.resize(n,n);
m_workMatrix = matrix; m_workMatrix = matrix;
int max_iters = 30; Index max_iters = 30;
MatrixVType& V = m_matV; MatrixVType& V = m_matV;
MatrixType& A = m_workMatrix; MatrixType& A = m_workMatrix;
SingularValuesType& W = m_sigma; SingularValuesType& W = m_sigma;
bool flag; bool flag;
int i=0,its=0,j=0,k=0,l=0,nm=0; Index i=0,its=0,j=0,k=0,l=0,nm=0;
Scalar anorm, c, f, g, h, s, scale, x, y, z; Scalar anorm, c, f, g, h, s, scale, x, y, z;
bool convergence = true; bool convergence = true;
Scalar eps = NumTraits<Scalar>::dummy_precision(); Scalar eps = NumTraits<Scalar>::dummy_precision();
@ -426,9 +427,9 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
// sort the singular values: // sort the singular values:
{ {
for (int i=0; i<n; i++) for (Index i=0; i<n; i++)
{ {
int k; Index k;
W.tail(n-i).maxCoeff(&k); W.tail(n-i).maxCoeff(&k);
if (k != 0) if (k != 0)
{ {
@ -459,11 +460,11 @@ struct ei_solve_retval<SVD<_MatrixType>, Rhs>
{ {
ei_assert(rhs().rows() == dec().rows()); ei_assert(rhs().rows() == dec().rows());
for (int j=0; j<cols(); ++j) for (Index j=0; j<cols(); ++j)
{ {
Matrix<Scalar,MatrixType::RowsAtCompileTime,1> aux = dec().matrixU().adjoint() * rhs().col(j); Matrix<Scalar,MatrixType::RowsAtCompileTime,1> aux = dec().matrixU().adjoint() * rhs().col(j);
for (int i = 0; i < dec().rows(); ++i) for (Index i = 0; i < dec().rows(); ++i)
{ {
Scalar si = dec().singularValues().coeff(i); Scalar si = dec().singularValues().coeff(i);
if(si == RealScalar(0)) if(si == RealScalar(0))
@ -471,7 +472,7 @@ struct ei_solve_retval<SVD<_MatrixType>, Rhs>
else else
aux.coeffRef(i) /= si; aux.coeffRef(i) /= si;
} }
const int minsize = std::min(dec().rows(),dec().cols()); const Index minsize = std::min(dec().rows(),dec().cols());
dst.col(j).head(minsize) = aux.head(minsize); dst.col(j).head(minsize) = aux.head(minsize);
if(dec().cols()>dec().rows()) dst.col(j).tail(cols()-minsize).setZero(); if(dec().cols()>dec().rows()) dst.col(j).tail(cols()-minsize).setZero();
dst.col(j) = dec().matrixV() * dst.col(j); dst.col(j) = dec().matrixV() * dst.col(j);
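For reference, the solve path above implements the pseudo-inverse x = V S⁺ U* b: project the right-hand side onto U, divide by each singular value (exactly-zero ones are skipped so they contribute nothing), and map the result back through V. A minimal standalone sketch of that logic, written against the public JacobiSVD class rather than the internal SVD / ei_solve_retval machinery changed here, and using the Index typedef this commit introduces; the helper name svdSolve is illustrative only.

#include <Eigen/Dense>
#include <algorithm>
#include <iostream>

// Illustrative helper mirroring the solve logic above: x = V * S^+ * U^* * b.
Eigen::VectorXd svdSolve(const Eigen::MatrixXd& A, const Eigen::VectorXd& b)
{
  typedef Eigen::MatrixXd::Index Index;          // the Index type this commit introduces
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXd aux = svd.matrixU().adjoint() * b;       // U^* b
  const Index minsize = std::min(A.rows(), A.cols());
  for (Index i = 0; i < minsize; ++i)
  {
    double si = svd.singularValues()(i);
    aux(i) = (si == 0.0) ? 0.0 : aux(i) / si;              // apply S^+, zero singular values stay zero
  }
  return svd.matrixV() * aux;                              // back to the original basis
}

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);
  // The explicit loop only mirrors the diff; solve() gives the same least-squares answer.
  std::cout << (svdSolve(A, b) - A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b)).norm() << "\n";
  return 0;
}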
View File
@ -37,6 +37,7 @@ template<typename _MatrixType> class UpperBidiagonalization
}; };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType; typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType; typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0> BidiagonalType; typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0> BidiagonalType;
@ -95,8 +96,8 @@ template<typename _MatrixType> class UpperBidiagonalization
template<typename _MatrixType> template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix) UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
{ {
int rows = matrix.rows(); Index rows = matrix.rows();
int cols = matrix.cols(); Index cols = matrix.cols();
ei_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols."); ei_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
@ -104,10 +105,10 @@ UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::comput
ColVectorType temp(rows); ColVectorType temp(rows);
for (int k = 0; /* breaks at k==cols-1 below */ ; ++k) for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
{ {
int remainingRows = rows - k; Index remainingRows = rows - k;
int remainingCols = cols - k - 1; Index remainingCols = cols - k - 1;
// construct left householder transform in-place in m_householder // construct left householder transform in-place in m_householder
m_householder.col(k).tail(remainingRows) m_householder.col(k).tail(remainingRows)
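For context, each iteration of the loop above builds a left Householder reflector that zeroes column k below the diagonal (a matching right reflector then zeroes row k to the right of the superdiagonal), which is why the method asserts rows >= cols. A small sketch of one such left step on a standalone matrix, using the public makeHouseholderInPlace / applyHouseholderOnTheLeft API; the 6x4 double matrix and k = 0 are purely illustrative choices.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  typedef Eigen::MatrixXd::Index Index;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);   // rows >= cols, as asserted above
  const Index k = 0;                                   // first column only, for illustration
  const Index remainingRows = A.rows() - k;

  // Build the reflector mapping A.col(k).tail(remainingRows) onto (beta, 0, ..., 0)^T.
  double tau, beta;
  Eigen::VectorXd v = A.col(k).tail(remainingRows);    // copy; A is untouched so far
  v.makeHouseholderInPlace(tau, beta);                 // v.tail(...) now holds the essential part

  // Apply the reflector from the left to the trailing block, as compute() does in-place.
  Eigen::VectorXd workspace(A.cols());
  A.bottomRightCorner(remainingRows, A.cols() - k)
      .applyHouseholderOnTheLeft(v.tail(remainingRows - 1), tau, workspace.data());
  A.col(k).tail(remainingRows - 1).setZero();          // store the exact zeros explicitly
  A(k, k) = beta;

  std::cout << A << "\n";                              // column 0 is now (beta, 0, 0, 0, 0, 0)^T
  return 0;
}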
View File
@ -35,7 +35,8 @@ template<typename _Scalar> class AmbiVector
public: public:
typedef _Scalar Scalar; typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename NumTraits<Scalar>::Real RealScalar;
typedef SparseIndex Index;
AmbiVector(int size) AmbiVector(Index size)
: m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1) : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{ {
resize(size); resize(size);
@ -44,40 +45,40 @@ template<typename _Scalar> class AmbiVector
void init(double estimatedDensity); void init(double estimatedDensity);
void init(int mode); void init(int mode);
int nonZeros() const; Index nonZeros() const;
/** Specifies a sub-vector to work on */ /** Specifies a sub-vector to work on */
void setBounds(int start, int end) { m_start = start; m_end = end; } void setBounds(Index start, Index end) { m_start = start; m_end = end; }
void setZero(); void setZero();
void restart(); void restart();
Scalar& coeffRef(int i); Scalar& coeffRef(Index i);
Scalar& coeff(int i); Scalar& coeff(Index i);
class Iterator; class Iterator;
~AmbiVector() { delete[] m_buffer; } ~AmbiVector() { delete[] m_buffer; }
void resize(int size) void resize(Index size)
{ {
if (m_allocatedSize < size) if (m_allocatedSize < size)
reallocate(size); reallocate(size);
m_size = size; m_size = size;
} }
int size() const { return m_size; } Index size() const { return m_size; }
protected: protected:
void reallocate(int size) void reallocate(Index size)
{ {
// if the size of the matrix is not too large, let's allocate a bit more than needed such // if the size of the matrix is not too large, let's allocate a bit more than needed such
// that we can handle dense vector even in sparse mode. // that we can handle dense vector even in sparse mode.
delete[] m_buffer; delete[] m_buffer;
if (size<1000) if (size<1000)
{ {
int allocSize = (size * sizeof(ListEl))/sizeof(Scalar); Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
m_buffer = new Scalar[allocSize]; m_buffer = new Scalar[allocSize];
} }
@ -93,9 +94,9 @@ template<typename _Scalar> class AmbiVector
void reallocateSparse() void reallocateSparse()
{ {
int copyElements = m_allocatedElements; Index copyElements = m_allocatedElements;
m_allocatedElements = std::min(int(m_allocatedElements*1.5),m_size); m_allocatedElements = std::min(Index(m_allocatedElements*1.5),m_size);
int allocSize = m_allocatedElements * sizeof(ListEl); Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0); allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
Scalar* newBuffer = new Scalar[allocSize]; Scalar* newBuffer = new Scalar[allocSize];
memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl)); memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
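The arithmetic in reallocate() and reallocateSparse() exists because a single Scalar buffer backs both storage modes: a dense array of Scalars, or an array of the ListEl linked-list nodes shown in the next hunk. Sizes therefore always get converted between the two granularities via sizeof. A tiny sketch of that conversion, assuming Scalar is double and that Index behaves like a signed pointer-sized integer; the struct below only mirrors the node layout, it is not the real AmbiVector type.

#include <cstddef>
#include <iostream>

typedef std::ptrdiff_t Index;   // assumption: SparseIndex is an integer type of this kind
typedef double Scalar;
struct ListEl { Index next; Index index; Scalar value; };   // mirrors the node layout below

int main()
{
  // Same arithmetic as reallocate() / reallocateSparse(): allocate enough Scalars that the
  // buffer can also be reinterpreted as 'size' ListEl nodes in sparse (linked-list) mode.
  Index size = 500;                                                        // requested size (< 1000 branch)
  Index allocSize = Index(size * sizeof(ListEl) / sizeof(Scalar));         // Scalars actually allocated
  Index allocatedElements = Index(allocSize * sizeof(Scalar) / sizeof(ListEl)); // ListEl nodes that fit

  std::cout << "Scalars allocated:   " << allocSize << "\n"
            << "ListEl nodes usable: " << allocatedElements << "\n";
  return 0;
}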
@ -107,30 +108,30 @@ template<typename _Scalar> class AmbiVector
// element type of the linked list // element type of the linked list
struct ListEl struct ListEl
{ {
int next; Index next;
int index; Index index;
Scalar value; Scalar value;
}; };
// used to store data in both mode // used to store data in both mode
Scalar* m_buffer; Scalar* m_buffer;
Scalar m_zero; Scalar m_zero;
int m_size; Index m_size;
int m_start; Index m_start;
int m_end; Index m_end;
int m_allocatedSize; Index m_allocatedSize;
int m_allocatedElements; Index m_allocatedElements;
int m_mode; Index m_mode;
// linked list mode // linked list mode
int m_llStart; Index m_llStart;
int m_llCurrent; Index m_llCurrent;
int m_llSize; Index m_llSize;
}; };
/** \returns the number of non zeros in the current sub vector */ /** \returns the number of non zeros in the current sub vector */
template<typename Scalar> template<typename Scalar>
int AmbiVector<Scalar>::nonZeros() const SparseIndex AmbiVector<Scalar>::nonZeros() const
{ {
if (m_mode==IsSparse) if (m_mode==IsSparse)
return m_llSize; return m_llSize;
@ -175,7 +176,7 @@ void AmbiVector<Scalar>::setZero()
{ {
if (m_mode==IsDense) if (m_mode==IsDense)
{ {
for (int i=m_start; i<m_end; ++i) for (Index i=m_start; i<m_end; ++i)
m_buffer[i] = Scalar(0); m_buffer[i] = Scalar(0);
} }
else else
@ -187,7 +188,7 @@ void AmbiVector<Scalar>::setZero()
} }
template<typename Scalar> template<typename Scalar>
Scalar& AmbiVector<Scalar>::coeffRef(int i) Scalar& AmbiVector<Scalar>::coeffRef(Index i)
{ {
if (m_mode==IsDense) if (m_mode==IsDense)
return m_buffer[i]; return m_buffer[i];
@ -221,7 +222,7 @@ Scalar& AmbiVector<Scalar>::coeffRef(int i)
} }
else else
{ {
int nextel = llElements[m_llCurrent].next; Index nextel = llElements[m_llCurrent].next;
ei_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); ei_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
while (nextel >= 0 && llElements[nextel].index<=i) while (nextel >= 0 && llElements[nextel].index<=i)
{ {
@ -256,7 +257,7 @@ Scalar& AmbiVector<Scalar>::coeffRef(int i)
} }
template<typename Scalar> template<typename Scalar>
Scalar& AmbiVector<Scalar>::coeff(int i) Scalar& AmbiVector<Scalar>::coeff(Index i)
{ {
if (m_mode==IsDense) if (m_mode==IsDense)
return m_buffer[i]; return m_buffer[i];
@ -270,7 +271,7 @@ Scalar& AmbiVector<Scalar>::coeff(int i)
} }
else else
{ {
int elid = m_llStart; Index elid = m_llStart;
while (elid >= 0 && llElements[elid].index<i) while (elid >= 0 && llElements[elid].index<i)
elid = llElements[elid].next; elid = llElements[elid].next;
@ -327,7 +328,7 @@ class AmbiVector<_Scalar>::Iterator
} }
} }
int index() const { return m_cachedIndex; } Index index() const { return m_cachedIndex; }
Scalar value() const { return m_cachedValue; } Scalar value() const { return m_cachedValue; }
operator bool() const { return m_cachedIndex>=0; } operator bool() const { return m_cachedIndex>=0; }
@ -365,9 +366,9 @@ class AmbiVector<_Scalar>::Iterator
protected: protected:
const AmbiVector& m_vector; // the target vector const AmbiVector& m_vector; // the target vector
int m_currentEl; // the current element in sparse/linked-list mode Index m_currentEl; // the current element in sparse/linked-list mode
RealScalar m_epsilon; // epsilon used to prune zero coefficients RealScalar m_epsilon; // epsilon used to prune zero coefficients
int m_cachedIndex; // current coordinate Index m_cachedIndex; // current coordinate
Scalar m_cachedValue; // current value Scalar m_cachedValue; // current value
bool m_isDense; // mode of the vector bool m_isDense; // mode of the vector
}; };
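The Iterator above follows the same protocol as Eigen's public sparse iterators: index(), value(), conversion to bool, and prefix ++, with the extra twist that coefficients no larger than m_epsilon are skipped. A short sketch of the analogous public-API pattern on a SparseVector, which may help when reading the sparse-mode branch; it deliberately does not use AmbiVector itself, since that class is internal.

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  // index() / value() / operator bool / ++ : the same iteration protocol as AmbiVector::Iterator,
  // minus the epsilon-based pruning.
  Eigen::SparseVector<double> v(10);
  v.insert(2) = 1.5;
  v.insert(7) = -0.5;

  for (Eigen::SparseVector<double>::InnerIterator it(v); it; ++it)
    std::cout << "index " << it.index() << " -> value " << it.value() << "\n";
  return 0;
}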
Some files were not shown because too many files have changed in this diff.