Mirror of https://gitlab.com/libeigen/eigen.git, synced 2025-04-29 23:34:12 +08:00.
The usage of DenseIndex is deprecated, so let's replace DenseIndex with Index.

Commit 45cbb0bbb1 (parent cc641aabb7)
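The change is mechanical: wherever the deprecated Eigen::DenseIndex appears in a member, typedef, or function signature, it is now spelled Eigen::Index. With the default configuration both names alias the same integer type (EIGEN_DEFAULT_DENSE_INDEX_TYPE, std::ptrdiff_t by default), so only the spelling changes. A minimal before/after sketch of the pattern, with hypothetical struct names that are not part of Eigen:

#include <Eigen/Core>

// Hypothetical illustration of the substitution applied throughout the diff below.
struct BeforeStyle {
  Eigen::DenseIndex m_rows, m_cols;                      // deprecated spelling
  Eigen::DenseIndex rows() const { return m_rows; }
  void resize(Eigen::DenseIndex r, Eigen::DenseIndex c) { m_rows = r; m_cols = c; }
};

struct AfterStyle {
  Eigen::Index m_rows, m_cols;                            // preferred spelling
  Eigen::Index rows() const { return m_rows; }
  void resize(Eigen::Index r, Eigen::Index c) { m_rows = r; m_cols = c; }
};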
@@ -179,7 +179,7 @@ struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >
 {
   typedef _Scalar Scalar;
   typedef Dense StorageKind;
-  typedef DenseIndex StorageIndex;
+  typedef Eigen::Index StorageIndex;
   enum {
     CoeffReadCost = NumTraits<Scalar>::ReadCost,
     RowsAtCompileTime = _Rows,
@@ -140,12 +140,12 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
       if (this != &other) m_data = other.m_data;
       return *this;
     }
-    EIGEN_DEVICE_FUNC DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {}
     EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
-    EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;}
+    EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
-    EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;}
+    EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
-    EIGEN_DEVICE_FUNC void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
-    EIGEN_DEVICE_FUNC void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
     EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
     EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
 };
@@ -158,12 +158,12 @@ template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0
     EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) {}
     EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) {}
     EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; }
-    EIGEN_DEVICE_FUNC DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {}
     EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {}
-    EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;}
+    EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
-    EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;}
+    EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
-    EIGEN_DEVICE_FUNC void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}
-    EIGEN_DEVICE_FUNC void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}
     EIGEN_DEVICE_FUNC const T *data() const { return 0; }
     EIGEN_DEVICE_FUNC T *data() { return 0; }
 };
@@ -182,8 +182,8 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic,
 template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>
 {
     internal::plain_array<T,Size,_Options> m_data;
-    DenseIndex m_rows;
+    Index m_rows;
-    DenseIndex m_cols;
+    Index m_cols;
   public:
     EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}
     explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
@@ -199,13 +199,13 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
       }
       return *this;
     }
-    DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
+    DenseStorage(Index, Index nbRows, Index nbCols) : m_rows(nbRows), m_cols(nbCols) {}
     void swap(DenseStorage& other)
     { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-    EIGEN_DEVICE_FUNC DenseIndex rows() const {return m_rows;}
+    EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}
-    EIGEN_DEVICE_FUNC DenseIndex cols() const {return m_cols;}
+    EIGEN_DEVICE_FUNC Index cols() const {return m_cols;}
-    void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+    void conservativeResize(Index, Index nbRows, Index nbCols) { m_rows = nbRows; m_cols = nbCols; }
-    void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+    void resize(Index, Index nbRows, Index nbCols) { m_rows = nbRows; m_cols = nbCols; }
     EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
     EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
 };
@@ -214,7 +214,7 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
 template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>
 {
     internal::plain_array<T,Size,_Options> m_data;
-    DenseIndex m_rows;
+    Index m_rows;
   public:
     EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {}
     explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
@@ -229,12 +229,12 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
       }
       return *this;
     }
-    DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
+    DenseStorage(Index, Index nbRows, Index) : m_rows(nbRows) {}
     void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-    EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;}
+    EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
-    EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return _Cols;}
+    EIGEN_DEVICE_FUNC Index cols(void) const {return _Cols;}
-    void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+    void conservativeResize(Index, Index nbRows, Index) { m_rows = nbRows; }
-    void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+    void resize(Index, Index nbRows, Index) { m_rows = nbRows; }
     EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
     EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
 };
@@ -243,7 +243,7 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
 template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>
 {
     internal::plain_array<T,Size,_Options> m_data;
-    DenseIndex m_cols;
+    Index m_cols;
   public:
     EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {}
     explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
@@ -258,12 +258,12 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
       }
       return *this;
     }
-    DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
+    DenseStorage(Index, Index, Index nbCols) : m_cols(nbCols) {}
     void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-    EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return _Rows;}
+    EIGEN_DEVICE_FUNC Index rows(void) const {return _Rows;}
-    EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;}
+    EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
-    void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+    void conservativeResize(Index, Index, Index nbCols) { m_cols = nbCols; }
-    void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+    void resize(Index, Index, Index nbCols) { m_cols = nbCols; }
     EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }
     EIGEN_DEVICE_FUNC T *data() { return m_data.array; }
 };
@@ -272,13 +272,13 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
 template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>
 {
     T *m_data;
-    DenseIndex m_rows;
+    Index m_rows;
-    DenseIndex m_cols;
+    Index m_cols;
   public:
     EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
     explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(0), m_rows(0), m_cols(0) {}
-    DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+    DenseStorage(Index size, Index nbRows, Index nbCols)
       : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
     DenseStorage(const DenseStorage& other)
@@ -317,15 +317,15 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
     ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
     void swap(DenseStorage& other)
     { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-    EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;}
+    EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
-    EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;}
+    EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
-    void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+    void conservativeResize(Index size, Index nbRows, Index nbCols)
     {
       m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
       m_rows = nbRows;
       m_cols = nbCols;
     }
-    void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+    void resize(Index size, Index nbRows, Index nbCols)
     {
       if(size != m_rows*m_cols)
       {
@@ -347,11 +347,11 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
 template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>
 {
     T *m_data;
-    DenseIndex m_cols;
+    Index m_cols;
   public:
     EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {}
     explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
-    DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
+    DenseStorage(Index size, Index, Index nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
     DenseStorage(const DenseStorage& other)
       : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(_Rows*other.m_cols))
@@ -385,14 +385,14 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
 #endif
     ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
     void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-    EIGEN_DEVICE_FUNC static DenseIndex rows(void) {return _Rows;}
+    EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}
-    EIGEN_DEVICE_FUNC DenseIndex cols(void) const {return m_cols;}
+    EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}
-    void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
+    void conservativeResize(Index size, Index, Index nbCols)
     {
       m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
       m_cols = nbCols;
     }
-    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols)
+    EIGEN_STRONG_INLINE void resize(Index size, Index, Index nbCols)
     {
       if(size != _Rows*m_cols)
       {
@@ -413,11 +413,11 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
 template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>
 {
     T *m_data;
-    DenseIndex m_rows;
+    Index m_rows;
   public:
     EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {}
     explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
-    DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
+    DenseStorage(Index size, Index nbRows, Index) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
     DenseStorage(const DenseStorage& other)
       : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*_Cols))
@@ -451,14 +451,14 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
 #endif
     ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
     void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-    EIGEN_DEVICE_FUNC DenseIndex rows(void) const {return m_rows;}
+    EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}
-    EIGEN_DEVICE_FUNC static DenseIndex cols(void) {return _Cols;}
+    EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}
-    void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
+    void conservativeResize(Index size, Index nbRows, Index)
     {
       m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
       m_rows = nbRows;
     }
-    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex)
+    EIGEN_STRONG_INLINE void resize(Index size, Index nbRows, Index)
     {
       if(size != m_rows*_Cols)
       {
@@ -236,10 +236,10 @@ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(
 template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from)
 { (*to) = from; }

-template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, DenseIndex /*stride*/)
+template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/)
 { return ploadu<Packet>(from); }

-template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, DenseIndex /*stride*/)
+template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/)
 { pstore(to, from); }

 /** \internal tries to do cache prefetching of \a addr */
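For readers unfamiliar with these packet primitives: pgather reads a packet's worth of elements spaced `stride` apart and pscatter writes them back the same way; the generic fallbacks above ignore the stride and degrade to plain unaligned load/store, while the architecture-specific specializations further down honor it. A minimal scalar sketch of the intended semantics (an illustration only, not Eigen's internal implementation):

#include <Eigen/Core>

// Illustration: gather N elements spaced `stride` apart, as the vectorized
// pgather<Packet> specializations in this commit do with SIMD registers.
template <typename Scalar, int N>
void gather_scalar(const Scalar* from, Eigen::Index stride, Scalar (&out)[N]) {
  for (int i = 0; i < N; ++i)
    out[i] = from[i * stride];   // strided read
}

// Illustration: scatter N elements back with the same stride (cf. pscatter).
template <typename Scalar, int N>
void scatter_scalar(Scalar* to, Eigen::Index stride, const Scalar (&in)[N]) {
  for (int i = 0; i < N; ++i)
    to[i * stride] = in[i];      // strided write
}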
@@ -107,7 +107,7 @@ struct traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
 {
   typedef _Scalar Scalar;
   typedef Dense StorageKind;
-  typedef DenseIndex StorageIndex;
+  typedef Eigen::Index StorageIndex;
   typedef MatrixXpr XprKind;
   enum {
     RowsAtCompileTime = _Rows,
@@ -44,7 +44,7 @@ template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>
 class Stride
 {
   public:
-    typedef DenseIndex Index;
+    typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
     enum {
       InnerStrideAtCompileTime = _InnerStrideAtCompileTime,
       OuterStrideAtCompileTime = _OuterStrideAtCompileTime
@@ -91,7 +91,6 @@ class InnerStride : public Stride<0, Value>
 {
   typedef Stride<0, Value> Base;
   public:
-    typedef DenseIndex Index;
     EIGEN_DEVICE_FUNC InnerStride() : Base() {}
     EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code
 };
@@ -103,7 +102,6 @@ class OuterStride : public Stride<Value, 0>
 {
   typedef Stride<Value, 0> Base;
   public:
-    typedef DenseIndex Index;
     EIGEN_DEVICE_FUNC OuterStride() : Base() {}
     EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break valid code
 };
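Note the asymmetry above: Stride keeps its Index member as a deprecated alias of Eigen::Index, while InnerStride and OuterStride drop their own typedef and now pick up Index from the Stride base (or from the namespace-level Eigen::Index). A small usage sketch, assuming Eigen 3.3 or later, of how calling code is expected to spell indices after this change:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Identity(4, 4);

  // Preferred: Eigen::Index for sizes, strides, and loop counters.
  for (Eigen::Index i = 0; i < m.rows(); ++i)
    m(i, i) = 2.0;

  // Still compiles through the deprecated alias kept on Stride.
  Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>::Index s = m.outerStride();

  return static_cast<int>(s);
}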
@@ -92,7 +92,7 @@ template<> EIGEN_STRONG_INLINE Packet4cf ploaddup<Packet4cf>(const std::complex<
 template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }

-template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, Index stride)
 {
   return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),
                                  std::imag(from[2*stride]), std::real(from[2*stride]),
@@ -100,7 +100,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packe
                                  std::imag(from[0*stride]), std::real(from[0*stride])));
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, Index stride)
 {
   __m128 low = _mm256_extractf128_ps(from.v, 0);
   to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),
@@ -310,13 +310,13 @@ template<> EIGEN_STRONG_INLINE Packet2cd ploaddup<Packet2cd>(const std::complex<
 template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet2cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet2cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }

-template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather<std::complex<double>, Packet2cd>(const std::complex<double>* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather<std::complex<double>, Packet2cd>(const std::complex<double>* from, Index stride)
 {
   return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]),
                                  std::imag(from[0*stride]), std::real(from[0*stride])));
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet2cd>(std::complex<double>* to, const Packet2cd& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet2cd>(std::complex<double>* to, const Packet2cd& from, Index stride)
 {
   __m128d low = _mm256_extractf128_pd(from.v, 0);
   to[stride*0] = std::complex<double>(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)));
@@ -226,17 +226,17 @@ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i&

 // NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
 // NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
-template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
 {
   return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                        from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
 {
   return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
 {
   __m128 low = _mm256_extractf128_ps(from, 0);
   to[stride*0] = _mm_cvtss_f32(low);
@@ -250,7 +250,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, co
   to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
   to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
 {
   __m128d low = _mm256_extractf128_pd(from, 0);
   to[stride*0] = _mm_cvtsd_f64(low);
@@ -67,14 +67,14 @@ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<flo
   return res;
 }

-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
 {
   std::complex<float> EIGEN_ALIGN16 af[2];
   af[0] = from[0*stride];
   af[1] = from[1*stride];
   return Packet2cf(vec_ld(0, (const float*)af));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
 {
   std::complex<float> EIGEN_ALIGN16 af[2];
   vec_st(from.v, 0, (float*)af);
@@ -285,14 +285,14 @@ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<
 template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
 { /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }

-template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
 {
   std::complex<double> EIGEN_ALIGN16 af[2];
   af[0] = from[0*stride];
   af[1] = from[1*stride];
   return pload<Packet1cd>(af);
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
 {
   std::complex<double> EIGEN_ALIGN16 af[2];
   pstore<std::complex<double> >(af, from);
@@ -252,7 +252,7 @@ pbroadcast4<Packet4i>(const int *a,
   a3 = vec_splat(a3, 3);
 }

-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
 {
   float EIGEN_ALIGN16 af[4];
   af[0] = from[0*stride];
@@ -261,7 +261,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
   af[3] = from[3*stride];
   return pload<Packet4f>(af);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
 {
   int EIGEN_ALIGN16 ai[4];
   ai[0] = from[0*stride];
@@ -270,7 +270,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
   ai[3] = from[3*stride];
   return pload<Packet4i>(ai);
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
 {
   float EIGEN_ALIGN16 af[4];
   pstore<float>(af, from);
@@ -279,7 +279,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, co
   to[2*stride] = af[2];
   to[3*stride] = af[3];
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
 {
   int EIGEN_ALIGN16 ai[4];
   pstore<int>((int *)ai, from);
@@ -793,14 +793,14 @@ pbroadcast4<Packet2d>(const double *a,
   a2 = vec_splat_dbl(a3, 0);
   a3 = vec_splat_dbl(a3, 1);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
 {
   double EIGEN_ALIGN16 af[2];
   af[0] = from[0*stride];
   af[1] = from[1*stride];
   return pload<Packet2d>(af);
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
 {
   double EIGEN_ALIGN16 af[2];
   pstore<double>(af, from);
@@ -112,7 +112,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<
 template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }

-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
 {
   Packet4f res;
   res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);
@@ -122,7 +122,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packe
   return Packet2cf(res);
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
 {
   to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
   to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
@@ -363,7 +363,7 @@ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<

 template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ARM_PREFETCH((double *)addr); }

-template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
 {
   Packet2d res;
   res = vsetq_lane_f64(std::real(from[0*stride]), res, 0);
@@ -371,7 +371,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Pack
   return Packet1cd(res);
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
 {
   to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1));
 }
@@ -250,7 +250,7 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
 {
   Packet4f res;
   res = vsetq_lane_f32(from[0*stride], res, 0);
@@ -259,7 +259,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
   res = vsetq_lane_f32(from[3*stride], res, 3);
   return res;
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
 {
   Packet4i res;
   res = vsetq_lane_s32(from[0*stride], res, 0);
@@ -269,14 +269,14 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
   return res;
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
 {
   to[stride*0] = vgetq_lane_f32(from, 0);
   to[stride*1] = vgetq_lane_f32(from, 1);
   to[stride*2] = vgetq_lane_f32(from, 2);
   to[stride*3] = vgetq_lane_f32(from, 3);
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
 {
   to[stride*0] = vgetq_lane_s32(from, 0);
   to[stride*1] = vgetq_lane_s32(from, 1);
@@ -606,14 +606,14 @@ template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d&

 template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }

-template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
 {
   Packet2d res;
   res = vsetq_lane_f64(from[0*stride], res, 0);
   res = vsetq_lane_f64(from[1*stride], res, 1);
   return res;
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
 {
   to[stride*0] = vgetq_lane_f64(from, 0);
   to[stride*1] = vgetq_lane_f64(from, 1);
@@ -115,13 +115,13 @@ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<f
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); }


-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
 {
   return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]),
                               std::imag(from[0*stride]), std::real(from[0*stride])));
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
 {
   to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)),
                                      _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1)));
@@ -387,32 +387,32 @@ template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d&
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castps_pd(from))); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castsi128_pd(from))); }

-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
 {
   return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
 {
   return _mm_set_pd(from[1*stride], from[0*stride]);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
 {
   return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
 {
   to[stride*0] = _mm_cvtss_f32(from);
   to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
   to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
   to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
 {
   to[stride*0] = _mm_cvtsd_f64(from);
   to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
 {
   to[stride*0] = _mm_cvtsi128_si32(from);
   to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
@@ -112,7 +112,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
 template <typename Scalar, bool RandomAccess> struct linspaced_op
 {
   typedef typename packet_traits<Scalar>::type Packet;
-  linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}
+  linspaced_op(const Scalar& low, const Scalar& high, Index num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}

   template<typename Index>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
@@ -257,9 +257,9 @@ class level3_blocking
     LhsScalar* m_blockA;
     RhsScalar* m_blockB;

-    DenseIndex m_mc;
+    Index m_mc;
-    DenseIndex m_nc;
+    Index m_nc;
-    DenseIndex m_kc;
+    Index m_kc;

   public:

@@ -267,9 +267,9 @@ class level3_blocking
       : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
     {}

-    inline DenseIndex mc() const { return m_mc; }
+    inline Index mc() const { return m_mc; }
-    inline DenseIndex nc() const { return m_nc; }
+    inline Index nc() const { return m_nc; }
-    inline DenseIndex kc() const { return m_kc; }
+    inline Index kc() const { return m_kc; }

     inline LhsScalar* blockA() { return m_blockA; }
     inline RhsScalar* blockB() { return m_blockB; }
@@ -299,7 +299,7 @@ class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, M

   public:

-    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/, int /*num_threads*/, bool /*full_rows = false*/)
+    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, int /*num_threads*/, bool /*full_rows = false*/)
     {
       this->m_mc = ActualRows;
       this->m_nc = ActualCols;
@@ -326,12 +326,12 @@ class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, M
     typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
     typedef gebp_traits<LhsScalar,RhsScalar> Traits;

-    DenseIndex m_sizeA;
+    Index m_sizeA;
-    DenseIndex m_sizeB;
+    Index m_sizeB;

   public:

-    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, int num_threads, bool l3_blocking)
+    gemm_blocking_space(Index rows, Index cols, Index depth, int num_threads, bool l3_blocking)
     {
       this->m_mc = Transpose ? cols : rows;
       this->m_nc = Transpose ? rows : cols;
@@ -343,8 +343,8 @@ class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, M
       }
       else // no l3 blocking
       {
-        DenseIndex m = this->m_mc;
+        Index m = this->m_mc;
-        DenseIndex n = this->m_nc;
+        Index n = this->m_nc;
         computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
       }

@@ -32,7 +32,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
   enum { AmbientDimAtCompileTime = _AmbientDim };
   typedef _Scalar Scalar;
   typedef NumTraits<Scalar> ScalarTraits;
-  typedef DenseIndex Index;
+  typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
   typedef typename ScalarTraits::Real RealScalar;
   typedef typename ScalarTraits::NonInteger NonInteger;
   typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
@@ -41,7 +41,7 @@ public:
   };
   typedef _Scalar Scalar;
   typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef DenseIndex Index;
+  typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
   typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
   typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
                         ? Dynamic
@@ -37,7 +37,7 @@ public:
   };
   typedef _Scalar Scalar;
   typedef typename NumTraits<Scalar>::Real RealScalar;
-  typedef DenseIndex Index;
+  typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
   typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;

   /** Default constructor without initialization */
@@ -724,7 +724,6 @@ template<typename Other>
 struct quaternionbase_assign_impl<Other,3,3>
 {
   typedef typename Other::Scalar Scalar;
-  typedef DenseIndex Index;
   template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat)
   {
     using std::sqrt;
@@ -742,13 +741,13 @@ struct quaternionbase_assign_impl<Other,3,3>
     }
     else
     {
-      DenseIndex i = 0;
+      Index i = 0;
       if (mat.coeff(1,1) > mat.coeff(0,0))
        i = 1;
       if (mat.coeff(2,2) > mat.coeff(i,i))
        i = 2;
-      DenseIndex j = (i+1)%3;
-      DenseIndex k = (j+1)%3;
+      Index j = (i+1)%3;
+      Index k = (j+1)%3;
 
       t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
       q.coeffs().coeffRef(i) = Scalar(0.5) * t;
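This code path is exercised whenever a quaternion is built from a 3x3 rotation matrix; the local pivot indices i, j, k now use the namespace-level Eigen::Index instead of the removed member typedef. A minimal sketch of the user-facing call (the rotation chosen here is just an example):

    #include <cmath>
    #include <Eigen/Geometry>

    int main()
    {
      const double halfPi = std::acos(0.0);

      // A 90-degree rotation about the z-axis, expressed as a rotation matrix.
      Eigen::Matrix3d R;
      R = Eigen::AngleAxisd(halfPi, Eigen::Vector3d::UnitZ());

      // Converting the matrix to a quaternion goes through
      // quaternionbase_assign_impl<Other,3,3>::run() shown above.
      Eigen::Quaterniond q(R);
      Eigen::Quaterniond ref(Eigen::AngleAxisd(halfPi, Eigen::Vector3d::UnitZ()));
      return q.isApprox(ref) ? 0 : 1;
    }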
@@ -40,13 +40,13 @@ template<typename _MatrixType, int Options=Upper> class PardisoLDLT;
 
 namespace internal
 {
-  template<typename Index>
+  template<typename IndexType>
   struct pardiso_run_selector
   {
-    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
-                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
+                      IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
     {
-      Index error = 0;
+      IndexType error = 0;
       ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
       return error;
     }
@@ -54,11 +54,11 @@ namespace internal
   template<>
   struct pardiso_run_selector<long long int>
   {
-    typedef long long int Index;
+    typedef long long int IndexType;
-    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
-                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,
+                      IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)
     {
-      Index error = 0;
+      IndexType error = 0;
       ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
       return error;
     }
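Renaming the template parameter from Index to IndexType keeps it from shadowing the new Eigen::Index typedef; the specialization for long long int still routes calls to the 64-bit pardiso_64 entry point. An illustrative, self-contained sketch of the same specialization pattern (deliberately not calling MKL):

    #include <cstdio>

    // Primary template: the "32-bit index" path.
    template<typename IndexType>
    struct run_selector
    {
      static IndexType run(IndexType n)
      { std::printf("32-bit path, n=%d\n", static_cast<int>(n)); return 0; }
    };

    // Specialization: long long indices take the 64-bit path.
    template<>
    struct run_selector<long long int>
    {
      typedef long long int IndexType;
      static IndexType run(IndexType n)
      { std::printf("64-bit path, n=%lld\n", n); return 0; }
    };

    int main()
    {
      run_selector<int>::run(10);            // picks the primary template
      run_selector<long long int>::run(10);  // picks the specialization
      return 0;
    }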
@@ -72,7 +72,7 @@ namespace internal
     typedef _MatrixType MatrixType;
     typedef typename _MatrixType::Scalar Scalar;
     typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
+    typedef typename _MatrixType::StorageIndex StorageIndex;
   };
 
   template<typename _MatrixType, int Options>
@@ -81,7 +81,7 @@ namespace internal
     typedef _MatrixType MatrixType;
     typedef typename _MatrixType::Scalar Scalar;
     typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
+    typedef typename _MatrixType::StorageIndex StorageIndex;
   };
 
   template<typename _MatrixType, int Options>
@@ -90,7 +90,7 @@ namespace internal
     typedef _MatrixType MatrixType;
     typedef typename _MatrixType::Scalar Scalar;
     typedef typename _MatrixType::RealScalar RealScalar;
-    typedef typename _MatrixType::Index Index;
+    typedef typename _MatrixType::StorageIndex StorageIndex;
   };
 
 }
@@ -111,18 +111,18 @@ class PardisoImpl : public SparseSolveBase<PardisoImpl<Derived>
     typedef typename Traits::Scalar Scalar;
     typedef typename Traits::RealScalar RealScalar;
     typedef typename Traits::StorageIndex StorageIndex;
-    typedef SparseMatrix<Scalar,RowMajor,Index> SparseMatrixType;
+    typedef SparseMatrix<Scalar,RowMajor,StorageIndex> SparseMatrixType;
     typedef Matrix<Scalar,Dynamic,1> VectorType;
-    typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
-    typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
-    typedef Array<Index,64,1,DontAlign> ParameterType;
+    typedef Matrix<StorageIndex, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<StorageIndex, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+    typedef Array<StorageIndex,64,1,DontAlign> ParameterType;
     enum {
       ScalarIsComplex = NumTraits<Scalar>::IsComplex
     };
 
     PardisoImpl()
     {
-      eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type");
+      eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type");
       m_iparm.setZero();
       m_msglvl = 0; // No output
       m_isInitialized = false;
@@ -181,7 +181,7 @@ class PardisoImpl : public SparseSolveBase<PardisoImpl<Derived>
     {
       if(m_isInitialized) // Factorization ran at least once
       {
-        internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
+        internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
                                                    m_iparm.data(), m_msglvl, 0, 0);
       }
     }
@@ -261,7 +261,7 @@ Derived& PardisoImpl<Derived>::compute(const MatrixType& a)
   derived().getMatrix(a);
 
   Index error;
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 12, m_size,
+  error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 12, m_size,
                                                      m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
                                                      m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
 
@@ -284,7 +284,7 @@ Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)
   derived().getMatrix(a);
 
   Index error;
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 11, m_size,
+  error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 11, m_size,
                                                      m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
                                                      m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
 
@@ -304,7 +304,7 @@ Derived& PardisoImpl<Derived>::factorize(const MatrixType& a)
   derived().getMatrix(a);
 
   Index error;
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 22, m_size,
+  error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 22, m_size,
                                                      m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
                                                      m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
 
@@ -348,7 +348,7 @@ bool PardisoImpl<Base>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XD
   }
 
   Index error;
-  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 33, m_size,
+  error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 33, m_size,
                                                      m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
                                                      m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
                                                      rhs_ptr, x.derived().data());
@@ -424,7 +424,7 @@ class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
   protected:
     typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
    typedef typename Base::Scalar Scalar;
-    typedef typename Base::Index Index;
+    typedef typename Base::StorageIndex StorageIndex;
     typedef typename Base::RealScalar RealScalar;
     using Base::pardisoInit;
     using Base::m_matrix;
@@ -454,7 +454,7 @@ class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
     void getMatrix(const MatrixType& matrix)
     {
       // PARDISO supports only upper, row-major matrices
-      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+      PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;
       m_matrix.resize(matrix.rows(), matrix.cols());
       m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
     }
@@ -482,7 +482,7 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
   protected:
     typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
     typedef typename Base::Scalar Scalar;
-    typedef typename Base::Index Index;
+    typedef typename Base::StorageIndex StorageIndex;
     typedef typename Base::RealScalar RealScalar;
     using Base::pardisoInit;
     using Base::m_matrix;
@@ -510,7 +510,7 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
     void getMatrix(const MatrixType& matrix)
     {
       // PARDISO supports only upper, row-major matrices
-      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+      PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;
       m_matrix.resize(matrix.rows(), matrix.cols());
       m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
     }
 
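From the user's side the Pardiso wrappers are unaffected by the Index-to-StorageIndex cleanup; the sparse-solver interface stays the same. A hedged usage sketch (requires Intel MKL and the PardisoSupport module; the function name and the explicit Lower argument are illustrative choices, not mandated by the patch):

    #include <Eigen/Sparse>
    #include <Eigen/PardisoSupport>

    int solveWithPardiso(const Eigen::SparseMatrix<double>& A,
                         const Eigen::VectorXd& b,
                         Eigen::VectorXd& x)
    {
      // PardisoLLT now takes its StorageIndex from the matrix type
      // instead of exposing the old Index typedef.
      Eigen::PardisoLLT<Eigen::SparseMatrix<double>, Eigen::Lower> solver;
      solver.compute(A);                 // analyzePattern + factorize
      if (solver.info() != Eigen::Success)
        return 1;                        // factorization failed
      x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 2;
    }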
@@ -306,7 +306,7 @@ void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, co
   {
     // If the matrices are large enough, let's exploit the sparse structure of A by
     // splitting it in half (wrt n1), and packing the non-zero columns.
-    DenseIndex n2 = n - n1;
+    Index n2 = n - n1;
     MatrixXr A1(n1,n), A2(n2,n), B1(n,n), B2(n,n);
     Index k1=0, k2=0;
     for(Index j=0; j<n; ++j)
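For context, the "packing the non-zero columns" idea mentioned in the comment above can be sketched in isolation (this is an illustration of the technique, not the BDCSVD internals; the helper name is hypothetical):

    #include <Eigen/Dense>

    // Copy only the columns of A that are not identically zero into a compact
    // matrix, so a subsequent product touches less data. The counter uses
    // Eigen::Index, matching the patched code.
    Eigen::MatrixXd packNonZeroColumns(const Eigen::MatrixXd& A)
    {
      Eigen::MatrixXd packed(A.rows(), A.cols());
      Eigen::Index k = 0;
      for (Eigen::Index j = 0; j < A.cols(); ++j)
        if (A.col(j).cwiseAbs().sum() != 0.0)
          packed.col(k++) = A.col(j);
      return packed.leftCols(k);
    }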