Mirror of https://gitlab.com/libeigen/eigen.git

Commit e0bd6f5738: Merged eigen/eigen into default
@@ -13,9 +13,9 @@
 #define EIGEN_MATRIXSTORAGE_H
 
 #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
+#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) X; EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
 #else
-#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+#define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X)
 #endif
 
 namespace Eigen {
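Note: with this change the internal plugin macro takes an argument X that is expanded right before the user-supplied EIGEN_DENSE_STORAGE_CTOR_PLUGIN body, so each call site can declare a local variable named size for the plugin to read (the test harness's plugin further down, { on_temporary_creation(size); }, relies on this). A minimal user-side sketch of the mechanism follows; the counter name is hypothetical and not part of this commit:

// Define the plugin before including any Eigen header (hypothetical counter).
static long dense_storage_ctors = 0;
#define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { if (size != 0) ++dense_storage_ctors; }

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Matrix3f a;        // fixed-size storage: the plugin now fires with size == 9
  Eigen::MatrixXf b(2, 3);  // dynamic storage: the plugin fires with size == 6
  // One count for a, one for b with this setup.
  std::cout << "DenseStorage constructions seen: " << dense_storage_ctors << "\n";
}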
@@ -184,12 +184,16 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 {
 internal::plain_array<T,Size,_Options> m_data;
 public:
-EIGEN_DEVICE_FUNC DenseStorage() {}
+EIGEN_DEVICE_FUNC DenseStorage() {
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
+}
 EIGEN_DEVICE_FUNC
 explicit DenseStorage(internal::constructor_without_unaligned_array_assert)
 : m_data(internal::constructor_without_unaligned_array_assert()) {}
 EIGEN_DEVICE_FUNC
-DenseStorage(const DenseStorage& other) : m_data(other.m_data) {}
+DenseStorage(const DenseStorage& other) : m_data(other.m_data) {
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)
+}
 EIGEN_DEVICE_FUNC
 DenseStorage& operator=(const DenseStorage& other)
 {

@@ -197,7 +201,7 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 return *this;
 }
 EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) {
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 eigen_internal_assert(size==rows*cols && rows==_Rows && cols==_Cols);
 EIGEN_UNUSED_VARIABLE(size);
 EIGEN_UNUSED_VARIABLE(rows);

@@ -343,7 +347,7 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
 EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols)
 : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)
 {
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 eigen_internal_assert(size==rows*cols && rows>=0 && cols >=0);
 }
 EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)

@@ -351,6 +355,7 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
 , m_rows(other.m_rows)
 , m_cols(other.m_cols)
 {
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*m_cols)
 internal::smart_copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data);
 }
 EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)

@@ -403,7 +408,7 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
 m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
 else
 m_data = 0;
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 }
 m_rows = rows;
 m_cols = cols;

@@ -422,7 +427,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
 explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
 EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
 {
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 eigen_internal_assert(size==rows*cols && rows==_Rows && cols >=0);
 EIGEN_UNUSED_VARIABLE(rows);
 }

@@ -430,6 +435,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
 : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(_Rows*other.m_cols))
 , m_cols(other.m_cols)
 {
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols*_Rows)
 internal::smart_copy(other.m_data, other.m_data+_Rows*m_cols, m_data);
 }
 EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)

@@ -477,7 +483,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
 m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
 else
 m_data = 0;
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 }
 m_cols = cols;
 }

@@ -495,7 +501,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
 explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
 EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
 {
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 eigen_internal_assert(size==rows*cols && rows>=0 && cols == _Cols);
 EIGEN_UNUSED_VARIABLE(cols);
 }

@@ -503,6 +509,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
 : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*_Cols))
 , m_rows(other.m_rows)
 {
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*_Cols)
 internal::smart_copy(other.m_data, other.m_data+other.m_rows*_Cols, m_data);
 }
 EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)

@@ -550,7 +557,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
 m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
 else
 m_data = 0;
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 }
 m_rows = rows;
 }
@@ -812,6 +812,13 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
 this->_set_noalias(other);
 }
 
+// Initialize an arbitrary matrix from an object convertible to the Derived type.
+template<typename T>
+EIGEN_DEVICE_FUNC
+EIGEN_STRONG_INLINE void _init1(const Derived& other){
+this->_set_noalias(other);
+}
+
 // Initialize an arbitrary matrix from a generic Eigen expression
 template<typename T, typename OtherDerived>
 EIGEN_DEVICE_FUNC

@@ -834,7 +841,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
 this->derived() = r;
 }
 
-// For fixed -size arrays:
+// For fixed-size Array<Scalar,...>
 template<typename T>
 EIGEN_DEVICE_FUNC
 EIGEN_STRONG_INLINE void _init1(const Scalar& val0,

@@ -846,6 +853,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
 Base::setConstant(val0);
 }
 
+// For fixed-size Array<Index,...>
 template<typename T>
 EIGEN_DEVICE_FUNC
 EIGEN_STRONG_INLINE void _init1(const Index& val0,
@@ -46,7 +46,7 @@ typedef uint32x4_t Packet4ui;
 const Packet4f p4f_##NAME = pset1<Packet4f>(X)
 
 #define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
-const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int>(X))
+const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int32_t>(X))
 
 #define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
 const Packet4i p4i_##NAME = pset1<Packet4i>(X)

@@ -83,7 +83,7 @@ template<> struct packet_traits<float> : default_packet_traits
 HasSqrt = 0
 };
 };
-template<> struct packet_traits<int> : default_packet_traits
+template<> struct packet_traits<int32_t> : default_packet_traits
 {
 typedef Packet4i type;
 typedef Packet4i half; // Packet2i intrinsics not implemented yet

@@ -106,10 +106,10 @@ EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_
 #endif
 
 template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
+template<> struct unpacket_traits<Packet4i> { typedef int32_t type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };
 
 template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return vdupq_n_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }
 
 template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
 {

@@ -117,7 +117,7 @@ template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
 Packet4f countdown = vld1q_f32(f);
 return vaddq_f32(pset1<Packet4f>(a), countdown);
 }
-template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)
+template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)
 {
 const int32_t i[] = {0, 1, 2, 3};
 Packet4i countdown = vld1q_s32(i);

@@ -241,10 +241,10 @@ template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, con
 template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
 
 template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
 
 template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
 {

@@ -253,7 +253,7 @@ template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
 hi = vld1_dup_f32(from+1);
 return vcombine_f32(lo, hi);
 }
-template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
 {
 int32x2_t lo, hi;
 lo = vld1_dup_s32(from);

@@ -261,11 +261,11 @@ template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
 return vcombine_s32(lo, hi);
 }
 
-template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<float> (float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
 
-template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<float> (float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
 
 template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
 {

@@ -276,7 +276,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
 res = vsetq_lane_f32(from[3*stride], res, 3);
 return res;
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
 {
 Packet4i res = pset1<Packet4i>(0);
 res = vsetq_lane_s32(from[0*stride], res, 0);

@@ -293,7 +293,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, co
 to[stride*2] = vgetq_lane_f32(from, 2);
 to[stride*3] = vgetq_lane_f32(from, 3);
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
 {
 to[stride*0] = vgetq_lane_s32(from, 0);
 to[stride*1] = vgetq_lane_s32(from, 1);

@@ -301,12 +301,12 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const
 to[stride*3] = vgetq_lane_s32(from, 3);
 }
 
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<float> (const float* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
 
 // FIXME only store the 2 first elements ?
 template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
-template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
 
 template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
 float32x2_t a_lo, a_hi;

@@ -361,7 +361,7 @@ template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
 return sum;
 }
 
-template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
 {
 int32x2_t a_lo, a_hi, sum;
 

@@ -408,7 +408,7 @@ template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
 
 return vget_lane_f32(prod, 0);
 }
-template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
 {
 int32x2_t a_lo, a_hi, prod;
 

@@ -436,7 +436,7 @@ template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
 return vget_lane_f32(min, 0);
 }
 
-template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
 {
 int32x2_t a_lo, a_hi, min;
 

@@ -461,7 +461,7 @@ template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
 return vget_lane_f32(max, 0);
 }
 
-template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
 {
 int32x2_t a_lo, a_hi, max;
 
@@ -148,7 +148,7 @@ struct tribb_kernel
 ResMapper res(_res, resStride);
 gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
 
-Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer;
+Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer((internal::constructor_without_unaligned_array_assert()));
 
 // let's process the block per panel of actual_mc x BlockSize,
 // again, each is split into three parts, etc.
@@ -137,7 +137,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
 ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
 ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
 
-Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer;
+Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));
 triangularBuffer.setZero();
 if((Mode&ZeroDiag)==ZeroDiag)
 triangularBuffer.diagonal().setZero();

@@ -284,7 +284,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
 ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
 ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
 
-Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer;
+Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));
 triangularBuffer.setZero();
 if((Mode&ZeroDiag)==ZeroDiag)
 triangularBuffer.diagonal().setZero();
@@ -837,7 +837,7 @@ namespace Eigen {
 // just an empty macro !
 #define EIGEN_EMPTY
 
-#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || __CUDACC_VER__) // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324)
+#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || defined(__CUDACC_VER__)) // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324)
 #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
 using Base::operator =;
 #elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
@@ -414,7 +414,8 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
 
 if(n==1)
 {
-m_eivalues.coeffRef(0,0) = numext::real(matrix.diagonal()[0]);
+m_eivec = matrix;
+m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0));
 if(computeEigenvectors)
 m_eivec.setOnes(n,n);
 m_info = Success;
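The 1x1 path above now copies the input into m_eivec before reading the eigenvalue. A small usage sketch exercising that path (illustration only, not part of the commit):

#include <Eigen/Eigenvalues>
#include <iostream>

int main() {
  Eigen::MatrixXd m(1, 1);
  m(0, 0) = 3.0;
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> full(m);                               // values + vectors
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> valsOnly(m, Eigen::EigenvaluesOnly);   // values only
  std::cout << full.eigenvalues()(0) << " " << full.eigenvectors()(0, 0) << " "
            << valsOnly.eigenvalues()(0) << "\n";
}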
@@ -87,7 +87,8 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec
 const TriangularView<const VectorsType, UnitLower> V(vectors);
 
 // A -= V T V^* A
-Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,0,
+Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,
+(VectorsType::MaxColsAtCompileTime==1 && MatrixType::MaxColsAtCompileTime!=1)?RowMajor:ColMajor,
 VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat;
 // FIXME add .noalias() once the triangular product can work inplace
 if(forward) tmp = T.template triangularView<Upper>() * tmp;
@@ -112,9 +112,11 @@ public:
 ColsAtCompileTime = MatrixType::ColsAtCompileTime,
 MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
 MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-Options = MatrixType::Options
+TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))
+: ColsAtCompileTime==1 ? (MatrixType::Options | RowMajor)
+: MatrixType::Options
 };
-typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>
 TransposeTypeWithSameStorageOrder;
 
 void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
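The TrOptions adjustment above matters when MatrixType is a compile-time row or column vector: the transposed helper type must carry the matching storage-order bit, because Eigen checks at compile time that vector-shaped Matrix types use their natural order. A standalone illustration of that check (assumption: the usual INVALID_MATRIX_TEMPLATE_PARAMETERS static assertion; nothing here is from the patch):

#include <Eigen/Core>

int main() {
  Eigen::Matrix<double, 4, 1, Eigen::ColMajor> col_ok;   // column vector, ColMajor: fine
  Eigen::Matrix<double, 1, 4, Eigen::RowMajor> row_ok;   // row vector, RowMajor: fine
  // Eigen::Matrix<double, 4, 1, Eigen::RowMajor> bad;   // fails to compile: invalid template parameters
  (void)col_ok; (void)row_ok;
  return 0;
}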
@@ -151,6 +151,7 @@ ei_add_test(packetmath "-DEIGEN_FAST_MATH=1")
 ei_add_test(unalignedassert)
 ei_add_test(vectorization_logic)
 ei_add_test(basicstuff)
+ei_add_test(constructor)
 ei_add_test(linearstructure)
 ei_add_test(integer_types)
 ei_add_test(unalignedcount)
test/constructor.cpp (new file, 84 lines)
@@ -0,0 +1,84 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#define TEST_ENABLE_TEMPORARY_TRACKING
+
+#include "main.h"
+
+template<typename MatrixType> struct Wrapper
+{
+MatrixType m_mat;
+inline Wrapper(const MatrixType &x) : m_mat(x) {}
+inline operator const MatrixType& () const { return m_mat; }
+inline operator MatrixType& () { return m_mat; }
+};
+
+template<typename MatrixType> void ctor_init1(const MatrixType& m)
+{
+// Check logic in PlainObjectBase::_init1
+Index rows = m.rows();
+Index cols = m.cols();
+
+MatrixType m0 = MatrixType::Random(rows,cols);
+
+VERIFY_EVALUATION_COUNT( MatrixType m1(m0), 1);
+VERIFY_EVALUATION_COUNT( MatrixType m2(m0+m0), 1);
+VERIFY_EVALUATION_COUNT( MatrixType m2(m0.block(0,0,rows,cols)) , 1);
+
+Wrapper<MatrixType> wrapper(m0);
+VERIFY_EVALUATION_COUNT( MatrixType m3(wrapper) , 1);
+}
+
+
+void test_constructor()
+{
+for(int i = 0; i < g_repeat; i++) {
+CALL_SUBTEST_1( ctor_init1(Matrix<float, 1, 1>()) );
+CALL_SUBTEST_1( ctor_init1(Matrix4d()) );
+CALL_SUBTEST_1( ctor_init1(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+CALL_SUBTEST_1( ctor_init1(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
+}
+{
+Matrix<Index,1,1> a(123);
+VERIFY_IS_EQUAL(a[0], 123);
+}
+{
+Matrix<Index,1,1> a(123.0);
+VERIFY_IS_EQUAL(a[0], 123);
+}
+{
+Matrix<float,1,1> a(123);
+VERIFY_IS_EQUAL(a[0], 123.f);
+}
+{
+Array<Index,1,1> a(123);
+VERIFY_IS_EQUAL(a[0], 123);
+}
+{
+Array<Index,1,1> a(123.0);
+VERIFY_IS_EQUAL(a[0], 123);
+}
+{
+Array<float,1,1> a(123);
+VERIFY_IS_EQUAL(a[0], 123.f);
+}
+{
+Array<Index,3,3> a(123);
+VERIFY_IS_EQUAL(a(4), 123);
+}
+{
+Array<Index,3,3> a(123.0);
+VERIFY_IS_EQUAL(a(4), 123);
+}
+{
+Array<float,3,3> a(123);
+VERIFY_IS_EQUAL(a(4), 123.f);
+}
+}
@@ -101,6 +101,12 @@ void test_jacobisvd()
 // Test on inf/nan matrix
 CALL_SUBTEST_7( (svd_inf_nan<JacobiSVD<MatrixXf>, MatrixXf>()) );
 CALL_SUBTEST_10( (svd_inf_nan<JacobiSVD<MatrixXd>, MatrixXd>()) );
+
+// bug1395 test compile-time vectors as input
+CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,6,1>()) ));
+CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,1,6>()) ));
+CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,Dynamic,1>(r)) ));
+CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix<double,1,Dynamic>(c)) ));
 }
 
 CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
@@ -41,6 +41,7 @@
 #include <complex>
 #include <deque>
 #include <queue>
+#include <cassert>
 #include <list>
 #if __cplusplus >= 201103L
 #include <random>

@@ -79,10 +80,12 @@
 #ifdef TEST_ENABLE_TEMPORARY_TRACKING
 
 static long int nb_temporaries;
+static long int nb_temporaries_on_assert = -1;
 
 inline void on_temporary_creation(long int size) {
 // here's a great place to set a breakpoint when debugging failures in this test!
 if(size!=0) nb_temporaries++;
+if(nb_temporaries_on_assert>0) assert(nb_temporaries<nb_temporaries_on_assert);
 }
 
 #define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { on_temporary_creation(size); }
@@ -37,8 +37,7 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m)
 RightPermutationType rp(rv);
 MatrixType m_permuted = MatrixType::Random(rows,cols);
 
-const int one_if_dynamic = MatrixType::SizeAtCompileTime==Dynamic ? 1 : 0;
-VERIFY_EVALUATION_COUNT(m_permuted = lp * m_original * rp, one_if_dynamic); // 1 temp for sub expression "lp * m_original"
+VERIFY_EVALUATION_COUNT(m_permuted = lp * m_original * rp, 1); // 1 temp for sub expression "lp * m_original"
 
 for (int i=0; i<rows; i++)
 for (int j=0; j<cols; j++)

@@ -50,7 +49,7 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m)
 VERIFY_IS_APPROX(m_permuted, lm*m_original*rm);
 
 m_permuted = m_original;
-VERIFY_EVALUATION_COUNT(m_permuted = lp * m_permuted * rp, one_if_dynamic);
+VERIFY_EVALUATION_COUNT(m_permuted = lp * m_permuted * rp, 1);
 VERIFY_IS_APPROX(m_permuted, lm*m_original*rm);
 
 VERIFY_IS_APPROX(lp.inverse()*m_permuted*rp.inverse(), m_original);

@@ -75,19 +74,19 @@ template<typename MatrixType> void permutationmatrices(const MatrixType& m)
 
 // check inplace permutations
 m_permuted = m_original;
-VERIFY_EVALUATION_COUNT(m_permuted.noalias()= lp.inverse() * m_permuted, one_if_dynamic); // 1 temp to allocate the mask
+VERIFY_EVALUATION_COUNT(m_permuted.noalias()= lp.inverse() * m_permuted, 1); // 1 temp to allocate the mask
 VERIFY_IS_APPROX(m_permuted, lp.inverse()*m_original);
 
 m_permuted = m_original;
-VERIFY_EVALUATION_COUNT(m_permuted.noalias() = m_permuted * rp.inverse(), one_if_dynamic); // 1 temp to allocate the mask
+VERIFY_EVALUATION_COUNT(m_permuted.noalias() = m_permuted * rp.inverse(), 1); // 1 temp to allocate the mask
 VERIFY_IS_APPROX(m_permuted, m_original*rp.inverse());
 
 m_permuted = m_original;
-VERIFY_EVALUATION_COUNT(m_permuted.noalias() = lp * m_permuted, one_if_dynamic); // 1 temp to allocate the mask
+VERIFY_EVALUATION_COUNT(m_permuted.noalias() = lp * m_permuted, 1); // 1 temp to allocate the mask
 VERIFY_IS_APPROX(m_permuted, lp*m_original);
 
 m_permuted = m_original;
-VERIFY_EVALUATION_COUNT(m_permuted.noalias() = m_permuted * rp, one_if_dynamic); // 1 temp to allocate the mask
+VERIFY_EVALUATION_COUNT(m_permuted.noalias() = m_permuted * rp, 1); // 1 temp to allocate the mask
 VERIFY_IS_APPROX(m_permuted, m_original*rp);
 
 if(rows>1 && cols>1)
@@ -70,10 +70,10 @@ template<typename MatrixType> void matrixRedux(const MatrixType& m)
 VERIFY_IS_APPROX(m1.block(r0,c0,0,0).prod(), Scalar(1));
 
 // test nesting complex expression
-VERIFY_EVALUATION_COUNT( (m1.matrix()*m1.matrix().transpose()).sum(), (MatrixType::SizeAtCompileTime==Dynamic ? 1 : 0) );
+VERIFY_EVALUATION_COUNT( (m1.matrix()*m1.matrix().transpose()).sum(), (MatrixType::IsVectorAtCompileTime && MatrixType::SizeAtCompileTime!=1 ? 0 : 1) );
 Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> m2(rows,rows);
 m2.setRandom();
-VERIFY_EVALUATION_COUNT( ((m1.matrix()*m1.matrix().transpose())+m2).sum(), (MatrixType::SizeAtCompileTime==Dynamic ? 1 : 0) );
+VERIFY_EVALUATION_COUNT( ((m1.matrix()*m1.matrix().transpose())+m2).sum(),(MatrixType::IsVectorAtCompileTime && MatrixType::SizeAtCompileTime!=1 ? 0 : 1));
 }
 
 template<typename VectorType> void vectorRedux(const VectorType& w)

@@ -156,8 +156,10 @@ void test_redux()
 CALL_SUBTEST_1( matrixRedux(Array<float, 1, 1>()) );
 CALL_SUBTEST_2( matrixRedux(Matrix2f()) );
 CALL_SUBTEST_2( matrixRedux(Array2f()) );
+CALL_SUBTEST_2( matrixRedux(Array22f()) );
 CALL_SUBTEST_3( matrixRedux(Matrix4d()) );
 CALL_SUBTEST_3( matrixRedux(Array4d()) );
+CALL_SUBTEST_3( matrixRedux(Array44d()) );
 CALL_SUBTEST_4( matrixRedux(MatrixXcf(internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
 CALL_SUBTEST_4( matrixRedux(ArrayXXcf(internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
 CALL_SUBTEST_5( matrixRedux(MatrixXd (internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
@@ -231,12 +231,12 @@ template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)
 Matrix<Scalar,MatrixType::RowsAtCompileTime,MatrixType::RowsAtCompileTime> m1m1 = m1 * m1.transpose();
 VERIFY_IS_APPROX( (m1 * m1.transpose()).colwise().sum(), m1m1.colwise().sum());
 Matrix<Scalar,1,MatrixType::RowsAtCompileTime> tmp(rows);
-VERIFY_EVALUATION_COUNT( tmp = (m1 * m1.transpose()).colwise().sum(), (MatrixType::RowsAtCompileTime==Dynamic ? 1 : 0));
+VERIFY_EVALUATION_COUNT( tmp = (m1 * m1.transpose()).colwise().sum(), 1);
 
 m2 = m1.rowwise() - (m1.colwise().sum()/RealScalar(m1.rows())).eval();
 m1 = m1.rowwise() - (m1.colwise().sum()/RealScalar(m1.rows()));
 VERIFY_IS_APPROX( m1, m2 );
-VERIFY_EVALUATION_COUNT( m2 = (m1.rowwise() - m1.colwise().sum()/RealScalar(m1.rows())), (MatrixType::RowsAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime!=1 ? 1 : 0) );
+VERIFY_EVALUATION_COUNT( m2 = (m1.rowwise() - m1.colwise().sum()/RealScalar(m1.rows())), (MatrixType::RowsAtCompileTime!=1 ? 1 : 0) );
 }
 
 void test_vectorwiseop()
@@ -54,7 +54,7 @@ struct is_input_scalar<Sizes<> > {
 static const bool value = true;
 };
 #ifndef EIGEN_EMULATE_CXX11_META_H
-template <typename std::size_t... Indices>
+template <typename std::ptrdiff_t... Indices>
 struct is_input_scalar<Sizes<Indices...> > {
 static const bool value = (Sizes<Indices...>::total_size == 1);
 };
@@ -126,7 +126,7 @@ class TensorStorage<T, DSizes<IndexType, NumIndices_>, Options_>
 }
 else
 m_data = 0;
-EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})
 }
 m_dimensions = nbDimensions;
 }
@@ -61,10 +61,11 @@ struct MatrixExponentialScalingOp
 * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
 * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
 */
-template <typename MatrixType>
-void matrix_exp_pade3(const MatrixType &A, MatrixType &U, MatrixType &V)
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade3(const MatA& A, MatU& U, MatV& V)
 {
-typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
+typedef typename MatA::PlainObject MatrixType;
+typedef typename NumTraits<typename traits<MatA>::Scalar>::Real RealScalar;
 const RealScalar b[] = {120.L, 60.L, 12.L, 1.L};
 const MatrixType A2 = A * A;
 const MatrixType tmp = b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols());

@@ -77,9 +78,10 @@ void matrix_exp_pade3(const MatrixType &A, MatrixType &U, MatrixType &V)
 * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
 * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
 */
-template <typename MatrixType>
-void matrix_exp_pade5(const MatrixType &A, MatrixType &U, MatrixType &V)
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade5(const MatA& A, MatU& U, MatV& V)
 {
+typedef typename MatA::PlainObject MatrixType;
 typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
 const RealScalar b[] = {30240.L, 15120.L, 3360.L, 420.L, 30.L, 1.L};
 const MatrixType A2 = A * A;

@@ -94,9 +96,10 @@ void matrix_exp_pade5(const MatrixType &A, MatrixType &U, MatrixType &V)
 * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
 * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
 */
-template <typename MatrixType>
-void matrix_exp_pade7(const MatrixType &A, MatrixType &U, MatrixType &V)
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade7(const MatA& A, MatU& U, MatV& V)
 {
+typedef typename MatA::PlainObject MatrixType;
 typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
 const RealScalar b[] = {17297280.L, 8648640.L, 1995840.L, 277200.L, 25200.L, 1512.L, 56.L, 1.L};
 const MatrixType A2 = A * A;

@@ -114,9 +117,10 @@ void matrix_exp_pade7(const MatrixType &A, MatrixType &U, MatrixType &V)
 * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
 * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
 */
-template <typename MatrixType>
-void matrix_exp_pade9(const MatrixType &A, MatrixType &U, MatrixType &V)
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade9(const MatA& A, MatU& U, MatV& V)
 {
+typedef typename MatA::PlainObject MatrixType;
 typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
 const RealScalar b[] = {17643225600.L, 8821612800.L, 2075673600.L, 302702400.L, 30270240.L,
 2162160.L, 110880.L, 3960.L, 90.L, 1.L};

@@ -135,9 +139,10 @@ void matrix_exp_pade9(const MatrixType &A, MatrixType &U, MatrixType &V)
 * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
 * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
 */
-template <typename MatrixType>
-void matrix_exp_pade13(const MatrixType &A, MatrixType &U, MatrixType &V)
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade13(const MatA& A, MatU& U, MatV& V)
 {
+typedef typename MatA::PlainObject MatrixType;
 typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
 const RealScalar b[] = {64764752532480000.L, 32382376266240000.L, 7771770303897600.L,
 1187353796428800.L, 129060195264000.L, 10559470521600.L, 670442572800.L,

@@ -162,9 +167,10 @@ void matrix_exp_pade13(const MatrixType &A, MatrixType &U, MatrixType &V)
 * This function activates only if your long double is double-double or quadruple.
 */
 #if LDBL_MANT_DIG > 64
-template <typename MatrixType>
-void matrix_exp_pade17(const MatrixType &A, MatrixType &U, MatrixType &V)
+template <typename MatA, typename MatU, typename MatV>
+void matrix_exp_pade17(const MatA& A, MatU& U, MatV& V)
 {
+typedef typename MatA::PlainObject MatrixType;
 typedef typename NumTraits<typename traits<MatrixType>::Scalar>::Real RealScalar;
 const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L,
 100610229646136770560000.L, 15720348382208870400000.L,

@@ -342,9 +348,10 @@ struct matrix_exp_computeUV<MatrixType, long double>
 * \param arg argument of matrix exponential (should be plain object)
 * \param result variable in which result will be stored
 */
-template <typename MatrixType, typename ResultType>
-void matrix_exp_compute(const MatrixType& arg, ResultType &result)
+template <typename ArgType, typename ResultType>
+void matrix_exp_compute(const ArgType& arg, ResultType &result)
 {
+typedef typename ArgType::PlainObject MatrixType;
 #if LDBL_MANT_DIG > 112 // rarely happens
 typedef typename traits<MatrixType>::Scalar Scalar;
 typedef typename NumTraits<Scalar>::Real RealScalar;

@@ -354,11 +361,11 @@ void matrix_exp_compute(const MatrixType& arg, ResultType &result)
 return;
 }
 #endif
-typename MatrixType::PlainObject U, V;
+MatrixType U, V;
 int squarings;
 matrix_exp_computeUV<MatrixType>::run(arg, U, V, squarings); // Pade approximant is (U+V) / (-U+V)
-typename MatrixType::PlainObject numer = U + V;
-typename MatrixType::PlainObject denom = -U + V;
+MatrixType numer = U + V;
+MatrixType denom = -U + V;
 result = denom.partialPivLu().solve(numer);
 for (int i=0; i<squarings; i++)
 result *= result; // undo scaling by repeated squaring
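The Padé helpers above are now templated on separate argument types and recover a plain matrix type via MatA::PlainObject (and ArgType::PlainObject in matrix_exp_compute), so the internal routines no longer require all arguments to share one concrete Matrix type. A small usage sketch of the public entry point, which funnels into matrix_exp_compute; nothing in it is specific to this commit:

#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd E = A.exp();  // matrix exponential via the Padé + scaling-and-squaring path
  std::cout << "exp(A) is " << E.rows() << "x" << E.cols() << "\n";
}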