mirror of https://gitlab.com/libeigen/eigen.git
synced 2025-09-12 17:33:15 +08:00

Improvements to the tidiness and completeness of the NEON implementation

parent 452371cead
commit da5a7afed0
@@ -15,7 +15,8 @@ namespace Eigen {
 namespace internal {
 
-inline uint32x4_t p4ui_CONJ_XOR() {
+inline uint32x4_t p4ui_CONJ_XOR()
+{
   // See bug 1325, clang fails to call vld1q_u64.
 #if EIGEN_COMP_CLANG
   uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
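The mask built above flips only the sign bit of the imaginary lanes, so XOR-ing a packet with it conjugates both complex values at once. A minimal standalone sketch of the same idea, assuming a plain NEON toolchain (the helper name conj2cf is illustrative, not part of the patch):

#include <arm_neon.h>

// (a+bi, c+di) -> (a-bi, c-di): XOR the sign bit of lanes 1 and 3.
static inline float32x4_t conj2cf(float32x4_t x)
{
  const uint32x4_t mask = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(x), mask));
}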
@@ -26,7 +27,8 @@ inline uint32x4_t p4ui_CONJ_XOR() {
 #endif
 }
 
-inline uint32x2_t p2ui_CONJ_XOR() {
+inline uint32x2_t p2ui_CONJ_XOR()
+{
   static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000 };
   return vld1_u32( conj_XOR_DATA );
 }
@@ -36,48 +38,64 @@ struct Packet2cf
 {
   EIGEN_STRONG_INLINE Packet2cf() {}
   EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
   Packet4f v;
 };
 
 template<> struct packet_traits<std::complex<float> > : default_packet_traits
 {
   typedef Packet2cf type;
   typedef Packet2cf half;
-  enum {
+  enum
+  {
     Vectorizable = 1,
     AlignedOnScalar = 1,
     size = 2,
     HasHalfPacket = 0,
 
     HasAdd = 1,
     HasSub = 1,
     HasMul = 1,
     HasDiv = 1,
     HasNegate = 1,
     HasAbs = 0,
     HasAbs2 = 0,
     HasMin = 0,
     HasMax = 0,
     HasSetLinear = 0
   };
 };
 
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2cf half; };
-
-template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
-{
-  float32x2_t r64;
-  r64 = vld1_f32((const float *)&from);
+template<> struct unpacket_traits<Packet2cf>
+{
+  typedef std::complex<float> type;
+  typedef Packet2cf half;
+  enum
+  {
+    size = 2,
+    alignment = Aligned16,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+};
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+  const float32x2_t r64 = vld1_f32(reinterpret_cast<const float*>(&from));
   return Packet2cf(vcombine_f32(r64, r64));
 }
 
-template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(padd<Packet4f>(a.v, b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(psub<Packet4f>(a.v, b.v)); }
+
 template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate<Packet4f>(a.v)); }
 
 template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
 {
-  Packet4ui b = vreinterpretq_u32_f32(a.v);
+  const Packet4ui b = vreinterpretq_u32_f32(a.v);
   return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR())));
 }
@@ -93,7 +111,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
   v1 = vmulq_f32(v1, b.v);
   // Multiply the imag a with b
   v2 = vmulq_f32(v2, b.v);
   // Conjugate v2
   v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR()));
   // Swap real/imag elements in v2.
   v2 = vrev64q_f32(v2);
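The pmul hunk above is the classic splat/XOR/rev64 complex product: for (a+bi)(c+di) it forms (ac, ad) and (bc, bd), negates the second lane of the latter, swaps it to (-bd, bc), and adds to get (ac-bd, ad+bc). A self-contained sketch of the same pattern with raw intrinsics (cmul2cf is a hypothetical name, not Eigen's implementation):

#include <arm_neon.h>

// Multiply two pairs of complex<float> packed as (re0, im0, re1, im1).
static inline float32x4_t cmul2cf(float32x4_t a, float32x4_t b)
{
  const uint32x4_t conj = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
  // Splat the real and imaginary parts of a across each 64-bit pair.
  float32x4_t re = vcombine_f32(vdup_lane_f32(vget_low_f32(a), 0),
                                vdup_lane_f32(vget_high_f32(a), 0));
  float32x4_t im = vcombine_f32(vdup_lane_f32(vget_low_f32(a), 1),
                                vdup_lane_f32(vget_high_f32(a), 1));
  float32x4_t v1 = vmulq_f32(re, b);                                       // (ac, ad)
  float32x4_t v2 = vmulq_f32(im, b);                                       // (bc, bd)
  v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), conj));  // (bc, -bd)
  v2 = vrev64q_f32(v2);                                                    // (-bd, bc)
  return vaddq_f32(v1, v2);                                                // (ac-bd, ad+bc)
}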
@@ -113,96 +131,78 @@ template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packe
   return Packet2cf(pand<Packet4f>(eq, eq_swapped));
 }
 
-template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
-  return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
-  return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
-  return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
-}
+template<> EIGEN_STRONG_INLINE Packet2cf pand<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf por<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pxor<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{ return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
 template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
-{
-  return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
-}
+{ return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
 
-template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
-template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(reinterpret_cast<const float*>(from))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(reinterpret_cast<const float*>(from))); }
 
-template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from)
+{ return pset1<Packet2cf>(*from); }
 
-template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *to, const Packet2cf& from)
+{ EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast<float*>(to), from.v); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *to, const Packet2cf& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<float*>(to), from.v); }
 
-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(
+    const std::complex<float>* from, Index stride)
 {
-  Packet4f res = pset1<Packet4f>(0.f);
-  res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);
+  Packet4f res = vdupq_n_f32(std::real(from[0*stride]));
   res = vsetq_lane_f32(std::imag(from[0*stride]), res, 1);
   res = vsetq_lane_f32(std::real(from[1*stride]), res, 2);
   res = vsetq_lane_f32(std::imag(from[1*stride]), res, 3);
   return Packet2cf(res);
 }
 
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(
+    std::complex<float>* to, const Packet2cf& from, Index stride)
 {
   to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
   to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
 }
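A quick usage sketch for the strided complex gather/scatter above (the buffer and strides are illustrative; calls are written as they would appear inside Eigen::internal):

void gather_scatter_demo(std::complex<float>* buf)  // hypothetical
{
  // Read buf[0] and buf[3] into one packet, then write them to buf[0] and buf[2].
  Packet2cf p = pgather<std::complex<float>, Packet2cf>(buf, 3);
  pscatter<std::complex<float>, Packet2cf>(buf, p, 2);
}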
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((const float *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *addr)
+{ EIGEN_ARM_PREFETCH(reinterpret_cast<const float*>(addr)); }
 
 template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
 {
   EIGEN_ALIGN16 std::complex<float> x[2];
-  vst1q_f32((float *)x, a.v);
+  vst1q_f32(reinterpret_cast<float*>(x), a.v);
   return x[0];
 }
 
 template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
-{
-  float32x2_t a_lo, a_hi;
-  Packet4f a_r128;
-
-  a_lo = vget_low_f32(a.v);
-  a_hi = vget_high_f32(a.v);
-  a_r128 = vcombine_f32(a_hi, a_lo);
-
-  return Packet2cf(a_r128);
-}
+{ return Packet2cf(vcombine_f32(vget_high_f32(a.v), vget_low_f32(a.v))); }
 
 template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a)
-{
-  return Packet2cf(vrev64q_f32(a.v));
-}
+{ return Packet2cf(vrev64q_f32(a.v)); }
 
 template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
 {
-  float32x2_t a1, a2;
   std::complex<float> s;
-
-  a1 = vget_low_f32(a.v);
-  a2 = vget_high_f32(a.v);
-  a2 = vadd_f32(a1, a2);
-  vst1_f32((float *)&s, a2);
-
+  vst1_f32(reinterpret_cast<float*>(&s), vadd_f32(vget_low_f32(a.v), vget_high_f32(a.v)));
   return s;
 }
 
 template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
 {
-  Packet4f sum1, sum2, sum;
-
-  // Add the first two 64-bit float32x2_t of vecs[0]
-  sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));
-  sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));
-  sum = vaddq_f32(sum1, sum2);
-
-  return Packet2cf(sum);
+  const Packet4f sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));
+  const Packet4f sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));
+  return Packet2cf(vaddq_f32(sum1, sum2));
 }
 
 template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
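The rewritten predux reduces the two complex elements with a single 64-bit vadd_f32: with a.v = (r0, i0, r1, i1), adding the low and high halves yields (r0+r1, i0+i1), which is the complex sum stored straight into a std::complex<float>. A scalar reference for checking, assuming nothing beyond the standard library (the function name is illustrative):

#include <complex>

std::complex<float> predux2cf_ref(const std::complex<float> x[2])  // hypothetical
{ return x[0] + x[1]; }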
@@ -220,14 +220,14 @@ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const P
   v1 = vmul_f32(v1, a2);
   // Multiply the imag a with b
   v2 = vmul_f32(v2, a2);
   // Conjugate v2
   v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR()));
   // Swap real/imag elements in v2.
   v2 = vrev64_f32(v2);
   // Add v1, v2
   prod = vadd_f32(v1, v2);
 
-  vst1_f32((float *)&s, prod);
+  vst1_f32(reinterpret_cast<float*>(&s), prod);
 
   return s;
 }
@@ -237,44 +237,36 @@ struct palign_impl<Offset,Packet2cf>
 {
   EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)
   {
-    if (Offset==1)
-    {
+    if (Offset == 1)
       first.v = vextq_f32(first.v, second.v, 2);
-    }
   }
 };
 
-template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+template<> struct conj_helper<Packet2cf,Packet2cf,false,true>
 {
   EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
   { return padd(pmul(x,y),c); }
 
   EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
-  {
-    return internal::pmul(a, pconj(b));
-  }
+  { return internal::pmul(a, pconj(b)); }
 };
 
-template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+template<> struct conj_helper<Packet2cf,Packet2cf,true,false>
 {
   EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
   { return padd(pmul(x,y),c); }
 
   EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
-  {
-    return internal::pmul(pconj(a), b);
-  }
+  { return internal::pmul(pconj(a), b); }
 };
 
-template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+template<> struct conj_helper<Packet2cf,Packet2cf,true,true>
 {
   EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
   { return padd(pmul(x,y),c); }
 
   EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
-  {
-    return pconj(internal::pmul(a, b));
-  }
+  { return pconj(internal::pmul(a,b)); }
 };
 
 EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
@@ -282,18 +274,18 @@ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
 template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
 {
   // TODO optimize it for NEON
-  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+  Packet2cf res = conj_helper<Packet2cf, Packet2cf, false, true>().pmul(a,b);
   Packet4f s, rev_s;
 
   // this computes the norm
   s = vmulq_f32(b.v, b.v);
   rev_s = vrev64q_f32(s);
 
-  return Packet2cf(pdiv<Packet4f>(res.v, vaddq_f32(s,rev_s)));
+  return Packet2cf(pdiv<Packet4f>(res.v, vaddq_f32(s, rev_s)));
 }
 
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet2cf,2>& kernel) {
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2cf, 2>& kernel)
+{
   Packet4f tmp = vcombine_f32(vget_high_f32(kernel.packet[0].v), vget_high_f32(kernel.packet[1].v));
   kernel.packet[0].v = vcombine_f32(vget_low_f32(kernel.packet[0].v), vget_low_f32(kernel.packet[1].v));
   kernel.packet[1].v = tmp;
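The pdiv above divides by multiplying with the conjugate: res = a * conj(b), while s = b.v * b.v holds the squared components and rev_s their 64-bit-pairwise swap, so s + rev_s carries the squared norm in both lanes of each element. The underlying identity (standard complex arithmetic, not quoted from the patch):

\[
\frac{a}{b} \;=\; \frac{a\,\overline{b}}{b\,\overline{b}} \;=\; \frac{a\,\overline{b}}{\operatorname{Re}(b)^2 + \operatorname{Im}(b)^2}
\]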
@@ -321,7 +313,8 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
 {
   typedef Packet1cd type;
   typedef Packet1cd half;
-  enum {
+  enum
+  {
     Vectorizable = 1,
     AlignedOnScalar = 0,
     size = 1,
@@ -340,24 +333,49 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
   };
 };
 
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet1cd half; };
+template<> struct unpacket_traits<Packet1cd>
+{
+  typedef std::complex<double> type;
+  enum
+  {
+    size=1,
+    alignment=Aligned16,
+    vectorizable=true,
+    masked_load_available=false,
+    masked_store_available=false
+  };
+  typedef Packet1cd half;
+};
 
-template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
-template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>(reinterpret_cast<const double*>(from))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>(reinterpret_cast<const double*>(from))); }
 
 template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
-{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+{
+  /* here we really have to use unaligned loads :( */
+  return ploadu<Packet1cd>(&from);
+}
 
-template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(padd<Packet2d>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(psub<Packet2d>(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate<Packet2d>(a.v)); }
-template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR))); }
+template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(padd<Packet2d>(a.v, b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(psub<Packet2d>(a.v, b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a)
+{ return Packet1cd(pnegate<Packet2d>(a.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a)
+{ return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR))); }
 
 template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
 {
   Packet2d v1, v2;
 
   // Get the real values of a
   v1 = vdupq_lane_f64(vget_low_f64(a.v), 0);
   // Get the imag values of a
   v2 = vdupq_lane_f64(vget_high_f64(a.v), 0);
@@ -365,7 +383,7 @@ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, con
   v1 = vmulq_f64(v1, b.v);
   // Multiply the imag a with b
   v2 = vmulq_f64(v2, b.v);
   // Conjugate v2
   v2 = vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(v2), p2ul_CONJ_XOR));
   // Swap real/imag elements in v2.
   v2 = preverse<Packet2d>(v2);
@@ -385,31 +403,32 @@ template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packe
   return Packet1cd(pand<Packet2d>(eq, eq_swapped));
 }
 
-template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
-  return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
-  return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
-}
-template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
-  return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
-}
+template<> EIGEN_STRONG_INLINE Packet1cd pand<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd por<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd pxor<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{ return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
 template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
-{
-  return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));
-}
+{ return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
 
-template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from)
+{ return pset1<Packet1cd>(*from); }
 
-template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *to, const Packet1cd& from)
+{ EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast<double*>(to), from.v); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *to, const Packet1cd& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), from.v); }
 
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ARM_PREFETCH((const double *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> *addr)
+{ EIGEN_ARM_PREFETCH(reinterpret_cast<const double*>(addr)); }
 
-template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)
+template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(
+    const std::complex<double>* from, Index stride)
 {
   Packet2d res = pset1<Packet2d>(0.0);
   res = vsetq_lane_f64(std::real(from[0*stride]), res, 0);
@@ -417,17 +436,14 @@ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Pack
   return Packet1cd(res);
 }
 
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)
-{
-  to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1));
-}
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(
+    std::complex<double>* to, const Packet1cd& from, Index stride)
+{ to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1)); }
 
 template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
 {
   EIGEN_ALIGN16 std::complex<double> res;
   pstore<std::complex<double> >(&res, a);
 
   return res;
 }
@@ -455,9 +471,7 @@ template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
   { return padd(pmul(x,y),c); }
 
   EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
-  {
-    return internal::pmul(a, pconj(b));
-  }
+  { return internal::pmul(a, pconj(b)); }
 };
 
 template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
@@ -466,9 +480,7 @@ template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
   { return padd(pmul(x,y),c); }
 
   EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
-  {
-    return internal::pmul(pconj(a), b);
-  }
+  { return internal::pmul(pconj(a), b); }
 };
 
 template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
@@ -477,9 +489,7 @@ template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
   { return padd(pmul(x,y),c); }
 
   EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
-  {
-    return pconj(internal::pmul(a, b));
-  }
+  { return pconj(internal::pmul(a,b)); }
 };
 
 EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
@@ -495,9 +505,7 @@ template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, con
 }
 
 EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
-{
-  return Packet1cd(preverse(Packet2d(x.v)));
-}
+{ return Packet1cd(preverse(Packet2d(x.v))); }
 
 EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
 {
@@ -12,36 +12,21 @@ namespace Eigen {
 
 namespace internal {
 
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pexp<Packet4f>(const Packet4f& x)
-{
-  return pexp_float(x);
-}
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp<Packet4f>(const Packet4f& x)
+{ return pexp_float(x); }
 
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f plog<Packet4f>(const Packet4f& x)
-{
-  return plog_float(x);
-}
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog<Packet4f>(const Packet4f& x)
+{ return plog_float(x); }
 
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f psin<Packet4f>(const Packet4f& x)
-{
-  return psin_float(x);
-}
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psin<Packet4f>(const Packet4f& x)
+{ return psin_float(x); }
 
-template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
-Packet4f pcos<Packet4f>(const Packet4f& x)
-{
-  return pcos_float(x);
-}
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pcos<Packet4f>(const Packet4f& x)
+{ return pcos_float(x); }
 
 // Hyperbolic Tangent function.
-template <>
-EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
-ptanh<Packet4f>(const Packet4f& x) {
-  return internal::generic_fast_tanh_float(x);
-}
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f ptanh<Packet4f>(const Packet4f& x)
+{ return internal::generic_fast_tanh_float(x); }
 
 } // end namespace internal
@@ -32,7 +32,7 @@ namespace internal {
 #if EIGEN_ARCH_ARM64
 #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
 #else
 #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
 #endif
 #endif
@@ -50,7 +50,8 @@ struct eigen_packet_wrapper
   operator const T&() const { return m_val; }
   eigen_packet_wrapper() {}
   eigen_packet_wrapper(const T &v) : m_val(v) {}
-  eigen_packet_wrapper& operator=(const T &v) {
+  eigen_packet_wrapper& operator=(const T &v)
+  {
     m_val = v;
     return *this;
   }
@@ -59,16 +60,16 @@ struct eigen_packet_wrapper
 };
 typedef eigen_packet_wrapper<float32x2_t,0> Packet2f;
 typedef eigen_packet_wrapper<float32x4_t,1> Packet4f;
-typedef eigen_packet_wrapper<int32x4_t  ,2> Packet4i;
-typedef eigen_packet_wrapper<int32x2_t  ,3> Packet2i;
+typedef eigen_packet_wrapper<int32x2_t  ,2> Packet2i;
+typedef eigen_packet_wrapper<int32x4_t  ,3> Packet4i;
 typedef eigen_packet_wrapper<uint32x4_t ,4> Packet4ui;
 
 #else
 
 typedef float32x2_t Packet2f;
 typedef float32x4_t Packet4f;
-typedef int32x4_t   Packet4i;
 typedef int32x2_t   Packet2i;
+typedef int32x4_t   Packet4i;
 typedef uint32x4_t  Packet4ui;
 
 #endif // EIGEN_COMP_MSVC
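The wrapper branch above (guarded, per the #endif comment, by EIGEN_COMP_MSVC) exists because on that toolchain several NEON vector types can alias the same built-in type, so bare typedefs would not yield distinct C++ types for overload resolution; the integer template parameter disambiguates them. A reduced sketch of the mechanism, with illustrative names only:

template <typename T, int Unique>
struct tagged_packet  // hypothetical analogue of eigen_packet_wrapper
{
  operator T&() { return m_val; }
  operator const T&() const { return m_val; }
  T m_val;
};
// Even if RawA and RawB alias the same underlying type, these are distinct:
//   typedef tagged_packet<RawA, 0> PacketA;
//   typedef tagged_packet<RawB, 1> PacketB;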
@@ -99,79 +100,124 @@ typedef uint32x4_t Packet4ui;
 #endif
 
 template <>
-struct packet_traits<float> : default_packet_traits {
+struct packet_traits<float> : default_packet_traits
+{
   typedef Packet4f type;
   typedef Packet4f half; // Packet2f intrinsics not implemented yet
-  enum {
+  enum
+  {
     Vectorizable = 1,
     AlignedOnScalar = 1,
     size = 4,
     HasHalfPacket = 0, // Packet2f intrinsics not implemented yet
 
-    HasDiv = 1,
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 1,
+
+    HasDiv = 1,
     HasFloor = 1,
-    // FIXME check the Has*
+
     HasSin = EIGEN_FAST_MATH,
     HasCos = EIGEN_FAST_MATH,
     HasLog = 1,
     HasExp = 1,
     HasSqrt = 0,
     HasTanh = EIGEN_FAST_MATH,
     HasErf = EIGEN_FAST_MATH
   };
 };
-template <>
-struct packet_traits<int32_t> : default_packet_traits {
-  typedef Packet4i type;
-  typedef Packet4i half; // Packet2i intrinsics not implemented yet
-  enum {
-    Vectorizable = 1,
-    AlignedOnScalar = 1,
-    size=4,
-    HasHalfPacket=0 // Packet2i intrinsics not implemented yet
-    // FIXME check the Has*
-  };
-};
 
-#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
+template <>
+struct packet_traits<int32_t> : default_packet_traits
+{
+  typedef Packet4i type;
+  typedef Packet4i half; // Packet2i intrinsics not implemented yet
+  enum
+  {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+    HasHalfPacket = 0, // Packet2i intrinsics not implemented yet
+
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasNegate = 1,
+    HasAbs = 1,
+    HasArg = 0,
+    HasAbs2 = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasConj = 1,
+    HasSetLinear = 0,
+    HasBlend = 0,
+    HasReduxp = 1
+  };
+};
+
+#if EIGEN_GNUC_AT_MOST(4, 4) && !EIGEN_COMP_LLVM
 // workaround gcc 4.2, 4.3 and 4.4 compilatin issue
 EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
-EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
-EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32 (const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_f32(const float* x) { return ::vld1_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
 EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
 EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
 #endif
 
 template<> struct unpacket_traits<Packet4f>
 {
   typedef float type;
   typedef Packet4f half;
   typedef Packet4i integer_packet;
-  enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+  enum
+  {
+    size = 4,
+    alignment = Aligned16,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
 };
 template<> struct unpacket_traits<Packet4i>
 {
   typedef int32_t type;
   typedef Packet4i half;
-  enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+  enum
+  {
+    size = 4,
+    alignment = Aligned16,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
 };
 
 template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
 template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }
 
-template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return vreinterpretq_f32_u32(vdupq_n_u32(from)); }
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from)
+{ return vreinterpretq_f32_u32(vdupq_n_u32(from)); }
 
 template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
 {
-  const float f[] = {0, 1, 2, 3};
-  Packet4f countdown = vld1q_f32(f);
-  return vaddq_f32(pset1<Packet4f>(a), countdown);
+  const float c[] = {0.0f,1.0f,2.0f,3.0f};
+  return vaddq_f32(pset1<Packet4f>(a), vld1q_f32(c));
 }
 template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)
 {
-  const int32_t i[] = {0, 1, 2, 3};
-  Packet4i countdown = vld1q_s32(i);
-  return vaddq_s32(pset1<Packet4i>(a), countdown);
+  const int32_t c[] = {0,1,2,3};
+  return vaddq_s32(pset1<Packet4i>(a), vld1q_s32(c));
}
 
 template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
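plset fills a packet with the ramp a, a+1, a+2, a+3. A quick illustrative use (values are examples, not from the patch):

Packet4f r = plset<Packet4f>(10.0f);  // lanes (10.0f, 11.0f, 12.0f, 13.0f)
Packet4i s = plset<Packet4i>(7);      // lanes (7, 8, 9, 10)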
@@ -216,7 +262,8 @@ template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const
 }
 
 template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
-{ eigen_assert(false && "packet integer division are not supported by NEON");
+{
+  eigen_assert(false && "packet integer division are not supported by NEON");
   return pset1<Packet4i>(0);
 }
@@ -231,9 +278,11 @@ template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, co
 // MLA is not fused i.e. does 2 roundings.
 // In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):
 // MLA: 10 GFlop/s ; FMA: 12 GFlops/s.
-template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+{ return vfmaq_f32(c,a,b); }
 #else
-template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+{
 #if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
   // Clang/ARM will replace VMLA by VMUL+VADD at least for some values of -mcpu,
   // at least -mcpu=cortex-a8 and -mcpu=cortex-a7. Since the former is the default on
@@ -257,7 +306,8 @@ template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f&
 #endif
 
 // No FMA instruction for int, so use MLA unconditionally.
-template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c)
+{ return vmlaq_s32(c,a,b); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
@@ -265,12 +315,23 @@ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const
 template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
 
-template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vcleq_f32(a,b)); }
-template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vcltq_f32(a,b)); }
-template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vceqq_f32(a,b)); }
-template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vmvnq_u32(vcgeq_f32(a,b))); }
-
-template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return vreinterpretq_s32_u32(vceqq_s32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_le<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vcleq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_le<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vreinterpretq_s32_u32(vcleq_s32(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vcltq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vreinterpretq_s32_u32(vcltq_s32(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vceqq_f32(a,b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq<Packet4i>(const Packet4i& a, const Packet4i& b)
+{ return vreinterpretq_s32_u32(vceqq_s32(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan<Packet4f>(const Packet4f& a, const Packet4f& b)
+{ return vreinterpretq_f32_u32(vmvnq_u32(vcgeq_f32(a,b))); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
 {
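The pcmp_* specializations return all-ones/all-zeros lane masks reinterpreted into the packet's element type; a typical consumer is a lanewise select. A standalone sketch with raw intrinsics (select_le is a hypothetical name, not part of the patch):

#include <arm_neon.h>

// Returns a where a <= b, else b, lanewise; effectively a min for ordered inputs.
static inline float32x4_t select_le(float32x4_t a, float32x4_t b)
{
  const uint32x4_t mask = vcleq_f32(a, b);  // 0xFFFFFFFF where a <= b
  return vbslq_f32(mask, a, b);             // bitwise select per lane
}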
@ -286,278 +347,191 @@ template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
|
|||||||
|
|
||||||
// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
|
// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
|
||||||
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
|
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
|
||||||
{
|
{ return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
|
||||||
return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
|
|
||||||
}
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
|
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
|
||||||
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
|
template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
|
||||||
{
|
{ return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
|
||||||
return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
|
|
||||||
}
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
|
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
|
||||||
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
|
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
|
||||||
{
|
{ return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
|
||||||
return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
|
|
||||||
}
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
|
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
|
||||||
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
|
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
|
||||||
{
|
{ return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); }
|
||||||
return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
|
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b)
|
||||||
}
|
{ return vbicq_s32(a,b); }
|
||||||
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
|
|
||||||
|
|
||||||
template<int N> EIGEN_STRONG_INLINE Packet4i pshiftright(Packet4i a) { return vshrq_n_s32(a,N); }
|
template<int N> EIGEN_STRONG_INLINE Packet4i pshiftright(Packet4i a) { return vshrq_n_s32(a,N); }
|
||||||
template<int N> EIGEN_STRONG_INLINE Packet4i pshiftleft(Packet4i a) { return vshlq_n_s32(a,N); }
|
template<int N> EIGEN_STRONG_INLINE Packet4i pshiftleft(Packet4i a) { return vshlq_n_s32(a,N); }
|
||||||
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
|
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
|
||||||
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
|
{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
|
||||||
|
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from)
|
||||||
|
{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
|
||||||
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
|
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
|
||||||
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
|
{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
|
||||||
|
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from)
|
||||||
|
{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
|
||||||
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
|
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
|
||||||
{
|
{ return vcombine_f32(vld1_dup_f32(from), vld1_dup_f32(from+1)); }
|
||||||
float32x2_t lo, hi;
|
|
||||||
lo = vld1_dup_f32(from);
|
|
||||||
hi = vld1_dup_f32(from+1);
|
|
||||||
return vcombine_f32(lo, hi);
|
|
||||||
}
|
|
||||||
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
|
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
|
||||||
{
|
{ return vcombine_s32(vld1_dup_s32(from), vld1_dup_s32(from+1)); }
|
||||||
int32x2_t lo, hi;
|
|
||||||
lo = vld1_dup_s32(from);
|
|
||||||
hi = vld1_dup_s32(from+1);
|
|
||||||
return vcombine_s32(lo, hi);
|
|
||||||
}
|
|
||||||
|
|
||||||
-template<> EIGEN_STRONG_INLINE void pstore<float> (float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
-
-template<> EIGEN_STRONG_INLINE void pstoreu<float> (float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
-template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
+template<> EIGEN_STRONG_INLINE Packet4f ploadquad<Packet4f>(const float* from) { return vld1q_dup_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadquad<Packet4i>(const int32_t* from) { return vld1q_dup_s32(from); }
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to,from); }
+
+template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to,from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to,from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to,from); }
 
 template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
 {
-  Packet4f res = pset1<Packet4f>(0.f);
-  res = vsetq_lane_f32(from[0*stride], res, 0);
-  res = vsetq_lane_f32(from[1*stride], res, 1);
-  res = vsetq_lane_f32(from[2*stride], res, 2);
-  res = vsetq_lane_f32(from[3*stride], res, 3);
+  Packet4f res = vld1q_dup_f32(from);
+  res = vld1q_lane_f32(from + 1*stride, res, 1);
+  res = vld1q_lane_f32(from + 2*stride, res, 2);
+  res = vld1q_lane_f32(from + 3*stride, res, 3);
   return res;
 }
 template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
 {
-  Packet4i res = pset1<Packet4i>(0);
-  res = vsetq_lane_s32(from[0*stride], res, 0);
-  res = vsetq_lane_s32(from[1*stride], res, 1);
-  res = vsetq_lane_s32(from[2*stride], res, 2);
-  res = vsetq_lane_s32(from[3*stride], res, 3);
+  Packet4i res = vld1q_dup_s32(from);
+  res = vld1q_lane_s32(from + 1*stride, res, 1);
+  res = vld1q_lane_s32(from + 2*stride, res, 2);
+  res = vld1q_lane_s32(from + 3*stride, res, 3);
   return res;
 }
 
 template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
 {
-  to[stride*0] = vgetq_lane_f32(from, 0);
-  to[stride*1] = vgetq_lane_f32(from, 1);
-  to[stride*2] = vgetq_lane_f32(from, 2);
-  to[stride*3] = vgetq_lane_f32(from, 3);
+  vst1q_lane_f32(to + stride*0, from, 0);
+  vst1q_lane_f32(to + stride*1, from, 1);
+  vst1q_lane_f32(to + stride*2, from, 2);
+  vst1q_lane_f32(to + stride*3, from, 3);
 }
 template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
 {
-  to[stride*0] = vgetq_lane_s32(from, 0);
-  to[stride*1] = vgetq_lane_s32(from, 1);
-  to[stride*2] = vgetq_lane_s32(from, 2);
-  to[stride*3] = vgetq_lane_s32(from, 3);
+  vst1q_lane_s32(to + stride*0, from, 0);
+  vst1q_lane_s32(to + stride*1, from, 1);
+  vst1q_lane_s32(to + stride*2, from, 2);
+  vst1q_lane_s32(to + stride*3, from, 3);
 }
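For orientation while reading the hunk above: pgather fills one packet lane per strided element and pscatter writes one lane back per strided slot. A scalar sketch of that contract, in plain C++ for illustration only (the arrays stand in for the packets):

#include <cstddef>

// Scalar model of pgather/pscatter semantics for a 4-lane packet.
// "stride" is measured in elements, exactly as in the kernels above.
void gather4(const float* from, std::ptrdiff_t stride, float out[4])
{
  for (int lane = 0; lane < 4; ++lane)
    out[lane] = from[lane * stride];   // lane i comes from from[i*stride]
}

void scatter4(float* to, std::ptrdiff_t stride, const float in[4])
{
  for (int lane = 0; lane < 4; ++lane)
    to[lane * stride] = in[lane];      // lane i lands at to[i*stride]
}

The change itself swaps vsetq_lane, which inserts via a general-purpose register, for vld1q_lane/vst1q_lane, which move each strided element directly between memory and a vector lane; lane 0 of the gather is already covered by the initial vld1q_dup.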
 
-template<> EIGEN_STRONG_INLINE void prefetch<float> (const float* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
 template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }
 
-// FIXME only store the 2 first elements ?
-template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x[4]; vst1q_f32(x, a); return x[0]; }
-template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { EIGEN_ALIGN16 int32_t x[4]; vst1q_s32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return vgetq_lane_f32(a,0); }
+template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { return vgetq_lane_s32(a,0); }
 
-template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
-  float32x2_t a_lo, a_hi;
-  Packet4f a_r64;
-
-  a_r64 = vrev64q_f32(a);
-  a_lo = vget_low_f32(a_r64);
-  a_hi = vget_high_f32(a_r64);
-  return vcombine_f32(a_hi, a_lo);
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{
+  const float32x4_t a_r64 = vrev64q_f32(a);
+  return vcombine_f32(vget_high_f32(a_r64), vget_low_f32(a_r64));
 }
-template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
-  int32x2_t a_lo, a_hi;
-  Packet4i a_r64;
-
-  a_r64 = vrev64q_s32(a);
-  a_lo = vget_low_s32(a_r64);
-  a_hi = vget_high_s32(a_r64);
-  return vcombine_s32(a_hi, a_lo);
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{
+  const int32x4_t a_r64 = vrev64q_s32(a);
+  return vcombine_s32(vget_high_s32(a_r64), vget_low_s32(a_r64));
 }
+template<> EIGEN_STRONG_INLINE Packet4ui preverse(const Packet4ui& a)
+{
+  const uint32x4_t a_r64 = vrev64q_u32(a);
+  return vcombine_u32(vget_high_u32(a_r64), vget_low_u32(a_r64));
+}
 
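The reversal idiom above has two steps: vrev64q reverses the lanes inside each 64-bit half, and vcombine with the halves swapped reverses the halves themselves. A scalar model of the lane bookkeeping (illustration only):

// vrev64q swaps lanes within each 64-bit half; vcombine(high, low)
// then swaps the halves, completing the full reversal.
void reverse4(const float a[4], float r[4])
{
  const float rev64[4] = { a[1], a[0], a[3], a[2] };  // per-half reversal
  r[0] = rev64[2]; r[1] = rev64[3];                   // former high half first
  r[2] = rev64[0]; r[3] = rev64[1];                   // former low half last
}
// Net effect: r = { a[3], a[2], a[1], a[0] }.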
 template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
 template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
 
-template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
-  return pfrexp_float(a,exponent);
-}
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent)
+{ return pfrexp_float(a,exponent); }
 
-template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
-  return pldexp_float(a,exponent);
-}
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent)
+{ return pldexp_float(a,exponent); }
 
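pfrexp_float and pldexp_float are generic helpers defined elsewhere in Eigen; assuming they follow the usual frexp/ldexp contract, the per-lane behaviour is the one sketched below in plain C++ (note the exponents travel in a float packet, matching the wrapper signatures):

#include <cmath>

// Per-lane contract: a == mantissa * 2^exponent, mantissa in [0.5, 1).
void frexp4(const float a[4], float mantissa[4], float exponent[4])
{
  for (int i = 0; i < 4; ++i) {
    int e;
    mantissa[i] = std::frexp(a[i], &e);
    exponent[i] = static_cast<float>(e);  // exponent carried as a float
  }
}

void ldexp4(const float a[4], const float exponent[4], float r[4])
{
  for (int i = 0; i < 4; ++i)
    r[i] = std::ldexp(a[i], static_cast<int>(exponent[i]));  // a * 2^exponent
}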
 template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
 {
-  float32x2_t a_lo, a_hi, sum;
-
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
-  sum = vpadd_f32(a_lo, a_hi);
-  sum = vpadd_f32(sum, sum);
-  return vget_lane_f32(sum, 0);
+  const float32x2_t sum = vadd_f32(vget_low_f32(a), vget_high_f32(a));
+  return vget_lane_f32(vpadd_f32(sum, sum), 0);
 }
+template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
+{
+  const int32x2_t sum = vadd_s32(vget_low_s32(a), vget_high_s32(a));
+  return vget_lane_s32(vpadd_s32(sum, sum), 0);
+}
 
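The tidied predux does one full-width add of the two halves followed by a single pairwise add, instead of chaining two vpadd steps as the old code did. A scalar model of the new reduction order:

// vadd_f32(low, high) then one vpadd of the 2-lane result.
float sum4(const float a[4])
{
  const float half_sum[2] = { a[0] + a[2], a[1] + a[3] };  // low + high halves
  return half_sum[0] + half_sum[1];                        // final pairwise add
}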
 template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
 {
-  float32x4x2_t vtrn1, vtrn2, res1, res2;
-  Packet4f sum1, sum2, sum;
-
-  // NEON zip performs interleaving of the supplied vectors.
-  // We perform two interleaves in a row to acquire the transposed vector
-  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
-  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
-  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
-  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
-
-  // Do the addition of the resulting vectors
-  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
-  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
-  sum = vaddq_f32(sum1, sum2);
-
-  return sum;
+  const float32x4x2_t vtrn1 = vzipq_f32(vecs[0], vecs[2]);
+  const float32x4x2_t vtrn2 = vzipq_f32(vecs[1], vecs[3]);
+  const float32x4x2_t res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
+  const float32x4x2_t res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
+  return vaddq_f32(vaddq_f32(res1.val[0], res1.val[1]), vaddq_f32(res2.val[0], res2.val[1]));
 }
 
-template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
-{
-  int32x2_t a_lo, a_hi, sum;
-
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
-  sum = vpadd_s32(a_lo, a_hi);
-  sum = vpadd_s32(sum, sum);
-  return vget_lane_s32(sum, 0);
-}
-
 template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
 {
-  int32x4x2_t vtrn1, vtrn2, res1, res2;
-  Packet4i sum1, sum2, sum;
-
-  // NEON zip performs interleaving of the supplied vectors.
-  // We perform two interleaves in a row to acquire the transposed vector
-  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
-  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
-  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
-  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
-
-  // Do the addition of the resulting vectors
-  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
-  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
-  sum = vaddq_s32(sum1, sum2);
-
-  return sum;
+  const int32x4x2_t vtrn1 = vzipq_s32(vecs[0], vecs[2]);
+  const int32x4x2_t vtrn2 = vzipq_s32(vecs[1], vecs[3]);
+  const int32x4x2_t res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
+  const int32x4x2_t res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
+  return vaddq_s32(vaddq_s32(res1.val[0], res1.val[1]), vaddq_s32(res2.val[0], res2.val[1]));
 }
 
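As the removed comments explained, the two rounds of zips transpose the 4x4 block formed by the four input packets; adding the transposed rows then places the complete sum of vecs[j] into lane j. A scalar model of the end result (illustration only):

// Lane j of preduxp's result is the horizontal sum of vecs[j].
void preduxp4(const float vecs[4][4], float out[4])
{
  for (int j = 0; j < 4; ++j) {
    out[j] = 0.f;
    for (int i = 0; i < 4; ++i)
      out[j] += vecs[j][i];  // equals summing column j of the transpose
  }
}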
 // Other reduction functions:
 // mul
 template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
 {
-  float32x2_t a_lo, a_hi, prod;
-
-  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
   // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
-  prod = vmul_f32(a_lo, a_hi);
+  const float32x2_t prod = vmul_f32(vget_low_f32(a), vget_high_f32(a));
   // Multiply prod with its swapped value |a2*a4|a1*a3|
-  prod = vmul_f32(prod, vrev64_f32(prod));
-
-  return vget_lane_f32(prod, 0);
+  return vget_lane_f32(vmul_f32(prod, vrev64_f32(prod)), 0);
 }
 template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
 {
-  int32x2_t a_lo, a_hi, prod;
-
-  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
   // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
-  prod = vmul_s32(a_lo, a_hi);
+  const int32x2_t prod = vmul_s32(vget_low_s32(a), vget_high_s32(a));
   // Multiply prod with its swapped value |a2*a4|a1*a3|
-  prod = vmul_s32(prod, vrev64_s32(prod));
-
-  return vget_lane_s32(prod, 0);
+  return vget_lane_s32(vmul_s32(prod, vrev64_s32(prod)), 0);
 }
 
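The product reduction follows the same halving pattern as the sum: multiply the two halves lane-wise, then multiply the 2-lane result by its own swap. A scalar model:

// vmul(low, high), then prod * vrev64(prod), lane 0 of the result.
float prod4(const float a[4])
{
  const float p[2] = { a[0] * a[2], a[1] * a[3] };  // halves multiplied
  return p[0] * p[1];                               // swap-multiply collapses to this
}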
 // min
 template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
 {
-  float32x2_t a_lo, a_hi, min;
-
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
-  min = vpmin_f32(a_lo, a_hi);
-  min = vpmin_f32(min, min);
-
-  return vget_lane_f32(min, 0);
+  const float32x2_t min = vmin_f32(vget_low_f32(a), vget_high_f32(a));
+  return vget_lane_f32(vpmin_f32(min, min), 0);
 }
 
 template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
 {
-  int32x2_t a_lo, a_hi, min;
-
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
-  min = vpmin_s32(a_lo, a_hi);
-  min = vpmin_s32(min, min);
-
-  return vget_lane_s32(min, 0);
+  const int32x2_t min = vmin_s32(vget_low_s32(a), vget_high_s32(a));
+  return vget_lane_s32(vpmin_s32(min, min), 0);
 }
 
 // max
 template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
 {
-  float32x2_t a_lo, a_hi, max;
-
-  a_lo = vget_low_f32(a);
-  a_hi = vget_high_f32(a);
-  max = vpmax_f32(a_lo, a_hi);
-  max = vpmax_f32(max, max);
-
-  return vget_lane_f32(max, 0);
+  const float32x2_t max = vmax_f32(vget_low_f32(a), vget_high_f32(a));
+  return vget_lane_f32(vpmax_f32(max, max), 0);
 }
 
 template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
 {
-  int32x2_t a_lo, a_hi, max;
-
-  a_lo = vget_low_s32(a);
-  a_hi = vget_high_s32(a);
-  max = vpmax_s32(a_lo, a_hi);
-  max = vpmax_s32(max, max);
-
-  return vget_lane_s32(max, 0);
+  const int32x2_t max = vmax_s32(vget_low_s32(a), vget_high_s32(a));
+  return vget_lane_s32(vpmax_s32(max, max), 0);
 }
 
 template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
 {
   uint32x2_t tmp = vorr_u32(vget_low_u32( vreinterpretq_u32_f32(x)),
                             vget_high_u32(vreinterpretq_u32_f32(x)));
-  return vget_lane_u32(vpmax_u32(tmp,tmp),0);
+  return vget_lane_u32(vpmax_u32(tmp, tmp), 0);
 }
 
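predux_any operates on bit patterns rather than values: it ORs the two halves, takes a pairwise max, and lets the conversion to bool test the surviving word. A scalar sketch of the equivalent test, with memcpy standing in for vreinterpretq_u32_f32:

#include <cstdint>
#include <cstring>

// True iff any lane has a non-zero bit pattern (the pmax of the two
// OR'd words plays the role of the final OR across lanes).
bool any4(const float x[4])
{
  std::uint32_t bits[4], acc = 0;
  std::memcpy(bits, x, sizeof bits);   // reinterpret, no value conversion
  for (int i = 0; i < 4; ++i) acc |= bits[i];
  return acc != 0;
}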
 // this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
@@ -573,21 +547,22 @@ struct palign_impl<Offset,Type>\
 }\
 };\
 
-PALIGN_NEON(0,Packet4f,vextq_f32)
-PALIGN_NEON(1,Packet4f,vextq_f32)
-PALIGN_NEON(2,Packet4f,vextq_f32)
-PALIGN_NEON(3,Packet4f,vextq_f32)
-PALIGN_NEON(0,Packet4i,vextq_s32)
-PALIGN_NEON(1,Packet4i,vextq_s32)
-PALIGN_NEON(2,Packet4i,vextq_s32)
-PALIGN_NEON(3,Packet4i,vextq_s32)
+PALIGN_NEON(0, Packet4f, vextq_f32)
+PALIGN_NEON(1, Packet4f, vextq_f32)
+PALIGN_NEON(2, Packet4f, vextq_f32)
+PALIGN_NEON(3, Packet4f, vextq_f32)
+
+PALIGN_NEON(0, Packet4i, vextq_s32)
+PALIGN_NEON(1, Packet4i, vextq_s32)
+PALIGN_NEON(2, Packet4i, vextq_s32)
+PALIGN_NEON(3, Packet4i, vextq_s32)
 
 #undef PALIGN_NEON
 
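Each PALIGN_NEON instantiation wires palign<Offset> to vextq, which extracts four consecutive lanes starting Offset elements into the concatenation of its two arguments. A scalar model of that semantics (an illustrative template, not the Eigen macro itself):

// out = lanes Offset..Offset+3 of the 8-lane concatenation {first, second}.
template<int Offset>
void palign4(const float first[4], const float second[4], float out[4])
{
  const float cat[8] = { first[0], first[1], first[2], first[3],
                         second[0], second[1], second[2], second[3] };
  for (int i = 0; i < 4; ++i)
    out[i] = cat[Offset + i];  // Offset is 0..3, as instantiated above
}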
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet4f,4>& kernel) {
-  float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
-  float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f, 4>& kernel)
+{
+  const float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
+  const float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);
 
   kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]), vget_low_f32(tmp2.val[0]));
   kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
@@ -595,10 +570,11 @@ ptranspose(PacketBlock<Packet4f,4>& kernel) {
   kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
 }
 
-EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet4i,4>& kernel) {
-  int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
-  int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4i, 4>& kernel)
+{
+  const int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
+  const int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);
 
   kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]), vget_low_s32(tmp2.val[0]));
   kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
   kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]), vget_low_s32(tmp2.val[1]));
@@ -624,17 +600,9 @@ ptranspose(PacketBlock<Packet4i,4>& kernel) {
 // Defining these functions as templates ensures that if these intrinsics are
 // already defined in arm_neon.h, then our workaround doesn't cause a conflict
 // and has lower priority in overload resolution.
-template <typename T>
-uint64x2_t vreinterpretq_u64_f64(T a)
-{
-  return (uint64x2_t) a;
-}
-
-template <typename T>
-float64x2_t vreinterpretq_f64_u64(T a)
-{
-  return (float64x2_t) a;
-}
+template <typename T> uint64x2_t vreinterpretq_u64_f64(T a) { return (uint64x2_t) a; }
+template <typename T> float64x2_t vreinterpretq_f64_u64(T a) { return (float64x2_t) a; }
 
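The trick the comment describes relies on ordinary overload resolution: an exact-match non-template is always preferred over a template, so these template versions only take effect when arm_neon.h does not already provide the intrinsics. A minimal standalone illustration, with made-up names:

struct u64x2_model {};  // hypothetical stand-in for uint64x2_t

// If a real (non-template) function with this name exists, it wins:
inline u64x2_model reinterpret_model(u64x2_model a) { return a; }
// The template is only selected when no exact non-template match exists:
template <typename T> u64x2_model reinterpret_model(T a) { return a; }

// reinterpret_model(u64x2_model{}) resolves to the non-template overload.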
 typedef float64x2_t Packet2d;
 typedef float64x1_t Packet1d;
@@ -643,12 +611,13 @@ template<> struct packet_traits<double> : default_packet_traits
 {
   typedef Packet2d type;
   typedef Packet2d half;
-  enum {
+  enum
+  {
     Vectorizable = 1,
     AlignedOnScalar = 1,
     size = 2,
-    HasHalfPacket=0,
+    HasHalfPacket = 0,
 
     HasDiv = 1,
     // FIXME check the Has*
     HasSin = 0,
@@ -659,16 +628,28 @@ template<> struct packet_traits<double> : default_packet_traits
   };
 };
 
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet2d>
+{
+  typedef double type;
+  enum
+  {
+    size = 2,
+    alignment = Aligned16,
+    vectorizable = true,
+    masked_load_available = false,
+    masked_store_available = false
+  };
+  typedef Packet2d half;
+};
 
 template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }
 
 template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)
 {
-  const double countdown_raw[] = {0.0,1.0};
-  const Packet2d countdown = vld1q_f64(countdown_raw);
-  return vaddq_f64(pset1<Packet2d>(a), countdown);
+  const double c[] = {0.0,1.0};
+  return vaddq_f64(pset1<Packet2d>(a), vld1q_f64(c));
 }
 
 template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }
 
 template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }
@@ -683,9 +664,11 @@ template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const
 
 #ifdef __ARM_FEATURE_FMA
 // See bug 936. See above comment about FMA for float.
-template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c)
+{ return vfmaq_f64(c,a,b); }
 #else
-template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c)
+{ return vmlaq_f64(c,a,b); }
 #endif
 
 template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }
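For context on the bug 936 remark: pmadd(a,b,c) computes a*b + c per lane. With __ARM_FEATURE_FMA the vfmaq path is a true fused multiply-add, rounding once like std::fma; the fallback is only required to deliver a multiply followed by an add, so it may round twice. A scalar sketch of the fused contract (assuming std::fma semantics):

#include <cmath>

// Fused per-lane contract of the vfmaq path: one rounding for a*b + c.
void madd2(const double a[2], const double b[2], const double c[2], double r[2])
{
  for (int i = 0; i < 2; ++i)
    r[i] = std::fma(a[i], b[i], c[i]);
}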
@@ -694,94 +677,93 @@ template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const
 
 // Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
 template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
-  return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
 
 template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
-  return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
 
 template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
-  return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
 
 template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)
-{
-  return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
-}
+{ return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); }
 
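All four logical ops share one pattern: reinterpret the doubles as 64-bit integers, apply the bitwise operation, reinterpret back. The least obvious is pandnot, since vbic computes first & ~second. A scalar sketch with memcpy standing in for the reinterpret steps:

#include <cstdint>
#include <cstring>

// pandnot(a,b): keep a's bits except those set in b (the vbicq_u64 path).
double andnot_bits(double a, double b)
{
  std::uint64_t ua, ub;
  std::memcpy(&ua, &a, sizeof ua);  // vreinterpretq_u64_f64
  std::memcpy(&ub, &b, sizeof ub);
  const std::uint64_t ur = ua & ~ub;
  double r;
  std::memcpy(&r, &ur, sizeof r);   // vreinterpretq_f64_u64
  return r;
}
// E.g. b = -0.0 (only the sign bit set) clears a's sign bit, the usual
// mask-based abs trick.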
-template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return vreinterpretq_f64_u64(vcleq_f64(a,b)); }
-template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return vreinterpretq_f64_u64(vcltq_f64(a,b)); }
-template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return vreinterpretq_f64_u64(vceqq_f64(a,b)); }
-
-template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }
-
-template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }
-
-template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
-{
-  return vld1q_dup_f64(from);
-}
-template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); }
-
-template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b)
+{ return vreinterpretq_f64_u64(vcleq_f64(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b)
+{ return vreinterpretq_f64_u64(vcltq_f64(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b)
+{ return vreinterpretq_f64_u64(vceqq_f64(a,b)); }
+
+template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }
+
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }
+
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from) { return vld1q_dup_f64(from); }
+
+template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from)
+{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to,from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from)
+{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to,from); }
 
 template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
 {
   Packet2d res = pset1<Packet2d>(0.0);
-  res = vsetq_lane_f64(from[0*stride], res, 0);
-  res = vsetq_lane_f64(from[1*stride], res, 1);
+  res = vld1q_lane_f64(from + 0*stride, res, 0);
+  res = vld1q_lane_f64(from + 1*stride, res, 1);
   return res;
 }
 
 template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
 {
-  to[stride*0] = vgetq_lane_f64(from, 0);
-  to[stride*1] = vgetq_lane_f64(from, 1);
+  vst1q_lane_f64(to + stride*0, from, 0);
+  vst1q_lane_f64(to + stride*1, from, 1);
 }
 
 template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }
 
 // FIXME only store the 2 first elements ?
-template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a, 0); }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a,0); }
 
-template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{ return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }
 
 template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }
 
 #if EIGEN_COMP_CLANG && defined(__apple_build_version__)
 // workaround ICE, see bug 907
-template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; }
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{ return (vget_low_f64(a) + vget_high_f64(a))[0]; }
 #else
-template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{ return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
 #endif
 
 template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
 {
-  float64x2_t trn1, trn2;
-
-  // NEON zip performs interleaving of the supplied vectors.
-  // We perform two interleaves in a row to acquire the transposed vector
-  trn1 = vzip1q_f64(vecs[0], vecs[1]);
-  trn2 = vzip2q_f64(vecs[0], vecs[1]);
-
-  // Do the addition of the resulting vectors
-  return vaddq_f64(trn1, trn2);
+  return vaddq_f64(vzip1q_f64(vecs[0], vecs[1]), vzip2q_f64(vecs[0], vecs[1]));
 }
 // Other reduction functions:
 // mul
 #if EIGEN_COMP_CLANG && defined(__apple_build_version__)
-template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; }
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{ return (vget_low_f64(a) * vget_high_f64(a))[0]; }
 #else
-template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{ return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
 #endif
 
 // min
-template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); }
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{ return vgetq_lane_f64(vpminq_f64(a,a), 0); }
 
 // max
-template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); }
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{ return vgetq_lane_f64(vpmaxq_f64(a,a), 0); }
 
 // this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
 // see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
@@ -796,19 +778,20 @@ struct palign_impl<Offset,Type>\
 }\
 };\
 
-PALIGN_NEON(0,Packet2d,vextq_f64)
-PALIGN_NEON(1,Packet2d,vextq_f64)
+PALIGN_NEON(0, Packet2d, vextq_f64)
+PALIGN_NEON(1, Packet2d, vextq_f64)
 #undef PALIGN_NEON
 
 EIGEN_DEVICE_FUNC inline void
-ptranspose(PacketBlock<Packet2d,2>& kernel) {
-  float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
-  float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);
+ptranspose(PacketBlock<Packet2d, 2>& kernel)
+{
+  const float64x2_t tmp1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
+  const float64x2_t tmp2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);
 
-  kernel.packet[0] = trn1;
-  kernel.packet[1] = trn2;
+  kernel.packet[0] = tmp1;
+  kernel.packet[1] = tmp2;
 }
 #endif // EIGEN_ARCH_ARM64
 
 } // end namespace internal
 
@@ -10,44 +10,24 @@
 #ifndef EIGEN_TYPE_CASTING_NEON_H
 #define EIGEN_TYPE_CASTING_NEON_H
 
+#include <Eigen/src/Core/util/Meta.h>
+
 namespace Eigen {
 
 namespace internal {
 
-template <>
-struct type_casting_traits<float, int> {
-  enum {
-    VectorizedCast = 1,
-    SrcCoeffRatio = 1,
-    TgtCoeffRatio = 1
-  };
-};
-
-template <>
-struct type_casting_traits<int, float> {
-  enum {
-    VectorizedCast = 1,
-    SrcCoeffRatio = 1,
-    TgtCoeffRatio = 1
-  };
-};
-
-template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
-  return vcvtq_s32_f32(a);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
-  return vcvtq_f32_s32(a);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a) {
-  return vreinterpretq_s32_f32(a);
-}
-
-template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a) {
-  return vreinterpretq_f32_s32(a);
-}
+template<> struct type_casting_traits<float,numext::int32_t>
+{ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; };
+template<> struct type_casting_traits<numext::int32_t,float>
+{ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; };
+
+template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i,Packet4f>(const Packet4i& a) { return vcvtq_f32_s32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f,Packet4i>(const Packet4f& a) { return vcvtq_s32_f32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a)
+{ return vreinterpretq_f32_s32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a)
+{ return vreinterpretq_s32_f32(a); }
 
 } // end namespace internal
 
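The two families above differ per lane in one essential way: pcast converts values (vcvtq truncates toward zero), while preinterpret merely relabels the same bits (vreinterpretq typically costs no instruction). A scalar sketch:

#include <cstdint>
#include <cstring>

std::int32_t cast_lane(float x)        // pcast model: value conversion
{ return static_cast<std::int32_t>(x); }

std::int32_t reinterpret_lane(float x) // preinterpret model: same bits, new type
{
  std::int32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  return bits;
}
// cast_lane(1.5f) == 1, whereas reinterpret_lane(1.5f) == 0x3FC00000.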