diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h
index af4742b22..e4aac9e6f 100644
--- a/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -37,22 +37,32 @@ typedef __m256d Packet4d;
 typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
 #endif
 typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;
+typedef eigen_packet_wrapper<__m256i, 4> Packet8ui;
 
 #ifdef EIGEN_VECTORIZE_AVX2
 // Start from 3 to be compatible with AVX512
 typedef eigen_packet_wrapper<__m256i, 3> Packet4l;
+typedef eigen_packet_wrapper<__m256i, 5> Packet4ul;
 #endif
 
 template<> struct is_arithmetic<__m256>  { enum { value = true }; };
 template<> struct is_arithmetic<__m256i> { enum { value = true }; };
 template<> struct is_arithmetic<__m256d> { enum { value = true }; };
 template<> struct is_arithmetic<Packet8i> { enum { value = true }; };
+// Note that `Packet8ui` uses the underlying type `__m256i`, which is
+// interpreted as a vector of _signed_ `int32`s, which breaks some arithmetic
+// operations used in `GenericPacketMath.h`.
+template<> struct is_arithmetic<Packet8ui> { enum { value = false }; };
 #ifndef EIGEN_VECTORIZE_AVX512FP16
 template<> struct is_arithmetic<Packet8h> { enum { value = true }; };
 #endif
 template<> struct is_arithmetic<Packet8bf> { enum { value = true }; };
 #ifdef EIGEN_VECTORIZE_AVX2
 template<> struct is_arithmetic<Packet4l> { enum { value = true }; };
+// Note that `Packet4ul` uses the underlying type `__m256i`, which is
+// interpreted as a vector of _signed_ `int64`s, which breaks some arithmetic
+// operations used in `GenericPacketMath.h`.
+template<> struct is_arithmetic<Packet4ul> { enum { value = false }; };
 #endif
 
 // Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
@@ -214,6 +224,25 @@ template<> struct packet_traits<int> : default_packet_traits
     size=8
   };
 };
+template<> struct packet_traits<uint32_t> : default_packet_traits
+{
+  typedef Packet8ui type;
+  typedef Packet4ui half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 8,
+
+    HasDiv = 0,
+    HasNegate = 0,
+    HasSqrt = 0,
+
+    HasCmp = 1,
+    HasMin = 1,
+    HasMax = 1,
+    HasShift = 1
+  };
+};
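Before the traits continue, it is worth making the `is_arithmetic<...> = false` opt-outs above concrete. The following standalone sketch (ours, not part of the patch) shows the failure mode the comments describe: once unsigned lanes are reinterpreted as signed, which is what `__m256i`-based arithmetic effectively does, any value with the top bit set compares as negative.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  uint32_t a = 0x80000000u;  // 2147483648 when read as unsigned
  uint32_t b = 1u;

  // Unsigned comparison behaves as expected: a > b.
  std::cout << (a > b) << "\n";  // prints 1

  // Reinterpret the same bits as signed, as a signed packet would:
  // a becomes INT32_MIN and now compares *less* than b.
  int32_t sa = static_cast<int32_t>(a);  // wraps on all mainstream compilers
  int32_t sb = static_cast<int32_t>(b);
  std::cout << (sa > sb) << "\n";  // prints 0
}
```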
 
 #ifdef EIGEN_VECTORIZE_AVX2
 template<> struct packet_traits<int64_t> : default_packet_traits
@@ -229,6 +258,29 @@ template<> struct packet_traits<int64_t> : default_packet_traits
     size=4
  };
 };
+template<> struct packet_traits<uint64_t> : default_packet_traits
+{
+  typedef Packet4ul type;
+  // There is no half-size packet for the current Packet4ul.
+  // TODO: support it in the SSE path.
+  typedef Packet4ul half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+
+    // HasMin = 0,
+    // HasMax = 0,
+    HasDiv = 0,
+    HasBlend = 0,
+    HasTranspose = 0,
+    HasNegate = 0,
+    HasSqrt = 0,
+
+    HasCmp = 1,
+    HasShift = 1
+  };
+};
 #endif
 #endif
 
@@ -257,12 +309,22 @@ template<> struct unpacket_traits<Packet8i> {
   typedef Packet4i half;
   enum {size=8, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
 };
+template<> struct unpacket_traits<Packet8ui> {
+  typedef uint32_t type;
+  typedef Packet4ui half;
+  enum {size = 8, alignment = Aligned32, vectorizable = true, masked_load_available = false, masked_store_available = false};
+};
 #ifdef EIGEN_VECTORIZE_AVX2
 template<> struct unpacket_traits<Packet4l> {
   typedef int64_t type;
   typedef Packet4l half;
   enum {size=4, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
 };
+template<> struct unpacket_traits<Packet4ul> {
+  typedef uint64_t type;
+  typedef Packet4ul half;
+  enum {size = 4, alignment = Aligned32, vectorizable = true, masked_load_available = false, masked_store_available = false};
+};
 #endif
 template<> struct unpacket_traits<Packet8bf> {
   typedef bfloat16 type;
@@ -283,30 +345,58 @@ EIGEN_STRONG_INLINE Packet4l pset1<Packet4l>(const int64_t& from) {
   return _mm256_set1_epi64x(from);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pset1<Packet4ul>(const uint64_t& from) {
+  return _mm256_set1_epi64x(numext::bit_cast<int64_t>(from));
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pzero(const Packet4l& /*a*/) {
   return _mm256_setzero_si256();
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pzero(const Packet4ul& /*a*/) {
+  return _mm256_setzero_si256();
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l peven_mask(const Packet4l& /*a*/) {
   return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul peven_mask(const Packet4ul& /*a*/) {
+  return _mm256_set_epi64x(0ll, -1ll, 0ll, -1ll);
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pload1<Packet4l>(const int64_t* from) {
   return _mm256_set1_epi64x(*from);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pload1<Packet4ul>(const uint64_t* from) {
+  return _mm256_set1_epi64x(*from);
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l padd<Packet4l>(const Packet4l& a, const Packet4l& b) {
   return _mm256_add_epi64(a, b);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul padd<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
+  return _mm256_add_epi64(a, b);
+}
+template<>
 EIGEN_STRONG_INLINE Packet4l plset<Packet4l>(const int64_t& a) {
   return padd(pset1<Packet4l>(a), Packet4l(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul plset<Packet4ul>(const uint64_t& a) {
+  return padd(pset1<Packet4ul>(a), Packet4ul(_mm256_set_epi64x(3ll, 2ll, 1ll, 0ll)));
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l psub<Packet4l>(const Packet4l& a, const Packet4l& b) {
   return _mm256_sub_epi64(a, b);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul psub<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
+  return _mm256_sub_epi64(a, b);
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pnegate(const Packet4l& a) {
   return psub(pzero(a), a);
 }
@@ -319,18 +409,36 @@ EIGEN_STRONG_INLINE Packet4l pcmp_le(const Packet4l& a, const Packet4l& b) {
   return _mm256_xor_si256(_mm256_cmpgt_epi64(a, b), _mm256_set1_epi32(-1));
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pcmp_le(const Packet4ul& a, const Packet4ul& b) {
+  return (Packet4ul)pcmp_le((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
+                            (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pcmp_lt(const Packet4l& a, const Packet4l& b) {
   return _mm256_cmpgt_epi64(b, a);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pcmp_lt(const Packet4ul& a, const Packet4ul& b) {
+  return (Packet4ul)pcmp_lt((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
+                            (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL)));
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pcmp_eq(const Packet4l& a, const Packet4l& b) {
   return _mm256_cmpeq_epi64(a, b);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pcmp_eq(const Packet4ul& a, const Packet4ul& b) {
+  return _mm256_cmpeq_epi64(a, b);
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l ptrue(const Packet4l& a) {
   return _mm256_cmpeq_epi64(a, a);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul ptrue(const Packet4ul& a) {
+  return _mm256_cmpeq_epi64(a, a);
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pand(const Packet4l& a, const Packet4l& b) {
   return _mm256_and_si256(a, b);
 }
@@ -343,6 +451,10 @@ EIGEN_STRONG_INLINE Packet4l pxor(const Packet4l& a, const Packet4l& b) {
   return _mm256_xor_si256(a, b);
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pxor(const Packet4ul& a, const Packet4ul& b) {
+  return _mm256_xor_si256(a, b);
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pandnot(const Packet4l& a, const Packet4l& b) {
   return _mm256_andnot_si256(b, a);
 }
@@ -388,28 +500,54 @@ EIGEN_STRONG_INLINE Packet4l pload<Packet4l>(const int64_t* from) {
   EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pload<Packet4ul>(const uint64_t* from) {
+  EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l ploadu<Packet4l>(const int64_t* from) {
   EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
 }
+template <>
+EIGEN_STRONG_INLINE Packet4ul ploadu<Packet4ul>(const uint64_t* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
+}
 // Loads 2 int64_ts from memory and returns the packet {a0, a0, a1, a1}
 template <>
 EIGEN_STRONG_INLINE Packet4l ploaddup<Packet4l>(const int64_t* from) {
   const Packet4l a = _mm256_castsi128_si256(_mm_loadu_si128(reinterpret_cast<const __m128i*>(from)));
   return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
 }
+// Loads 2 uint64_ts from memory and returns the packet {a0, a0, a1, a1}
 template <>
+EIGEN_STRONG_INLINE Packet4ul ploaddup<Packet4ul>(const uint64_t* from) {
+  const Packet4ul a = _mm256_castsi128_si256(_mm_loadu_si128(reinterpret_cast<const __m128i*>(from)));
+  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 1, 0, 1, 2, 3, 2, 3));
+}
+template<>
 EIGEN_STRONG_INLINE void pstore<int64_t>(int64_t* to, const Packet4l& from) {
   EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
 }
 template <>
+EIGEN_STRONG_INLINE void pstore<uint64_t>(uint64_t* to, const Packet4ul& from) {
+  EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
+}
+template <>
 EIGEN_STRONG_INLINE void pstoreu<int64_t>(int64_t* to, const Packet4l& from) {
   EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
 }
 template <>
+EIGEN_STRONG_INLINE void pstoreu<uint64_t>(uint64_t* to, const Packet4ul& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
+}
+template <>
 EIGEN_DEVICE_FUNC inline Packet4l pgather<int64_t, Packet4l>(const int64_t* from, Index stride) {
   return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
 }
 template <>
+EIGEN_DEVICE_FUNC inline Packet4ul pgather<uint64_t, Packet4ul>(const uint64_t* from, Index stride) {
+  return _mm256_set_epi64x(from[3 * stride], from[2 * stride], from[1 * stride], from[0 * stride]);
+}
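The `pcmp_le`/`pcmp_lt` overloads for `Packet4ul` above rely on a classic bias trick, since AVX2 offers only the signed `_mm256_cmpgt_epi64`: subtracting 2^63 (mod 2^64) from both operands maps unsigned order onto signed order. A scalar sketch of the identity (ours, illustrative only):

```cpp
#include <cstdint>
#include <cassert>

// Unsigned a < b  <=>  signed (a - 2^63) < (b - 2^63), with wrap-around.
bool unsigned_lt_via_signed(uint64_t a, uint64_t b) {
  const uint64_t bias = 0x8000000000000000ULL;
  int64_t sa = static_cast<int64_t>(a - bias);
  int64_t sb = static_cast<int64_t>(b - bias);
  return sa < sb;
}

int main() {
  assert(unsigned_lt_via_signed(1u, 2u));
  assert(!unsigned_lt_via_signed(0xFFFFFFFFFFFFFFFFULL, 1u));  // UINT64_MAX is not < 1
  assert(unsigned_lt_via_signed(1u, 0x8000000000000000ULL));   // top-bit values are large
}
```

The same bias, applied via `psub` and undone via `padd`, drives the `pmin`/`pmax` overloads in the next hunk.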
+template <>
 EIGEN_DEVICE_FUNC inline void pscatter<int64_t, Packet4l>(int64_t* to, const Packet4l& from, Index stride) {
   __m128i low = _mm256_extractf128_si256(from, 0);
   to[stride * 0] = _mm_extract_epi64(low, 0);
@@ -420,19 +558,43 @@ EIGEN_DEVICE_FUNC inline void pscatter<int64_t, Packet4l>(int64_t* to, const Packet4l& from, Index stride)
   to[stride * 3] = _mm_extract_epi64(high, 1);
 }
 template <>
+EIGEN_DEVICE_FUNC inline void pscatter<uint64_t, Packet4ul>(uint64_t* to, const Packet4ul& from, Index stride) {
+  __m128i low = _mm256_extractf128_si256(from, 0);
+  to[stride * 0] = _mm_extract_epi64(low, 0);
+  to[stride * 1] = _mm_extract_epi64(low, 1);
+
+  __m128i high = _mm256_extractf128_si256(from, 1);
+  to[stride * 2] = _mm_extract_epi64(high, 0);
+  to[stride * 3] = _mm_extract_epi64(high, 1);
+}
+template <>
 EIGEN_STRONG_INLINE void pstore1<Packet4l>(int64_t* to, const int64_t& a) {
   Packet4l pa = pset1<Packet4l>(a);
   pstore(to, pa);
 }
 template <>
+EIGEN_STRONG_INLINE void pstore1<Packet4ul>(uint64_t* to, const uint64_t& a) {
+  Packet4ul pa = pset1<Packet4ul>(a);
+  pstore(to, pa);
+}
+template<>
 EIGEN_STRONG_INLINE int64_t pfirst<Packet4l>(const Packet4l& a) {
   return _mm_cvtsi128_si64(_mm256_castsi256_si128(a));
 }
 template <>
+EIGEN_STRONG_INLINE uint64_t pfirst<Packet4ul>(const Packet4ul& a) {
+  return _mm_cvtsi128_si64(_mm256_castsi256_si128(a));
+}
+template <>
 EIGEN_STRONG_INLINE int64_t predux<Packet4l>(const Packet4l& a) {
   __m128i r = _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
   return _mm_extract_epi64(r, 0) + _mm_extract_epi64(r, 1);
 }
+template <>
+EIGEN_STRONG_INLINE uint64_t predux<Packet4ul>(const Packet4ul& a) {
+  __m128i r = _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
+  return numext::bit_cast<uint64_t>(_mm_extract_epi64(r, 0) + _mm_extract_epi64(r, 1));
+}
 #define MM256_SHUFFLE_EPI64(A, B, M) _mm256_shuffle_pd(_mm256_castsi256_pd(A), _mm256_castsi256_pd(B), M)
 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4l, 4>& kernel) {
   __m256d T0 = MM256_SHUFFLE_EPI64(kernel.packet[0], kernel.packet[1], 15);
@@ -445,6 +607,9 @@ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4l, 4>& kernel) {
   kernel.packet[0] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 32));
   kernel.packet[2] = _mm256_castpd_si256(_mm256_permute2f128_pd(T1, T3, 49));
 }
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4ul, 4>& kernel) {
+  ptranspose((PacketBlock<Packet4l, 4>&)kernel);
+}
 template <>
 EIGEN_STRONG_INLINE Packet4l pmin<Packet4l>(const Packet4l& a, const Packet4l& b) {
   __m256i cmp = _mm256_cmpgt_epi64(a, b);
@@ -453,6 +618,12 @@ EIGEN_STRONG_INLINE Packet4l pmin<Packet4l>(const Packet4l& a, const Packet4l& b) {
   return Packet4l(_mm256_or_si256(a_min, b_min));
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pmin<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
+  return padd((Packet4ul)pmin((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
+                              (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
+              pset1<Packet4ul>(0x8000000000000000UL));
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pmax<Packet4l>(const Packet4l& a, const Packet4l& b) {
   __m256i cmp = _mm256_cmpgt_epi64(a, b);
   __m256i a_min = _mm256_and_si256(cmp, a);
@@ -460,12 +631,22 @@ EIGEN_STRONG_INLINE Packet4l pmax<Packet4l>(const Packet4l& a, const Packet4l& b) {
   return Packet4l(_mm256_or_si256(a_min, b_min));
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pmax<Packet4ul>(const Packet4ul& a, const Packet4ul& b) {
+  return padd((Packet4ul)pmax((Packet4l)psub(a, pset1<Packet4ul>(0x8000000000000000UL)),
+                              (Packet4l)psub(b, pset1<Packet4ul>(0x8000000000000000UL))),
+              pset1<Packet4ul>(0x8000000000000000UL));
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pabs(const Packet4l& a) {
   Packet4l pz = pzero(a);
   Packet4l cmp = _mm256_cmpgt_epi64(a, pz);
   return psub(cmp, pxor(a, cmp));
 }
 template <>
+EIGEN_STRONG_INLINE Packet4ul pabs(const Packet4ul& a) {
+  return a;
+}
+template <>
 EIGEN_STRONG_INLINE Packet4l pmul<Packet4l>(const Packet4l& a, const Packet4l& b) {
   // 64-bit mul requires avx512, so do this with 32-bit multiplication
   __m256i upper32_a = _mm256_srli_epi64(a, 32);
@@ -485,6 +666,7 @@ EIGEN_STRONG_INLINE Packet4l pmul<Packet4l>(const Packet4l& a, const Packet4l& b) {
 template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) { return _mm256_set1_ps(from); }
 template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) { return _mm256_set1_epi32(from); }
+template<> EIGEN_STRONG_INLINE Packet8ui pset1<Packet8ui>(const uint32_t& from) { return _mm256_set1_epi32(from); }
 
 template<> EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) { return _mm256_castsi256_ps(pset1<Packet8i>(from)); }
 template<> EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) { return _mm256_castsi256_pd(_mm256_set1_epi64x(from)); }
@@ -492,10 +674,12 @@ template<> EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) {
 template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); }
 template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); }
 template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); }
+template<> EIGEN_STRONG_INLINE Packet8ui pzero(const Packet8ui& /*a*/) { return _mm256_setzero_si256(); }
 
 template<> EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) { return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1)); }
 template<> EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); }
+template<> EIGEN_STRONG_INLINE Packet8ui peven_mask(const Packet8ui& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); }
 template<> EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) { return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1)); }
 
 template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) { return _mm256_broadcast_ss(from); }
@@ -522,10 +706,21 @@ template<> EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b)
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui padd<Packet8ui>(const Packet8ui& a, const Packet8ui& b)
+{
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_add_epi32(a, b);
+#else
+  __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+  __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return padd(pset1<Packet8f>(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
 template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return padd(pset1<Packet4d>(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
 template<> EIGEN_STRONG_INLINE Packet8i plset<Packet8i>(const int& a) { return padd(pset1<Packet8i>(a), (Packet8i)_mm256_set_epi32(7,6,5,4,3,2,1,0)); }
+template<> EIGEN_STRONG_INLINE Packet8ui plset<Packet8ui>(const uint32_t& a) { return padd(pset1<Packet8ui>(a), (Packet8ui)_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0)); }
 
 template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
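The `pmul<Packet4l>` comment above ("64-bit mul requires avx512, so do this with 32-bit multiplication") compresses a small derivation. A scalar model of the decomposition (ours, for illustration):

```cpp
#include <cstdint>
#include <cassert>

// With only 32x32->64 products available (_mm256_mul_epu32), a full
// 64x64 product mod 2^64 is
//   lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32)
// because the hi(a)*hi(b) term only contributes to bits >= 64.
uint64_t mul64_from_32(uint64_t a, uint64_t b) {
  uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
  uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
  uint64_t cross = a_lo * b_hi + a_hi * b_lo;  // each factor fits in 32 bits
  return a_lo * b_lo + (cross << 32);
}

int main() {
  uint64_t a = 0x0123456789ABCDEFULL, b = 0xFEDCBA9876543210ULL;
  assert(mul64_from_32(a, b) == a * b);  // both sides wrap mod 2^64
}
```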
@@ -538,6 +733,16 @@ template<> EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b)
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui psub<Packet8ui>(const Packet8ui& a, const Packet8ui& b)
+{
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_sub_epi32(a, b);
+#else
+  __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+  __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
 {
@@ -569,6 +774,16 @@ template<> EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b)
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pmul<Packet8ui>(const Packet8ui& a, const Packet8ui& b)
+{
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_mullo_epi32(a, b);
+#else
+  const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+  const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
@@ -577,7 +792,7 @@ template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& a, const Packet8i& b)
 {
 #ifdef EIGEN_VECTORIZE_AVX512
   return _mm512_cvttpd_epi32(_mm512_div_pd(_mm512_cvtepi32_pd(a), _mm512_cvtepi32_pd(b)));
-#else 
+#else
   Packet4i lo = pdiv<Packet4i>(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
   Packet4i hi = pdiv<Packet4i>(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
@@ -666,6 +881,15 @@ template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pcmp_eq(const Packet8ui& a, const Packet8ui& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_cmpeq_epi32(a, b);
+#else
+  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
 #if EIGEN_GNUC_STRICT_LESS_THAN(6,3,0)
@@ -701,6 +925,15 @@ template<> EIGEN_STRONG_INLINE Packet8i pmin<Packet8i>(const Packet8i& a, const Packet8i& b) {
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pmin<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_min_epu32(a, b);
+#else
+  __m128i lo = _mm_min_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+  __m128i hi = _mm_min_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
 #if EIGEN_GNUC_STRICT_LESS_THAN(6,3,0)
@@ -733,6 +966,15 @@ template<> EIGEN_STRONG_INLINE Packet8i pmax<Packet8i>(const Packet8i& a, const Packet8i& b) {
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pmax<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_max_epu32(a, b);
+#else
+  __m128i lo = _mm_max_epu32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+  __m128i hi = _mm_max_epu32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
 
 #ifdef EIGEN_VECTORIZE_AVX2
 template<> EIGEN_STRONG_INLINE Packet8i psign(const Packet8i& a) {
@@ -823,6 +1065,13 @@ template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
   return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pand<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_and_si256(a,b);
+#else
+  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
@@ -833,6 +1082,13 @@ template<> EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
   return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui por<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_or_si256(a,b);
+#else
+  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
@@ -843,6 +1099,13 @@ template<> EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
   return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pxor<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_xor_si256(a, b);
+#else
+  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); }
 template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); }
@@ -853,6 +1116,20 @@ template<> EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
   return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pandnot<Packet8ui>(const Packet8ui& a, const Packet8ui& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_andnot_si256(b,a);
+#else
+  return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8ui pcmp_lt(const Packet8ui& a, const Packet8ui& b) {
+  return pxor(pcmp_eq(a, pmax(a, b)), ptrue(a));
+}
+template<> EIGEN_STRONG_INLINE Packet8ui pcmp_le(const Packet8ui& a, const Packet8ui& b) {
+  return pcmp_eq(a, pmin(a, b));
+}
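Note how `pcmp_lt`/`pcmp_le` for `Packet8ui` above avoid any unsigned compare instruction: AVX2 has `_mm256_min_epu32`/`_mm256_max_epu32` but no unsigned `cmpgt`, so the comparisons are derived from min/max plus equality. The identities, in scalar form (our sketch):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

//   a <  b  <=>  !(a == max(a, b))
//   a <= b  <=>    a == min(a, b)
bool lt_via_max(uint32_t a, uint32_t b) { return !(a == std::max(a, b)); }
bool le_via_min(uint32_t a, uint32_t b) { return a == std::min(a, b); }

int main() {
  assert(lt_via_max(1u, 0x80000000u));  // true: a signed compare would say the opposite
  assert(!lt_via_max(5u, 5u));          // strict inequality
  assert(le_via_min(5u, 5u));
  assert(le_via_min(0u, 0xFFFFFFFFu));
}
```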
 
 template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a)
 {
@@ -871,6 +1148,8 @@ template<> EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b)
 { return _mm256_blendv_ps(b,a,mask); }
 template<> EIGEN_STRONG_INLINE Packet8i pselect<Packet8i>(const Packet8i& mask, const Packet8i& a, const Packet8i& b)
 { return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask))); }
+template<> EIGEN_STRONG_INLINE Packet8ui pselect<Packet8ui>(const Packet8ui& mask, const Packet8ui& a, const Packet8ui& b)
+{ return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(b), _mm256_castsi256_ps(a), _mm256_castsi256_ps(mask))); }
 
 template<> EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b)
 { return _mm256_blendv_pd(b,a,mask); }
@@ -905,13 +1184,25 @@ template <int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
 #endif
 }
 
+template <int N> EIGEN_STRONG_INLINE Packet8ui parithmetic_shift_right(Packet8ui a) {
+  return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
+}
+template <int N> EIGEN_STRONG_INLINE Packet8ui plogical_shift_right(Packet8ui a) {
+  return (Packet8ui)plogical_shift_right<N>((Packet8i)a);
+}
+template <int N> EIGEN_STRONG_INLINE Packet8ui plogical_shift_left(Packet8ui a) {
+  return (Packet8ui)plogical_shift_left<N>((Packet8i)a);
+}
+
 template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
 template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }
+template<> EIGEN_STRONG_INLINE Packet8ui pload<Packet8ui>(const uint32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }
 template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
 template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
+template<> EIGEN_STRONG_INLINE Packet8ui ploadu<Packet8ui>(const uint32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
 
 template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
 #ifdef EIGEN_VECTORIZE_AVX512
@@ -930,7 +1221,7 @@ template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
 template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
 {
   // TODO try to find a way to avoid the need of a temporary register
-// Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
+  // Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
   // tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
   // return _mm256_unpacklo_ps(tmp,tmp);
 
@@ -961,6 +1252,20 @@ template<> EIGEN_STRONG_INLINE Packet8i ploaddup<Packet8i>(const int* from)
   return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2)));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui ploaddup<Packet8ui>(const uint32_t* from) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  const Packet8ui a = _mm256_castsi128_si256(ploadu<Packet4ui>(from));
+  return _mm256_permutevar8x32_epi32(a, _mm256_setr_epi32(0, 0, 1, 1, 2, 2, 3, 3));
+#else
+  __m256 tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
+  // mimic an "inplace" permutation of the lower 128bits using a blend
+  tmp = _mm256_blend_ps(
+      tmp, _mm256_castps128_ps256(_mm_permute_ps(_mm256_castps256_ps128(tmp), _MM_SHUFFLE(1, 0, 1, 0))), 15);
+  // then we can perform a consistent permutation on the global register to get
+  // everything in shape:
+  return _mm256_castps_si256(_mm256_permute_ps(tmp, _MM_SHUFFLE(3, 3, 2, 2)));
+#endif
+}
 
 // Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
 template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
 {
@@ -972,14 +1277,19 @@ template<> EIGEN_STRONG_INLINE Packet8i ploadquad<Packet8i>(const int* from) {
   return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from+1)), 1);
 }
+template<> EIGEN_STRONG_INLINE Packet8ui ploadquad<Packet8ui>(const uint32_t* from) {
+  return _mm256_insertf128_si256(_mm256_set1_epi32(*from), _mm_set1_epi32(*(from + 1)), 1);
+}
 
 template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
 template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
 template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint32_t>(uint32_t* to, const Packet8ui& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
 
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint32_t>(uint32_t* to, const Packet8ui& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
 
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from, uint8_t umask) {
 #ifdef EIGEN_VECTORIZE_AVX512
@@ -1017,6 +1327,9 @@ template<> EIGEN_DEVICE_FUNC inline Packet8i pgather<int, Packet8i>(const int* from, Index stride)
   return _mm256_set_epi32(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                           from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }
+template<> EIGEN_DEVICE_FUNC inline Packet8ui pgather<uint32_t, Packet8ui>(const uint32_t* from, Index stride) {
+  return (Packet8ui)pgather<int, Packet8i>((int*)from, stride);
+}
 
 template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
 {
@@ -1055,6 +1368,9 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet8i>(int* to, const Packet8i& from, Index stride)
   to[stride*6] = _mm_extract_epi32(high, 2);
   to[stride*7] = _mm_extract_epi32(high, 3);
 }
+template<> EIGEN_DEVICE_FUNC inline void pscatter<uint32_t, Packet8ui>(uint32_t* to, const Packet8ui& from, Index stride) {
+  pscatter((int*)to, (Packet8i)from, stride);
+}
 
 template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
 {
@@ -1076,6 +1392,7 @@ template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
 template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
 template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
 template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<uint32_t>(const uint32_t* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
 #endif
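The gather/scatter overloads above simply cast to the existing `int` paths. That is sound because these operations only move bits; lane values are never interpreted arithmetically. A scalar reference for what `pgather<uint32_t, Packet8ui>` computes (our sketch, not the library's code):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Reference semantics: out[i] = from[i * stride] for each of the 8 lanes.
std::vector<uint32_t> gather_u32(const uint32_t* from, int stride, int lanes) {
  std::vector<uint32_t> out(lanes);
  for (int i = 0; i < lanes; ++i) out[i] = from[i * stride];
  return out;
}

int main() {
  uint32_t data[16];
  for (int i = 0; i < 16; ++i) data[i] = 0x80000000u + i;  // top bit set in every value
  std::vector<uint32_t> v = gather_u32(data, 2, 8);        // lanes {data[0], data[2], ...}
  assert(v[1] == 0x80000002u);  // values round-trip unchanged through the signed path
}
```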
 
 template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
@@ -1087,6 +1404,9 @@ template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
 template<> EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
   return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
 }
+template<> EIGEN_STRONG_INLINE uint32_t pfirst<Packet8ui>(const Packet8ui& a) {
+  return numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(_mm256_castsi256_si128(a)));
+}
 
 template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
 {
@@ -1098,21 +1418,27 @@ template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
 {
   __m256d tmp = _mm256_shuffle_pd(a,a,5);
   return _mm256_permute2f128_pd(tmp, tmp, 1);
- #if 0
+#if 0
   // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
   // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
   __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
   return _mm256_permute_pd(swap_halves,5);
- #endif
+#endif
 }
 template<> EIGEN_STRONG_INLINE Packet8i preverse(const Packet8i& a)
 {
   return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
 }
+template<> EIGEN_STRONG_INLINE Packet8ui preverse(const Packet8ui& a) {
+  return _mm256_castps_si256(preverse(_mm256_castsi256_ps(a)));
+}
 
 #ifdef EIGEN_VECTORIZE_AVX2
 template<> EIGEN_STRONG_INLINE Packet4l preverse(const Packet4l& a)
-{
+{
+  return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
+}
+template<> EIGEN_STRONG_INLINE Packet4ul preverse(const Packet4ul& a) {
   return _mm256_castpd_si256(preverse(_mm256_castsi256_pd(a)));
 }
 #endif
@@ -1138,12 +1464,15 @@ template<> EIGEN_STRONG_INLINE Packet8i pabs(const Packet8i& a)
   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet8ui pabs(const Packet8ui& a) { return a; }
 
 template<> EIGEN_STRONG_INLINE Packet8h psignbit(const Packet8h& a) { return _mm_srai_epi16(a, 15); }
 template<> EIGEN_STRONG_INLINE Packet8bf psignbit(const Packet8bf& a) { return _mm_srai_epi16(a, 15); }
 template<> EIGEN_STRONG_INLINE Packet8f psignbit(const Packet8f& a) { return _mm256_castsi256_ps(parithmetic_shift_right<31>((Packet8i)_mm256_castps_si256(a))); }
+template<> EIGEN_STRONG_INLINE Packet8ui psignbit(const Packet8ui& a) { return pzero(a); }
 #ifdef EIGEN_VECTORIZE_AVX2
 template<> EIGEN_STRONG_INLINE Packet4d psignbit(const Packet4d& a) { return _mm256_castsi256_pd(parithmetic_shift_right<63>((Packet4l)_mm256_castpd_si256(a))); }
+template<> EIGEN_STRONG_INLINE Packet4ul psignbit(const Packet4ul& a) { return pzero(a); }
 #endif
 
 template<> EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
@@ -1186,18 +1515,18 @@ template<> EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
   // Clamp exponent to [-2099, 2099]
   const Packet4d max_exponent = pset1<Packet4d>(2099.0);
   const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
- 
+
   // Split 2^e into four factors and multiply.
   const Packet4i bias = pset1<Packet4i>(1023);
   Packet4i b = parithmetic_shift_right<2>(e);  // floor(e/4)
- 
+
   // 2^b
   Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
   Packet4i lo = _mm_slli_epi64(hi, 52);
   hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
   Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
   Packet4d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)
- 
+
   // 2^(e - 3b)
   b = psub(psub(psub(e, b), b), b);  // e - 3b
   hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
@@ -1220,6 +1549,9 @@ template<> EIGEN_STRONG_INLINE int predux<Packet8i>(const Packet8i& a)
 {
   return predux(Packet4i(_mm_add_epi32(_mm256_castsi256_si128(a),_mm256_extractf128_si256(a,1))));
 }
+template<> EIGEN_STRONG_INLINE uint32_t predux<Packet8ui>(const Packet8ui& a) {
+  return predux(Packet4ui(_mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1))));
+}
 
 template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a)
 {
@@ -1229,6 +1561,9 @@ template<> EIGEN_STRONG_INLINE Packet4i predux_half_dowto4<Packet8i>(const Packet8i& a)
 {
   return _mm_add_epi32(_mm256_castsi256_si128(a),_mm256_extractf128_si256(a,1));
 }
+template<> EIGEN_STRONG_INLINE Packet4ui predux_half_dowto4<Packet8ui>(const Packet8ui& a) {
+  return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
+}
 
 template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
 {
@@ -1284,6 +1619,10 @@ template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8i& x)
 {
   return _mm256_movemask_ps(_mm256_castsi256_ps(x)) != 0;
 }
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8ui& x)
+{
+  return _mm256_movemask_ps(_mm256_castsi256_ps(x)) != 0;
+}
 
 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8f, 8>& kernel)
 {
@@ -1372,6 +1711,9 @@ ptranspose(PacketBlock<Packet8i, 8>& kernel) {
   kernel.packet[6] = _mm256_permute2f128_si256(S2, S6, 0x31);
   kernel.packet[7] = _mm256_permute2f128_si256(S3, S7, 0x31);
 }
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8ui, 8>& kernel) {
+  ptranspose((PacketBlock<Packet8i, 8>&)kernel);
+}
 
 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8f, 4>& kernel)
 {
@@ -1390,6 +1732,9 @@ ptranspose(PacketBlock<Packet8i, 4>& kernel) {
   kernel.packet[2] = _mm256_permute2f128_si256(S0, S1, 0x31);
   kernel.packet[3] = _mm256_permute2f128_si256(S2, S3, 0x31);
 }
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8ui, 4>& kernel) {
+  ptranspose((PacketBlock<Packet8i, 4>&)kernel);
+}
 
 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4d, 4>& kernel)
 {
@@ -1405,7 +1750,7 @@ ptranspose(PacketBlock<Packet4d, 4>& kernel) {
 }
 
 template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
-#ifdef EIGEN_VECTORIZE_AVX2 
+#ifdef EIGEN_VECTORIZE_AVX2
   const __m256i zero = _mm256_setzero_si256();
   const __m256i select = _mm256_set_epi32(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
   __m256i false_mask = _mm256_cmpeq_epi32(zero, select);
@@ -1419,7 +1764,7 @@ template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
 }
 
 template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
-#ifdef EIGEN_VECTORIZE_AVX2 
+#ifdef EIGEN_VECTORIZE_AVX2
   const __m256i zero = _mm256_setzero_si256();
   const __m256i select = _mm256_set_epi64x(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
   __m256i false_mask = _mm256_cmpeq_epi64(select, zero);
@@ -1478,7 +1823,7 @@ ploadquad<Packet8h>(const Eigen::half* from) {
 }
 
 template<>
 EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
-  return _mm_cmpeq_epi32(a, a); 
+  return _mm_cmpeq_epi32(a, a);
 }
 
 template <>
@@ -1850,7 +2195,7 @@ ploadquad<Packet8bf>(const bfloat16* from) {
 }
 
 template<> EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
-  return _mm_cmpeq_epi32(a, a); 
+  return _mm_cmpeq_epi32(a, a);
 }
 
 template <>
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index 499c16bed..027bd82f7 100644
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -10,6 +10,7 @@
 #ifndef EIGEN_PACKET_MATH_SSE_H
 #define EIGEN_PACKET_MATH_SSE_H
 
+#include <cstdint>
 #include "../../InternalHeaderCheck.h"
 
 namespace Eigen {
@@ -47,11 +48,16 @@ typedef __m128d Packet2d;
 typedef eigen_packet_wrapper<__m128i, 0> Packet4i;
 typedef eigen_packet_wrapper<__m128i, 1> Packet16b;
+typedef eigen_packet_wrapper<__m128i, 4> Packet4ui;
 
 template<> struct is_arithmetic<__m128>  { enum { value = true }; };
 template<> struct is_arithmetic<__m128i> { enum { value = true }; };
 template<> struct is_arithmetic<__m128d> { enum { value = true }; };
 template<> struct is_arithmetic<Packet4i> { enum { value = true }; };
+// Note that `Packet4ui` uses the underlying type `__m128i`, which is
+// interpreted as a vector of _signed_ `int32`s, which breaks some arithmetic
+// operations used in `GenericPacketMath.h`.
+template<> struct is_arithmetic<Packet4ui> { enum { value = false }; };
 template<> struct is_arithmetic<Packet16b> { enum { value = true }; };
 
 template<int p, int q, int r, int s>
@@ -66,6 +72,9 @@ struct shuffle_mask{
 #define vec4i_swizzle1(v,p,q,r,s) \
   Packet4i(_mm_shuffle_epi32( v, (shuffle_mask<p,q,r,s>::mask)))
 
+#define vec4ui_swizzle1(v, p, q, r, s) \
+  Packet4ui(vec4i_swizzle1(v,p,q,r,s))
+
 #define vec2d_swizzle1(v,p,q) \
   Packet2d(_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), (shuffle_mask<2*p,2*p+1,2*q,2*q+1>::mask))))
 
@@ -75,6 +84,9 @@ struct shuffle_mask{
 #define vec4i_swizzle2(a,b,p,q,r,s) \
   Packet4i(_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), (shuffle_mask<p,q,r,s>::mask)))))
 
+#define vec4ui_swizzle2(a,b,p,q,r,s) \
+  Packet4ui(vec4i_swizzle2(a,b,p,q,r,s))
+
 EIGEN_STRONG_INLINE Packet4f vec4f_movelh(const Packet4f& a, const Packet4f& b)
 {
   return Packet4f(_mm_movelh_ps(a,b));
@@ -120,6 +132,7 @@ EIGEN_STRONG_INLINE Packet2d vec2d_unpackhi(const Packet2d& a, const Packet2d& b
 #define EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
   const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+#define EIGEN_DECLARE_CONST_Packet4ui(NAME, X) const Packet4ui p4ui_##NAME = pset1<Packet4ui>(X)
 
 // Use the packet_traits defined in AVX/PacketMath.h instead if we're going
 // to leverage AVX instructions.
@@ -202,6 +215,33 @@ template<> struct packet_traits<int> : default_packet_traits
     HasBlend = 1
   };
 };
+template<> struct packet_traits<uint32_t> : default_packet_traits
+{
+  typedef Packet4ui type;
+  typedef Packet4ui half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+
+    HasDiv = 0,
+    HasNegate = 0,
+    HasSqrt = 0,
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+    HasCmp = 1,
+    HasMin = 1,
+    HasMax = 1,
+#else
+    HasCmp = 0,
+    HasMin = 0,
+    HasMax = 0,
+#endif
+
+    HasShift = 1,
+    HasBlend = 1
+  };
+};
 #endif
 template<> struct packet_traits<bool> : default_packet_traits
 {
@@ -211,7 +251,7 @@ template<> struct packet_traits<bool> : default_packet_traits
     Vectorizable = 1,
     AlignedOnScalar = 1,
     size=16,
- 
+
     HasAdd = 1,
     HasSub = 1,
     HasCmp = 1,  // note -- only pcmp_eq is defined
@@ -244,6 +284,11 @@ template<> struct unpacket_traits<Packet4i> {
   typedef Packet4i half;
   enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
 };
+template<> struct unpacket_traits<Packet4ui> {
+  typedef uint32_t type;
+  typedef Packet4ui half;
+  enum {size = 4, alignment = Aligned16, vectorizable = true, masked_load_available = false, masked_store_available = false};
+};
 template<> struct unpacket_traits<Packet16b> {
   typedef bool type;
   typedef Packet16b half;
@@ -258,6 +303,7 @@ template<> struct scalar_div_cost<double,true> { enum { value = 8 }; };
 template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps1(from); }
 template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
+template<> EIGEN_STRONG_INLINE Packet4ui pset1<Packet4ui>(const uint32_t& from) { return _mm_set1_epi32(numext::bit_cast<int32_t>(from)); }
 template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool& from) { return _mm_set1_epi8(static_cast<char>(from)); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
@@ -265,11 +311,13 @@ template<> EIGEN_STRONG_INLINE Packet2d pset1frombits<Packet2d>(uint64_t from) {
 template<> EIGEN_STRONG_INLINE Packet4f peven_mask(const Packet4f& /*a*/) { return _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, -1)); }
 template<> EIGEN_STRONG_INLINE Packet4i peven_mask(const Packet4i& /*a*/) { return _mm_set_epi32(0, -1, 0, -1); }
+template<> EIGEN_STRONG_INLINE Packet4ui peven_mask(const Packet4ui& /*a*/) { return _mm_set_epi32(0, -1, 0, -1); }
 template<> EIGEN_STRONG_INLINE Packet2d peven_mask(const Packet2d& /*a*/) { return _mm_castsi128_pd(_mm_set_epi32(0, 0, -1, -1)); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
 template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
 template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }
+template<> EIGEN_STRONG_INLINE Packet4ui pzero(const Packet4ui& /*a*/) { return _mm_setzero_si128(); }
 
 // GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
 // However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203)
@@ -285,10 +333,12 @@ template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
 
 template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
 template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
 template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
+template<> EIGEN_STRONG_INLINE Packet4ui plset<Packet4ui>(const uint32_t& a) { return _mm_add_epi32(pset1<Packet4ui>(a), _mm_set_epi32(3, 2, 1, 0)); }
 
 template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui padd<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return _mm_add_epi32(a, b); }
 
 template<> EIGEN_STRONG_INLINE Packet16b padd<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
 
@@ -299,6 +349,7 @@ template<> EIGEN_STRONG_INLINE Packet2d padds<Packet2d>(const Packet2d& a, const Packet2d& b)
 template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui psub<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return _mm_sub_epi32(a, b); }
 template<> EIGEN_STRONG_INLINE Packet16b psub<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b);
@@ -315,7 +366,7 @@ template<> EIGEN_STRONG_INLINE Packet4f paddsub<Packet4f>(const Packet4f& a, const Packet4f& b)
 template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& , const Packet2d& );
 template<> EIGEN_STRONG_INLINE Packet2d paddsub<Packet2d>(const Packet2d& a, const Packet2d& b)
 {
-#ifdef EIGEN_VECTORIZE_SSE3 
+#ifdef EIGEN_VECTORIZE_SSE3
   return _mm_addsub_pd(a,b);
 #else
   const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x0));
@@ -364,6 +415,21 @@ template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
                          0,2,1,3);
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet4ui pmul<Packet4ui>(const Packet4ui& a, const Packet4ui& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_mullo_epi32(a,b);
+#else
+  // this version is slightly faster than 4 scalar products
+  return vec4ui_swizzle1(
+            vec4ui_swizzle2(
+              _mm_mul_epu32(a,b),
+              _mm_mul_epu32(vec4ui_swizzle1(a,1,0,3,2),
+                            vec4ui_swizzle1(b,1,0,3,2)),
+              0,2,0,2),
+            0,2,1,3);
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet16b pmul<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
 
@@ -388,6 +454,7 @@ EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b)
 
 // for some weird reasons, it has to be overloaded for packets of integers
 template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+template<> EIGEN_STRONG_INLINE Packet4ui pmadd(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c) { return padd(pmul(a, b), c); }
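The SSE2 fallback of `pmul<Packet4ui>` above deserves unpacking: `_mm_mul_epu32` multiplies only the even lanes (0 and 2) of each operand, producing two 64-bit results. Running it once as-is and once with the odd lanes swizzled into even positions yields all four products; the final swizzles keep each product's low 32 bits and restore lane order. A scalar model (ours, for illustration):

```cpp
#include <cassert>
#include <cstdint>

void mul4_from_epu32(const uint32_t a[4], const uint32_t b[4], uint32_t out[4]) {
  // First _mm_mul_epu32: 64-bit products of lanes 0 and 2.
  uint64_t even0 = (uint64_t)a[0] * b[0];
  uint64_t even2 = (uint64_t)a[2] * b[2];
  // Second _mm_mul_epu32 after vec4ui_swizzle1(v,1,0,3,2): lanes 1 and 3.
  uint64_t odd1 = (uint64_t)a[1] * b[1];
  uint64_t odd3 = (uint64_t)a[3] * b[3];
  // vec4ui_swizzle2(...,0,2,0,2) then vec4ui_swizzle1(...,0,2,1,3):
  // keep the low 32 bits of each product, re-interleaved into lane order.
  out[0] = (uint32_t)even0; out[1] = (uint32_t)odd1;
  out[2] = (uint32_t)even2; out[3] = (uint32_t)odd3;
}

int main() {
  uint32_t a[4] = {0xFFFFFFFFu, 2u, 3u, 0x80000000u};
  uint32_t b[4] = {2u, 3u, 0xFFFFFFFFu, 2u};
  uint32_t r[4];
  mul4_from_epu32(a, b, r);
  assert(r[0] == 0xFFFFFFFEu && r[3] == 0u);  // products wrap mod 2^32
}
```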
 #ifdef EIGEN_VECTORIZE_FMA
 template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
 template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
@@ -412,6 +479,10 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b)
   return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(b),_mm_castsi128_ps(a),_mm_castsi128_ps(mask)));
 }
 
+template<> EIGEN_DEVICE_FUNC inline Packet4ui pselect(const Packet4ui& mask, const Packet4ui& a, const Packet4ui& b) {
+  return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(b),_mm_castsi128_ps(a),_mm_castsi128_ps(mask)));
+}
+
 template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); }
 
 template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) {
@@ -442,21 +513,25 @@ ptrue(const Packet2d& a) {
 template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pand<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return _mm_and_si128(a, b); }
 template<> EIGEN_STRONG_INLINE Packet16b pand<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
 
 template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui por<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return _mm_or_si128(a, b); }
 template<> EIGEN_STRONG_INLINE Packet16b por<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pxor<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return _mm_xor_si128(a, b); }
 template<> EIGEN_STRONG_INLINE Packet16b pxor<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); }
 template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); }
 template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); }
+template<> EIGEN_STRONG_INLINE Packet4ui pandnot<Packet4ui>(const Packet4ui& a, const Packet4ui& b) { return _mm_andnot_si128(b, a); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); }
@@ -470,22 +545,23 @@ template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b)
 template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return _mm_cmplt_epi32(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4ui pcmp_eq(const Packet4ui& a, const Packet4ui& b) { return _mm_cmpeq_epi32(a, b); }
 template<> EIGEN_STRONG_INLINE Packet16b pcmp_eq(const Packet16b& a, const Packet16b& b) { return _mm_cmpeq_epi8(a,b); }
 template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return por(pcmp_lt(a,b), pcmp_eq(a,b)); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
 #if EIGEN_GNUC_STRICT_LESS_THAN(6,3,0)
-  // There appears to be a bug in GCC, by which the optimizer may
-  // flip the argument order in calls to _mm_min_ps, so we have to
-  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
-  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
-  #ifdef EIGEN_VECTORIZE_AVX
+// There appears to be a bug in GCC, by which the optimizer may
+// flip the argument order in calls to _mm_min_ps, so we have to
+// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
+// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+#ifdef EIGEN_VECTORIZE_AVX
   Packet4f res;
   asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
-  #else
+#else
   Packet4f res = b;
   asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
-  #endif
+#endif
   return res;
 #else
   // Arguments are reversed to match NaN propagation behavior of std::min.
@@ -494,17 +570,17 @@ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const
 }
 template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
 #if EIGEN_GNUC_STRICT_LESS_THAN(6,3,0)
-  // There appears to be a bug in GCC, by which the optimizer may
-  // flip the argument order in calls to _mm_min_pd, so we have to
-  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
-  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
-  #ifdef EIGEN_VECTORIZE_AVX
+// There appears to be a bug in GCC, by which the optimizer may
+// flip the argument order in calls to _mm_min_pd, so we have to
+// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
+// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+#ifdef EIGEN_VECTORIZE_AVX
   Packet2d res;
   asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
-  #else
+#else
   Packet2d res = b;
   asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
-  #endif
+#endif
   return res;
 #else
   // Arguments are reversed to match NaN propagation behavior of std::min.
@@ -521,21 +597,30 @@ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const
   return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet4ui pmin<Packet4ui>(const Packet4ui& a, const Packet4ui& b) {
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_min_epu32(a, b);
+#else
+  return padd((Packet4ui)pmin((Packet4i)psub(a, pset1<Packet4ui>(0x80000000UL)),
+                              (Packet4i)psub(b, pset1<Packet4ui>(0x80000000UL))),
+              pset1<Packet4ui>(0x80000000UL));
+#endif
+}
 
 template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
 #if EIGEN_GNUC_STRICT_LESS_THAN(6,3,0)
-  // There appears to be a bug in GCC, by which the optimizer may
-  // flip the argument order in calls to _mm_max_ps, so we have to
-  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
-  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
-  #ifdef EIGEN_VECTORIZE_AVX
+// There appears to be a bug in GCC, by which the optimizer may
+// flip the argument order in calls to _mm_max_ps, so we have to
+// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
+// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+#ifdef EIGEN_VECTORIZE_AVX
   Packet4f res;
   asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
-  #else
+#else
   Packet4f res = b;
   asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
-  #endif
+#endif
   return res;
 #else
   // Arguments are reversed to match NaN propagation behavior of std::max.
@@ -544,17 +629,17 @@ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const
 }
 template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
 #if EIGEN_GNUC_STRICT_LESS_THAN(6,3,0)
-  // There appears to be a bug in GCC, by which the optimizer may
-  // flip the argument order in calls to _mm_max_pd, so we have to
-  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
-  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
-  #ifdef EIGEN_VECTORIZE_AVX
+// There appears to be a bug in GCC, by which the optimizer may
+// flip the argument order in calls to _mm_max_pd, so we have to
+// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
+// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+#ifdef EIGEN_VECTORIZE_AVX
   Packet2d res;
   asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
-  #else
+#else
   Packet2d res = b;
   asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
-  #endif
+#endif
   return res;
 #else
   // Arguments are reversed to match NaN propagation behavior of std::max.
@@ -571,6 +656,32 @@ template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const
   return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet4ui pmax<Packet4ui>(const Packet4ui& a, const Packet4ui& b) {
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_max_epu32(a, b);
+#else
+  return padd((Packet4ui)pmax((Packet4i)psub(a, pset1<Packet4ui>(0x80000000UL)),
+                              (Packet4i)psub(b, pset1<Packet4ui>(0x80000000UL))),
+              pset1<Packet4ui>(0x80000000UL));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4ui pcmp_lt(const Packet4ui& a, const Packet4ui& b) {
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return pxor(pcmp_eq(a, pmax(a, b)), ptrue(a));
+#else
+  return (Packet4ui)pcmp_lt((Packet4i)psub(a, pset1<Packet4ui>(0x80000000UL)),
+                            (Packet4i)psub(b, pset1<Packet4ui>(0x80000000UL)));
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4ui pcmp_le(const Packet4ui& a, const Packet4ui& b) {
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return pcmp_eq(a, pmin(a, b));
+#else
+  return (Packet4ui)pcmp_le((Packet4i)psub(a, pset1<Packet4ui>(0x80000000UL)),
+                            (Packet4i)psub(b, pset1<Packet4ui>(0x80000000UL)));
+#endif
+}
 
 template <typename Packet, typename Op>
 EIGEN_STRONG_INLINE Packet pminmax_propagate_numbers(const Packet& a, const Packet& b, Op op) {
@@ -628,6 +739,10 @@ template<int N> EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i& a) { return _mm_srai_epi32(a,N); }
 template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right   (const Packet4i& a) { return _mm_srli_epi32(a,N); }
 template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_left    (const Packet4i& a) { return _mm_slli_epi32(a,N); }
 
+template<int N> EIGEN_STRONG_INLINE Packet4ui parithmetic_shift_right(const Packet4ui& a) { return _mm_srli_epi32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4ui plogical_shift_right   (const Packet4ui& a) { return _mm_srli_epi32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4ui plogical_shift_left    (const Packet4ui& a) { return _mm_slli_epi32(a,N); }
+
 template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
 {
   const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
@@ -640,24 +755,26 @@ template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
 }
 template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
 {
-  #ifdef EIGEN_VECTORIZE_SSSE3
+#ifdef EIGEN_VECTORIZE_SSSE3
   return _mm_abs_epi32(a);
-  #else
+#else
   Packet4i aux = _mm_srai_epi32(a,31);
   return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
-  #endif
+#endif
 }
+template<> EIGEN_STRONG_INLINE Packet4ui pabs(const Packet4ui& a) { return a; }
 
 template<> EIGEN_STRONG_INLINE Packet4f psignbit(const Packet4f& a) { return _mm_castsi128_ps(_mm_srai_epi32(_mm_castps_si128(a), 31)); }
 template<> EIGEN_STRONG_INLINE Packet2d psignbit(const Packet2d& a) {
-  Packet4f tmp = psignbit(_mm_castpd_ps(a)); 
+  Packet4f tmp = psignbit(_mm_castpd_ps(a));
 #ifdef EIGEN_VECTORIZE_AVX
-  return _mm_castps_pd(_mm_permute_ps(tmp, (shuffle_mask<1, 1, 3, 3>::mask))); 
+  return _mm_castps_pd(_mm_permute_ps(tmp, (shuffle_mask<1, 1, 3, 3>::mask)));
 #else
-  return _mm_castps_pd(_mm_shuffle_ps(tmp, tmp, (shuffle_mask<1, 1, 3, 3>::mask))); 
+  return _mm_castps_pd(_mm_shuffle_ps(tmp, tmp, (shuffle_mask<1, 1, 3, 3>::mask)));
 #endif  // EIGEN_VECTORIZE_AVX
 }
+template<> EIGEN_STRONG_INLINE Packet4ui psignbit(const Packet4ui& a) { return pzero(a); }
 
 #ifdef EIGEN_VECTORIZE_SSE4_1
 template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
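One detail worth flagging in the shift overloads above: for unsigned lanes, arithmetic and logical right shift are the same operation, since there is no sign bit to replicate. That is why `parithmetic_shift_right<N>` for `Packet4ui` is implemented with `_mm_srli_epi32`, the logical-shift intrinsic. In scalar terms (our sketch):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xF0000000u;
  // Unsigned >> is already a logical shift: zeros enter from the left.
  assert((x >> 4) == 0x0F000000u);
  // The signed view of the same bits is sign-extended instead
  // (arithmetic shift, as on all mainstream compilers).
  int32_t s = static_cast<int32_t>(x);
  assert(static_cast<uint32_t>(s >> 4) == 0xFF000000u);
}
```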
 template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
+template<> EIGEN_STRONG_INLINE Packet4ui pload<Packet4ui>(const uint32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
 template<> EIGEN_STRONG_INLINE Packet16b pload<Packet16b>(const bool* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
 
 #if EIGEN_COMP_MSVC
 template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
-  EIGEN_DEBUG_UNALIGNED_LOAD
-  return _mm_loadu_ps(from);
- }
+  EIGEN_DEBUG_UNALIGNED_LOAD
+  return _mm_loadu_ps(from);
+}
 #else
 // NOTE: with the code below, MSVC's compiler crashes!
@@ -783,6 +901,11 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
   EIGEN_DEBUG_UNALIGNED_LOAD
   return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
 }
+template<> EIGEN_STRONG_INLINE Packet4ui ploadu<Packet4ui>(const uint32_t* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
+}
 template<> EIGEN_STRONG_INLINE Packet16b ploadu<Packet16b>(const bool* from) {
   EIGEN_DEBUG_UNALIGNED_LOAD
   return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
@@ -810,6 +933,12 @@ template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
   tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
   return vec4i_swizzle1(tmp, 0, 0, 1, 1);
 }
+template<> EIGEN_STRONG_INLINE Packet4ui ploaddup<Packet4ui>(const uint32_t* from)
+{
+  Packet4ui tmp;
+  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
+  return vec4ui_swizzle1(tmp, 0, 0, 1, 1);
+}
 
 // Loads 8 bools from memory and returns the packet
 // {b0, b0, b1, b1, b2, b2, b3, b3, b4, b4, b5, b5, b6, b6, b7, b7}
@@ -831,11 +960,13 @@ ploadquad<Packet16b>(const bool* from) {
 template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
 template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
 template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstore<uint32_t>(uint32_t* to, const Packet4ui& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
 template<> EIGEN_STRONG_INLINE void pstore<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
 
 template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<uint32_t>(uint32_t* to, const Packet4ui& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
 
 template<typename Scalar, typename Packet> EIGEN_STRONG_INLINE void pstorel(Scalar* to, const Packet& from);
@@ -858,6 +989,11 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
 {
   return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }
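ploaddup reads two scalars and duplicates each into adjacent lanes, producing {a0, a0, a1, a1}. A standalone sketch of the same two steps with raw SSE2 intrinsics; 0x50 is _MM_SHUFFLE(1, 1, 0, 0), the immediate corresponding to the 0, 0, 1, 1 swizzle used above:

#include <cstdint>
#include <cstdio>
#include <emmintrin.h>

int main() {
  alignas(16) uint32_t src[2] = {7u, 9u};
  // Load 64 bits into the low half, then swizzle to {a0, a0, a1, a1}.
  __m128i tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src));
  __m128i dup = _mm_shuffle_epi32(tmp, 0x50);
  alignas(16) uint32_t out[4];
  _mm_store_si128(reinterpret_cast<__m128i*>(out), dup);
  std::printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);  // prints: 7 7 9 9
  return 0;
}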
+template<> EIGEN_DEVICE_FUNC inline Packet4ui pgather<uint32_t, Packet4ui>(const uint32_t* from, Index stride)
+{
+  return _mm_set_epi32(numext::bit_cast<int32_t>(from[3 * stride]), numext::bit_cast<int32_t>(from[2 * stride]),
+                       numext::bit_cast<int32_t>(from[1 * stride]), numext::bit_cast<int32_t>(from[0 * stride]));
+}
 template<> EIGEN_DEVICE_FUNC inline Packet16b pgather<bool, Packet16b>(const bool* from, Index stride)
 {
@@ -886,6 +1022,13 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const
   to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
   to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
 }
+template<> EIGEN_DEVICE_FUNC inline void pscatter<uint32_t, Packet4ui>(uint32_t* to, const Packet4ui& from, Index stride)
+{
+  to[stride * 0] = numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(from));
+  to[stride * 1] = numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1)));
+  to[stride * 2] = numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2)));
+  to[stride * 3] = numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3)));
+}
 template<> EIGEN_DEVICE_FUNC inline void pscatter<bool, Packet16b>(bool* to, const Packet16b& from, Index stride)
 {
   to[4*stride*0] = _mm_cvtsi128_si32(from);
@@ -918,6 +1061,7 @@ typedef const char * SsePrefetchPtrType;
 template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
 template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
 template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<uint32_t>(const uint32_t* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
 #endif
 
 #if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
@@ -926,21 +1070,25 @@
 template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
 template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
 template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+template<> EIGEN_STRONG_INLINE uint32_t pfirst<Packet4ui>(const Packet4ui& a) { uint32_t x = numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(a)); return x; }
 #elif EIGEN_COMP_MSVC_STRICT
 // The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
 template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
 template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
 template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+template<> EIGEN_STRONG_INLINE uint32_t pfirst<Packet4ui>(const Packet4ui& a) { uint32_t x = numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(a)); return x; }
 #else
 template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
 template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
 template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
+template<> EIGEN_STRONG_INLINE uint32_t pfirst<Packet4ui>(const Packet4ui& a) { return numext::bit_cast<uint32_t>(_mm_cvtsi128_si32(a)); }
 #endif
 template<> EIGEN_STRONG_INLINE bool pfirst<Packet16b>(const Packet16b& a) { int x = _mm_cvtsi128_si32(a); return static_cast<bool>(x & 1); }
 
 template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return _mm_shuffle_ps(a,a,0x1B); }
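pgather and pscatter differ from plain loads and stores only in the stride between consecutive elements. A scalar model of the gather above (gather_model is a hypothetical helper for illustration, not an Eigen function):

#include <cassert>
#include <cstdint>

// Collect four elements spaced `stride` apart, like pgather<uint32_t, Packet4ui>.
static void gather_model(const uint32_t* from, long stride, uint32_t out[4]) {
  for (int i = 0; i < 4; ++i) out[i] = from[i * stride];
}

int main() {
  uint32_t data[8] = {0, 10, 20, 30, 40, 50, 60, 70};
  uint32_t out[4];
  gather_model(data, 2, out);  // picks indices 0, 2, 4, 6
  assert(out[0] == 0 && out[1] == 20 && out[2] == 40 && out[3] == 60);
  return 0;
}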
 template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return _mm_shuffle_pd(a,a,0x1); }
 template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return _mm_shuffle_epi32(a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet4ui preverse(const Packet4ui& a) { return _mm_shuffle_epi32(a, 0x1B); }
 template<> EIGEN_STRONG_INLINE Packet16b preverse(const Packet16b& a) {
 #ifdef EIGEN_VECTORIZE_SSSE3
   __m128i mask = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
@@ -979,10 +1127,10 @@ template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, cons
   // Clamp exponent to [-2099, 2099]
   const Packet2d max_exponent = pset1<Packet2d>(2099.0);
   const Packet2d e = pmin(pmax(exponent, pnegate(max_exponent)), max_exponent);
-  
+
   // Convert e to integer and swizzle to low-order bits.
   const Packet4i ei = vec4i_swizzle1(_mm_cvtpd_epi32(e), 0, 3, 1, 3);
-  
+
   // Split 2^e into four factors and multiply:
   const Packet4i bias = _mm_set_epi32(0, 1023, 0, 1023);
   Packet4i b = parithmetic_shift_right<2>(ei);  // floor(e/4)
@@ -1038,24 +1186,24 @@ template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
 {
   // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
   // (from Nehalem to Haswell)
-// #ifdef EIGEN_VECTORIZE_SSE3
-//   Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
-//   return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
-// #else
+  // #ifdef EIGEN_VECTORIZE_SSE3
+  //   Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
+  //   return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
+  // #else
   Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
   return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
-// #endif
+  // #endif
 }
 
 template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
 {
   // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
   // (from Nehalem to Haswell)
-// #ifdef EIGEN_VECTORIZE_SSE3
-//   return pfirst<Packet2d>(_mm_hadd_pd(a, a));
-// #else
+  // #ifdef EIGEN_VECTORIZE_SSE3
+  //   return pfirst<Packet2d>(_mm_hadd_pd(a, a));
+  // #else
   return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
-// #endif
+  // #endif
 }
 
 #ifdef EIGEN_VECTORIZE_SSSE3
@@ -1064,6 +1212,11 @@ template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
   Packet4i tmp0 = _mm_hadd_epi32(a,a);
   return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
 }
+template<> EIGEN_STRONG_INLINE uint32_t predux<Packet4ui>(const Packet4ui& a)
+{
+  Packet4ui tmp0 = _mm_hadd_epi32(a, a);
+  return pfirst<Packet4ui>(_mm_hadd_epi32(tmp0, tmp0));
+}
 #else
 template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
 {
@@ -1071,6 +1224,10 @@ template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
   Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
   return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
 }
+template<> EIGEN_STRONG_INLINE uint32_t predux<Packet4ui>(const Packet4ui& a) {
+  Packet4ui tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a, a));
+  return pfirst(tmp) + pfirst<Packet4ui>(_mm_shuffle_epi32(tmp, 1));
+}
 #endif
 
 template<> EIGEN_STRONG_INLINE bool predux<Packet16b>(const Packet16b& a) {
@@ -1100,6 +1257,15 @@ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
   pstore(aux, a);
   return (aux[0] * aux[1]) * (aux[2] * aux[3]);
 }
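The non-SSSE3 predux above folds the high 64-bit half onto the low half and then adds the two surviving lanes. The same shape as a standalone SSE2 sketch (not Eigen code):

#include <cassert>
#include <cstdint>
#include <emmintrin.h>

// Horizontal sum of four uint32 lanes in two folds.
static uint32_t predux_model(__m128i a) {
  __m128i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a, a));  // lanes {0+2, 1+3, ...}
  uint32_t lo = static_cast<uint32_t>(_mm_cvtsi128_si32(tmp));
  uint32_t hi = static_cast<uint32_t>(_mm_cvtsi128_si32(_mm_shuffle_epi32(tmp, 1)));
  return lo + hi;
}

int main() {
  __m128i v = _mm_set_epi32(4, 3, 2, 1);  // lanes {1, 2, 3, 4}
  assert(predux_model(v) == 10u);
  return 0;
}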
+template<> EIGEN_STRONG_INLINE uint32_t predux_mul<Packet4ui>(const Packet4ui& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., reusing pmul is very slow!)
+  // TODO try to call _mm_mul_epu32 directly
+  EIGEN_ALIGN16 uint32_t aux[4];
+  pstore(aux, a);
+  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
 
 template<> EIGEN_STRONG_INLINE bool predux_mul<Packet16b>(const Packet16b& a) {
   Packet4i tmp = _mm_and_si128(a, _mm_unpackhi_epi64(a,a));
@@ -1132,6 +1298,21 @@ template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
   return aux0<aux2 ? aux0 : aux2;
 #endif // EIGEN_VECTORIZE_SSE4_1
 }
+template<> EIGEN_STRONG_INLINE uint32_t predux_min<Packet4ui>(const Packet4ui& a)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  Packet4ui tmp = _mm_min_epu32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
+  return pfirst<Packet4ui>(_mm_min_epu32(tmp,_mm_shuffle_epi32(tmp, 1)));
+#else
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::min after the pstore!)
+  EIGEN_ALIGN16 uint32_t aux[4];
+  pstore(aux, a);
+  uint32_t aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
+  uint32_t aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
+  return aux0<aux2 ? aux0 : aux2;
+#endif // EIGEN_VECTORIZE_SSE4_1
+}
 
 template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
@@ -1158,6 +1339,21 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
   return aux0>aux2 ? aux0 : aux2;
 #endif // EIGEN_VECTORIZE_SSE4_1
 }
+template<> EIGEN_STRONG_INLINE uint32_t predux_max<Packet4ui>(const Packet4ui& a)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  Packet4ui tmp = _mm_max_epu32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
+  return pfirst<Packet4ui>(_mm_max_epu32(tmp,_mm_shuffle_epi32(tmp, 1)));
+#else
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::max after the pstore!)
+  EIGEN_ALIGN16 uint32_t aux[4];
+  pstore(aux, a);
+  uint32_t aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
+  uint32_t aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
+  return aux0>aux2 ? aux0 : aux2;
+#endif // EIGEN_VECTORIZE_SSE4_1
+}
 
 // not needed yet
 // template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
@@ -1174,6 +1370,10 @@ template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4i& x)
 {
   return _mm_movemask_ps(_mm_castsi128_ps(x)) != 0x0;
 }
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4ui& x)
+{
+  return _mm_movemask_ps(_mm_castsi128_ps(x)) != 0x0;
+}
 
 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f,4>& kernel) {
@@ -1199,6 +1399,9 @@ ptranspose(PacketBlock<Packet4i,4>& kernel) {
   kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);
   kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
 }
+EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4ui, 4>& kernel) {
+  ptranspose((PacketBlock<Packet4i, 4>&)kernel);
+}
 
 EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2d,2>& kernel) {
@@ -1304,6 +1507,10 @@ template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, cons
   return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
 #endif
 }
+template<> EIGEN_STRONG_INLINE Packet4ui pblend(const Selector<4>& ifPacket, const Packet4ui& thenPacket,
+                                                const Packet4ui& elsePacket) {
+  return (Packet4ui)pblend(ifPacket, (Packet4i)thenPacket, (Packet4i)elsePacket);
+}
 template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
   const __m128 zero = _mm_setzero_ps();
   const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
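The SSE4.1 branches of predux_min/predux_max above reduce in two shuffle/min steps: lanes {0,1} against {2,3}, then the surviving pair. A standalone model of the min reduction (compile with -msse4.1; not part of the patch):

#include <cassert>
#include <cstdint>
#include <smmintrin.h>  // SSE4.1 for _mm_min_epu32

// Two-step tree reduction: pairwise min of (0,2) and (1,3), then the pair.
static uint32_t predux_min_model(__m128i a) {
  __m128i tmp = _mm_min_epu32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 3, 2)));
  return static_cast<uint32_t>(
      _mm_cvtsi128_si32(_mm_min_epu32(tmp, _mm_shuffle_epi32(tmp, 1))));
}

int main() {
  __m128i v = _mm_set_epi32(7, 1, 9, 5);  // lanes {5, 9, 1, 7}
  assert(predux_min_model(v) == 1u);
  return 0;
}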
@@ -1357,7 +1564,7 @@ template<> EIGEN_STRONG_INLINE double pnmsub(const double& a, const double& b, c
 
 // Helpers for half->float and float->half conversions.
 // Currently only used by the AVX code.
 EIGEN_STRONG_INLINE __m128i half2floatsse(__m128i h) {
-    __m128i input = _mm_cvtepu16_epi32(h);
+  __m128i input = _mm_cvtepu16_epi32(h);
 
   // Direct vectorization of half_to_float, C parts in the comments.
   __m128i shifted_exp = _mm_set1_epi32(0x7c00 << 13);
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index a98a014df..5dd4cbc3d 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -77,7 +77,7 @@ inline T REF_FREXP(const T& x, T& exp) {
   EIGEN_USING_STD(frexp)
   const T out = static_cast<T>(frexp(x, &iexp));
   exp = static_cast<T>(iexp);
-  
+
   // The exponent value is unspecified if the input is inf or NaN, but MSVC
   // seems to set it to 1. We need to set it back to zero for consistency.
   if (!(numext::isfinite)(x)) {
@@ -340,60 +340,78 @@ void packetmath_boolean_mask_ops_real() {
   CHECK_CWISE2_IF(true, internal::pcmp_lt_or_nan, internal::pcmp_lt_or_nan);
 }
 
+template <typename Scalar, typename Packet, typename EnableIf = void>
+struct packetmath_boolean_mask_ops_notcomplex_test {
+  static void run() {}
+};
+
 template <typename Scalar, typename Packet>
-void packetmath_boolean_mask_ops_notcomplex() {
-  const int PacketSize = internal::unpacket_traits<Packet>::size;
-  const int size = 2 * PacketSize;
-  EIGEN_ALIGN_MAX Scalar data1[size];
-  EIGEN_ALIGN_MAX Scalar data2[size];
-  EIGEN_ALIGN_MAX Scalar ref[size];
+struct packetmath_boolean_mask_ops_notcomplex_test<
+    Scalar, Packet,
+    std::enable_if_t<internal::packet_traits<Scalar>::HasCmp &&
+                     !internal::is_same<Scalar, bool>::value>> {
+  static void run() {
+    const int PacketSize = internal::unpacket_traits<Packet>::size;
+    const int size = 2 * PacketSize;
+    EIGEN_ALIGN_MAX Scalar data1[size];
+    EIGEN_ALIGN_MAX Scalar data2[size];
+    EIGEN_ALIGN_MAX Scalar ref[size];
 
-  for (int i = 0; i < PacketSize; ++i) {
-    data1[i] = internal::random<Scalar>();
-    data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0);
-  }
+    for (int i = 0; i < PacketSize; ++i) {
+      data1[i] = internal::random<Scalar>();
+      data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0);
+    }
 
-  CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le);
-  CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt);
+    CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le);
+    CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt);
 
     //Test (-0) <=/< (0) for signed operations
-  for (int i = 0; i < PacketSize; ++i) {
-    data1[i] = Scalar(-0.0);
-    data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0);
-  }
-  CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le);
-  CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt);
+    for (int i = 0; i < PacketSize; ++i) {
+      data1[i] = Scalar(-0.0);
+      data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0);
+    }
+    CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le);
+    CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt);
 
     //Test NaN
-  for (int i = 0; i < PacketSize; ++i) {
-    data1[i] = NumTraits<Scalar>::quiet_NaN();
-    data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0);
+    for (int i = 0; i < PacketSize; ++i) {
+      data1[i] = NumTraits<Scalar>::quiet_NaN();
+      data1[i + PacketSize] = internal::random<bool>() ? data1[i] : Scalar(0);
+    }
+    CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le);
+    CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt);
   }
-  CHECK_CWISE2_IF(true, internal::pcmp_le, internal::pcmp_le);
-  CHECK_CWISE2_IF(true, internal::pcmp_lt, internal::pcmp_lt);
-}
+};
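The struct-plus-enable_if shape above exists because C++ forbids partial specialization of function templates; moving the test body into a class template adds a slot that SFINAE can switch on. A minimal standalone sketch of the pattern, with a made-up predicate (names here are illustrative, not from Eigen):

#include <iostream>
#include <type_traits>

// Primary template: the disabled case is a no-op.
template <typename Scalar, typename Enable = void>
struct cmp_test {
  static void run() { std::cout << "skipped\n"; }
};

// Partial specialization selected only when the predicate holds.
template <typename Scalar>
struct cmp_test<Scalar, std::enable_if_t<std::is_integral<Scalar>::value>> {
  static void run() { std::cout << "testing integral type\n"; }
};

int main() {
  cmp_test<float>::run();  // skipped
  cmp_test<int>::run();    // testing integral type
  return 0;
}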
 
-// Packet16b representing bool does not support ptrue, pandnot or pcmp_eq, since the scalar path
-// (for some compilers) compute the bitwise and with 0x1 of the results to keep the value in [0,1].
-template<>
+// Packet16b representing bool does not support ptrue, pandnot or pcmp_eq, since
+// the scalar path (for some compilers) computes the bitwise and with 0x1 of the
+// results to keep the value in [0,1].
+template <>
 void packetmath_boolean_mask_ops<bool, internal::packet_traits<bool>::type>() {}
-template<>
-void packetmath_boolean_mask_ops_notcomplex<bool, internal::packet_traits<bool>::type>() {}
+
+template <typename Scalar, typename Packet, typename EnableIf = void>
+struct packetmath_minus_zero_add_test {
+  static void run() {}
+};
 
 template <typename Scalar, typename Packet>
-void packetmath_minus_zero_add() {
-  const int PacketSize = internal::unpacket_traits<Packet>::size;
-  const int size = 2 * PacketSize;
-  EIGEN_ALIGN_MAX Scalar data1[size] = {};
-  EIGEN_ALIGN_MAX Scalar data2[size] = {};
-  EIGEN_ALIGN_MAX Scalar ref[size] = {};
-
-  for (int i = 0; i < PacketSize; ++i) {
-    data1[i] = Scalar(-0.0);
-    data1[i + PacketSize] = Scalar(-0.0);
+struct packetmath_minus_zero_add_test<
+    Scalar, Packet,
+    std::enable_if_t<!NumTraits<Scalar>::IsInteger>> {
+  static void run() {
+    const int PacketSize = internal::unpacket_traits<Packet>::size;
+    const int size = 2 * PacketSize;
+    EIGEN_ALIGN_MAX Scalar data1[size] = {};
+    EIGEN_ALIGN_MAX Scalar data2[size] = {};
+    EIGEN_ALIGN_MAX Scalar ref[size] = {};
+
+    for (int i = 0; i < PacketSize; ++i) {
+      data1[i] = Scalar(-0.0);
+      data1[i + PacketSize] = Scalar(-0.0);
+    }
+    CHECK_CWISE2_IF(internal::packet_traits<Scalar>::HasAdd, REF_ADD, internal::padd);
   }
-  CHECK_CWISE2_IF(internal::packet_traits<Scalar>::HasAdd, REF_ADD, internal::padd);
-}
+};
 
 // Ensure optimization barrier compiles and doesn't modify contents.
 // Only applies to raw types, so will not work for std::complex, Eigen::half
@@ -673,7 +691,7 @@ void packetmath() {
   packetmath_boolean_mask_ops<Scalar, Packet>();
   packetmath_pcast_ops_runner<Scalar, Packet>::run();
-  packetmath_minus_zero_add<Scalar, Packet>();
+  packetmath_minus_zero_add_test<Scalar, Packet>::run();
 
   for (int i = 0; i < size; ++i) {
     data1[i] = numext::abs(internal::random<Scalar>());
@@ -682,9 +700,9 @@ void packetmath() {
   CHECK_CWISE1_IF(PacketTraits::HasRsqrt, numext::rsqrt, internal::prsqrt);
   CHECK_CWISE3_IF(true, REF_MADD, internal::pmadd);
   if (!std::is_same<Scalar, bool>::value && NumTraits<Scalar>::IsSigned) {
-    CHECK_CWISE3_IF(true, REF_NMSUB, internal::pnmsub);
+    CHECK_CWISE3_IF(PacketTraits::HasNegate, REF_NMSUB, internal::pnmsub);
   }
-  
+
   // For pmsub, pnmadd, the values can cancel each other to become near zero,
   // which can lead to very flaky tests. Here we ensure the signs are such that
   // they do not cancel.
@@ -695,7 +713,7 @@ void packetmath() {
   }
   if (!std::is_same<Scalar, bool>::value && NumTraits<Scalar>::IsSigned) {
     CHECK_CWISE3_IF(true, REF_MSUB, internal::pmsub);
-    CHECK_CWISE3_IF(true, REF_NMADD, internal::pnmadd);
+    CHECK_CWISE3_IF(PacketTraits::HasNegate, REF_NMADD, internal::pnmadd);
   }
 }
 
@@ -714,7 +732,7 @@ struct Name { \
   T operator()(const T& val) const { \
     return Func(val); \
   } \
-}
+  }
 
 CREATE_FUNCTOR(psqrt_functor, internal::psqrt);
 CREATE_FUNCTOR(prsqrt_functor, internal::prsqrt);
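packetmath_minus_zero_add_test is enabled only for non-integer Scalars because integer types have no negative zero, so the -0.0 inputs above would degenerate to plain zeros. A small standalone illustration (not Eigen code):

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  // For integers there is no negative zero: -0 is just 0, so the
  // "-0 + -0" probe would be vacuous for uint32_t and friends.
  uint32_t u = static_cast<uint32_t>(-0);
  assert(u == 0u);
  // For IEEE floats the sign survives: (-0.0) + (-0.0) == -0.0, which is
  // exactly what the enabled specialization exercises through padd.
  float f = -0.0f + -0.0f;
  assert(f == 0.0f && std::signbit(f));
  return 0;
}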
@@ -742,20 +760,20 @@ void packetmath_test_IEEE_corner_cases(const RefFunctorT& ref_fun,
     // When EIGEN_FAST_MATH is 1 we relax the conditions slightly, and allow the function
     // to return the same value for subnormals as the reference would return for zero with
     // the same sign as the input.
-    #if EIGEN_FAST_MATH
-    data1[0] = Scalar(scale) * std::numeric_limits<Scalar>::denorm_min();
-    data1[1] = -data1[0];
-    test::packet_helper<Cond, Packet> h;
-    h.store(data2, fun(h.load(data1)));
+#if EIGEN_FAST_MATH
+    data1[0] = Scalar(scale) * std::numeric_limits<Scalar>::denorm_min();
+    data1[1] = -data1[0];
+    test::packet_helper<Cond, Packet> h;
+    h.store(data2, fun(h.load(data1)));
     for (int i=0; i < PacketSize; ++i) {
-      const Scalar ref_zero = ref_fun(data1[i] < 0 ?
-Scalar(0) : Scalar(0));
-      const Scalar ref_val = ref_fun(data1[i]);
-      VERIFY(((std::isnan)(data2[i]) && (std::isnan)(ref_val)) || data2[i] == ref_zero ||
-             verifyIsApprox(data2[i], ref_val));
-    }
-    #else
-    CHECK_CWISE1_IF(Cond, ref_fun, fun);
-    #endif
+      const Scalar ref_zero = ref_fun(data1[i] < 0 ? -Scalar(0) : Scalar(0));
+      const Scalar ref_val = ref_fun(data1[i]);
+      VERIFY(((std::isnan)(data2[i]) && (std::isnan)(ref_val)) || data2[i] == ref_zero ||
+             verifyIsApprox(data2[i], ref_val));
+    }
+#else
+    CHECK_CWISE1_IF(Cond, ref_fun, fun);
+#endif
   }
 }
@@ -763,7 +781,7 @@ void packetmath_test_IEEE_corner_cases(const RefFunctorT& ref_fun,
   data1[0] = norm_min;
   data1[1] = -data1[0];
   CHECK_CWISE1_IF(Cond, ref_fun, fun);
-  
+
   // Test for largest floats.
   data1[0] = norm_max;
   data1[1] = -data1[0];
@@ -794,7 +812,7 @@ void packetmath_real() {
   EIGEN_ALIGN_MAX Scalar data1[PacketSize * 4] = {};
   EIGEN_ALIGN_MAX Scalar data2[PacketSize * 4] = {};
   EIGEN_ALIGN_MAX Scalar ref[PacketSize * 4] = {};
-  
+
   // Negate with -0.
   if (PacketTraits::HasNegate) {
     test::packet_helper<PacketTraits::HasNegate, Packet> h;
@@ -831,7 +849,7 @@ void packetmath_real() {
   CHECK_CWISE1_IF(PacketTraits::HasSign, numext::sign, internal::psign);
 
   packetmath_boolean_mask_ops_real<Scalar, Packet>();
-  
+
   // Rounding edge cases.
   if (PacketTraits::HasRound || PacketTraits::HasCeil || PacketTraits::HasFloor || PacketTraits::HasRint) {
     typedef typename internal::make_integer<Scalar>::type IntType;
@@ -864,7 +882,7 @@ void packetmath_real() {
     values.push_back(NumTraits<Scalar>::infinity());
     values.push_back(-NumTraits<Scalar>::infinity());
     values.push_back(NumTraits<Scalar>::quiet_NaN());
-  
+
     for (size_t k=0; k::infinity(); }
 
   CHECK_CWISE1_IF(PacketTraits::HasExp, std::exp, internal::pexp);
-  
+
   CHECK_CWISE1_BYREF1_IF(PacketTraits::HasExp, REF_FREXP, internal::pfrexp);
   if (PacketTraits::HasExp) {
     // Check denormals:
@@ -900,11 +918,11 @@ void packetmath_real() {
       data1[0] = -data1[0];
       CHECK_CWISE1_BYREF1_IF(PacketTraits::HasExp, REF_FREXP, internal::pfrexp);
     }
-  
+
     // zero
     data1[0] = Scalar(0);
     CHECK_CWISE1_BYREF1_IF(PacketTraits::HasExp, REF_FREXP, internal::pfrexp);
-  
+
     // inf and NaN only compare output fraction, not exponent.
     test::packet_helper<PacketTraits::HasExp, Packet> h;
     Packet pout;
@@ -919,7 +937,7 @@ void packetmath_real() {
       VERIFY(test::areApprox(ref, data2, 1) && "internal::pfrexp");
     }
   }
-  
+
   for (int i = 0; i < PacketSize; ++i) {
     data1[i] = Scalar(internal::random<double>(-1, 1));
     data2[i] = Scalar(internal::random<double>(-1, 1));
@@ -1166,7 +1184,7 @@ void packetmath_real() {
       ref[i] = SCALAR(REFOP(static_cast<REFTYPE>(data1[i]))); \
       h.store(data2, POP(h.load(data1))); \
       VERIFY(test::areApprox(ref, data2, PacketSize) && #POP); \
-}
+  }
 
 template <typename Scalar>
 Scalar propagate_nan_max(const Scalar& a, const Scalar& b) {
@@ -1293,7 +1311,7 @@ void packetmath_notcomplex() {
     CHECK_CWISE2_IF(PacketTraits::HasMax, propagate_nan_max, internal::pmax<PropagateNaN>);
   }
 
-  packetmath_boolean_mask_ops_notcomplex<Scalar, Packet>();
+  packetmath_boolean_mask_ops_notcomplex_test<Scalar, Packet>::run();
 }
 
 template