mirror of
https://gitlab.com/libeigen/eigen.git
synced 2025-06-04 18:54:00 +08:00

- In particular, refactor the i0e and i1e code so the scalar and vectorized paths share code.
- Move chebevl to GenericPacketMathFunctions.

A brief benchmark, building Eigen with the FMA, AVX and AVX2 flags.

Before:
CPU: Intel Haswell with HyperThreading (6 cores)
Benchmark                  Time(ns)     CPU(ns)  Iterations
-----------------------------------------------------------------
BM_eigen_i0e_double/1          57.3        57.3    10000000
BM_eigen_i0e_double/8           398         398     1748554
BM_eigen_i0e_double/64         3184        3184      218961
BM_eigen_i0e_double/512       25579       25579       27330
BM_eigen_i0e_double/4k       205043      205042        3418
BM_eigen_i0e_double/32k     1646038     1646176         422
BM_eigen_i0e_double/256k   13180959    13182613          53
BM_eigen_i0e_double/1M     52684617    52706132          10
BM_eigen_i0e_float/1           28.4        28.4    24636711
BM_eigen_i0e_float/8           75.7        75.7     9207634
BM_eigen_i0e_float/64           512         512     1000000
BM_eigen_i0e_float/512         4194        4194      166359
BM_eigen_i0e_float/4k         32756       32761       21373
BM_eigen_i0e_float/32k       261133      261153        2678
BM_eigen_i0e_float/256k     2087938     2088231         333
BM_eigen_i0e_float/1M       8380409     8381234          84
BM_eigen_i1e_double/1          56.3        56.3    10000000
BM_eigen_i1e_double/8           397         397     1772376
BM_eigen_i1e_double/64         3114        3115      223881
BM_eigen_i1e_double/512       25358       25361       27761
BM_eigen_i1e_double/4k       203543      203593        3462
BM_eigen_i1e_double/32k     1613649     1613803         428
BM_eigen_i1e_double/256k   12910625    12910374          54
BM_eigen_i1e_double/1M     51723824    51723991          10
BM_eigen_i1e_float/1           28.3        28.3    24683049
BM_eigen_i1e_float/8           74.8        74.9     9366216
BM_eigen_i1e_float/64           505         505     1000000
BM_eigen_i1e_float/512         4068        4068      171690
BM_eigen_i1e_float/4k         31803       31806       21948
BM_eigen_i1e_float/32k       253637      253692        2763
BM_eigen_i1e_float/256k     2019711     2019918         346
BM_eigen_i1e_float/1M       8238681     8238713          86

After:
CPU: Intel Haswell with HyperThreading (6 cores)
Benchmark                  Time(ns)     CPU(ns)  Iterations
-----------------------------------------------------------------
BM_eigen_i0e_double/1          15.8        15.8    44097476
BM_eigen_i0e_double/8          99.3        99.3     7014884
BM_eigen_i0e_double/64          777         777      886612
BM_eigen_i0e_double/512        6180        6181      100000
BM_eigen_i0e_double/4k        48136       48140       14678
BM_eigen_i0e_double/32k      385936      385943        1801
BM_eigen_i0e_double/256k    3293324     3293551         228
BM_eigen_i0e_double/1M     12423600    12424458          57
BM_eigen_i0e_float/1           16.3        16.3    43038042
BM_eigen_i0e_float/8           30.1        30.1    23456931
BM_eigen_i0e_float/64           169         169     4132875
BM_eigen_i0e_float/512         1338        1339      516860
BM_eigen_i0e_float/4k         10191       10191       68513
BM_eigen_i0e_float/32k        81338       81337        8531
BM_eigen_i0e_float/256k      651807      651984        1000
BM_eigen_i0e_float/1M       2633821     2634187         268
BM_eigen_i1e_double/1          16.2        16.2    42352499
BM_eigen_i1e_double/8           110         110     6316524
BM_eigen_i1e_double/64          822         822      851065
BM_eigen_i1e_double/512        6480        6481      100000
BM_eigen_i1e_double/4k        51843       51843       10000
BM_eigen_i1e_double/32k      414854      414852        1680
BM_eigen_i1e_double/256k    3320001     3320568         212
BM_eigen_i1e_double/1M     13442795    13442391          53
BM_eigen_i1e_float/1           17.6        17.6    41025735
BM_eigen_i1e_float/8           35.5        35.5    19597891
BM_eigen_i1e_float/64           240         240     2924237
BM_eigen_i1e_float/512         1424        1424      485953
BM_eigen_i1e_float/4k         10722       10723       65162
BM_eigen_i1e_float/32k        86286       86297        8048
BM_eigen_i1e_float/256k      691821      691868        1000
BM_eigen_i1e_float/1M       2777336     2777747         256

This shows anywhere from a 50% to 75% improvement on these operations; for example, BM_eigen_i1e_float/1M drops from 8238681 ns to 2777336 ns per iteration, a roughly 66% reduction (about 3x faster). I've also benchmarked without any of these flags turned on and got similar performance to before (if not better). Also tested packetmath.cpp + special_functions to ensure no regressions.
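For context, chebevl is the cephes-style Chebyshev series evaluator used by i0e/i1e. Below is a minimal sketch of the shared scalar/vector formulation written against Eigen's generic packet API; the actual helper in GenericPacketMathFunctions is organized somewhat differently, and the name and signature here are illustrative only:

template <typename Packet, int N>
Packet chebevl(const Packet& x, const typename unpacket_traits<Packet>::type (&coef)[N]) {
  typedef typename unpacket_traits<Packet>::type Scalar;
  // Clenshaw recurrence: b0 accumulates the series, b2 lags two steps behind.
  Packet b0 = pset1<Packet>(coef[0]);
  Packet b1 = pset1<Packet>(Scalar(0));
  Packet b2 = b1;
  for (int i = 1; i < N; i++) {
    b2 = b1;
    b1 = b0;
    b0 = padd(psub(pmul(x, b1), b2), pset1<Packet>(coef[i]));  // b0 = x*b1 - b2 + coef[i]
  }
  return pmul(pset1<Packet>(Scalar(0.5)), psub(b0, b2));
}

Because the body uses only pset1/padd/psub/pmul, instantiating it with Packet = float or Packet = Packet4f compiles to the scalar and SSE paths from a single source, which is the point of the refactor.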
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#if !defined(EIGEN_VECTORIZE_AVX) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
// 32 bits => 8 registers
// 64 bits => 16 registers
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1
#endif
#endif

#if ((defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)) || EIGEN_OS_QNX
// With GCC's default ABI version, __m128 and __m256 are the same type, and therefore we cannot
// have overloads for both types without a linking error.
// One solution is to increase the ABI version using -fabi-version=4 (or greater).
// Otherwise, we work around this inconvenience by wrapping 128-bit types in the following helper
// structure:
template<typename T>
struct eigen_packet_wrapper
{
  EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
  EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
  EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
    m_val = v;
    return *this;
  }

  T m_val;
};
typedef eigen_packet_wrapper<__m128>  Packet4f;
typedef eigen_packet_wrapper<__m128i> Packet4i;
typedef eigen_packet_wrapper<__m128d> Packet2d;
#else
typedef __m128  Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;
#endif

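// Illustrative only (hypothetical declarations, not part of Eigen's API):
// under the affected GCC ABI the two overloads below mangle identically and
// fail to link, which is exactly what the wrapper above prevents by giving
// each packet type a distinct C++ identity:
//   void f(__m128 x);  // 128-bit packet
//   void f(__m256 x);  // 256-bit packet -- same mangled name under the old ABI
// With the wrapper, f(eigen_packet_wrapper<__m128>) and
// f(eigen_packet_wrapper<__m256>) are distinct overloads.
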
template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define EIGEN_SSE_SHUFFLE_MASK(p,q,r,s) ((s)<<6|(r)<<4|(q)<<2|(p))

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), EIGEN_SSE_SHUFFLE_MASK(2*p,2*p+1,2*q,2*q+1))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))))

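// Worked example (annotation only): EIGEN_SSE_SHUFFLE_MASK packs four 2-bit
// lane indices, lowest lane in the lowest bits, where lane 0 is the
// lowest-addressed element. EIGEN_SSE_SHUFFLE_MASK(0,1,2,3)
// = 3<<6 | 2<<4 | 1<<2 | 0 = 0xE4 is the identity shuffle, while
// vec4f_swizzle1(v,3,2,1,0) uses mask 0x1B and reverses the four lanes
// (the same constant preverse uses below).
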
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  const Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = pset1frombits<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)


// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
// to leverage AVX instructions.
#ifndef EIGEN_VECTORIZE_AVX
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasNdtri = 1,
    HasExp = 1,
    HasI0e = 1,
    HasI1e = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasBlend = 1,
    HasFloor = 1

#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRound = 1,
    HasCeil = 1
#endif
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 2,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasBlend = 1

#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRound = 1,
    HasFloor = 1,
    HasCeil = 1
#endif
  };
};
#endif
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,

    HasBlend = 1
  };
};

template<> struct unpacket_traits<Packet4f> {
  typedef float    type;
  typedef Packet4f half;
  typedef Packet4i integer_packet;
  enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
template<> struct unpacket_traits<Packet2d> {
  typedef double   type;
  typedef Packet2d half;
  enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
template<> struct unpacket_traits<Packet4i> {
  typedef int      type;
  typedef Packet4i half;
  enum {size=4, alignment=Aligned16, vectorizable=false, masked_load_available=false, masked_store_available=false};
};

#ifndef EIGEN_VECTORIZE_AVX
template<> struct scalar_div_cost<float,true>  { enum { value = 7 }; };
template<> struct scalar_div_cost<double,true> { enum { value = 8 }; };
#endif

#if EIGEN_COMP_MSVC==1500
// Workaround MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32-bit+SSE mode.
// TODO: check whether a better fix exists, like adding a pset0() function. (It crashed on pset1(0).)
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps1(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }

template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }

// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203).
// Using inline assembly is also not an option because then gcc fails to properly reorder the instructions.
// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
// Also note that with AVX, we want it to generate a vbroadcastss.
#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
}
#endif

template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float&  a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int&    a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
}

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}

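// How the SSE2 fallback above works, step by step (annotation only):
// _mm_mul_epu32(a,b) multiplies lanes 0 and 2, producing two 64-bit products
// whose low 32 bits are the desired results for lanes 0 and 2. Swizzling a
// and b by (1,0,3,2) moves lanes 1 and 3 into the even slots so a second
// _mm_mul_epu32 covers them. vec4i_swizzle2(...,0,2,0,2) then collects the
// four low 32-bit halves, and the final swizzle (0,2,1,3) restores the
// original lane order. The low 32 bits of a product are the same for signed
// and unsigned operands, so the unsigned multiply is correct for int.
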
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }

// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif

#ifdef EIGEN_VECTORIZE_SSE4_1
template<> EIGEN_DEVICE_FUNC inline Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) { return _mm_blendv_ps(b,a,mask); }

template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); }
#endif

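// pselect expects a full-lane mask (all bits set in a lane selects from `a`),
// which is exactly what the pcmp_* functions below produce; _mm_blendv_ps
// keys off each lane's sign bit, so any all-ones comparison mask works.
// A hypothetical clamp-to-zero, for illustration only:
//   pselect(pcmp_le(x, pzero(x)), pzero(x), x)
// Without SSE4.1 the generic bitwise pselect (and/andnot/or) is used instead.
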
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_min_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet4f res;
  asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet4f res = b;
  asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::min.
  return _mm_min_ps(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_min_pd, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet2d res;
  asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet2d res = b;
  asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::min.
  return _mm_min_pd(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after some bench, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

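// Note on the reversed arguments (annotation only): minps/minpd return their
// second operand when either input is NaN, so _mm_min_ps(b, a) yields `a` in
// that case -- matching std::min(a, b), which returns its first argument
// whenever the comparison against NaN is false. Concretely, a NaN in `a`
// propagates, while a NaN in `b` is ignored, on both the scalar and
// vectorized paths. The same reasoning applies to pmax below.
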
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_max_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet4f res;
  asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet4f res = b;
  asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::max.
  return _mm_max_ps(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_max_pd, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet2d res;
  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet2d res = b;
  asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::max.
  return _mm_max_pd(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after some bench, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return _mm_cmpeq_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return _mm_cmpeq_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return _mm_cmpnge_ps(a,b); }

template<> EIGEN_STRONG_INLINE Packet4i ptrue<Packet4i>(const Packet4i& a) { return _mm_cmpeq_epi32(a, a); }
template<> EIGEN_STRONG_INLINE Packet4f
ptrue<Packet4f>(const Packet4f& a) {
  Packet4i b = _mm_castps_si128(a);
  return _mm_castsi128_ps(_mm_cmpeq_epi32(b, b));
}
template<> EIGEN_STRONG_INLINE Packet2d
ptrue<Packet2d>(const Packet2d& a) {
  Packet4i b = _mm_castpd_si128(a);
  return _mm_castsi128_pd(_mm_cmpeq_epi32(b, b));
}

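// The float/double ptrue go through an integer compare because
// _mm_cmpeq_ps(a,a) would produce zero bits for NaN lanes, whereas
// _mm_cmpeq_epi32(b,b) is all-ones regardless of the bit pattern.
// (Annotation only.)
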
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); }

template<int N> EIGEN_STRONG_INLINE Packet4i pshiftright(Packet4i a) { return _mm_srli_epi32(a,N); }
template<int N> EIGEN_STRONG_INLINE Packet4i pshiftleft(Packet4i a) { return _mm_slli_epi32(a,N); }

#ifdef EIGEN_VECTORIZE_SSE4_1
template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }
template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }

template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
{
  const Packet4f cst_1 = pset1<Packet4f>(1.0f);
  Packet4i emm0 = _mm_cvttps_epi32(a);
  Packet4f tmp  = _mm_cvtepi32_ps(emm0);
  /* if greater, subtract 1 */
  Packet4f mask = _mm_cmpgt_ps(tmp, a);
  mask = pand(mask, cst_1);
  return psub(tmp, mask);
}

// WARNING: this pfloor implementation makes sense for small inputs only,
// It is currently only used by pexp and not exposed through HasFloor.
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a)
{
  const Packet2d cst_1 = pset1<Packet2d>(1.0);
  Packet4i emm0 = _mm_cvttpd_epi32(a);
  Packet2d tmp  = _mm_cvtepi32_pd(emm0);
  /* if greater, subtract 1 */
  Packet2d mask = _mm_cmpgt_pd(tmp, a);
  mask = pand(mask, cst_1);
  return psub(tmp, mask);
}
#endif

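// Why the SSE2 pfloor fallback is range-limited (annotation only): it floors
// by truncating toward zero with cvttps/cvttpd and subtracting 1 whenever
// truncation rounded up. Worked example: a = -2.5 -> trunc = -2, and
// -2 > -2.5, so the mask is all-ones and 1 is subtracted, giving -3.
// The conversion goes through 32-bit integers, so inputs outside int range
// are not handled correctly -- hence the warning restricting the Packet2d
// version to small inputs.
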
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*    from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }

#if EIGEN_COMP_MSVC
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
  EIGEN_DEBUG_UNALIGNED_LOAD
  #if (EIGEN_COMP_MSVC==1600)
  // NOTE: Some versions of MSVC10 generate bad code when using _mm_loadu_ps
  // (i.e., they do not generate an unaligned load!)
  __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
  res = _mm_loadh_pi(res, (const __m64*)(from+2));
  return res;
  #else
  return _mm_loadu_ps(from);
  #endif
}
#else
// NOTE: with the code below, MSVC's compiler crashes!

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_ps(from);
}
#endif

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_pd(from);
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}


template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  return _mm_set_pd(from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
  return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = _mm_cvtss_f32(from);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = _mm_cvtsd_f64(from);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
  to[stride*0] = _mm_cvtsi128_si32(from);
  to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
  to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
  to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
}

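// Usage sketch (hypothetical, not part of this file): strided access lets a
// column of a row-major matrix be packed and written back without copies.
//   // float mat[rows][cols]; read the top of column j, scale it, store it back:
//   Packet4f col = pgather<float, Packet4f>(&mat[0][j], cols);
//   pscatter<float, Packet4f>(&mat[0][j], pmul(col, pset1<Packet4f>(2.0f)), cols);
// Both helpers are built from scalar set/extract instructions above, since
// SSE has no native gather/scatter.
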
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}

#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
typedef const void * SsePrefetchPtrType;
#else
typedef const char * SsePrefetchPtrType;
#endif

#ifndef EIGEN_VECTORIZE_AVX
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif

#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif EIGEN_COMP_MSVC_STRICT
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
#else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
  return pfrexp_float(a,exponent);
}

template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
  return pldexp_float(a,exponent);
}

template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
  const Packet4i cst_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
  Packet4i emm0 = _mm_cvttpd_epi32(exponent);
  emm0 = padd(emm0, cst_1023_0);
  emm0 = _mm_slli_epi32(emm0, 20);
  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
  return pmul(a, Packet2d(_mm_castsi128_pd(emm0)));
}

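// How pldexp<Packet2d> builds 2^exponent (annotation only): an IEEE double
// stores its biased exponent in bits 62..52, i.e. bits 30..20 of the high
// 32-bit word. Adding the bias 1023 and shifting left by 20 therefore turns
// each truncated exponent into the high word of the double 2^e with a zero
// mantissa, and the shuffle moves the two shifted words into the high-dword
// slot of each 64-bit lane (with zero low words). Worked example: e = 3 ->
// 1026 << 20 = 0x40200000 in the high word, i.e. the double
// 0x4020000000000000 = 8.0; multiplying by it applies the scaling.
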
// with AVX, the default implementations based on pload1 are faster
#ifndef __AVX__
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4f>(const float *a,
                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
{
  a3 = pload<Packet4f>(a);
  a0 = vec4f_swizzle1(a3, 0,0,0,0);
  a1 = vec4f_swizzle1(a3, 1,1,1,1);
  a2 = vec4f_swizzle1(a3, 2,2,2,2);
  a3 = vec4f_swizzle1(a3, 3,3,3,3);
}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
#ifdef EIGEN_VECTORIZE_SSE3
  a0 = _mm_loaddup_pd(a+0);
  a1 = _mm_loaddup_pd(a+1);
  a2 = _mm_loaddup_pd(a+2);
  a3 = _mm_loaddup_pd(a+3);
#else
  a1 = pload<Packet2d>(a);
  a0 = vec2d_swizzle1(a1, 0,0);
  a1 = vec2d_swizzle1(a1, 1,1);
  a3 = pload<Packet2d>(a+2);
  a2 = vec2d_swizzle1(a3, 0,0);
  a3 = vec2d_swizzle1(a3, 1,1);
#endif
}
#endif

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}

#ifdef EIGEN_VECTORIZE_SSE3
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}

#else
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif // SSE3

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  // Disable SSE3 _mm_hadd_ps, which is extremely slow on all existing Intel architectures
  // (from Nehalem to Haswell)
// #ifdef EIGEN_VECTORIZE_SSE3
//   Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
//   return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
// #else
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
// #endif
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  // Disable SSE3 _mm_hadd_pd, which is extremely slow on all existing Intel architectures
  // (from Nehalem to Haswell)
// #ifdef EIGEN_VECTORIZE_SSE3
//   return pfirst<Packet2d>(_mm_hadd_pd(a, a));
// #else
  return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
// #endif
}

#ifdef EIGEN_VECTORIZE_SSSE3
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp0 = _mm_hadd_epi32(a,a);
  return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
}
#else
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}
#endif
// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

// not needed yet
// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
// {
//   return _mm_movemask_ps(x) == 0xF;
// }

template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
{
  return _mm_movemask_ps(x) != 0x0;
}

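// How predux_any works (annotation only): _mm_movemask_ps packs the sign bit
// of each lane into the low 4 bits of a general-purpose register, and the
// pcmp_* masks are all-ones in "true" lanes, so a nonzero movemask means at
// least one lane matched. Hypothetical usage, a "contains zero" test:
//   bool has_zero = predux_any(pcmp_eq(x, pzero(x)));
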
#if EIGEN_COMP_GNUC
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif

#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif

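// Worked example (annotation only): palign_impl<Offset>::run shifts the
// concatenation [second:first] right by Offset elements. For Packet4f with
// first = {f0,f1,f2,f3}, second = {s0,s1,s2,s3} and Offset == 1, the result
// is {f1,f2,f3,s0}. On SSSE3 this is a single _mm_alignr_epi8 with a byte
// offset of Offset*4; the SSE2 path assembles the same lanes from moves and
// shuffles (e.g. for Offset==1: move_ss gives {s0,f1,f2,f3}, then the 0x39
// rotate yields {f1,f2,f3,s0}).
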
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[1] = tmp;
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
  __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T3 = _mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = _mm_unpacklo_epi64(T0, T1);
  kernel.packet[1] = _mm_unpackhi_epi64(T0, T1);
  kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);
  kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
}

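// Semantics (annotation only): ptranspose transposes the PacketBlock in
// place, so if kernel.packet[i] holds row i of a 4x4 (or 2x2) tile before
// the call, it holds column i afterwards. The Packet4i version is the
// classic two-stage shuffle: the epi32 unpacks interleave 32-bit lanes of
// row pairs, then the epi64 unpacks recombine the resulting 64-bit halves.
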
template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128i false_mask = _mm_cmpeq_epi32(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
  const __m128 zero = _mm_setzero_ps();
  const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128 false_mask = _mm_cmpeq_ps(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
  const __m128d zero = _mm_setzero_pd();
  const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
  __m128d false_mask = _mm_cmpeq_pd(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pinsertfirst(const Packet4f& a, float b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_ps(a,pset1<Packet4f>(b),1);
#else
  return _mm_move_ss(a, _mm_load_ss(&b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet2d pinsertfirst(const Packet2d& a, double b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_pd(a,pset1<Packet2d>(b),1);
#else
  return _mm_move_sd(a, _mm_load_sd(&b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pinsertlast(const Packet4f& a, float b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_ps(a,pset1<Packet4f>(b),(1<<3));
#else
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x0,0x0,0x0,0xFFFFFFFF));
  return _mm_or_ps(_mm_andnot_ps(mask, a), _mm_and_ps(mask, pset1<Packet4f>(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet2d pinsertlast(const Packet2d& a, double b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_pd(a,pset1<Packet2d>(b),(1<<1));
#else
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF));
  return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1<Packet2d>(b)));
#endif
}

// Scalar path for pmadd with FMA to ensure consistency with vectorized path.
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
  return ::fmaf(a,b,c);
}
template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
  return ::fma(a,b,c);
}
#endif


// Packet math for Eigen::half
// Disable the following code since it's broken on too many platforms / compilers.
//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
#if 0

typedef struct {
  __m64 x;
} Packet4h;


template<> struct is_arithmetic<Packet4h> { enum { value = true }; };

template <>
struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet4h type;
  // There is no half-size packet for Packet4h.
  typedef Packet4h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasNegate = 0,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 0,
    HasMax = 0,
    HasConj = 0,
    HasSetLinear = 0,
    HasSqrt = 0,
    HasRsqrt = 0,
    HasExp = 0,
    HasLog = 0,
    HasBlend = 0
  };
};


template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h half; };

template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
  Packet4h result;
  result.x = _mm_set1_pi16(from.x);
  return result;
}

template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
}

template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha + hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha + hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha + hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha + hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h psub<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha - hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha - hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha - hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha - hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha * hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha * hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha * hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha * hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h pdiv<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha / hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha / hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha / hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha / hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
  Packet4h result;
  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
  Packet4h result;
  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
  __int64_t r = _mm_cvtm64_si64(from.x);
  *(reinterpret_cast<__int64_t*>(to)) = r;
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
  __int64_t r = _mm_cvtm64_si64(from.x);
  *(reinterpret_cast<__int64_t*>(to)) = r;
}

template<> EIGEN_STRONG_INLINE Packet4h
ploadquad<Packet4h>(const Eigen::half* from) {
  return pset1<Packet4h>(*from);
}

template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
{
  Packet4h result;
  result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
  return result;
}

template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
{
  __int64_t a = _mm_cvtm64_si64(from.x);
  to[stride*0].x = static_cast<unsigned short>(a);
  to[stride*1].x = static_cast<unsigned short>(a >> 16);
  to[stride*2].x = static_cast<unsigned short>(a >> 32);
  to[stride*3].x = static_cast<unsigned short>(a >> 48);
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet4h,4>& kernel) {
  __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
  __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
  __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
  __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);

  kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
  kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
  kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
  kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
}

#endif


} // end namespace internal

} // end namespace Eigen

#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
// PGI++ does not define the following intrinsics in C++ mode.
static inline __m128  _mm_castpd_ps   (__m128d x) { return reinterpret_cast<__m128&>(x);  }
static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); }
static inline __m128d _mm_castps_pd   (__m128  x) { return reinterpret_cast<__m128d&>(x); }
static inline __m128i _mm_castps_si128(__m128  x) { return reinterpret_cast<__m128i&>(x); }
static inline __m128  _mm_castsi128_ps(__m128i x) { return reinterpret_cast<__m128&>(x);  }
static inline __m128d _mm_castsi128_pd(__m128i x) { return reinterpret_cast<__m128d&>(x); }
#endif

#endif // EIGEN_PACKET_MATH_SSE_H