Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-10-10 23:21:29 +08:00)

commit 99036a3615: Merging from eigen/eigen.

Eigen/Core (10 lines changed)
@@ -160,6 +160,9 @@ using std::ptrdiff_t;
#include "src/Core/GenericPacketMath.h"
#include "src/Core/MathFunctionsImpl.h"
#include "src/Core/arch/Default/ConjHelper.h"
// Generic half float support
#include "src/Core/arch/Default/Half.h"
#include "src/Core/arch/Default/TypeCasting.h"

#if defined EIGEN_VECTORIZE_AVX512
#include "src/Core/arch/SSE/PacketMath.h"
@@ -169,6 +172,7 @@ using std::ptrdiff_t;
#include "src/Core/arch/AVX/TypeCasting.h"
#include "src/Core/arch/AVX/Complex.h"
#include "src/Core/arch/AVX512/PacketMath.h"
#include "src/Core/arch/AVX512/TypeCasting.h"
#include "src/Core/arch/AVX512/Complex.h"
#include "src/Core/arch/SSE/MathFunctions.h"
#include "src/Core/arch/AVX/MathFunctions.h"
@@ -207,14 +211,10 @@ using std::ptrdiff_t;
#include "src/Core/arch/MSA/Complex.h"
#endif

// Half float support
#include "src/Core/arch/GPU/Half.h"
#include "src/Core/arch/GPU/PacketMathHalf.h"
#include "src/Core/arch/GPU/TypeCasting.h"

#if defined EIGEN_VECTORIZE_GPU
#include "src/Core/arch/GPU/PacketMath.h"
#include "src/Core/arch/GPU/MathFunctions.h"
#include "src/Core/arch/GPU/TypeCasting.h"
#endif

#if defined(EIGEN_USE_SYCL)
@@ -501,7 +501,8 @@ namespace std_fallback {
  }

  EIGEN_USING_STD_MATH(log);
  return (u - RealScalar(1)) * x / log(u);
  Scalar logu = log(u);
  return numext::equal_strict(u, logu) ? u : (u - RealScalar(1)) * x / logu;
  }
}

@@ -548,7 +549,10 @@ namespace std_fallback {
  typedef typename NumTraits<Scalar>::Real RealScalar;
  EIGEN_USING_STD_MATH(log);
  Scalar x1p = RealScalar(1) + x;
  return numext::equal_strict(x1p, Scalar(1)) ? x : x * ( log(x1p) / (x1p - RealScalar(1)) );
  Scalar log_1p = log(x1p);
  const bool is_small = numext::equal_strict(x1p, Scalar(1));
  const bool is_inf = numext::equal_strict(x1p, log_1p);
  return (is_small || is_inf) ? x : x * (log_1p / (x1p - RealScalar(1)));
  }
}

@@ -36,6 +36,16 @@ plog<Packet8f>(const Packet8f& _x) {
  return plog_float(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f plog1p<Packet8f>(const Packet8f& _x) {
  return generic_plog1p(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f pexpm1<Packet8f>(const Packet8f& _x) {
  return generic_expm1(_x);
}

// Exponential function. Works by writing "x = m*log(2) + r" where
// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1).
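As an aside, the range-reduction idea described in the comment above can be illustrated with a small scalar sketch. This is illustrative only and not part of the commit; the real packet kernels approximate exp(r) with a short polynomial and build 2^m by exponent-field manipulation, whereas the sketch leans on std::exp and std::ldexp.

```cpp
#include <cmath>

// Scalar illustration of "x = m*log(2) + r", so exp(x) = 2^m * exp(r),
// with |r| <= log(2)/2 where exp(r) is cheap to approximate.
double exp_by_range_reduction(double x) {
  const double ln2 = 0.6931471805599453;
  double m = std::floor(x / ln2 + 0.5);                 // m = floor(x/log(2) + 1/2)
  double r = x - m * ln2;                               // remainder, |r| <= log(2)/2
  return std::ldexp(std::exp(r), static_cast<int>(m));  // 2^m * exp(r)
}
```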
@@ -31,10 +31,14 @@ namespace internal {
typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;
typedef struct {
  __m128i x;
} Packet8h;

template<> struct is_arithmetic<__m256>  { enum { value = true }; };
template<> struct is_arithmetic<__m256i> { enum { value = true }; };
template<> struct is_arithmetic<__m256d> { enum { value = true }; };
template<> struct is_arithmetic<Packet8h> { enum { value = true }; };

#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)
@@ -65,6 +69,8 @@ template<> struct packet_traits<float> : default_packet_traits
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasNdtri = 1,
    HasSqrt = 1,
@@ -96,6 +102,35 @@ template<> struct packet_traits<double> : default_packet_traits
    HasCeil = 1
  };
};

template <>
struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet8h type;
  // There is no half-size packet for Packet8h.
  typedef Packet8h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 8,
    HasHalfPacket = 0,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasNegate = 1,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 0,
    HasMax = 0,
    HasConj = 0,
    HasSetLinear = 0,
    HasSqrt = 0,
    HasRsqrt = 0,
    HasExp = 0,
    HasLog = 0,
    HasBlend = 0
  };
};
#endif

template<> struct scalar_div_cost<float,true> { enum { value = 14 }; };
@@ -846,6 +881,335 @@ template<> EIGEN_STRONG_INLINE Packet4d pinsertlast(const Packet4d& a, double b)
  return _mm256_blend_pd(a,pset1<Packet4d>(b),(1<<3));
}

// Packet math for Eigen::half
template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet8h half; };

template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
  Packet8h result;
  result.x = _mm_set1_epi16(from.x);
  return result;
}

template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_extract_epi16(from.x, 0)));
}

template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
  Packet8h result;
  result.x = _mm_load_si128(reinterpret_cast<const __m128i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
  Packet8h result;
  result.x = _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_store_si128(reinterpret_cast<__m128i*>(to), from.x);
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from.x);
}

template<> EIGEN_STRONG_INLINE Packet8h
ploaddup<Packet8h>(const Eigen::half* from) {
  Packet8h result;
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  unsigned short c = from[2].x;
  unsigned short d = from[3].x;
  result.x = _mm_set_epi16(d, d, c, c, b, b, a, a);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet8h
ploadquad<Packet8h>(const Eigen::half* from) {
  Packet8h result;
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  result.x = _mm_set_epi16(b, b, b, b, a, a, a, a);
  return result;
}

EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm256_cvtph_ps(a.x);
#else
  EIGEN_ALIGN32 Eigen::half aux[8];
  pstore(aux, a);
  float f0(aux[0]);
  float f1(aux[1]);
  float f2(aux[2]);
  float f3(aux[3]);
  float f4(aux[4]);
  float f5(aux[5]);
  float f6(aux[6]);
  float f7(aux[7]);

  return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
#endif
}

EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
#ifdef EIGEN_HAS_FP16_C
  Packet8h result;
  result.x = _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
  return result;
#else
  EIGEN_ALIGN32 float aux[8];
  pstore(aux, a);
  Eigen::half h0(aux[0]);
  Eigen::half h1(aux[1]);
  Eigen::half h2(aux[2]);
  Eigen::half h3(aux[3]);
  Eigen::half h4(aux[4]);
  Eigen::half h5(aux[5]);
  Eigen::half h6(aux[6]);
  Eigen::half h7(aux[7]);

  Packet8h result;
  result.x = _mm_set_epi16(h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
  return result;
#endif
}

template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
  Packet8h r; r.x = _mm_cmpeq_epi32(a.x, a.x); return r;
}

template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a,const Packet8h& b) {
  // in some cases Packet4i is a wrapper around __m128i, so we either need to
  // cast to Packet4i to directly call the intrinsics as below:
  Packet8h r; r.x = _mm_or_si128(a.x,b.x); return r;
}
template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a,const Packet8h& b) {
  Packet8h r; r.x = _mm_xor_si128(a.x,b.x); return r;
}
template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a,const Packet8h& b) {
  Packet8h r; r.x = _mm_and_si128(a.x,b.x); return r;
}
template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a,const Packet8h& b) {
  Packet8h r; r.x = _mm_andnot_si128(b.x,a.x); return r;
}

template<> EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
  Packet8h r; r.x = _mm_blendv_epi8(b.x, a.x, mask.x); return r;
}

template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a,const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pcmp_eq(af, bf);
  // Pack the 32-bit flags into 16-bits flags.
  Packet8h result; result.x = _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
                                              _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
  Packet8h sign_mask; sign_mask.x = _mm_set1_epi16(static_cast<unsigned short>(0x8000));
  Packet8h result; result.x = _mm_xor_si128(a.x, sign_mask.x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = padd(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = psub(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pmul(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pdiv(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
{
  Packet8h result;
  result.x = _mm_set_epi16(from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x, from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
  return result;
}

template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
{
  EIGEN_ALIGN32 Eigen::half aux[8];
  pstore(aux, from);
  to[stride*0].x = aux[0].x;
  to[stride*1].x = aux[1].x;
  to[stride*2].x = aux[2].x;
  to[stride*3].x = aux[3].x;
  to[stride*4].x = aux[4].x;
  to[stride*5].x = aux[5].x;
  to[stride*6].x = aux[6].x;
  to[stride*7].x = aux[7].x;
}

template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_max<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_min<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
  Packet8f af = half2float(a);
  float reduced = predux_mul<Packet8f>(af);
  return Eigen::half(reduced);
}

template<> EIGEN_STRONG_INLINE Packet8h preduxp<Packet8h>(const Packet8h* p) {
  Packet8f pf[8];
  pf[0] = half2float(p[0]);
  pf[1] = half2float(p[1]);
  pf[2] = half2float(p[2]);
  pf[3] = half2float(p[3]);
  pf[4] = half2float(p[4]);
  pf[5] = half2float(p[5]);
  pf[6] = half2float(p[6]);
  pf[7] = half2float(p[7]);
  Packet8f reduced = preduxp<Packet8f>(pf);
  return float2half(reduced);
}

template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a)
{
  __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
  Packet8h res;
  res.x = _mm_shuffle_epi8(a.x,m);
  return res;
}

template<> EIGEN_STRONG_INLINE Packet8h pinsertfirst(const Packet8h& a, Eigen::half b)
{
  Packet8h res;
  res.x = _mm_insert_epi16(a.x,int(b.x),0);
  return res;
}

template<> EIGEN_STRONG_INLINE Packet8h pinsertlast(const Packet8h& a, Eigen::half b)
{
  Packet8h res;
  res.x = _mm_insert_epi16(a.x,int(b.x),7);
  return res;
}

template<int Offset>
struct palign_impl<Offset,Packet8h>
{
  static EIGEN_STRONG_INLINE void run(Packet8h& first, const Packet8h& second)
  {
    if (Offset!=0)
      first.x = _mm_alignr_epi8(second.x,first.x, Offset*2);
  }
};

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,8>& kernel) {
  __m128i a = kernel.packet[0].x;
  __m128i b = kernel.packet[1].x;
  __m128i c = kernel.packet[2].x;
  __m128i d = kernel.packet[3].x;
  __m128i e = kernel.packet[4].x;
  __m128i f = kernel.packet[5].x;
  __m128i g = kernel.packet[6].x;
  __m128i h = kernel.packet[7].x;

  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
  __m128i g47h47 = _mm_unpackhi_epi16(g, h);

  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);

  __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
  __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);

  kernel.packet[0].x = a0b0c0d0e0f0g0h0;
  kernel.packet[1].x = a1b1c1d1e1f1g1h1;
  kernel.packet[2].x = a2b2c2d2e2f2g2h2;
  kernel.packet[3].x = a3b3c3d3e3f3g3h3;
  kernel.packet[4].x = a4b4c4d4e4f4g4h4;
  kernel.packet[5].x = a5b5c5d5e5f5g5h5;
  kernel.packet[6].x = a6b6c6d6e6f6g6h6;
  kernel.packet[7].x = a7b7c7d7e7f7g7h7;
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,4>& kernel) {
  EIGEN_ALIGN32 Eigen::half in[4][8];
  pstore<Eigen::half>(in[0], kernel.packet[0]);
  pstore<Eigen::half>(in[1], kernel.packet[1]);
  pstore<Eigen::half>(in[2], kernel.packet[2]);
  pstore<Eigen::half>(in[3], kernel.packet[3]);

  EIGEN_ALIGN32 Eigen::half out[4][8];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][2*i+1];
    }
  }

  kernel.packet[0] = pload<Packet8h>(out[0]);
  kernel.packet[1] = pload<Packet8h>(out[1]);
  kernel.packet[2] = pload<Packet8h>(out[2]);
  kernel.packet[3] = pload<Packet8h>(out[3]);
}

} // end namespace internal

} // end namespace Eigen

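For orientation, a minimal usage sketch of the new Packet8h wrappers follows. It is illustrative only (not part of the commit) and assumes an AVX build in which the definitions above are active; note that every Packet8h arithmetic op round-trips through Packet8f via half2float/float2half.

```cpp
#include <Eigen/Core>

// Adds two arrays of 8 half-precision values through the internal packet API.
// Arithmetic is performed in float and rounded back to half on store.
void add8(const Eigen::half* a, const Eigen::half* b, Eigen::half* out) {
  using namespace Eigen::internal;
  Packet8h pa = ploadu<Packet8h>(a);   // unaligned 128-bit load of 8 halves
  Packet8h pb = ploadu<Packet8h>(b);
  pstoreu(out, padd(pa, pb));          // converts to Packet8f, adds, converts back
}
```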
@@ -52,6 +52,36 @@ template<> EIGEN_STRONG_INLINE Packet8f preinterpret<Packet8f,Packet8i>(const Pa
  return _mm256_castsi256_ps(a);
}

#ifndef EIGEN_VECTORIZE_AVX512

template <>
struct type_casting_traits<Eigen::half, float> {
  enum {
    VectorizedCast = 1,
    SrcCoeffRatio = 1,
    TgtCoeffRatio = 1
  };
};

template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) {
  return half2float(a);
}

template <>
struct type_casting_traits<float, Eigen::half> {
  enum {
    VectorizedCast = 1,
    SrcCoeffRatio = 1,
    TgtCoeffRatio = 1
  };
};

#endif  // EIGEN_VECTORIZE_AVX512

template<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) {
  return float2half(a);
}

} // end namespace internal

} // end namespace Eigen

@@ -393,6 +393,18 @@ pcos<Packet16f>(const Packet16f& _x) {
  return pcos_float(_x);
}

#if defined(EIGEN_VECTORIZE_AVX512DQ)
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet16f plog1p<Packet16f>(const Packet16f& _x) {
  return generic_plog1p(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet16f pexpm1<Packet16f>(const Packet16f& _x) {
  return generic_expm1(_x);
}
#endif

} // end namespace internal

} // end namespace Eigen

@@ -44,6 +44,41 @@ template <>
struct is_arithmetic<__m512d> {
  enum { value = true };
};
typedef struct {
  __m256i x;
} Packet16h;

template<> struct is_arithmetic<Packet16h> { enum { value = true }; };

template <>
struct packet_traits<half> : default_packet_traits {
  typedef Packet16h type;
  // There is no half-size packet for Packet16h.
  typedef Packet16h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 16,
    HasHalfPacket = 0,
    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasNegate = 1,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 0,
    HasMax = 0,
    HasConj = 0,
    HasSetLinear = 0,
    HasSqrt = 0,
    HasRsqrt = 0,
    HasExp = 0,
    HasLog = 0,
    HasBlend = 0
  };
};

template<> struct packet_traits<float> : default_packet_traits
{
@@ -121,6 +156,13 @@ struct unpacket_traits<Packet16i> {
  enum { size = 16, alignment=Aligned64, vectorizable=false, masked_load_available=false, masked_store_available=false };
};

template<>
struct unpacket_traits<Packet16h> {
  typedef Eigen::half type;
  typedef Packet16h half;
  enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
};

template <>
EIGEN_STRONG_INLINE Packet16f pset1<Packet16f>(const float& from) {
  return _mm512_set1_ps(from);
@@ -1398,6 +1440,467 @@ template<> EIGEN_STRONG_INLINE Packet16f preinterpret<Packet16f,Packet16i>(const
  return _mm512_castsi512_ps(a);
}

// Packet math for Eigen::half
template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
  Packet16h result;
  result.x = _mm256_set1_epi16(from.x);
  return result;
}

template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {
  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from.x, 0)));
}

template<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
  Packet16h result;
  result.x = _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
  Packet16h result;
  result.x = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
  // (void*) -> workaround clang warning:
  // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
  _mm256_store_si256((__m256i*)(void*)to, from.x);
}

template<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
  // (void*) -> workaround clang warning:
  // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
  _mm256_storeu_si256((__m256i*)(void*)to, from.x);
}

template<> EIGEN_STRONG_INLINE Packet16h
ploaddup<Packet16h>(const Eigen::half* from) {
  Packet16h result;
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  unsigned short c = from[2].x;
  unsigned short d = from[3].x;
  unsigned short e = from[4].x;
  unsigned short f = from[5].x;
  unsigned short g = from[6].x;
  unsigned short h = from[7].x;
  result.x = _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet16h
ploadquad(const Eigen::half* from) {
  Packet16h result;
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  unsigned short c = from[2].x;
  unsigned short d = from[3].x;
  result.x = _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
  return result;
}

EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm512_cvtph_ps(a.x);
#else
  EIGEN_ALIGN64 half aux[16];
  pstore(aux, a);
  float f0(aux[0]);
  float f1(aux[1]);
  float f2(aux[2]);
  float f3(aux[3]);
  float f4(aux[4]);
  float f5(aux[5]);
  float f6(aux[6]);
  float f7(aux[7]);
  float f8(aux[8]);
  float f9(aux[9]);
  float fa(aux[10]);
  float fb(aux[11]);
  float fc(aux[12]);
  float fd(aux[13]);
  float fe(aux[14]);
  float ff(aux[15]);

  return _mm512_set_ps(
      ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0);
#endif
}

EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
#ifdef EIGEN_HAS_FP16_C
  Packet16h result;
  result.x = _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
  return result;
#else
  EIGEN_ALIGN64 float aux[16];
  pstore(aux, a);
  half h0(aux[0]);
  half h1(aux[1]);
  half h2(aux[2]);
  half h3(aux[3]);
  half h4(aux[4]);
  half h5(aux[5]);
  half h6(aux[6]);
  half h7(aux[7]);
  half h8(aux[8]);
  half h9(aux[9]);
  half ha(aux[10]);
  half hb(aux[11]);
  half hc(aux[12]);
  half hd(aux[13]);
  half he(aux[14]);
  half hf(aux[15]);

  Packet16h result;
  result.x = _mm256_set_epi16(
      hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x,
      h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
  return result;
#endif
}

template<> EIGEN_STRONG_INLINE Packet16h pnot(const Packet16h& a) {
  Packet16h r; r.x = _mm256_xor_si256(a.x, pcmp_eq(a.x, a.x)); return r;
}

template<> EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) {
  Packet16h r; r.x = Packet8i(ptrue(a.x)); return r;
}

template<> EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a,const Packet16h& b) {
  // in some cases Packet8i is a wrapper around __m256i, so we need to
  // cast to Packet8i to call the correct overload.
  Packet16h r; r.x = por(Packet8i(a.x),Packet8i(b.x)); return r;
}
template<> EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a,const Packet16h& b) {
  Packet16h r; r.x = pxor(Packet8i(a.x),Packet8i(b.x)); return r;
}
template<> EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a,const Packet16h& b) {
  Packet16h r; r.x = pand(Packet8i(a.x),Packet8i(b.x)); return r;
}
template<> EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a,const Packet16h& b) {
  Packet16h r; r.x = pandnot(Packet8i(a.x),Packet8i(b.x)); return r;
}

template<> EIGEN_STRONG_INLINE Packet16h pselect(const Packet16h& mask, const Packet16h& a, const Packet16h& b) {
  Packet16h r; r.x = _mm256_blendv_epi8(b.x, a.x, mask.x); return r;
}

template<> EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a,const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = pcmp_eq(af, bf);
  // Pack the 32-bit flags into 16-bits flags.
  __m256i lo = _mm256_castps_si256(extract256<0>(rf));
  __m256i hi = _mm256_castps_si256(extract256<1>(rf));
  __m128i result_lo = _mm_packs_epi32(_mm256_extractf128_si256(lo, 0),
                                      _mm256_extractf128_si256(lo, 1));
  __m128i result_hi = _mm_packs_epi32(_mm256_extractf128_si256(hi, 0),
                                      _mm256_extractf128_si256(hi, 1));
  Packet16h result; result.x = _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, 1);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
  Packet16h sign_mask; sign_mask.x = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
  Packet16h result; result.x = _mm256_xor_si256(a.x, sign_mask.x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = padd(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet16h psub<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = psub(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = pmul(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet16h pdiv<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = pdiv(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
  Packet16f from_float = half2float(from);
  return half(predux(from_float));
}

template<> EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
  Packet16f from_float = half2float(from);
  return half(predux_mul(from_float));
}

template<> EIGEN_STRONG_INLINE Packet16h preduxp<Packet16h>(const Packet16h* p) {
  Packet16f pf[16];
  pf[0] = half2float(p[0]);
  pf[1] = half2float(p[1]);
  pf[2] = half2float(p[2]);
  pf[3] = half2float(p[3]);
  pf[4] = half2float(p[4]);
  pf[5] = half2float(p[5]);
  pf[6] = half2float(p[6]);
  pf[7] = half2float(p[7]);
  pf[8] = half2float(p[8]);
  pf[9] = half2float(p[9]);
  pf[10] = half2float(p[10]);
  pf[11] = half2float(p[11]);
  pf[12] = half2float(p[12]);
  pf[13] = half2float(p[13]);
  pf[14] = half2float(p[14]);
  pf[15] = half2float(p[15]);
  Packet16f reduced = preduxp<Packet16f>(pf);
  return float2half(reduced);
}

template<> EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a)
{
  __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
  Packet16h res;
  res.x = _mm256_insertf128_si256(
      _mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a.x,1),m)),
      _mm_shuffle_epi8(_mm256_extractf128_si256(a.x,0),m), 1);
  return res;
}

template<> EIGEN_STRONG_INLINE Packet16h pinsertfirst(const Packet16h& a, Eigen::half b)
{
  Packet16h res;
  res.x = _mm256_insert_epi16(a.x,b.x,0);
  return res;
}

template<> EIGEN_STRONG_INLINE Packet16h pinsertlast(const Packet16h& a, Eigen::half b)
{
  Packet16h res;
  res.x = _mm256_insert_epi16(a.x,b.x,15);
  return res;
}

template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)
{
  Packet16h result;
  result.x = _mm256_set_epi16(
      from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,
      from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,
      from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
      from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
  return result;
}

template<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride)
{
  EIGEN_ALIGN64 half aux[16];
  pstore(aux, from);
  to[stride*0].x = aux[0].x;
  to[stride*1].x = aux[1].x;
  to[stride*2].x = aux[2].x;
  to[stride*3].x = aux[3].x;
  to[stride*4].x = aux[4].x;
  to[stride*5].x = aux[5].x;
  to[stride*6].x = aux[6].x;
  to[stride*7].x = aux[7].x;
  to[stride*8].x = aux[8].x;
  to[stride*9].x = aux[9].x;
  to[stride*10].x = aux[10].x;
  to[stride*11].x = aux[11].x;
  to[stride*12].x = aux[12].x;
  to[stride*13].x = aux[13].x;
  to[stride*14].x = aux[14].x;
  to[stride*15].x = aux[15].x;
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet16h,16>& kernel) {
  __m256i a = kernel.packet[0].x;
  __m256i b = kernel.packet[1].x;
  __m256i c = kernel.packet[2].x;
  __m256i d = kernel.packet[3].x;
  __m256i e = kernel.packet[4].x;
  __m256i f = kernel.packet[5].x;
  __m256i g = kernel.packet[6].x;
  __m256i h = kernel.packet[7].x;
  __m256i i = kernel.packet[8].x;
  __m256i j = kernel.packet[9].x;
  __m256i k = kernel.packet[10].x;
  __m256i l = kernel.packet[11].x;
  __m256i m = kernel.packet[12].x;
  __m256i n = kernel.packet[13].x;
  __m256i o = kernel.packet[14].x;
  __m256i p = kernel.packet[15].x;

  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
  __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
  __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
  __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
  __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
  __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
  __m256i op_07 = _mm256_unpacklo_epi16(o, p);

  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
  __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
  __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
  __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
  __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
  __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
  __m256i op_8f = _mm256_unpackhi_epi16(o, p);

  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
  __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
  __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
  __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
  __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
  __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
  __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);

  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
  __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
  __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
  __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
  __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
  __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
  __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);

  __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
  __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
  __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
  __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
  __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
  __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
  __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
  __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
  __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
  __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
  __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
  __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
  __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
  __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
  __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
  __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);

  // NOTE: no unpacklo/hi instr in this case, so using permute instr.
  __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
  __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
  __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
  __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
  __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
  __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
  __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
  __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
  __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
  __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
  __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
  __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
  __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
  __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
  __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
  __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);

  kernel.packet[0].x = a_p_0;
  kernel.packet[1].x = a_p_1;
  kernel.packet[2].x = a_p_2;
  kernel.packet[3].x = a_p_3;
  kernel.packet[4].x = a_p_4;
  kernel.packet[5].x = a_p_5;
  kernel.packet[6].x = a_p_6;
  kernel.packet[7].x = a_p_7;
  kernel.packet[8].x = a_p_8;
  kernel.packet[9].x = a_p_9;
  kernel.packet[10].x = a_p_a;
  kernel.packet[11].x = a_p_b;
  kernel.packet[12].x = a_p_c;
  kernel.packet[13].x = a_p_d;
  kernel.packet[14].x = a_p_e;
  kernel.packet[15].x = a_p_f;
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet16h,8>& kernel) {
  EIGEN_ALIGN64 half in[8][16];
  pstore<half>(in[0], kernel.packet[0]);
  pstore<half>(in[1], kernel.packet[1]);
  pstore<half>(in[2], kernel.packet[2]);
  pstore<half>(in[3], kernel.packet[3]);
  pstore<half>(in[4], kernel.packet[4]);
  pstore<half>(in[5], kernel.packet[5]);
  pstore<half>(in[6], kernel.packet[6]);
  pstore<half>(in[7], kernel.packet[7]);

  EIGEN_ALIGN64 half out[8][16];

  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 8; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 8; ++j) {
      out[i][j+8] = in[j][2*i+1];
    }
  }

  kernel.packet[0] = pload<Packet16h>(out[0]);
  kernel.packet[1] = pload<Packet16h>(out[1]);
  kernel.packet[2] = pload<Packet16h>(out[2]);
  kernel.packet[3] = pload<Packet16h>(out[3]);
  kernel.packet[4] = pload<Packet16h>(out[4]);
  kernel.packet[5] = pload<Packet16h>(out[5]);
  kernel.packet[6] = pload<Packet16h>(out[6]);
  kernel.packet[7] = pload<Packet16h>(out[7]);
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet16h,4>& kernel) {
  EIGEN_ALIGN64 half in[4][16];
  pstore<half>(in[0], kernel.packet[0]);
  pstore<half>(in[1], kernel.packet[1]);
  pstore<half>(in[2], kernel.packet[2]);
  pstore<half>(in[3], kernel.packet[3]);

  EIGEN_ALIGN64 half out[4][16];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][4*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][4*i+1];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+8] = in[j][4*i+2];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+12] = in[j][4*i+3];
    }
  }

  kernel.packet[0] = pload<Packet16h>(out[0]);
  kernel.packet[1] = pload<Packet16h>(out[1]);
  kernel.packet[2] = pload<Packet16h>(out[2]);
  kernel.packet[3] = pload<Packet16h>(out[3]);
}

} // end namespace internal

} // end namespace Eigen

Eigen/src/Core/arch/AVX512/TypeCasting.h (new file, 47 lines)
@@ -0,0 +1,47 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2019 Rasmus Munk Larsen <rmlarsen@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_TYPE_CASTING_AVX512_H
#define EIGEN_TYPE_CASTING_AVX512_H

namespace Eigen {

namespace internal {

template <>
struct type_casting_traits<half, float> {
  enum {
    VectorizedCast = 1,
    SrcCoeffRatio = 1,
    TgtCoeffRatio = 1
  };
};

template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) {
  return half2float(a);
}

template <>
struct type_casting_traits<float, half> {
  enum {
    VectorizedCast = 1,
    SrcCoeffRatio = 1,
    TgtCoeffRatio = 1
  };
};

template<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) {
  return float2half(a);
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_TYPE_CASTING_AVX512_H
@@ -83,15 +83,6 @@ static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };
static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 };
static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 };

// Mask alignment
#ifdef __PPC64__
#define _EIGEN_MASK_ALIGNMENT 0xfffffffffffffff0
#else
#define _EIGEN_MASK_ALIGNMENT 0xfffffff0
#endif

#define _EIGEN_ALIGNED_PTR(x) ((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT)

// Handle endianness properly while loading constants
// Define global static constants:
#ifdef _BIG_ENDIAN
@@ -249,42 +240,38 @@ inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)
// Need to define them first or we get specialization after instantiation errors
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
{
  // some versions of GCC throw "unused-but-set-parameter".
  // ignoring these warnings for now.
  EIGEN_UNUSED_VARIABLE(from);
  EIGEN_DEBUG_ALIGNED_LOAD
#ifdef __VSX__
  return vec_vsx_ld(0, from);
#else
  return vec_ld(0, from);
#endif
}

template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from)
{
  // some versions of GCC throw "unused-but-set-parameter".
  // ignoring these warnings for now.
  EIGEN_UNUSED_VARIABLE(from);
  EIGEN_DEBUG_ALIGNED_LOAD
#ifdef __VSX__
  return vec_vsx_ld(0, from);
#else
  return vec_ld(0, from);
#endif
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
{
  // some versions of GCC throw "unused-but-set-parameter" (float *to).
  // ignoring these warnings for now.
  EIGEN_UNUSED_VARIABLE(to);
  EIGEN_DEBUG_ALIGNED_STORE
#ifdef __VSX__
  vec_vsx_st(from, 0, to);
#else
  vec_st(from, 0, to);
#endif
}

template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from)
{
  // some versions of GCC throw "unused-but-set-parameter" (float *to).
  // ignoring these warnings for now.
  EIGEN_UNUSED_VARIABLE(to);
  EIGEN_DEBUG_ALIGNED_STORE
#ifdef __VSX__
  vec_vsx_st(from, 0, to);
#else
  vec_st(from, 0, to);
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) {
@@ -452,7 +439,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, con
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); }

template<> EIGEN_STRONG_INLINE Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) {
  return vec_sel(b, a, mask);
  return vec_sel(b, a, reinterpret_cast<Packet4ui>(mask));
}

template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return vec_round(a); }
@@ -487,12 +474,12 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from));
  return vec_vsx_ld(0, from);
}
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from));
  return vec_vsx_ld(0, from);
}
#endif

@@ -552,13 +539,13 @@ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& f
// We also need to redefine little endian loading of Packet4i/Packet4f using VSX
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)
{
  EIGEN_DEBUG_ALIGNED_STORE
  vec_vsx_st(from, (long)to & 15, (int*) _EIGEN_ALIGNED_PTR(to));
  EIGEN_DEBUG_UNALIGNED_STORE
  vec_vsx_st(from, 0, to);
}
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from)
{
  EIGEN_DEBUG_ALIGNED_STORE
  vec_vsx_st(from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));
  EIGEN_DEBUG_UNALIGNED_STORE
  vec_vsx_st(from, 0, to);
}
#endif

@@ -949,21 +936,13 @@ inline std::ostream & operator <<(std::ostream & s, const Packet2d & v)
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
{
  EIGEN_DEBUG_ALIGNED_LOAD
#ifdef __VSX__
  return vec_vsx_ld(0, from);
#else
  return vec_ld(0, from);
#endif
  return vec_xl(0, const_cast<double *>(from)); // cast needed by Clang
}

template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from)
{
  EIGEN_DEBUG_ALIGNED_STORE
#ifdef __VSX__
  vec_vsx_st(from, 0, to);
#else
  vec_st(from, 0, to);
#endif
  vec_xst(from, 0, to);
}

template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
@@ -1030,6 +1009,14 @@ template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const
  return ret;
}

template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return reinterpret_cast<Packet2d>(vec_cmple(a,b)); }
template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return reinterpret_cast<Packet2d>(vec_cmplt(a,b)); }
template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return reinterpret_cast<Packet2d>(vec_cmpeq(a,b)); }
template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) {
  Packet2d c = reinterpret_cast<Packet2d>(vec_cmpge(a,b));
  return vec_nor(c,c);
}

template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }

template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }
@@ -1044,8 +1031,8 @@ template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { re

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_ALIGNED_LOAD
  return (Packet2d) vec_vsx_ld((long)from & 15, (const double*) _EIGEN_ALIGNED_PTR(from));
  EIGEN_DEBUG_UNALIGNED_LOAD
  return vec_vsx_ld(0, from);
}

template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
@@ -1058,8 +1045,8 @@ template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from)
{
  EIGEN_DEBUG_ALIGNED_STORE
  vec_vsx_st((Packet4f)from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));
  EIGEN_DEBUG_UNALIGNED_STORE
  vec_vsx_st(from, 0, to);
}

template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_PPC_PREFETCH(addr); }

@@ -126,6 +126,51 @@ Packet plog_float(const Packet _x)
             por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask));
}

/** \internal \returns log(1 + x) computed using W. Kahan's formula.
    See: http://www.plunk.org/~hatch/rightway.php
 */
template<typename Packet>
Packet generic_plog1p(const Packet& x)
{
  typedef typename unpacket_traits<Packet>::type ScalarType;
  const Packet one = pset1<Packet>(ScalarType(1));
  Packet xp1 = padd(x, one);
  Packet small_mask = pcmp_eq(xp1, one);
  Packet log1 = plog(xp1);
  Packet inf_mask = pcmp_eq(xp1, log1);
  Packet log_large = pmul(x, pdiv(log1, psub(xp1, one)));
  return pselect(por(small_mask, inf_mask), x, log_large);
}

/** \internal \returns exp(x)-1 computed using W. Kahan's formula.
    See: http://www.plunk.org/~hatch/rightway.php
 */
template<typename Packet>
Packet generic_expm1(const Packet& x)
{
  typedef typename unpacket_traits<Packet>::type ScalarType;
  const Packet one = pset1<Packet>(ScalarType(1));
  const Packet neg_one = pset1<Packet>(ScalarType(-1));
  Packet u = pexp(x);
  Packet one_mask = pcmp_eq(u, one);
  Packet u_minus_one = psub(u, one);
  Packet neg_one_mask = pcmp_eq(u_minus_one, neg_one);
  Packet logu = plog(u);
  // The following comparison is to catch the case where
  // exp(x) = +inf. It is written in this way to avoid having
  // to form the constant +inf, which depends on the packet
  // type.
  Packet pos_inf_mask = pcmp_eq(logu, u);
  Packet expm1 = pmul(u_minus_one, pdiv(x, logu));
  expm1 = pselect(pos_inf_mask, u, expm1);
  return pselect(one_mask,
                 x,
                 pselect(neg_one_mask,
                         neg_one,
                         expm1));
}

// Exponential function. Works by writing "x = m*log(2) + r" where
// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1).
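To make the intent of the masks in generic_plog1p concrete, here is a scalar sketch of Kahan's log1p recipe that the packet code mirrors (illustrative only, not part of the commit; the packet version expresses the same branches with pcmp_eq/pselect masks):

```cpp
#include <cmath>

// Scalar version of W. Kahan's log1p trick used by generic_plog1p above.
// The factor x / (u - 1) compensates for the rounding error committed when
// forming u = 1 + x in floating point.
double kahan_log1p(double x) {
  double u = 1.0 + x;
  if (u == 1.0) return x;        // x is tiny: log(1+x) ~= x exactly enough
  double logu = std::log(u);
  if (logu == u) return x;       // x (and u) is +inf; return x to avoid inf/inf
  return logu * x / (u - 1.0);
}
```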
@@ -33,8 +33,8 @@
// to disk and the likes), but fast on GPUs.

#ifndef EIGEN_HALF_GPU_H
#define EIGEN_HALF_GPU_H
#ifndef EIGEN_HALF_H
#define EIGEN_HALF_H

#if __cplusplus > 199711L
#define EIGEN_EXPLICIT_CAST(tgt_type) explicit operator tgt_type()
@@ -76,7 +76,6 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h);

struct half_base : public __half_raw {
  EIGEN_DEVICE_FUNC half_base() {}
  EIGEN_DEVICE_FUNC half_base(const half_base& h) : __half_raw(h) {}
  EIGEN_DEVICE_FUNC half_base(const __half_raw& h) : __half_raw(h) {}

#if defined(EIGEN_HAS_GPU_FP16)
@@ -114,7 +113,6 @@ struct half : public half_impl::half_base {
  EIGEN_DEVICE_FUNC half() {}

  EIGEN_DEVICE_FUNC half(const __half_raw& h) : half_impl::half_base(h) {}
  EIGEN_DEVICE_FUNC half(const half& h) : half_impl::half_base(h) {}

#if defined(EIGEN_HAS_GPU_FP16)
#if defined(EIGEN_HAS_HIP_FP16)
@@ -175,12 +173,6 @@ struct half : public half_impl::half_base {
  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(double) const {
    return static_cast<double>(half_impl::half_to_float(*this));
  }

  EIGEN_DEVICE_FUNC half& operator=(const half& other) {
    x = other.x;
    return *this;
  }

};

} // end namespace Eigen
@@ -761,4 +753,4 @@ bool (isfinite)(const Eigen::half& h) {
} // namespace numext
#endif

#endif // EIGEN_HALF_GPU_H
#endif // EIGEN_HALF_H
Eigen/src/Core/arch/Default/TypeCasting.h (new file, 77 lines)
@@ -0,0 +1,77 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2019 Rasmus Munk Larsen <rmlarsen@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERIC_TYPE_CASTING_H
#define EIGEN_GENERIC_TYPE_CASTING_H

namespace Eigen {

namespace internal {

template<>
struct scalar_cast_op<float, Eigen::half> {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
  typedef Eigen::half result_type;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {
    #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
        (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
      return __float2half(a);
    #else
      return Eigen::half(a);
    #endif
  }
};

template<>
struct functor_traits<scalar_cast_op<float, Eigen::half> >
{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };


template<>
struct scalar_cast_op<int, Eigen::half> {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
  typedef Eigen::half result_type;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {
    #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
        (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
      return __float2half(static_cast<float>(a));
    #else
      return Eigen::half(static_cast<float>(a));
    #endif
  }
};

template<>
struct functor_traits<scalar_cast_op<int, Eigen::half> >
{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };


template<>
struct scalar_cast_op<Eigen::half, float> {
  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
  typedef float result_type;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {
    #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
        (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
      return __half2float(a);
    #else
      return static_cast<float>(a);
    #endif
  }
};

template<>
struct functor_traits<scalar_cast_op<Eigen::half, float> >
{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };

}
}

#endif // EIGEN_GENERIC_TYPE_CASTING_H
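For context, these functors are what coefficient-wise casts between float and Eigen::half resolve to; the pcast/type_casting_traits specializations added elsewhere in this commit provide the corresponding packet-level conversions. A minimal usage sketch (illustrative, assuming only the public Eigen API):

```cpp
#include <Eigen/Core>
#include <iostream>

int main() {
  // .cast<>() between float and Eigen::half invokes the scalar_cast_op
  // specializations defined in Default/TypeCasting.h above.
  Eigen::Matrix<float, 4, 4> mf = Eigen::Matrix<float, 4, 4>::Random();
  Eigen::Matrix<Eigen::half, 4, 4> mh = mf.cast<Eigen::half>();
  std::cout << mh.cast<float>() << std::endl;  // round-trip back to float
  return 0;
}
```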
@ -460,6 +460,546 @@ ptranspose(PacketBlock<double2,2>& kernel) {
|
||||
|
||||
#endif
|
||||
|
||||
// Packet math for Eigen::half
|
||||
// Most of the following operations require arch >= 3.0
|
||||
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDACC) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
|
||||
(defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIPCC) && defined(EIGEN_HIP_DEVICE_COMPILE)) || \
|
||||
(defined(EIGEN_HAS_CUDA_FP16) && defined(__clang__) && defined(__CUDA__))
|
||||
|
||||
template<> struct is_arithmetic<half2> { enum { value = true }; };
|
||||
|
||||
template<> struct packet_traits<Eigen::half> : default_packet_traits
|
||||
{
|
||||
typedef half2 type;
|
||||
typedef half2 half;
|
||||
enum {
|
||||
Vectorizable = 1,
|
||||
AlignedOnScalar = 1,
|
||||
size=2,
|
||||
HasHalfPacket = 0,
|
||||
HasAdd = 1,
|
||||
HasSub = 1,
|
||||
HasMul = 1,
|
||||
HasDiv = 1,
|
||||
HasSqrt = 1,
|
||||
HasRsqrt = 1,
|
||||
HasExp = 1,
|
||||
HasExpm1 = 1,
|
||||
HasLog = 1,
|
||||
HasLog1p = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef half2 half; };
|
||||
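As a rough illustration of what this trait table controls, the sketch below queries a packet_traits specialization at compile time. packet_traits is internal API and may change; the half2 specialization above is only usable in device code, so this host-side query uses float instead.

#include <Eigen/Core>
#include <iostream>

int main() {
  // Internal API, illustration only: what the traits advertise for the current target.
  using Traits = Eigen::internal::packet_traits<float>;
  std::cout << "packet size: " << int(Traits::size)
            << ", HasLog1p: "  << int(Traits::HasLog1p)
            << ", HasExpm1: "  << int(Traits::HasExpm1) << std::endl;
  return 0;
}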
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
|
||||
#if !defined(EIGEN_CUDA_ARCH) && !defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
half2 r;
|
||||
r.x = from;
|
||||
r.y = from;
|
||||
return r;
|
||||
#else
|
||||
return __half2half2(from);
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pload<half2>(const Eigen::half* from) {
|
||||
return *reinterpret_cast<const half2*>(from);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploadu<half2>(const Eigen::half* from) {
|
||||
return __halves2half2(from[0], from[1]);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploaddup<half2>(const Eigen::half* from) {
|
||||
return __halves2half2(from[0], from[0]);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const half2& from) {
|
||||
*reinterpret_cast<half2*>(to) = from;
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const half2& from) {
|
||||
#if !defined(EIGEN_CUDA_ARCH) && !defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
to[0] = from.x;
|
||||
to[1] = from.y;
|
||||
#else
|
||||
to[0] = __low2half(from);
|
||||
to[1] = __high2half(from);
|
||||
#endif
|
||||
}
|
||||
|
||||
template<>
|
||||
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Aligned>(const Eigen::half* from) {
|
||||
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __ldg((const half2*)from);
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 350
|
||||
return __ldg((const half2*)from);
|
||||
#else
|
||||
return __halves2half2(*(from+0), *(from+1));
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<>
|
||||
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Unaligned>(const Eigen::half* from) {
|
||||
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __halves2half2(__ldg(from+0), __ldg(from+1));
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 350
|
||||
return __halves2half2(__ldg(from+0), __ldg(from+1));
|
||||
#else
|
||||
return __halves2half2(*(from+0), *(from+1));
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pgather<Eigen::half, half2>(const Eigen::half* from, Index stride) {
|
||||
return __halves2half2(from[0*stride], from[1*stride]);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<Eigen::half, half2>(Eigen::half* to, const half2& from, Index stride) {
|
||||
to[stride*0] = __low2half(from);
|
||||
to[stride*1] = __high2half(from);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst<half2>(const half2& a) {
|
||||
return __low2half(a);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pabs<half2>(const half2& a) {
|
||||
half a1 = __low2half(a);
|
||||
half a2 = __high2half(a);
|
||||
half result1 = half_impl::raw_uint16_to_half(a1.x & 0x7FFF);
|
||||
half result2 = half_impl::raw_uint16_to_half(a2.x & 0x7FFF);
|
||||
return __halves2half2(result1, result2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ptrue<half2>(const half2& a) {
|
||||
half true_half = half_impl::raw_uint16_to_half(0xffffu);
|
||||
return pset1<half2>(true_half);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pzero<half2>(const half2& a) {
|
||||
half false_half = half_impl::raw_uint16_to_half(0x0000u);
|
||||
return pset1<half2>(false_half);
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
|
||||
ptranspose(PacketBlock<half2,2>& kernel) {
|
||||
__half a1 = __low2half(kernel.packet[0]);
|
||||
__half a2 = __high2half(kernel.packet[0]);
|
||||
__half b1 = __low2half(kernel.packet[1]);
|
||||
__half b2 = __high2half(kernel.packet[1]);
|
||||
kernel.packet[0] = __halves2half2(a1, b1);
|
||||
kernel.packet[1] = __halves2half2(a2, b2);
|
||||
}
|
||||
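A plain-array sketch (hypothetical stand-in, no GPU required) of what the 2x2 ptranspose above does with its two half2 registers: lane j of packet i becomes lane i of packet j.

#include <cstdio>

int main() {
  float packet0[2] = {1.f, 2.f};  // (a1, a2)
  float packet1[2] = {3.f, 4.f};  // (b1, b2)
  // After the transpose: packet0 = (a1, b1), packet1 = (a2, b2).
  float t0[2] = {packet0[0], packet1[0]};
  float t1[2] = {packet0[1], packet1[1]};
  std::printf("%g %g | %g %g\n", t0[0], t0[1], t1[0], t1[1]);
  return 0;
}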
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plset<half2>(const Eigen::half& a) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __halves2half2(a, __hadd(a, __float2half(1.0f)));
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __halves2half2(a, __hadd(a, __float2half(1.0f)));
|
||||
#else
|
||||
float f = __half2float(a) + 1.0f;
|
||||
return __halves2half2(a, __float2half(f));
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template <>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pselect<half2>(const half2& mask,
|
||||
const half2& a,
|
||||
const half2& b) {
|
||||
half mask_low = __low2half(mask);
|
||||
half mask_high = __high2half(mask);
|
||||
half result_low = mask_low == half(0) ? __low2half(b) : __low2half(a);
|
||||
half result_high = mask_high == half(0) ? __high2half(b) : __high2half(a);
|
||||
return __halves2half2(result_low, result_high);
|
||||
}
|
||||
|
||||
template <>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_eq<half2>(const half2& a,
|
||||
const half2& b) {
|
||||
half true_half = half_impl::raw_uint16_to_half(0xffffu);
|
||||
half false_half = half_impl::raw_uint16_to_half(0x0000u);
|
||||
half a1 = __low2half(a);
|
||||
half a2 = __high2half(a);
|
||||
half b1 = __low2half(b);
|
||||
half b2 = __high2half(b);
|
||||
half eq1 = __half2float(a1) == __half2float(b1) ? true_half : false_half;
|
||||
half eq2 = __half2float(a2) == __half2float(b2) ? true_half : false_half;
|
||||
return __halves2half2(eq1, eq2);
|
||||
}
|
||||
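The comparison above encodes each lane's result as an all-ones (0xFFFF) or all-zeros half bit pattern, which is exactly what pselect consumes: a zero mask lane picks b, anything else picks a. A scalar sketch of that convention:

#include <cstdint>
#include <cstdio>

// Zero mask -> b, non-zero (0xFFFF) mask -> a, mirroring pselect's per-lane rule.
static uint16_t select_lane(uint16_t mask, uint16_t a, uint16_t b) {
  return mask ? a : b;
}

int main() {
  std::printf("%d %d\n", (int)select_lane(0xFFFFu, 1u, 2u),
                         (int)select_lane(0x0000u, 1u, 2u));
  return 0;
}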
|
||||
template <>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pand<half2>(const half2& a,
|
||||
const half2& b) {
|
||||
half a1 = __low2half(a);
|
||||
half a2 = __high2half(a);
|
||||
half b1 = __low2half(b);
|
||||
half b2 = __high2half(b);
|
||||
half result1 = half_impl::raw_uint16_to_half(a1.x & b1.x);
|
||||
half result2 = half_impl::raw_uint16_to_half(a2.x & b2.x);
|
||||
return __halves2half2(result1, result2);
|
||||
}
|
||||
|
||||
template <>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 por<half2>(const half2& a,
|
||||
const half2& b) {
|
||||
half a1 = __low2half(a);
|
||||
half a2 = __high2half(a);
|
||||
half b1 = __low2half(b);
|
||||
half b2 = __high2half(b);
|
||||
half result1 = half_impl::raw_uint16_to_half(a1.x | b1.x);
|
||||
half result2 = half_impl::raw_uint16_to_half(a2.x | b2.x);
|
||||
return __halves2half2(result1, result2);
|
||||
}
|
||||
|
||||
template <>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pxor<half2>(const half2& a,
|
||||
const half2& b) {
|
||||
half a1 = __low2half(a);
|
||||
half a2 = __high2half(a);
|
||||
half b1 = __low2half(b);
|
||||
half b2 = __high2half(b);
|
||||
half result1 = half_impl::raw_uint16_to_half(a1.x ^ b1.x);
|
||||
half result2 = half_impl::raw_uint16_to_half(a2.x ^ b2.x);
|
||||
return __halves2half2(result1, result2);
|
||||
}
|
||||
|
||||
template <>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pandnot<half2>(const half2& a,
|
||||
const half2& b) {
|
||||
half a1 = __low2half(a);
|
||||
half a2 = __high2half(a);
|
||||
half b1 = __low2half(b);
|
||||
half b2 = __high2half(b);
|
||||
half result1 = half_impl::raw_uint16_to_half(a1.x & ~b1.x);
|
||||
half result2 = half_impl::raw_uint16_to_half(a2.x & ~b2.x);
|
||||
return __halves2half2(result1, result2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a, const half2& b) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __hadd2(a, b);
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __hadd2(a, b);
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float b1 = __low2float(b);
|
||||
float b2 = __high2float(b);
|
||||
float r1 = a1 + b1;
|
||||
float r2 = a2 + b2;
|
||||
return __floats2half2_rn(r1, r2);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psub<half2>(const half2& a, const half2& b) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __hsub2(a, b);
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __hsub2(a, b);
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float b1 = __low2float(b);
|
||||
float b2 = __high2float(b);
|
||||
float r1 = a1 - b1;
|
||||
float r2 = a2 - b2;
|
||||
return __floats2half2_rn(r1, r2);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __hneg2(a);
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __hneg2(a);
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
return __floats2half2_rn(-a1, -a2);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; }
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a, const half2& b) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __hmul2(a, b);
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __hmul2(a, b);
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float b1 = __low2float(b);
|
||||
float b2 = __high2float(b);
|
||||
float r1 = a1 * b1;
|
||||
float r2 = a2 * b2;
|
||||
return __floats2half2_rn(r1, r2);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmadd<half2>(const half2& a, const half2& b, const half2& c) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __hfma2(a, b, c);
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __hfma2(a, b, c);
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float b1 = __low2float(b);
|
||||
float b2 = __high2float(b);
|
||||
float c1 = __low2float(c);
|
||||
float c2 = __high2float(c);
|
||||
float r1 = a1 * b1 + c1;
|
||||
float r2 = a2 * b2 + c2;
|
||||
return __floats2half2_rn(r1, r2);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a, const half2& b) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __h2div(a, b);
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float b1 = __low2float(b);
|
||||
float b2 = __high2float(b);
|
||||
float r1 = a1 / b1;
|
||||
float r2 = a2 / b2;
|
||||
return __floats2half2_rn(r1, r2);
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a, const half2& b) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float b1 = __low2float(b);
|
||||
float b2 = __high2float(b);
|
||||
__half r1 = a1 < b1 ? __low2half(a) : __low2half(b);
|
||||
__half r2 = a2 < b2 ? __high2half(a) : __high2half(b);
|
||||
return __halves2half2(r1, r2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a, const half2& b) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float b1 = __low2float(b);
|
||||
float b2 = __high2float(b);
|
||||
__half r1 = a1 > b1 ? __low2half(a) : __low2half(b);
|
||||
__half r2 = a2 > b2 ? __high2half(a) : __high2half(b);
|
||||
return __halves2half2(r1, r2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux<half2>(const half2& a) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __hadd(__low2half(a), __high2half(a));
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __hadd(__low2half(a), __high2half(a));
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
return Eigen::half(__float2half(a1 + a2));
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max<half2>(const half2& a) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
__half first = __low2half(a);
|
||||
__half second = __high2half(a);
|
||||
return __hgt(first, second) ? first : second;
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
__half first = __low2half(a);
|
||||
__half second = __high2half(a);
|
||||
return __hgt(first, second) ? first : second;
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
return a1 > a2 ? __low2half(a) : __high2half(a);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min<half2>(const half2& a) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
__half first = __low2half(a);
|
||||
__half second = __high2half(a);
|
||||
return __hlt(first, second) ? first : second;
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
__half first = __low2half(a);
|
||||
__half second = __high2half(a);
|
||||
return __hlt(first, second) ? first : second;
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
return a1 < a2 ? __low2half(a) : __high2half(a);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul<half2>(const half2& a) {
|
||||
#if defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
return __hmul(__low2half(a), __high2half(a));
|
||||
|
||||
#else // EIGEN_CUDA_ARCH
|
||||
|
||||
#if EIGEN_CUDA_ARCH >= 530
|
||||
return __hmul(__low2half(a), __high2half(a));
|
||||
#else
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
return Eigen::half(__float2half(a1 * a2));
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog1p<half2>(const half2& a) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float r1 = log1pf(a1);
|
||||
float r2 = log1pf(a2);
|
||||
return __floats2half2_rn(r1, r2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexpm1<half2>(const half2& a) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float r1 = expm1f(a1);
|
||||
float r2 = expm1f(a2);
|
||||
return __floats2half2_rn(r1, r2);
|
||||
}
|
||||
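A quick host-side illustration (standard <cmath>, nothing Eigen-specific) of why dedicated expm1/log1p kernels like the two above are worth having: near zero the naive formulations cancel catastrophically.

#include <cmath>
#include <cstdio>

int main() {
  float x = 1e-8f;
  std::printf("exp(x)-1 = %.9g   expm1(x) = %.9g\n", std::exp(x) - 1.0f, std::expm1(x));
  std::printf("log(1+x) = %.9g   log1p(x) = %.9g\n", std::log(1.0f + x), std::log1p(x));
  return 0;
}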
|
||||
#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
|
||||
defined(EIGEN_HIP_DEVICE_COMPILE)
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
half2 plog<half2>(const half2& a) {
|
||||
return h2log(a);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
half2 pexp<half2>(const half2& a) {
|
||||
return h2exp(a);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
half2 psqrt<half2>(const half2& a) {
|
||||
return h2sqrt(a);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
half2 prsqrt<half2>(const half2& a) {
|
||||
return h2rsqrt(a);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float r1 = logf(a1);
|
||||
float r2 = logf(a2);
|
||||
return __floats2half2_rn(r1, r2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float r1 = expf(a1);
|
||||
float r2 = expf(a2);
|
||||
return __floats2half2_rn(r1, r2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float r1 = sqrtf(a1);
|
||||
float r2 = sqrtf(a2);
|
||||
return __floats2half2_rn(r1, r2);
|
||||
}
|
||||
|
||||
template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 prsqrt<half2>(const half2& a) {
|
||||
float a1 = __low2float(a);
|
||||
float a2 = __high2float(a);
|
||||
float r1 = rsqrtf(a1);
|
||||
float r2 = rsqrtf(a2);
|
||||
return __floats2half2_rn(r1, r2);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
File diff suppressed because it is too large
@ -14,64 +14,6 @@ namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
template<>
|
||||
struct scalar_cast_op<float, Eigen::half> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
|
||||
typedef Eigen::half result_type;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {
|
||||
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
|
||||
(defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
|
||||
return __float2half(a);
|
||||
#else
|
||||
return Eigen::half(a);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct functor_traits<scalar_cast_op<float, Eigen::half> >
|
||||
{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
|
||||
|
||||
|
||||
template<>
|
||||
struct scalar_cast_op<int, Eigen::half> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
|
||||
typedef Eigen::half result_type;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {
|
||||
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
|
||||
(defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
|
||||
return __float2half(static_cast<float>(a));
|
||||
#else
|
||||
return Eigen::half(static_cast<float>(a));
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct functor_traits<scalar_cast_op<int, Eigen::half> >
|
||||
{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
|
||||
|
||||
|
||||
template<>
|
||||
struct scalar_cast_op<Eigen::half, float> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
|
||||
typedef float result_type;
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {
|
||||
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
|
||||
(defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
|
||||
return __half2float(a);
|
||||
#else
|
||||
return static_cast<float>(a);
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct functor_traits<scalar_cast_op<Eigen::half, float> >
|
||||
{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
|
||||
|
||||
|
||||
|
||||
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
|
||||
(defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
|
||||
|
||||
@ -104,109 +46,6 @@ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast<float4, half2>(cons
|
||||
return __floats2half2_rn(a.x, a.y);
|
||||
}
|
||||
|
||||
#elif defined EIGEN_VECTORIZE_AVX512
|
||||
template <>
|
||||
struct type_casting_traits<half, float> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) {
|
||||
return half2float(a);
|
||||
}
|
||||
|
||||
template <>
|
||||
struct type_casting_traits<float, half> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) {
|
||||
return float2half(a);
|
||||
}
|
||||
|
||||
#elif defined EIGEN_VECTORIZE_AVX
|
||||
|
||||
template <>
|
||||
struct type_casting_traits<Eigen::half, float> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) {
|
||||
return half2float(a);
|
||||
}
|
||||
|
||||
template <>
|
||||
struct type_casting_traits<float, Eigen::half> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) {
|
||||
return float2half(a);
|
||||
}
|
||||
|
||||
// Disable the following code since it's broken on too many platforms / compilers.
|
||||
//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
|
||||
#elif 0
|
||||
|
||||
template <>
|
||||
struct type_casting_traits<Eigen::half, float> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) {
|
||||
__int64_t a64 = _mm_cvtm64_si64(a.x);
|
||||
Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64));
|
||||
float f1 = static_cast<float>(h);
|
||||
h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
|
||||
float f2 = static_cast<float>(h);
|
||||
h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
|
||||
float f3 = static_cast<float>(h);
|
||||
h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
|
||||
float f4 = static_cast<float>(h);
|
||||
return _mm_set_ps(f4, f3, f2, f1);
|
||||
}
|
||||
|
||||
template <>
|
||||
struct type_casting_traits<float, Eigen::half> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) {
|
||||
EIGEN_ALIGN16 float aux[4];
|
||||
pstore(aux, a);
|
||||
Eigen::half h0(aux[0]);
|
||||
Eigen::half h1(aux[1]);
|
||||
Eigen::half h2(aux[2]);
|
||||
Eigen::half h3(aux[3]);
|
||||
|
||||
Packet4h result;
|
||||
result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x);
|
||||
return result;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // end namespace internal
|
||||
|
@ -22,11 +22,20 @@ namespace Eigen {

namespace internal {

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f plog<Packet4f>(const Packet4f& _x)
{
Packet4f plog<Packet4f>(const Packet4f& _x) {
  return plog_float(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f plog1p<Packet4f>(const Packet4f& _x) {
  return generic_plog1p(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f pexpm1<Packet4f>(const Packet4f& _x) {
  return generic_expm1(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f pexp<Packet4f>(const Packet4f& _x)
{
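For reference, a small sketch of the user-facing entry points these new Packet4f kernels sit behind; it assumes the coefficient-wise log1p()/expm1() array methods available in this branch.

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::ArrayXf x = Eigen::ArrayXf::LinSpaced(4, -0.5f, 1.0f);
  std::cout << x.log1p().transpose() << std::endl;  // lowers to plog1p when vectorized
  std::cout << x.expm1().transpose() << std::endl;  // lowers to pexpm1 when vectorized
  return 0;
}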
@ -110,9 +110,9 @@ template<> struct packet_traits<float> : default_packet_traits
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasExp = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasExp = 1,
    HasNdtri = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
@ -1056,6 +1056,214 @@ template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, co
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
// Packet math for Eigen::half
|
||||
// Disable the following code since it's broken on too many platforms / compilers.
|
||||
//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
|
||||
#if 0
|
||||
|
||||
typedef struct {
|
||||
__m64 x;
|
||||
} Packet4h;
|
||||
|
||||
|
||||
template<> struct is_arithmetic<Packet4h> { enum { value = true }; };
|
||||
|
||||
template <>
|
||||
struct packet_traits<Eigen::half> : default_packet_traits {
|
||||
typedef Packet4h type;
|
||||
// There is no half-size packet for Packet4h.
|
||||
typedef Packet4h half;
|
||||
enum {
|
||||
Vectorizable = 1,
|
||||
AlignedOnScalar = 1,
|
||||
size = 4,
|
||||
HasHalfPacket = 0,
|
||||
HasAdd = 1,
|
||||
HasSub = 1,
|
||||
HasMul = 1,
|
||||
HasDiv = 1,
|
||||
HasNegate = 0,
|
||||
HasAbs = 0,
|
||||
HasAbs2 = 0,
|
||||
HasMin = 0,
|
||||
HasMax = 0,
|
||||
HasConj = 0,
|
||||
HasSetLinear = 0,
|
||||
HasSqrt = 0,
|
||||
HasRsqrt = 0,
|
||||
HasExp = 0,
|
||||
HasLog = 0,
|
||||
HasBlend = 0
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h half; };
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
|
||||
Packet4h result;
|
||||
result.x = _mm_set1_pi16(from.x);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
|
||||
return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
|
||||
__int64_t a64 = _mm_cvtm64_si64(a.x);
|
||||
__int64_t b64 = _mm_cvtm64_si64(b.x);
|
||||
|
||||
Eigen::half h[4];
|
||||
|
||||
Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
|
||||
Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
|
||||
h[0] = ha + hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
|
||||
h[1] = ha + hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
|
||||
h[2] = ha + hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
|
||||
h[3] = ha + hb;
|
||||
Packet4h result;
|
||||
result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h psub<Packet4h>(const Packet4h& a, const Packet4h& b) {
|
||||
__int64_t a64 = _mm_cvtm64_si64(a.x);
|
||||
__int64_t b64 = _mm_cvtm64_si64(b.x);
|
||||
|
||||
Eigen::half h[4];
|
||||
|
||||
Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
|
||||
Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
|
||||
h[0] = ha - hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
|
||||
h[1] = ha - hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
|
||||
h[2] = ha - hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
|
||||
h[3] = ha - hb;
|
||||
Packet4h result;
|
||||
result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
|
||||
__int64_t a64 = _mm_cvtm64_si64(a.x);
|
||||
__int64_t b64 = _mm_cvtm64_si64(b.x);
|
||||
|
||||
Eigen::half h[4];
|
||||
|
||||
Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
|
||||
Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
|
||||
h[0] = ha * hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
|
||||
h[1] = ha * hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
|
||||
h[2] = ha * hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
|
||||
h[3] = ha * hb;
|
||||
Packet4h result;
|
||||
result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pdiv<Packet4h>(const Packet4h& a, const Packet4h& b) {
|
||||
__int64_t a64 = _mm_cvtm64_si64(a.x);
|
||||
__int64_t b64 = _mm_cvtm64_si64(b.x);
|
||||
|
||||
Eigen::half h[4];
|
||||
|
||||
Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
|
||||
Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
|
||||
h[0] = ha / hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
|
||||
h[1] = ha / hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
|
||||
h[2] = ha / hb;
|
||||
ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
|
||||
hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
|
||||
h[3] = ha / hb;
|
||||
Packet4h result;
|
||||
result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
|
||||
Packet4h result;
|
||||
result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
|
||||
Packet4h result;
|
||||
result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
|
||||
__int64_t r = _mm_cvtm64_si64(from.x);
|
||||
*(reinterpret_cast<__int64_t*>(to)) = r;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
|
||||
__int64_t r = _mm_cvtm64_si64(from.x);
|
||||
*(reinterpret_cast<__int64_t*>(to)) = r;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h
|
||||
ploadquad<Packet4h>(const Eigen::half* from) {
|
||||
return pset1<Packet4h>(*from);
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
|
||||
{
|
||||
Packet4h result;
|
||||
result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
|
||||
{
|
||||
__int64_t a = _mm_cvtm64_si64(from.x);
|
||||
to[stride*0].x = static_cast<unsigned short>(a);
|
||||
to[stride*1].x = static_cast<unsigned short>(a >> 16);
|
||||
to[stride*2].x = static_cast<unsigned short>(a >> 32);
|
||||
to[stride*3].x = static_cast<unsigned short>(a >> 48);
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE void
|
||||
ptranspose(PacketBlock<Packet4h,4>& kernel) {
|
||||
__m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
|
||||
__m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
|
||||
__m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
|
||||
__m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);
|
||||
|
||||
kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
|
||||
kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
|
||||
kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
|
||||
kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
@ -77,6 +77,57 @@ template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Pa
|
||||
return _mm_castsi128_ps(a);
|
||||
}
|
||||
|
||||
|
||||
// Disable the following code since it's broken on too many platforms / compilers.
|
||||
//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
|
||||
#if 0
|
||||
|
||||
template <>
|
||||
struct type_casting_traits<Eigen::half, float> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) {
|
||||
__int64_t a64 = _mm_cvtm64_si64(a.x);
|
||||
Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64));
|
||||
float f1 = static_cast<float>(h);
|
||||
h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
|
||||
float f2 = static_cast<float>(h);
|
||||
h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
|
||||
float f3 = static_cast<float>(h);
|
||||
h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
|
||||
float f4 = static_cast<float>(h);
|
||||
return _mm_set_ps(f4, f3, f2, f1);
|
||||
}
|
||||
|
||||
template <>
|
||||
struct type_casting_traits<float, Eigen::half> {
|
||||
enum {
|
||||
VectorizedCast = 1,
|
||||
SrcCoeffRatio = 1,
|
||||
TgtCoeffRatio = 1
|
||||
};
|
||||
};
|
||||
|
||||
template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) {
|
||||
EIGEN_ALIGN16 float aux[4];
|
||||
pstore(aux, a);
|
||||
Eigen::half h0(aux[0]);
|
||||
Eigen::half h1(aux[1]);
|
||||
Eigen::half h2(aux[2]);
|
||||
Eigen::half h3(aux[3]);
|
||||
|
||||
Packet4h result;
|
||||
result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x);
|
||||
return result;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
@ -44,6 +44,11 @@
  #if __clang_major__ >= 3 && __clang_minor__ >= 5
    #pragma clang diagnostic ignored "-Wabsolute-value"
  #endif
  #if ( defined(__ALTIVEC__) || defined(__VSX__) ) && __cplusplus < 201103L
    // warning: generic selections are a C11-specific feature
    // ignoring warnings thrown at vec_ctf in Altivec/PacketMath.h
    #pragma clang diagnostic ignored "-Wc11-extensions"
  #endif

#elif defined __GNUC__

File diff suppressed because it is too large
@ -129,17 +129,17 @@ class COLAMDOrdering
      StorageIndex n = StorageIndex(mat.cols());
      StorageIndex nnz = StorageIndex(mat.nonZeros());
      // Get the recommended value of Alen to be used by colamd
      StorageIndex Alen = internal::colamd_recommended(nnz, m, n);
      StorageIndex Alen = internal::Colamd::recommended(nnz, m, n);
      // Set the default parameters
      double knobs [COLAMD_KNOBS];
      StorageIndex stats [COLAMD_STATS];
      internal::colamd_set_defaults(knobs);
      double knobs [internal::Colamd::NKnobs];
      StorageIndex stats [internal::Colamd::NStats];
      internal::Colamd::set_defaults(knobs);

      IndexVector p(n+1), A(Alen);
      for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
      for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
      // Call Colamd routine to compute the ordering
      StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
      StorageIndex info = internal::Colamd::compute_ordering(m, n, Alen, A.data(), p.data(), knobs, stats);
      EIGEN_UNUSED_VARIABLE(info);
      eigen_assert( info && "COLAMD failed " );

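The hunk above only swaps the free colamd_* helpers for the namespaced internal::Colamd:: API; user code keeps going through the COLAMDOrdering functor. A hedged usage sketch (the 4x4 matrix and its values are made up for illustration):

#include <Eigen/SparseCore>
#include <Eigen/OrderingMethods>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(4, 4);
  A.insert(0, 0) = 1.0; A.insert(1, 1) = 2.0;
  A.insert(2, 0) = 3.0; A.insert(3, 3) = 4.0;
  A.makeCompressed();  // COLAMD reads the compressed index arrays

  Eigen::COLAMDOrdering<int> ordering;
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
  ordering(A, perm);
  std::cout << perm.indices().transpose() << std::endl;
  return 0;
}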
@ -386,14 +386,15 @@ class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
|
||||
{
|
||||
protected:
|
||||
typedef PardisoImpl<PardisoLU> Base;
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
using Base::pardisoInit;
|
||||
using Base::m_matrix;
|
||||
friend class PardisoImpl< PardisoLU<MatrixType> >;
|
||||
|
||||
public:
|
||||
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
|
||||
using Base::compute;
|
||||
using Base::solve;
|
||||
|
||||
@ -441,14 +442,14 @@ class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
|
||||
{
|
||||
protected:
|
||||
typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
using Base::pardisoInit;
|
||||
using Base::m_matrix;
|
||||
friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;
|
||||
|
||||
public:
|
||||
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
typedef typename Base::StorageIndex StorageIndex;
|
||||
enum { UpLo = _UpLo };
|
||||
using Base::compute;
|
||||
@ -504,14 +505,14 @@ class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
|
||||
{
|
||||
protected:
|
||||
typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
using Base::pardisoInit;
|
||||
using Base::m_matrix;
|
||||
friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;
|
||||
|
||||
public:
|
||||
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
typedef typename Base::StorageIndex StorageIndex;
|
||||
using Base::compute;
|
||||
enum { UpLo = Options&(Upper|Lower) };
|
||||
|
@ -1,25 +0,0 @@
|
||||
# libxsmm support.
|
||||
# libxsmm provides matrix multiplication kernels optimized for
|
||||
# the latest Intel architectures.
|
||||
# Download the library from https://github.com/hfp/libxsmm
|
||||
# Compile with make BLAS=0
|
||||
|
||||
if (LIBXSMM)
|
||||
set(XSMM_FIND_QUIETLY TRUE)
|
||||
set(XSMM_INCLUDES ${LIBXSMM}/include)
|
||||
set(XSMM_LIBRARIES ${LIBXSMM}/lib)
|
||||
endif (LIBXSMM)
|
||||
|
||||
find_path(LIBXSMM
|
||||
NAMES
|
||||
libxsmm.h
|
||||
PATHS
|
||||
$ENV{XSMMDIR}/include
|
||||
${INCLUDE_INSTALL_DIR}
|
||||
)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(XSMM DEFAULT_MSG
|
||||
LIBXSMM)
|
||||
|
||||
mark_as_advanced(LIBXSMM)
|
@ -9,7 +9,7 @@

#include "main.h"

#include <Eigen/src/Core/arch/GPU/Half.h>
#include <Eigen/src/Core/arch/Default/Half.h>

// Make sure it's possible to forward declare Eigen::half
namespace Eigen {
@ -610,11 +610,15 @@ template<typename Scalar,typename Packet> void packetmath_real()
|
||||
CHECK_CWISE1_IF(PacketTraits::HasSqrt, Scalar(1)/std::sqrt, internal::prsqrt);
|
||||
CHECK_CWISE1_IF(PacketTraits::HasLog, std::log, internal::plog);
|
||||
#if EIGEN_HAS_C99_MATH && (__cplusplus > 199711L)
|
||||
CHECK_CWISE1_IF(PacketTraits::HasExpm1, std::expm1, internal::pexpm1);
|
||||
CHECK_CWISE1_IF(PacketTraits::HasLog1p, std::log1p, internal::plog1p);
|
||||
CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasLGamma, std::lgamma, internal::plgamma);
|
||||
CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasErf, std::erf, internal::perf);
|
||||
CHECK_CWISE1_IF(internal::packet_traits<Scalar>::HasErfc, std::erfc, internal::perfc);
|
||||
data1[0] = std::numeric_limits<Scalar>::infinity();
|
||||
data1[1] = Scalar(-1);
|
||||
CHECK_CWISE1_IF(PacketTraits::HasLog1p, std::log1p, internal::plog1p);
|
||||
data1[0] = std::numeric_limits<Scalar>::infinity();
|
||||
data1[1] = -std::numeric_limits<Scalar>::infinity();
|
||||
CHECK_CWISE1_IF(PacketTraits::HasExpm1, std::expm1, internal::pexpm1);
|
||||
#endif
|
||||
|
||||
if(PacketSize>=2)
|
||||
@ -654,6 +658,14 @@ template<typename Scalar,typename Packet> void packetmath_real()
|
||||
h.store(data2, internal::plog(h.load(data1)));
|
||||
VERIFY((numext::isinf)(data2[0]));
|
||||
}
|
||||
if(PacketTraits::HasLog1p) {
|
||||
packet_helper<PacketTraits::HasLog1p,Packet> h;
|
||||
data1[0] = Scalar(-2);
|
||||
data1[1] = -std::numeric_limits<Scalar>::infinity();
|
||||
h.store(data2, internal::plog1p(h.load(data1)));
|
||||
VERIFY((numext::isnan)(data2[0]));
|
||||
VERIFY((numext::isnan)(data2[1]));
|
||||
}
|
||||
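The special values fed in above follow directly from the math: log1p(x) is NaN for any x below -1, including -infinity, which a plain <cmath> check reproduces.

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  const float neg_inf = -std::numeric_limits<float>::infinity();
  std::printf("%d %d\n", (int)std::isnan(std::log1p(-2.0f)),
                         (int)std::isnan(std::log1p(neg_inf)));
  return 0;
}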
if(PacketTraits::HasSqrt)
|
||||
{
|
||||
packet_helper<PacketTraits::HasSqrt,Packet> h;
|
||||
|
@ -73,10 +73,6 @@ typedef unsigned __int64 uint64_t;
|
||||
#include <time.h>
|
||||
#endif
|
||||
|
||||
#if defined(EIGEN_USE_LIBXSMM)
|
||||
#include "libxsmm.h"
|
||||
#endif
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
#include "ThreadPool"
|
||||
#endif
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include <thread>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include "src/util/CXX11Meta.h"
|
||||
#include "src/util/MaxSizeVector.h"
|
||||
|
||||
|
@ -1742,6 +1742,7 @@ sizes:
|
||||
2x2 image patches can be extracted and indexed using the following code:
|
||||
|
||||
*) 2D patch: ColMajor (patch indexed by second-to-last dimension)
|
||||
|
||||
Tensor<float, 5> twod_patch;
|
||||
twod_patch = tensor.extract_image_patches<2, 2>();
|
||||
// twod_patch.dimension(0) == 2
|
||||
@ -1751,6 +1752,7 @@ sizes:
|
||||
// twod_patch.dimension(4) == 7
|
||||
|
||||
*) 2D patch: RowMajor (patch indexed by the second dimension)
|
||||
|
||||
Tensor<float, 5, RowMajor> twod_patch_row_major;
|
||||
twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>();
|
||||
// twod_patch_row_major.dimension(0) == 7
|
||||
|
@ -147,6 +147,18 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
|
||||
// by the rhs to the lhs.
|
||||
return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType, EvalSubExprsCallback done) {
|
||||
m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) {
|
||||
m_rightImpl.evalSubExprsIfNeededAsync(
|
||||
m_leftImpl.data(), [done](bool need_assign) { done(need_assign); });
|
||||
});
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
|
||||
m_leftImpl.cleanup();
|
||||
m_rightImpl.cleanup();
|
||||
|
@ -1069,6 +1069,17 @@ class TensorBase : public TensorBase<Derived, ReadOnlyAccessors> {
|
||||
return TensorDevice<Derived, DeviceType>(dev, derived());
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
// Select the async device on which to evaluate the expression.
|
||||
template <typename DeviceType>
|
||||
typename internal::enable_if<
|
||||
internal::is_same<DeviceType, ThreadPoolDevice>::value,
|
||||
TensorAsyncDevice<Derived, DeviceType>>::type
|
||||
device(const DeviceType& dev, std::function<void()> done) {
|
||||
return TensorAsyncDevice<Derived, DeviceType>(dev, derived(), std::move(done));
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
protected:
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Derived& derived() { return *static_cast<Derived*>(this); }
|
||||
|
@ -932,6 +932,7 @@ class TensorBlockMapper {
|
||||
typedef TensorBlock<Scalar, StorageIndex, NumDims, Layout> Block;
|
||||
typedef DSizes<StorageIndex, NumDims> Dimensions;
|
||||
|
||||
TensorBlockMapper() {}
|
||||
TensorBlockMapper(const Dimensions& dims,
|
||||
const TensorBlockShapeType block_shape,
|
||||
Index min_target_size)
|
||||
|
@ -214,6 +214,14 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType, EvalSubExprsCallback done) {
|
||||
m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
|
||||
m_impl.cleanup();
|
||||
}
|
||||
|
@ -20,70 +20,6 @@ namespace Eigen {
|
||||
*
|
||||
*/
|
||||
namespace internal {
|
||||
#if defined(EIGEN_VECTORIZE_AVX) && defined(EIGEN_USE_LIBXSMM)
|
||||
template<typename Scalar, typename Index>
|
||||
void pack_simple(Scalar * dst, const Scalar * src, Index cols, Index rows, Index lddst, Index ldsrc) {
|
||||
size_t psize = packet_traits<Scalar>::size; // Packet size
|
||||
typedef typename packet_traits<Scalar>::type Packet; // Packet type
|
||||
size_t alignment = psize*sizeof(Scalar); // Needed alignment
|
||||
if (rows % psize == 0 && (lddst*sizeof(Scalar)) % alignment == 0 &&
|
||||
(ldsrc*sizeof(Scalar)) % alignment == 0 &&
|
||||
reinterpret_cast<uintptr_t>(src) % alignment == 0 &&
|
||||
reinterpret_cast<uintptr_t>(dst) % alignment == 0) {
|
||||
// Optimized version using packets
|
||||
size_t num_packets = rows / psize;
|
||||
for (Index col = 0; col < cols; ++col) {
|
||||
EIGEN_ASM_COMMENT("begin pack_simple inner copy");
|
||||
// Unrolled manually 4 times.
|
||||
for (size_t i=0; i < num_packets/4; ++i) {
|
||||
internal::pstore(dst, internal::pload<Packet>(src));
|
||||
dst += psize; src += psize;
|
||||
internal::pstore(dst, internal::pload<Packet>(src));
|
||||
dst += psize; src += psize;
|
||||
internal::pstore(dst, internal::pload<Packet>(src));
|
||||
dst += psize; src += psize;
|
||||
internal::pstore(dst, internal::pload<Packet>(src));
|
||||
dst += psize; src += psize;
|
||||
}
|
||||
for (size_t i=0; i < num_packets%4; ++i) {
|
||||
internal::pstore(dst, internal::pload<Packet>(src));
|
||||
dst += psize; src += psize;
|
||||
}
|
||||
dst += lddst - num_packets*psize;
|
||||
src += ldsrc - num_packets*psize;
|
||||
EIGEN_ASM_COMMENT("end pack_simple inner copy");
|
||||
}
|
||||
} else {
|
||||
// Naive memcpy calls
|
||||
for (Index col = 0; col < cols; ++col) {
|
||||
memcpy(dst + col*lddst, src + col*ldsrc, rows*sizeof(Scalar));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<typename LhsScalar, typename RhsScalar, typename Scalar>
|
||||
struct libxsmm_wrapper {
|
||||
libxsmm_wrapper() {}
|
||||
libxsmm_wrapper(int, int, int, int, int, int, int, float, float, int) {}
|
||||
void operator()(const LhsScalar*, const RhsScalar*, Scalar*) {}
|
||||
void operator()(const LhsScalar*, const RhsScalar*, Scalar*, const LhsScalar*, const RhsScalar*, const Scalar*) {}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct libxsmm_wrapper<float, float, float>: public libxsmm_mmfunction<float> {
|
||||
libxsmm_wrapper(): libxsmm_mmfunction() {}
|
||||
libxsmm_wrapper(int flags, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta, int prefetch) :
|
||||
libxsmm_mmfunction(flags, m, n, k, lda, ldb, ldc, alpha, beta, prefetch) {}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct libxsmm_wrapper<double, double, double>: public libxsmm_mmfunction<double> {
|
||||
libxsmm_wrapper(): libxsmm_mmfunction() {}
|
||||
libxsmm_wrapper(int flags, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta, int prefetch) :
|
||||
libxsmm_mmfunction(flags, m, n, k, lda, ldb, ldc, alpha, beta, prefetch) {}
|
||||
};
|
||||
#endif
|
||||
|
||||
|
||||
template<typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
|
||||
struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> >
|
||||
@ -640,8 +576,6 @@ struct TensorContractionEvaluatorBase
|
||||
}
|
||||
}
|
||||
|
||||
EnableXSMMIfPossible(eval_op_indices);
|
||||
|
||||
// If the layout is RowMajor, we need to reverse the m_dimensions
|
||||
if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
|
||||
for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
|
||||
@ -671,48 +605,99 @@ struct TensorContractionEvaluatorBase
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType dest, EvalSubExprsCallback done) {
|
||||
m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
|
||||
m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
|
||||
if (dest) {
|
||||
evalToAsync(dest, [done]() { done(false); });
|
||||
} else {
|
||||
m_result = static_cast<EvaluatorPointerType>(
|
||||
m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
|
||||
evalToAsync(m_result, [done]() { done(true); });
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
|
||||
if (this->m_lhs_inner_dim_contiguous) { \
|
||||
if (this->m_rhs_inner_dim_contiguous) { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
METHOD<true, true, true, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
else { \
|
||||
} else { \
|
||||
METHOD<true, true, false, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
} \
|
||||
else { \
|
||||
} else { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
METHOD<true, false, true, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
else { \
|
||||
} else { \
|
||||
METHOD<true, false, false, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
else { \
|
||||
} else { \
|
||||
if (this->m_rhs_inner_dim_contiguous) { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
METHOD<false, true, true, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
else { \
|
||||
} else { \
|
||||
METHOD<false, true, false, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
} \
|
||||
else { \
|
||||
} else { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
METHOD<false, false, true, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
else { \
|
||||
} else { \
|
||||
METHOD<false, false, false, ALIGNMENT> ARGS; \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
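The macro above is the usual trick of folding a handful of runtime booleans into template parameters so the compiler sees fully specialized kernels; stripped of the contraction details, the pattern looks like this:

#include <iostream>

// One fully specialized instantiation per flag combination.
template <bool kContiguous, bool kReordered>
void kernel() {
  std::cout << "kernel<" << kContiguous << ", " << kReordered << ">\n";
}

// Runtime flags select the instantiation, mirroring TENSOR_CONTRACTION_DISPATCH.
void dispatch(bool contiguous, bool reordered) {
  if (contiguous) {
    if (reordered) kernel<true, true>();
    else           kernel<true, false>();
  } else {
    if (reordered) kernel<false, true>();
    else           kernel<false, false>();
  }
}

int main() {
  dispatch(true, false);
  dispatch(false, true);
  return 0;
}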
|
||||
#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
|
||||
if (this->m_lhs_inner_dim_contiguous) { \
|
||||
if (this->m_rhs_inner_dim_contiguous) { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
(new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
|
||||
} else { \
|
||||
(new METHOD<DONE, true, true, false, ALIGNMENT> ARGS)->FN; \
|
||||
} \
|
||||
} else { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
(new METHOD<DONE, true, false, true, ALIGNMENT> ARGS)->FN; \
|
||||
} else { \
|
||||
(new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
|
||||
} \
|
||||
} \
|
||||
} else { \
|
||||
if (this->m_rhs_inner_dim_contiguous) { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
(new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
|
||||
} else { \
|
||||
(new METHOD<DONE, false, true, false, ALIGNMENT> ARGS)->FN; \
|
||||
} \
|
||||
} else { \
|
||||
if (this->m_rhs_inner_dim_reordered) { \
|
||||
(new METHOD<DONE, false, false, true, ALIGNMENT> ARGS)->FN; \
|
||||
} else { \
|
||||
(new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
|
||||
static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalToCallback>
|
||||
void evalToAsync(Scalar* buffer, EvalToCallback done) const {
|
||||
static_cast<const Derived*>(this)
|
||||
->template evalProductAsync<EvalToCallback, Unaligned>(buffer,
|
||||
std::move(done));
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
|
||||
bool rhs_inner_dim_reordered, int Alignment>
|
||||
void evalProductSequential(Scalar* buffer) const {
|
||||
@ -780,13 +765,6 @@ struct TensorContractionEvaluatorBase
|
||||
EIGEN_DEVICE_FUNC
|
||||
#endif
|
||||
void evalGemm(Scalar* buffer) const {
|
||||
#if defined(EIGEN_VECTORIZE_AVX) && defined(EIGEN_USE_LIBXSMM)
|
||||
if (m_can_use_xsmm) {
|
||||
evalGemmXSMM(buffer);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
// columns in left side, rows in right side
|
||||
const Index k = this->m_k_size;
|
||||
|
||||
@ -942,213 +920,6 @@ struct TensorContractionEvaluatorBase
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; }
|
||||
|
||||
protected:
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void EnableXSMMIfPossible(const array<IndexPair<Index>, ContractDims>& eval_op_indices) {
|
||||
m_can_use_xsmm = false;
|
||||
|
||||
#if defined(EIGEN_VECTORIZE_AVX) && defined(EIGEN_USE_LIBXSMM)
|
||||
typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
|
||||
typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
|
||||
if (!std::is_same<Scalar, LhsScalar>::value ||
|
||||
!std::is_same<Scalar, RhsScalar>::value ||
|
||||
!(std::is_same<Scalar, float>::value ||
|
||||
std::is_same<Scalar, double>::value) ||
|
||||
m_leftImpl.data() == NULL ||
|
||||
m_rightImpl.data() == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if we can use faster matmul algorithms. For contraction to be
|
||||
// equivalent to matmul, we need both lhs and rhs contracting dims sequences
|
||||
// to be either a prefix or suffix of all dims. Also, the order of both
|
||||
// must be the same, so we don't have to do reordering.
|
||||
// For example:
|
||||
// * OK: lhs 4D, rhs 4D, contraction: [(0, 2), (1, 3)]
|
||||
// * BAD: lhs 3D, rhs 3D, contraction: [(1,1)]
|
||||
// * BAD: lhs 3D, rhs 3D, contraction: [(0, 0), (2, 2)]
|
||||
// * BAD: lhs 3D, rhs 3D, contraction: [(0, 2), (1, 1)]
|
||||
// Depending if contraction dims are prefix or suffix of all dims we need to
|
||||
// pre-transpose matrices in matmul algorithm:
|
||||
// lhs: prefix -> transpose, suffix -> no transpose
|
||||
// rhs: prefix -> no transpose, suffix -> transpose
|
||||
// For example, for lhs 2D, rhs 2D, contraction [(1, 0)] is regular,
|
||||
// non-transposed matmul.
|
||||
if (ContractDims == 0) {
|
||||
// This case is totally uninteresting, filter it out to avoid problems
|
||||
// with iterations in further tests.
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if RHS dims list is increasing. LHS already is, so if not, the
|
||||
// order is different and we cannot do matmul.
|
||||
for (int i = 1; i < ContractDims; i++) {
|
||||
if (eval_op_indices[i].second < eval_op_indices[i-1].second) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if no holes.
|
||||
int diff;
|
||||
for (int i = 1; i < ContractDims; i++) {
|
||||
// LHS contract dims are sorted to form an increasing seq.
|
||||
diff = eval_op_indices[i].first - eval_op_indices[i-1].first;
|
||||
if (diff != 1) {
|
||||
return;
|
||||
}
|
||||
// Now we may already assume RHS contract dims seq is increasing too.
|
||||
diff = eval_op_indices[i].second - eval_op_indices[i-1].second;
|
||||
if (diff != 1) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if suffix or prefix.
|
||||
if (eval_op_indices[0].first != 0 &&
|
||||
eval_op_indices[ContractDims-1].first != LDims-1) {
|
||||
return;
|
||||
}
|
||||
if (eval_op_indices[0].second != 0 &&
|
||||
eval_op_indices[ContractDims-1].second != RDims-1) {
|
||||
return;
|
||||
}
|
||||
|
||||
m_can_use_xsmm = true;
|
||||
#else
|
||||
EIGEN_UNUSED_VARIABLE(eval_op_indices);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(EIGEN_VECTORIZE_AVX) && defined(EIGEN_USE_LIBXSMM)
|
||||
EIGEN_DEVICE_FUNC void evalGemmXSMM(Scalar* buffer) const {
|
||||
// columns in left side, rows in right side
|
||||
const Index k = this->m_k_size;
|
||||
|
||||
// rows in left side
|
||||
const Index m = this->m_i_size;
|
||||
|
||||
// columns in right side
|
||||
const Index n = this->m_j_size;
|
||||
|
||||
const bool transposeA = !m_lhs_inner_dim_contiguous;
|
||||
const bool transposeB = !m_rhs_inner_dim_contiguous;
|
||||
|
||||
typedef typename internal::remove_const<typename EvalLeftArgType::Scalar>::type LhsScalar;
|
||||
typedef typename internal::remove_const<typename EvalRightArgType::Scalar>::type RhsScalar;
|
||||
|
||||
internal::TensorXsmmContractionBlocking<LhsScalar, RhsScalar, Index> blocking(
|
||||
k, m, n, 1, transposeA, transposeB);
|
||||
|
||||
// Outer blocks sizes
|
||||
const Index mc_outer = blocking.outer_m();
|
||||
const Index nc_outer = blocking.outer_n();
|
||||
const Index kc_outer = blocking.outer_k();
|
||||
// Inner blocks sizes
|
||||
const Index mc = blocking.mc();
|
||||
const Index nc = blocking.nc();
|
||||
const Index kc = blocking.kc();
|
||||
// Decisions whether we should copy parts of matrices
|
||||
const bool copyA = blocking.copyA();
|
||||
const bool copyB = blocking.copyB();
|
||||
|
||||
const LhsScalar* leftData = m_leftImpl.data();
|
||||
const RhsScalar* rightData = m_rightImpl.data();
|
||||
|
||||
const libxsmm_blasint stride_A = static_cast<libxsmm_blasint>(transposeA ? k : m);
|
||||
const libxsmm_blasint stride_B = static_cast<libxsmm_blasint>(transposeB ? n : k);
|
||||
const libxsmm_blasint stride_C = static_cast<libxsmm_blasint>(m);
|
||||
|
||||
const libxsmm_blasint stride_blockA = static_cast<libxsmm_blasint>(mc);
|
||||
// Use bigger stride to avoid hitting same cache line too often.
|
||||
// This consistently gives +~0.5 Gflops.
|
||||
const libxsmm_blasint stride_panelB = static_cast<libxsmm_blasint>(
|
||||
kc % 32 == 0 ? kc + 16 : kc
|
||||
);
|
||||
|
||||
// Kernel for the general case (not edges)
|
||||
internal::libxsmm_wrapper<LhsScalar, RhsScalar, Scalar> kernel;
|
||||
|
||||
LhsScalar* blockA = NULL;
|
||||
RhsScalar* panelB = NULL;
|
||||
|
||||
if (copyA) {
|
||||
blockA = static_cast<LhsScalar*>(this->m_device.allocate(mc * kc * sizeof(LhsScalar)));
|
||||
}
|
||||
if (copyB) {
|
||||
panelB = static_cast<RhsScalar*>(this->m_device.allocate(nc_outer * stride_panelB * sizeof(RhsScalar)));
|
||||
}
|
||||
|
||||
const Index kernel_stride_A = copyA ? stride_blockA : stride_A;
|
||||
const Index kernel_stride_B = copyB ? stride_panelB : stride_B;
|
||||
kernel = internal::libxsmm_wrapper<LhsScalar, RhsScalar, Scalar>(0, mc, nc, kc, kernel_stride_A, kernel_stride_B, stride_C, 1, 1, blocking.prefetch());
|
||||
|
||||
// Outer blocking
|
||||
for (Index ki_outer = 0; ki_outer < k; ki_outer += kc_outer) {
|
||||
for (Index mi_outer = 0; mi_outer < m; mi_outer += mc_outer) {
|
||||
for (Index ni_outer = 0; ni_outer < n; ni_outer += nc_outer) {
|
||||
using numext::mini;
|
||||
|
||||
Index actual_nc_outer = mini(ni_outer+nc_outer, n) - ni_outer;
|
||||
|
||||
// Inner blocking
|
||||
for (Index ki = ki_outer; ki < mini(ki_outer+kc_outer, k); ki += kc) {
|
||||
const Index actual_kc = mini(ki_outer+kc_outer, mini(ki+kc, k)) - ki;
|
||||
const float beta = ki == 0 ? 0 : 1;
|
||||
|
||||
if (copyB) {
|
||||
if (transposeB) {
|
||||
libxsmm_otrans(panelB, rightData + ki*stride_B + ni_outer, sizeof(RhsScalar), actual_nc_outer, actual_kc, stride_B, stride_panelB);
|
||||
} else {
|
||||
internal::pack_simple<RhsScalar, Index>(panelB, rightData + ni_outer*stride_B + ki, actual_nc_outer, actual_kc, stride_panelB, stride_B);
|
||||
}
|
||||
}
|
||||
|
||||
for (Index mi = mi_outer; mi < mini(mi_outer+mc_outer, m); mi += mc) {
|
||||
const Index actual_mc = mini(mi_outer+mc_outer, mini(mi+mc, m)) - mi;
|
||||
|
||||
const LhsScalar* a = transposeA ? leftData + mi*stride_A + ki :
|
||||
leftData + ki*stride_A + mi;
|
||||
|
||||
if (copyA) {
|
||||
if (transposeA) {
|
||||
libxsmm_otrans(blockA, a, sizeof(LhsScalar), actual_kc, actual_mc, stride_A, stride_blockA);
|
||||
} else {
|
||||
internal::pack_simple<LhsScalar, Index>(blockA, a, actual_kc, actual_mc, stride_blockA, stride_A);
|
||||
}
|
||||
}
|
||||
const LhsScalar* actual_a = copyA ? blockA : a;
|
||||
|
||||
for (Index ni = ni_outer; ni < mini(ni_outer+nc_outer, n); ni += nc) {
|
||||
const Index actual_nc = mini(ni_outer+nc_outer, mini(ni+nc, n)) - ni;
|
||||
|
||||
const RhsScalar* b = rightData + ni*stride_B + ki;
|
||||
Scalar* c = buffer + ni*stride_C + mi;
|
||||
const Scalar* cp = c + nc*stride_C;
|
||||
|
||||
const RhsScalar* actual_b = copyB ? panelB + (ni-ni_outer)*stride_panelB : b;
|
||||
const RhsScalar* bp = copyB ? panelB + nc*stride_panelB : b + nc*stride_B;
|
||||
|
||||
if (actual_mc == mc && actual_kc == kc && actual_nc == nc && beta == 1) {
|
||||
// Most used, cached kernel.
|
||||
kernel(actual_a, actual_b, c, actual_a, bp, cp);
|
||||
} else {
|
||||
// Edges - use libxsmm kernel cache.
|
||||
internal::libxsmm_wrapper<LhsScalar, RhsScalar, Scalar>(0, actual_mc, actual_nc, actual_kc, kernel_stride_A, kernel_stride_B, stride_C, 1, beta, blocking.prefetch())(actual_a, actual_b, c, actual_a, bp, cp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (copyA) {
|
||||
this->m_device.deallocate(blockA);
|
||||
}
|
||||
if (copyB) {
|
||||
this->m_device.deallocate(panelB);
|
||||
}
|
||||
}
|
||||
#endif
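The mc/nc/kc blocking above follows the classic cache-blocked GEMM structure. A standalone illustration of that loop nest (plain C++, column-major, no libxsmm, no packing) — a sketch of the idea only, not the code path used here:

#include <algorithm>

// C (m x n) += A (m x k) * B (k x n); all matrices column-major with leading
// dimension equal to their row count. mc/nc/kc are the cache block sizes.
void blocked_gemm(const float* A, const float* B, float* C,
                  int m, int n, int k, int mc, int nc, int kc) {
  for (int kk = 0; kk < k; kk += kc) {
    const int kb = std::min(kc, k - kk);
    for (int jj = 0; jj < n; jj += nc) {
      const int nb = std::min(nc, n - jj);
      for (int ii = 0; ii < m; ii += mc) {
        const int mb = std::min(mc, m - ii);
        // Micro-kernel over one (mb x nb) block of C; libxsmm replaces this
        // triple loop with a JIT-generated, prefetching kernel.
        for (int j = 0; j < nb; ++j)
          for (int p = 0; p < kb; ++p)
            for (int i = 0; i < mb; ++i)
              C[(jj + j) * m + (ii + i)] +=
                  A[(kk + p) * m + (ii + i)] * B[(jj + j) * k + (kk + p)];
      }
    }
  }
}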
// Prevent assignment
|
||||
TensorContractionEvaluatorBase& operator = (const TensorContractionEvaluatorBase&);
|
||||
Dimensions m_dimensions;
|
||||
@ -1177,7 +948,6 @@ protected:
|
||||
const Device EIGEN_DEVICE_REF m_device;
|
||||
OutputKernelType m_output_kernel;
|
||||
EvaluatorPointerType m_result;
|
||||
bool m_can_use_xsmm;
|
||||
};
@ -67,141 +67,6 @@ class TensorContractionBlocking {
|
||||
StorageIndex nc_;
|
||||
};
|
||||
|
||||
|
||||
|
||||
#if defined(EIGEN_USE_LIBXSMM)
|
||||
template <typename LhsScalar, typename RhsScalar, typename StorageIndex>
|
||||
class TensorXsmmContractionBlocking {
|
||||
public:
|
||||
TensorXsmmContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n,
|
||||
size_t max_num_threads = 1, bool transposeA = false,
|
||||
bool transposeB = false):
|
||||
k_(k), m_(m), n_(n), transposeA_(transposeA),
|
||||
transposeB_(transposeB), num_threads_(max_num_threads) {
|
||||
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
|
||||
if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
|
||||
mc_ = EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M;
|
||||
kc_ = EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K;
|
||||
nc_ = EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N;
|
||||
outer_m_ = EIGEN_TEST_SPECIFIC_OUTER_BLOCKING_SIZE_M;
|
||||
outer_k_ = EIGEN_TEST_SPECIFIC_OUTER_BLOCKING_SIZE_K;
|
||||
outer_n_ = EIGEN_TEST_SPECIFIC_OUTER_BLOCKING_SIZE_N;
|
||||
copyA_ = EIGEN_TEST_SPECIFIC_BLOCKING_COPY_A;
|
||||
copyB_ = EIGEN_TEST_SPECIFIC_BLOCKING_COPY_B;
|
||||
outer_m_ = outer_m_ != 0 ? outer_m_ : m;
|
||||
outer_k_ = outer_k_ != 0 ? outer_k_ : k;
|
||||
outer_n_ = outer_n_ != 0 ? outer_n_ : n;
|
||||
}
|
||||
#else
|
||||
// Defaults, possibly overridden per-platform.
|
||||
copyA_ = true;
|
||||
copyB_ = false;
|
||||
|
||||
// If the matrix is small enough, don't do blocking, just call a single xsmm
|
||||
// kernel.
|
||||
if (static_cast<double>(m)*k*n <= LIBXSMM_THRESHOLD) {
|
||||
mc_ = m; kc_ = k; nc_ = n;
|
||||
outer_m_ = m; outer_k_ = k; outer_n_ = n;
|
||||
copyA_ = false; copyB_ = false;
|
||||
} else {
|
||||
int arch = libxsmm_cpuid_x86();
|
||||
|
||||
if (arch == LIBXSMM_X86_AVX512_CORE) {
|
||||
// skylake
|
||||
mc_ = 64; kc_ = 64; nc_ = 24;
|
||||
outer_m_ = 512; outer_k_ = 512; outer_n_ = 24*22;
|
||||
// Hack to use this kernel architecture as the other one has performance
|
||||
// issues (no hardware prefetching).
|
||||
// TODO(nishantpatil): This should be removed if the issues are fixed,
|
||||
// or this one becomes the default.
|
||||
setenv("LIBXSMM_AVX512_CLASSIC_GEMM", "1", 1);
|
||||
} else if (arch == LIBXSMM_X86_AVX2) {
|
||||
// haswell
|
||||
mc_ = 32; kc_ = 192; nc_ = 33;
|
||||
outer_m_ = 512; outer_k_ = 3*192; outer_n_ = 33*16;
|
||||
} else if (arch == LIBXSMM_X86_AVX) {
|
||||
// ivybridge
|
||||
mc_ = 32; kc_ = 192; nc_ = 48;
|
||||
outer_m_ = 512; outer_k_ = 3*192; outer_n_ = 48*11;
|
||||
} else {
|
||||
// generic kernel size, usually performing well
|
||||
mc_ = 32; kc_ = 128; nc_ = 32;
|
||||
outer_m_ = 512; outer_k_ = 512; outer_n_ = 512;
|
||||
}
|
||||
|
||||
// Only copy if it makes the stride smaller.
|
||||
copyA_ = copyA_ && (m > mc_);
|
||||
copyB_ = copyB_ && (k > kc_);
|
||||
}
|
||||
|
||||
// We need to copy anyway if transposing
|
||||
copyA_ = copyA_ || transposeA;
|
||||
copyB_ = copyB_ || transposeB;
|
||||
|
||||
// See libxsmm_gemm_prefetch_type definition in libxsmm_typedefs.h
|
||||
prefetch_ = LIBXSMM_PREFETCH_AL2CL2BL2_VIA_C;
|
||||
|
||||
#endif
|
||||
|
||||
mc_ = mc_ > m ? m : mc_;
|
||||
nc_ = nc_ > n ? n : nc_;
|
||||
kc_ = kc_ > k ? k : kc_;
|
||||
|
||||
size_t compute_parallelism = (m / mc_) * (n / nc_);
|
||||
size_t pack_parallelism = 0;
|
||||
if (copyA_) {
|
||||
pack_parallelism += (m / mc_) * (k / kc_);
|
||||
}
|
||||
if (copyB_) {
|
||||
pack_parallelism += (n / nc_) * (k / kc_);
|
||||
}
|
||||
size_t parallelism = numext::maxi(compute_parallelism, pack_parallelism);
|
||||
|
||||
num_threads_ = numext::mini<size_t>(num_threads_,
|
||||
parallelism / MIN_JOBS_PER_THREAD);
|
||||
num_threads_ = numext::maxi<size_t>(num_threads_, 1);
|
||||
|
||||
// For optimal performance outer block sizes should be multiples of kernel
|
||||
// sizes, or bigger than matrix size (=no outer blocking).
|
||||
eigen_assert(outer_m_ % mc_ == 0 || outer_m_ >= m);
|
||||
eigen_assert(outer_k_ % kc_ == 0 || outer_k_ >= k);
|
||||
eigen_assert(outer_n_ % nc_ == 0 || outer_n_ >= n);
|
||||
}
|
||||
|
||||
EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex outer_k() const { return outer_k_; }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex outer_m() const { return outer_m_; }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex outer_n() const { return outer_n_; }
|
||||
EIGEN_ALWAYS_INLINE bool copyA() const { return copyA_; }
|
||||
EIGEN_ALWAYS_INLINE bool copyB() const { return copyB_; }
|
||||
EIGEN_ALWAYS_INLINE bool transposeA() const { return transposeA_; }
|
||||
EIGEN_ALWAYS_INLINE bool transposeB() const { return transposeB_; }
|
||||
EIGEN_ALWAYS_INLINE int num_threads() const { return num_threads_; }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex blocks_m() const { return divup(m_, mc_); }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex blocks_k() const { return divup(k_, kc_); }
|
||||
EIGEN_ALWAYS_INLINE StorageIndex blocks_n() const { return divup(n_, nc_); }
|
||||
EIGEN_ALWAYS_INLINE libxsmm_gemm_prefetch_type prefetch() const {
|
||||
return prefetch_;
|
||||
}
|
||||
|
||||
private:
|
||||
StorageIndex k_, m_, n_;
|
||||
StorageIndex kc_, mc_, nc_;
|
||||
StorageIndex outer_k_, outer_m_, outer_n_;
|
||||
bool copyA_, copyB_, transposeA_, transposeB_;
|
||||
size_t num_threads_;
|
||||
|
||||
// Threshold for m*k*n to skip blocking and just call libxsmm
|
||||
const double LIBXSMM_THRESHOLD = 80*80*80;
|
||||
// For computing optimal number of threads - so that each thread gets at least
|
||||
// that many jobs.
|
||||
const double MIN_JOBS_PER_THREAD = 3;
|
||||
libxsmm_gemm_prefetch_type prefetch_;
|
||||
};
|
||||
#endif // EIGEN_USE_LIBXSMM
|
||||
|
||||
} // end namespace internal
|
||||
} // end namespace Eigen
File diff suppressed because it is too large
@ -63,6 +63,47 @@ template <typename ExpressionType, typename DeviceType> class TensorDevice {
|
||||
ExpressionType& m_expression;
|
||||
};
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
|
||||
/** \class TensorAsyncDevice
* \ingroup CXX11_Tensor_Module
*
* \brief Pseudo expression providing an operator = that will evaluate its
* argument asynchronously on the specified device (currently supports only
* ThreadPoolDevice).
*
* Example:
* std::function<void()> done = []() {};
* C.device(EIGEN_THREAD_POOL, std::move(done)) = A + B;
*/
|
||||
|
||||
template <typename ExpressionType, typename DeviceType>
|
||||
class TensorAsyncDevice {
|
||||
public:
|
||||
TensorAsyncDevice(const DeviceType& device, ExpressionType& expression,
|
||||
std::function<void()> done)
|
||||
: m_device(device), m_expression(expression), m_done(std::move(done)) {}
|
||||
|
||||
template <typename OtherDerived>
|
||||
EIGEN_STRONG_INLINE TensorAsyncDevice& operator=(const OtherDerived& other) {
|
||||
typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
|
||||
typedef internal::TensorAsyncExecutor<const Assign, DeviceType> Executor;
|
||||
|
||||
// WARNING: After assignment 'm_done' callback will be in undefined state.
|
||||
Assign assign(m_expression, other);
|
||||
Executor::runAsync(assign, m_device, std::move(m_done));
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
protected:
|
||||
const DeviceType& m_device;
|
||||
ExpressionType& m_expression;
|
||||
std::function<void()> m_done;
|
||||
};
|
||||
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
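A usage sketch for the new TensorAsyncDevice (illustrative only; it assumes the matching TensorBase::device(device, done) overload introduced elsewhere in this change, and uses Eigen::Barrier purely as one possible way to wait for the callback):

#define EIGEN_USE_THREADS
#include <functional>
#include <unsupported/Eigen/CXX11/Tensor>

void async_assign_example() {
  Eigen::ThreadPool pool(4);
  Eigen::ThreadPoolDevice device(&pool, 4);

  Eigen::Tensor<float, 2> A(64, 64), B(64, 64), C(64, 64);
  A.setRandom();
  B.setRandom();

  Eigen::Barrier done_barrier(1);
  std::function<void()> done = [&done_barrier]() { done_barrier.Notify(); };

  // The assignment is submitted to the pool and this call returns immediately;
  // 'done' fires after the last block of the expression has been evaluated.
  C.device(device, std::move(done)) = A + B;

  done_barrier.Wait();
}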
@ -52,7 +52,7 @@ class Allocator {
|
||||
// Build a thread pool device on top of an existing pool of threads.
|
||||
struct ThreadPoolDevice {
|
||||
// The ownership of the thread pool remains with the caller.
|
||||
ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores, Allocator* allocator = NULL)
|
||||
ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores, Allocator* allocator = nullptr)
|
||||
: pool_(pool), num_threads_(num_cores), allocator_(allocator) { }
|
||||
|
||||
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
|
||||
@ -90,7 +90,6 @@ struct ThreadPoolDevice {
|
||||
// CPU cycles due to the threads competing for memory bandwidth, so we
|
||||
// statically schedule at most 4 block copies here.
|
||||
const size_t kMinBlockSize = 32768;
|
||||
typedef TensorCostModel<ThreadPoolDevice> CostModel;
|
||||
const size_t num_threads = CostModel::numThreads(n, TensorOpCost(1.0, 1.0, 0), 4);
|
||||
if (n <= kMinBlockSize || num_threads < 2) {
|
||||
::memcpy(dst, src, n);
|
||||
@ -181,44 +180,187 @@ struct ThreadPoolDevice {
|
||||
return pool_->CurrentThreadId();
|
||||
}
|
||||
|
||||
// parallelFor executes f with [0, n) arguments in parallel and waits for
|
||||
// completion. F accepts a half-open interval [first, last).
|
||||
// Block size is chosen based on the iteration cost and resulting parallel
|
||||
// WARNING: This function is synchronous and will block the calling thread.
|
||||
//
|
||||
// Synchronous parallelFor executes f with [0, n) arguments in parallel and
|
||||
// waits for completion. F accepts a half-open interval [first, last). Block
|
||||
// size is chosen based on the iteration cost and resulting parallel
|
||||
// efficiency. If block_align is not nullptr, it is called to round up the
|
||||
// block size.
|
||||
void parallelFor(Index n, const TensorOpCost& cost,
|
||||
std::function<Index(Index)> block_align,
|
||||
std::function<void(Index, Index)> f) const {
|
||||
typedef TensorCostModel<ThreadPoolDevice> CostModel;
|
||||
// Compute small problems directly in the caller thread.
|
||||
if (n <= 1 || numThreads() == 1 ||
|
||||
CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
|
||||
f(0, n);
|
||||
return;
|
||||
}
|
||||
|
||||
// Calculate block size based on (1) the iteration cost and (2) parallel
|
||||
// efficiency. We want blocks to be not too small to mitigate
|
||||
// parallelization overheads; not too large to mitigate tail
|
||||
// effect and potential load imbalance and we also want number
|
||||
// of blocks to be evenly dividable across threads.
|
||||
// Compute block size and total count of blocks.
|
||||
ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align);
|
||||
|
||||
double block_size_f = 1.0 / CostModel::taskSize(1, cost);
|
||||
// Recursively divide size into halves until we reach block_size.
|
||||
// Division code rounds mid to block_size, so we are guaranteed to get
|
||||
// block_count leaves that do actual computations.
|
||||
Barrier barrier(static_cast<unsigned int>(block.count));
|
||||
std::function<void(Index, Index)> handleRange;
|
||||
handleRange = [=, &handleRange, &barrier, &f](Index firstIdx,
|
||||
Index lastIdx) {
|
||||
while (lastIdx - firstIdx > block.size) {
|
||||
// Split into halves and schedule the second half on a different thread.
|
||||
const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size;
|
||||
pool_->Schedule([=, &handleRange]() { handleRange(midIdx, lastIdx); });
|
||||
lastIdx = midIdx;
|
||||
}
|
||||
// Single block or less, execute directly.
|
||||
f(firstIdx, lastIdx);
|
||||
barrier.Notify();
|
||||
};
|
||||
|
||||
if (block.count <= numThreads()) {
|
||||
// Avoid a thread hop by running the root of the tree and one block on the
|
||||
// main thread.
|
||||
handleRange(0, n);
|
||||
} else {
|
||||
// Execute the root in the thread pool to avoid running work on more than
|
||||
// numThreads() threads.
|
||||
pool_->Schedule([=, &handleRange]() { handleRange(0, n); });
|
||||
}
|
||||
|
||||
barrier.Wait();
|
||||
}
|
||||
|
||||
// Convenience wrapper for parallelFor that does not align blocks.
|
||||
void parallelFor(Index n, const TensorOpCost& cost,
|
||||
std::function<void(Index, Index)> f) const {
|
||||
parallelFor(n, cost, nullptr, std::move(f));
|
||||
}
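A small usage sketch of the synchronous form (illustrative cost values; the per-element TensorOpCost just tells the cost model roughly how expensive one iteration is):

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>

// Fills data[0, n) in parallel and returns only once every range has run.
void parallel_fill(const Eigen::ThreadPoolDevice& device, float* data,
                   Eigen::Index n) {
  const Eigen::TensorOpCost cost(/*bytes_loaded=*/0,
                                 /*bytes_stored=*/sizeof(float),
                                 /*compute_cycles=*/1);
  device.parallelFor(n, cost, [data](Eigen::Index first, Eigen::Index last) {
    for (Eigen::Index i = first; i < last; ++i) data[i] = static_cast<float>(i);
  });
}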
// WARNING: This function is asynchronous and will not block the calling thread.
|
||||
//
|
||||
// Asynchronous parallelFor executes f with [0, n) arguments in parallel
|
||||
// without waiting for completion. When the last block finished, it will call
|
||||
// 'done' callback. F accepts a half-open interval [first, last). Block size
|
||||
// is chosen based on the iteration cost and resulting parallel efficiency. If
|
||||
// block_align is not nullptr, it is called to round up the block size.
|
||||
void parallelForAsync(Index n, const TensorOpCost& cost,
|
||||
std::function<Index(Index)> block_align,
|
||||
std::function<void(Index, Index)> f,
|
||||
std::function<void()> done) const {
|
||||
// Compute small problems directly in the caller thread.
|
||||
if (n <= 1 || numThreads() == 1 ||
|
||||
CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) {
|
||||
f(0, n);
|
||||
done();
|
||||
return;
|
||||
}
|
||||
|
||||
// Compute block size and total count of blocks.
|
||||
ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align);
|
||||
|
||||
ParallelForAsyncContext* const ctx =
|
||||
new ParallelForAsyncContext(block.count, std::move(f), std::move(done));
|
||||
|
||||
// Recursively divide size into halves until we reach block_size.
|
||||
// Division code rounds mid to block_size, so we are guaranteed to get
|
||||
// block_count leaves that do actual computations.
|
||||
ctx->handle_range = [this, ctx, block](Index firstIdx, Index lastIdx) {
|
||||
while (lastIdx - firstIdx > block.size) {
|
||||
// Split into halves and schedule the second half on a different thread.
|
||||
const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size;
|
||||
pool_->Schedule(
|
||||
[ctx, midIdx, lastIdx]() { ctx->handle_range(midIdx, lastIdx); });
|
||||
lastIdx = midIdx;
|
||||
}
|
||||
|
||||
// Single block or less, execute directly.
|
||||
ctx->f(firstIdx, lastIdx);
|
||||
|
||||
// Delete async context if it was the last block.
|
||||
if (ctx->count.fetch_sub(1) == 1) delete ctx;
|
||||
};
|
||||
|
||||
if (block.count <= numThreads()) {
|
||||
// Avoid a thread hop by running the root of the tree and one block on the
|
||||
// main thread.
|
||||
ctx->handle_range(0, n);
|
||||
} else {
|
||||
// Execute the root in the thread pool to avoid running work on more than
|
||||
// numThreads() threads.
|
||||
pool_->Schedule([ctx, n]() { ctx->handle_range(0, n); });
|
||||
}
|
||||
}
|
||||
|
||||
// Convenience wrapper for parallelForAsync that does not align blocks.
|
||||
void parallelForAsync(Index n, const TensorOpCost& cost,
|
||||
std::function<void(Index, Index)> f,
|
||||
std::function<void()> done) const {
|
||||
parallelForAsync(n, cost, nullptr, std::move(f), std::move(done));
|
||||
}
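And the asynchronous counterpart: the call returns immediately, and the final 'done' callback runs on a pool thread once the last block has finished (again with illustrative cost values):

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>

void parallel_zero_async(const Eigen::ThreadPoolDevice& device, float* data,
                         Eigen::Index n, Eigen::Barrier* barrier) {
  const Eigen::TensorOpCost cost(0, sizeof(float), 1);
  device.parallelForAsync(
      n, cost,
      [data](Eigen::Index first, Eigen::Index last) {
        for (Eigen::Index i = first; i < last; ++i) data[i] = 0.0f;
      },
      // Called exactly once, after the last [first, last) range completes.
      [barrier]() { barrier->Notify(); });
}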
// Thread pool accessor.
|
||||
ThreadPoolInterface* getPool() const { return pool_; }
|
||||
|
||||
// Allocator accessor.
|
||||
Allocator* allocator() const { return allocator_; }
|
||||
|
||||
private:
|
||||
typedef TensorCostModel<ThreadPoolDevice> CostModel;
|
||||
|
||||
// For parallelForAsync we must keep the passed-in closures on the heap, and
// delete them only after the `done` callback has finished.
|
||||
struct ParallelForAsyncContext {
|
||||
ParallelForAsyncContext(Index block_count,
|
||||
std::function<void(Index, Index)> block_f,
|
||||
std::function<void()> done_callback)
|
||||
: count(block_count),
|
||||
f(std::move(block_f)),
|
||||
done(std::move(done_callback)) {}
|
||||
~ParallelForAsyncContext() { done(); }
|
||||
|
||||
std::atomic<Index> count;
|
||||
std::function<void(Index, Index)> f;
|
||||
std::function<void()> done;
|
||||
|
||||
std::function<void(Index, Index)> handle_range;
|
||||
};
|
||||
|
||||
struct ParallelForBlock {
|
||||
Index size; // block size
|
||||
Index count; // number of blocks
|
||||
};
|
||||
|
||||
// Calculates block size based on (1) the iteration cost and (2) parallel
|
||||
// efficiency. We want blocks to be not too small to mitigate parallelization
|
||||
// overheads; not too large to mitigate tail effect and potential load
|
||||
// imbalance, and we also want the number of blocks to be evenly divisible across
|
||||
// threads.
|
||||
ParallelForBlock CalculateParallelForBlock(
|
||||
const Index n, const TensorOpCost& cost,
|
||||
std::function<Index(Index)> block_align) const {
|
||||
const double block_size_f = 1.0 / CostModel::taskSize(1, cost);
|
||||
const Index max_oversharding_factor = 4;
|
||||
Index block_size = numext::mini(
|
||||
n, numext::maxi<Index>(divup<Index>(n, max_oversharding_factor * numThreads()),
|
||||
n, numext::maxi<Index>(
|
||||
divup<Index>(n, max_oversharding_factor * numThreads()),
|
||||
block_size_f));
|
||||
const Index max_block_size = numext::mini(n, 2 * block_size);
|
||||
|
||||
if (block_align) {
|
||||
Index new_block_size = block_align(block_size);
|
||||
eigen_assert(new_block_size >= block_size);
|
||||
block_size = numext::mini(n, new_block_size);
|
||||
}
|
||||
|
||||
Index block_count = divup(n, block_size);
|
||||
|
||||
// Calculate parallel efficiency as fraction of total CPU time used for
|
||||
// computations:
|
||||
double max_efficiency =
|
||||
static_cast<double>(block_count) /
|
||||
(divup<int>(block_count, numThreads()) * numThreads());
|
||||
|
||||
// Now try to increase block size up to max_block_size as long as it
|
||||
// doesn't decrease parallel efficiency.
|
||||
for (Index prev_block_count = block_count;
|
||||
@ -251,47 +393,9 @@ struct ThreadPoolDevice {
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively divide size into halves until we reach block_size.
|
||||
// Division code rounds mid to block_size, so we are guaranteed to get
|
||||
// block_count leaves that do actual computations.
|
||||
Barrier barrier(static_cast<unsigned int>(block_count));
|
||||
std::function<void(Index, Index)> handleRange;
|
||||
handleRange = [=, &handleRange, &barrier, &f](Index firstIdx, Index lastIdx) {
|
||||
while (lastIdx - firstIdx > block_size) {
|
||||
// Split into halves and schedule the second half on a different thread.
|
||||
const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block_size) * block_size;
|
||||
pool_->Schedule([=, &handleRange]() { handleRange(midIdx, lastIdx); });
|
||||
lastIdx = midIdx;
|
||||
}
|
||||
// Single block or less, execute directly.
|
||||
f(firstIdx, lastIdx);
|
||||
barrier.Notify();
|
||||
};
|
||||
if (block_count <= numThreads()) {
|
||||
// Avoid a thread hop by running the root of the tree and one block on the
|
||||
// main thread.
|
||||
handleRange(0, n);
|
||||
} else {
|
||||
// Execute the root in the thread pool to avoid running work on more than
|
||||
// numThreads() threads.
|
||||
pool_->Schedule([=, &handleRange]() { handleRange(0, n); });
|
||||
}
|
||||
barrier.Wait();
|
||||
return {block_size, block_count};
|
||||
}
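A worked example of the efficiency heuristic above, under assumed inputs (hypothetical numbers, not taken from the code):

// n = 1000, numThreads() = 4, initial block_size = 100  ->  block_count = 10,
//   efficiency = 10 / (divup(10, 4) * 4) = 10 / 12 ~= 0.83.
// Growing the block size to 125 (still <= max_block_size = 200) gives
//   block_count = 8, efficiency = 8 / (divup(8, 4) * 4) = 8 / 8 = 1.0,
// so the loop above can settle on the coarser 125-element blocks.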
// Convenience wrapper for parallelFor that does not align blocks.
|
||||
void parallelFor(Index n, const TensorOpCost& cost,
|
||||
std::function<void(Index, Index)> f) const {
|
||||
parallelFor(n, cost, NULL, std::move(f));
|
||||
}
|
||||
|
||||
// Thread pool accessor.
|
||||
ThreadPoolInterface* getPool() const { return pool_; }
|
||||
|
||||
// Allocator accessor.
|
||||
Allocator* allocator() const { return allocator_; }
|
||||
|
||||
private:
|
||||
ThreadPoolInterface* pool_;
|
||||
int num_threads_;
|
||||
Allocator* allocator_;
@ -79,6 +79,15 @@ struct TensorEvaluator
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType dest, EvalSubExprsCallback done) {
|
||||
// TODO(ezhulenev): ThreadPoolDevice memcpy is a blocking operation.
|
||||
done(evalSubExprsIfNeeded(dest));
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {}
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
|
||||
@ -247,6 +256,15 @@ struct TensorEvaluator<const Derived, Device>
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType dest, EvalSubExprsCallback done) {
|
||||
// TODO(ezhulenev): ThreadPoolDevice memcpy is a blocking operation.
|
||||
done(evalSubExprsIfNeeded(dest));
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
|
||||
@ -346,6 +364,15 @@ struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
|
||||
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { return true; }
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType, EvalSubExprsCallback done) {
|
||||
done(true);
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
|
||||
|
||||
EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
|
||||
@ -425,6 +452,15 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
|
||||
m_argImpl.evalSubExprsIfNeeded(NULL);
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType, EvalSubExprsCallback done) {
|
||||
m_argImpl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
|
||||
m_argImpl.cleanup();
|
||||
}
|
||||
@ -546,6 +582,19 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
|
||||
m_rightImpl.evalSubExprsIfNeeded(NULL);
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
template <typename EvalSubExprsCallback>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
|
||||
EvaluatorPointerType, EvalSubExprsCallback done) {
|
||||
// TODO(ezhulenev): Evaluate two expression in parallel?
|
||||
m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) {
|
||||
m_rightImpl.evalSubExprsIfNeededAsync(nullptr,
|
||||
[done](bool) { done(true); });
|
||||
});
|
||||
}
|
||||
#endif // EIGEN_USE_THREADS
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
|
||||
m_leftImpl.cleanup();
|
||||
m_rightImpl.cleanup();
|
||||
|
@ -97,6 +97,14 @@ class TensorExecutor {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Default async execution strategy is not implemented. Currently it's only
|
||||
* available for ThreadPoolDevice (see definition below).
|
||||
*/
|
||||
template <typename Expression, typename Device, bool Vectorizable,
|
||||
bool Tileable>
|
||||
class TensorAsyncExecutor {};
|
||||
|
||||
/**
|
||||
* Process all the data with a single cpu thread, using vectorized instructions.
|
||||
*/
|
||||
@ -107,8 +115,8 @@ class TensorExecutor<Expression, DefaultDevice, /*Vectorizable*/ true,
|
||||
typedef typename Expression::Index StorageIndex;
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
static EIGEN_STRONG_INLINE void run(const Expression& expr,
|
||||
const DefaultDevice& device = DefaultDevice()) {
|
||||
static EIGEN_STRONG_INLINE void run(
|
||||
const Expression& expr, const DefaultDevice& device = DefaultDevice()) {
|
||||
TensorEvaluator<Expression, DefaultDevice> evaluator(expr, device);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
|
||||
if (needs_assign) {
|
||||
@ -206,8 +214,81 @@ class TensorExecutor<Expression, DefaultDevice, Vectorizable,
|
||||
/**
|
||||
* Multicore strategy: the index space is partitioned and each partition is
|
||||
* executed on a single core.
|
||||
*
|
||||
* (1) TensorExecutor will submit work to the ThreadPoolDevice managed thread
|
||||
* pool, and will block the caller thread until all tasks are finished.
|
||||
*
|
||||
* (2) TensorAsyncExecutor is a non-blocking version, that will submit work to
|
||||
* the ThreadPoolDevice managed thread pool, and will return immediately.
|
||||
* It will call 'done' callback after all tasks are finished.
|
||||
*/
|
||||
#ifdef EIGEN_USE_THREADS
|
||||
|
||||
template <typename TensorBlockMapper>
|
||||
struct TensorExecutorTilingContext {
|
||||
typedef typename TensorBlockMapper::Block TensorBlock;
|
||||
|
||||
TensorExecutorTilingContext() : buffer(nullptr) {}
|
||||
TensorExecutorTilingContext(const TensorBlockMapper& b_mapper,
|
||||
const TensorOpCost& b_cost, void* b_buffer,
|
||||
size_t b_aligned_size)
|
||||
: block_mapper(b_mapper),
|
||||
cost(b_cost),
|
||||
buffer(b_buffer),
|
||||
aligned_blocksize(b_aligned_size) {}
|
||||
|
||||
template <typename Scalar>
|
||||
Scalar* GetCurrentThreadBuffer(const ThreadPoolDevice& device) const {
|
||||
// ThreadPoolDevice::currentThreadId() returns -1 if called from a thread
|
||||
// not in the thread pool, such as the main thread dispatching Eigen
|
||||
// expressions.
|
||||
const int thread_idx = device.currentThreadId();
|
||||
eigen_assert(thread_idx >= -1 && thread_idx < device.numThreads());
|
||||
|
||||
const Index offset = aligned_blocksize * (thread_idx + 1);
|
||||
return reinterpret_cast<Scalar*>(static_cast<char*>(buffer) + offset);
|
||||
}
|
||||
|
||||
TensorBlockMapper block_mapper; // navigate through blocks
|
||||
TensorOpCost cost; // cost of computing a single block
|
||||
void* buffer; // temporary buffer for blocks
|
||||
size_t aligned_blocksize; // block size after memory alignment
|
||||
};
|
||||
|
||||
// Computes block evaluation parameters, and allocates a temporary memory buffer
|
||||
// for blocks. See TensorExecutor/TensorAsyncExecutor (Tileable=true) below.
|
||||
template <typename Evaluator, typename TensorBlockMapper, bool Vectorizable>
|
||||
TensorExecutorTilingContext<TensorBlockMapper> GetTensorExecutorTilingContext(
|
||||
const ThreadPoolDevice& device, const Evaluator& evaluator) {
|
||||
// Prefer blocks skewed toward inner dimension.
|
||||
TensorBlockShapeType block_shape = kSkewedInnerDims;
|
||||
Index block_total_size = 0;
|
||||
|
||||
// Query expression tree for desired block size/shape.
|
||||
std::vector<TensorOpResourceRequirements> resources;
|
||||
evaluator.getResourceRequirements(&resources);
|
||||
MergeResourceRequirements(resources, &block_shape, &block_total_size);
|
||||
int num_threads = device.numThreads();
|
||||
|
||||
// Estimate minimum block size based on cost.
|
||||
TensorOpCost cost = evaluator.costPerCoeff(Vectorizable);
|
||||
double taskSize = TensorCostModel<ThreadPoolDevice>::taskSize(1, cost);
|
||||
size_t block_size = static_cast<size_t>(1.0 / taskSize);
|
||||
|
||||
TensorBlockMapper block_mapper(
|
||||
typename TensorBlockMapper::Dimensions(evaluator.dimensions()),
|
||||
block_shape, block_size);
|
||||
|
||||
block_size = block_mapper.block_dims_total_size();
|
||||
const size_t align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
|
||||
const size_t aligned_blocksize =
|
||||
align *
|
||||
divup<size_t>(block_size * sizeof(typename Evaluator::Scalar), align);
|
||||
void* buf = device.allocate((num_threads + 1) * aligned_blocksize);
|
||||
|
||||
return {block_mapper, cost * block_size, buf, aligned_blocksize};
|
||||
}
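To make the buffer layout concrete, a small worked example (hypothetical sizes): with numThreads() = 4 and aligned_blocksize = 4096 the context allocates (4 + 1) * 4096 bytes, and GetCurrentThreadBuffer hands out disjoint slices at offset = aligned_blocksize * (thread_idx + 1):

// thread_idx == -1 (caller thread, not in the pool)  ->  offset      0
// thread_idx ==  0                                   ->  offset   4096
// thread_idx ==  1                                   ->  offset   8192
// thread_idx ==  2                                   ->  offset  12288
// thread_idx ==  3                                   ->  offset  16384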
template <typename Evaluator, typename StorageIndex, bool Vectorizable>
|
||||
struct EvalRange {
|
||||
static void run(Evaluator* evaluator_in, const StorageIndex firstIdx,
|
||||
@ -274,7 +355,7 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, Tileable> {
|
||||
typedef EvalRange<Evaluator, StorageIndex, Vectorizable> EvalRange;
|
||||
|
||||
Evaluator evaluator(expr, device);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
|
||||
if (needs_assign) {
|
||||
const StorageIndex size = array_prod(evaluator.dimensions());
|
||||
device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
|
||||
@ -290,18 +371,18 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, Tileable> {
|
||||
template <typename Expression, bool Vectorizable>
|
||||
class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ true> {
|
||||
public:
|
||||
typedef typename traits<Expression>::Index StorageIndex;
|
||||
typedef typename traits<Expression>::Scalar Scalar;
|
||||
typedef typename remove_const<Scalar>::type ScalarNoConst;
|
||||
|
||||
typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
|
||||
typedef typename traits<Expression>::Index StorageIndex;
|
||||
|
||||
static const int NumDims = traits<Expression>::NumDimensions;
|
||||
|
||||
typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
|
||||
typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> BlockMapper;
|
||||
typedef TensorExecutorTilingContext<BlockMapper> TilingContext;
|
||||
|
||||
static EIGEN_STRONG_INLINE void run(const Expression& expr,
|
||||
const ThreadPoolDevice& device) {
|
||||
typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> TensorBlockMapper;
|
||||
|
||||
Evaluator evaluator(expr, device);
|
||||
Index total_size = array_prod(evaluator.dimensions());
|
||||
Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
|
||||
@ -315,50 +396,162 @@ class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ tr
|
||||
return;
|
||||
}
|
||||
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
|
||||
if (needs_assign) {
|
||||
TensorBlockShapeType block_shape = kSkewedInnerDims;
|
||||
Index block_total_size = 0;
|
||||
// Query expression tree for desired block size/shape.
|
||||
std::vector<internal::TensorOpResourceRequirements> resources;
|
||||
evaluator.getResourceRequirements(&resources);
|
||||
MergeResourceRequirements(resources, &block_shape, &block_total_size);
|
||||
int num_threads = device.numThreads();
|
||||
const TilingContext tiling =
|
||||
internal::GetTensorExecutorTilingContext<Evaluator, BlockMapper,
|
||||
Vectorizable>(device, evaluator);
|
||||
|
||||
// Estimate minimum block size based on cost.
|
||||
TensorOpCost cost = evaluator.costPerCoeff(Vectorizable);
|
||||
double taskSize = TensorCostModel<ThreadPoolDevice>::taskSize(1, cost);
|
||||
size_t block_size = static_cast<size_t>(1.0 / taskSize);
|
||||
TensorBlockMapper block_mapper(
|
||||
typename TensorBlockMapper::Dimensions(evaluator.dimensions()),
|
||||
block_shape, block_size);
|
||||
block_size = block_mapper.block_dims_total_size();
|
||||
const size_t align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
|
||||
const size_t aligned_blocksize =
|
||||
align * divup<size_t>(block_size * sizeof(Scalar), align);
|
||||
void* buf = device.allocate((num_threads + 1) * aligned_blocksize);
|
||||
device.parallelFor(
|
||||
block_mapper.total_block_count(), cost * block_size,
|
||||
[=, &device, &evaluator, &block_mapper](StorageIndex firstIdx,
|
||||
tiling.block_mapper.total_block_count(), tiling.cost,
|
||||
[=, &device, &evaluator, &tiling](StorageIndex firstIdx,
|
||||
StorageIndex lastIdx) {
|
||||
// currentThreadId() returns -1 if called from a thread not in the
|
||||
// thread pool, such as the main thread dispatching Eigen
|
||||
// expressions.
|
||||
const int thread_idx = device.currentThreadId();
|
||||
eigen_assert(thread_idx >= -1 && thread_idx < num_threads);
|
||||
ScalarNoConst* thread_buf = reinterpret_cast<ScalarNoConst*>(
|
||||
static_cast<char*>(buf) + aligned_blocksize * (thread_idx + 1));
|
||||
ScalarNoConst* thread_buf =
|
||||
tiling.template GetCurrentThreadBuffer<ScalarNoConst>(device);
|
||||
for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
|
||||
auto block = block_mapper.GetBlockForIndex(i, thread_buf);
|
||||
auto block = tiling.block_mapper.GetBlockForIndex(i, thread_buf);
|
||||
evaluator.evalBlock(&block);
|
||||
}
|
||||
});
|
||||
device.deallocate(buf);
|
||||
device.deallocate(tiling.buffer);
|
||||
}
|
||||
evaluator.cleanup();
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Expression, bool Vectorizable, bool Tileable>
|
||||
class TensorAsyncExecutor<Expression, ThreadPoolDevice, Vectorizable, Tileable> {
|
||||
public:
|
||||
typedef typename Expression::Index StorageIndex;
|
||||
typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
|
||||
|
||||
static EIGEN_STRONG_INLINE void runAsync(const Expression& expr,
|
||||
const ThreadPoolDevice& device,
|
||||
std::function<void()> done) {
|
||||
TensorAsyncExecutorContext* const ctx =
|
||||
new TensorAsyncExecutorContext(expr, device, std::move(done));
|
||||
|
||||
const auto on_eval_subexprs = [ctx, &device](bool need_assign) -> void {
|
||||
if (!need_assign) {
|
||||
delete ctx;
|
||||
return;
|
||||
}
|
||||
|
||||
typedef EvalRange<Evaluator, StorageIndex, Vectorizable> EvalRange;
|
||||
const StorageIndex size = array_prod(ctx->evaluator.dimensions());
|
||||
device.parallelForAsync(
|
||||
size, ctx->evaluator.costPerCoeff(Vectorizable),
|
||||
EvalRange::alignBlockSize,
|
||||
[ctx](StorageIndex firstIdx, StorageIndex lastIdx) {
|
||||
EvalRange::run(&ctx->evaluator, firstIdx, lastIdx);
|
||||
},
|
||||
[ctx]() { delete ctx; });
|
||||
};
|
||||
|
||||
ctx->evaluator.evalSubExprsIfNeededAsync(nullptr, on_eval_subexprs);
|
||||
}
|
||||
|
||||
private:
|
||||
struct TensorAsyncExecutorContext {
|
||||
TensorAsyncExecutorContext(const Expression& expr,
|
||||
const ThreadPoolDevice& thread_pool,
|
||||
std::function<void()> done)
|
||||
: evaluator(expr, thread_pool), on_done(std::move(done)) {}
|
||||
|
||||
~TensorAsyncExecutorContext() {
|
||||
on_done();
|
||||
evaluator.cleanup();
|
||||
}
|
||||
|
||||
Evaluator evaluator;
|
||||
|
||||
private:
|
||||
std::function<void()> on_done;
|
||||
};
|
||||
};
|
||||
|
||||
template <typename Expression, bool Vectorizable>
|
||||
class TensorAsyncExecutor<Expression, ThreadPoolDevice, Vectorizable, /*Tileable*/ true> {
|
||||
public:
|
||||
typedef typename traits<Expression>::Index StorageIndex;
|
||||
typedef typename traits<Expression>::Scalar Scalar;
|
||||
typedef typename remove_const<Scalar>::type ScalarNoConst;
|
||||
|
||||
static const int NumDims = traits<Expression>::NumDimensions;
|
||||
|
||||
typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator;
|
||||
typedef TensorBlockMapper<ScalarNoConst, StorageIndex, NumDims, Evaluator::Layout> BlockMapper;
|
||||
typedef TensorExecutorTilingContext<BlockMapper> TilingContext;
|
||||
|
||||
static EIGEN_STRONG_INLINE void runAsync(const Expression& expr,
|
||||
const ThreadPoolDevice& device,
|
||||
std::function<void()> done) {
|
||||
TensorAsyncExecutorContext* const ctx =
|
||||
new TensorAsyncExecutorContext(expr, device, std::move(done));
|
||||
|
||||
Index total_size = array_prod(ctx->evaluator.dimensions());
|
||||
Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar);
|
||||
|
||||
if (total_size < cache_size &&
|
||||
!ExpressionHasTensorBroadcastingOp<Expression>::value) {
|
||||
internal::TensorAsyncExecutor<Expression, ThreadPoolDevice, Vectorizable,
|
||||
/*Tileable*/ false>::runAsync(
|
||||
expr, device, [ctx]() { delete ctx; });
|
||||
return;
|
||||
}
|
||||
|
||||
const auto on_eval_subexprs = [ctx, &device](bool need_assign) -> void {
|
||||
if (!need_assign) {
|
||||
delete ctx;
|
||||
return;
|
||||
}
|
||||
|
||||
ctx->tiling =
|
||||
GetTensorExecutorTilingContext<Evaluator, BlockMapper,
|
||||
Vectorizable>(device, ctx->evaluator);
|
||||
|
||||
device.parallelForAsync(
|
||||
ctx->tiling.block_mapper.total_block_count(), ctx->tiling.cost,
|
||||
[ctx](StorageIndex firstIdx, StorageIndex lastIdx) {
|
||||
ScalarNoConst* thread_buf =
|
||||
ctx->tiling.template GetCurrentThreadBuffer<ScalarNoConst>(
|
||||
ctx->device);
|
||||
for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
|
||||
auto block =
|
||||
ctx->tiling.block_mapper.GetBlockForIndex(i, thread_buf);
|
||||
ctx->evaluator.evalBlock(&block);
|
||||
}
|
||||
},
|
||||
[ctx]() { delete ctx; });
|
||||
};
|
||||
|
||||
ctx->evaluator.evalSubExprsIfNeededAsync(nullptr, on_eval_subexprs);
|
||||
}
|
||||
|
||||
private:
|
||||
struct TensorAsyncExecutorContext {
|
||||
TensorAsyncExecutorContext(const Expression& expr,
|
||||
const ThreadPoolDevice& thread_pool,
|
||||
std::function<void()> done)
|
||||
: device(thread_pool),
|
||||
evaluator(expr, thread_pool),
|
||||
on_done(std::move(done)) {}
|
||||
|
||||
~TensorAsyncExecutorContext() {
|
||||
on_done();
|
||||
device.deallocate(tiling.buffer);
|
||||
evaluator.cleanup();
|
||||
}
|
||||
|
||||
const ThreadPoolDevice& device;
|
||||
Evaluator evaluator;
|
||||
TilingContext tiling;
|
||||
|
||||
private:
|
||||
std::function<void()> on_done;
|
||||
};
|
||||
};
|
||||
|
||||
#endif // EIGEN_USE_THREADS
@ -419,7 +612,7 @@ template <typename Expression, bool Vectorizable, bool Tileable>
|
||||
EIGEN_STRONG_INLINE void TensorExecutor<Expression, GpuDevice, Vectorizable, Tileable>::run(
|
||||
const Expression& expr, const GpuDevice& device) {
|
||||
TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
|
||||
if (needs_assign) {
|
||||
|
||||
const int block_size = device.maxGpuThreadsPerBlock();
|
||||
@ -520,7 +713,7 @@ class TensorExecutor<Expression, Eigen::SyclDevice, Vectorizable, Tileable> {
|
||||
typedef typename Expression::Index Index;
|
||||
static EIGEN_STRONG_INLINE void run(const Expression &expr, const Eigen::SyclDevice &dev) {
|
||||
Eigen::TensorEvaluator<Expression, Eigen::SyclDevice> evaluator(expr, dev);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
|
||||
const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr);
|
||||
if (needs_assign) {
|
||||
Index range, GRange, tileSize;
|
||||
Index total_size = ::Eigen::internal::array_prod(evaluator.dimensions());
@ -20,6 +20,7 @@ namespace Eigen {
|
||||
// map_allocator.
|
||||
template<typename T> struct MakePointer {
|
||||
typedef T* Type;
|
||||
typedef const T* ConstType;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
@ -93,6 +94,7 @@ template<typename XprType, template <class> class MakePointer_ = MakePointer> cl
|
||||
template<typename XprType> class TensorForcedEvalOp;
|
||||
|
||||
template<typename ExpressionType, typename DeviceType> class TensorDevice;
|
||||
template<typename ExpressionType, typename DeviceType> class TensorAsyncDevice;
|
||||
template<typename Derived, typename Device> struct TensorEvaluator;
|
||||
|
||||
struct NoOpOutputKernel;
|
||||
@ -166,6 +168,11 @@ template <typename Expression, typename Device,
|
||||
bool Tileable = IsTileable<Device, Expression>::value>
|
||||
class TensorExecutor;
|
||||
|
||||
template <typename Expression, typename Device,
|
||||
bool Vectorizable = IsVectorizable<Device, Expression>::value,
|
||||
bool Tileable = IsTileable<Device, Expression>::value>
|
||||
class TensorAsyncExecutor;
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
} // end namespace Eigen
|
||||
|
@ -42,13 +42,27 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
typedef typename Base::CoeffReturnType CoeffReturnType;
|
||||
|
||||
/* typedef typename internal::conditional<
|
||||
bool(internal::is_lvalue<PlainObjectType>::value),
|
||||
Scalar *,
|
||||
const Scalar *>::type
|
||||
PointerType;*/
|
||||
typedef typename MakePointer_<Scalar>::Type PointerType;
|
||||
typedef PointerType PointerArgType;
|
||||
typedef typename MakePointer_<Scalar>::ConstType PointerConstType;
|
||||
|
||||
// WARN: PointerType still can be a pointer to const (const Scalar*), for
|
||||
// example in TensorMap<Tensor<const Scalar, ...>> expression. This type of
|
||||
// expression should be illegal, but adding this restriction is not possible
|
||||
// in practice (see https://bitbucket.org/eigen/eigen/pull-requests/488).
|
||||
typedef typename internal::conditional<
|
||||
bool(internal::is_lvalue<PlainObjectType>::value),
|
||||
PointerType, // use simple pointer in lvalue expressions
|
||||
PointerConstType // use const pointer in rvalue expressions
|
||||
>::type StoragePointerType;
|
||||
|
||||
// If TensorMap was constructed over rvalue expression (e.g. const Tensor),
|
||||
// we should return a reference to const from operator() (and others), even
|
||||
// if TensorMap itself is not const.
|
||||
typedef typename internal::conditional<
|
||||
bool(internal::is_lvalue<PlainObjectType>::value),
|
||||
Scalar&,
|
||||
const Scalar&
|
||||
>::type StorageRefType;
|
||||
|
||||
static const int Options = Options_;
|
||||
|
||||
@ -63,47 +77,47 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
|
||||
};
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr) : m_data(dataPtr), m_dimensions() {
|
||||
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr) : m_data(dataPtr), m_dimensions() {
|
||||
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
|
||||
EIGEN_STATIC_ASSERT((0 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
}
|
||||
|
||||
#if EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) {
|
||||
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) {
|
||||
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
|
||||
EIGEN_STATIC_ASSERT((sizeof...(otherDimensions) + 1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
}
|
||||
#else
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) {
|
||||
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) {
|
||||
// The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
|
||||
EIGEN_STATIC_ASSERT((1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) {
|
||||
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) {
|
||||
EIGEN_STATIC_ASSERT(2 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) {
|
||||
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) {
|
||||
EIGEN_STATIC_ASSERT(3 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) {
|
||||
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) {
|
||||
EIGEN_STATIC_ASSERT(4 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
}
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) {
|
||||
EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) {
|
||||
EIGEN_STATIC_ASSERT(5 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
}
|
||||
#endif
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, const array<Index, NumIndices>& dimensions)
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const array<Index, NumIndices>& dimensions)
|
||||
: m_data(dataPtr), m_dimensions(dimensions)
|
||||
{ }
|
||||
|
||||
template <typename Dimensions>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, const Dimensions& dimensions)
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const Dimensions& dimensions)
|
||||
: m_data(dataPtr), m_dimensions(dimensions)
|
||||
{ }
|
||||
|
||||
@ -120,12 +134,12 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); }
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE PointerType data() { return m_data; }
|
||||
EIGEN_STRONG_INLINE StoragePointerType data() { return m_data; }
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const PointerType data() const { return m_data; }
|
||||
EIGEN_STRONG_INLINE StoragePointerType data() const { return m_data; }
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
|
||||
EIGEN_STRONG_INLINE StorageRefType operator()(const array<Index, NumIndices>& indices) const
|
||||
{
|
||||
// eigen_assert(checkIndexRange(indices));
|
||||
if (PlainObjectType::Options&RowMajor) {
|
||||
@ -138,14 +152,14 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& operator()() const
|
||||
EIGEN_STRONG_INLINE StorageRefType operator()() const
|
||||
{
|
||||
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
return m_data[0];
|
||||
}
|
||||
|
||||
EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
|
||||
EIGEN_STRONG_INLINE StorageRefType operator()(Index index) const
|
||||
{
|
||||
eigen_internal_assert(index >= 0 && index < size());
|
||||
return m_data[index];
|
||||
@ -153,7 +167,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
|
||||
|
||||
#if EIGEN_HAS_VARIADIC_TEMPLATES
|
||||
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
|
||||
EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
|
||||
EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
|
||||
{
|
||||
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
eigen_assert(internal::all((Eigen::NumTraits<Index>::highest() >= otherIndices)...));
|
||||
@ -167,7 +181,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i1 + i0 * m_dimensions[1];
@ -178,7 +192,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
@ -189,7 +203,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
@ -200,7 +214,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
@ -213,7 +227,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
#endif

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
EIGEN_STRONG_INLINE StorageRefType operator()(const array<Index, NumIndices>& indices)
{
// eigen_assert(checkIndexRange(indices));
if (PlainObjectType::Options&RowMajor) {
@ -226,14 +240,14 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()()
EIGEN_STRONG_INLINE StorageRefType operator()()
{
EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE)
return m_data[0];
}

EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index index)
EIGEN_STRONG_INLINE StorageRefType operator()(Index index)
{
eigen_internal_assert(index >= 0 && index < size());
return m_data[index];
@ -241,7 +255,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin

#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
{
static_assert(sizeof...(otherIndices) + 2 == NumIndices || NumIndices == Dynamic, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
eigen_assert(internal::all((Eigen::NumTraits<Index>::highest() >= otherIndices)...));
@ -256,7 +270,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
#else
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i1 + i0 * m_dimensions[1];
@ -267,7 +281,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
@ -278,7 +292,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
@ -289,7 +303,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
{
if (PlainObjectType::Options&RowMajor) {
const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
@ -320,7 +334,7 @@ template<typename PlainObjectType, int Options_, template <class> class MakePoin
}

private:
typename MakePointer_<Scalar>::Type m_data;
StoragePointerType m_data;
Dimensions m_dimensions;
};

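A minimal sketch of the user-facing effect of the accessor changes above, assuming StorageRefType resolves to a const reference when the mapped tensor type is const-qualified (the alias itself is defined outside these hunks). It mirrors the cxx11_tensor_map test changes further down; the variable names are illustrative only:

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<int, 2> t(2, 3);
  t.setZero();
  t(1, 2) = 42;

  // Mapping a const-qualified Tensor type is what the StorageRefType accessors enable.
  Eigen::TensorMap<const Eigen::Tensor<int, 2> > m(t.data(), 2, 3);
  int v = m(1, 2);  // reads go through the const-aware reference type
  // m(0, 0) = 1;   // would not compile: the mapped tensor type is const
  return v == 42 ? 0 : 1;
}
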
@ -45,6 +45,14 @@ EIGEN_DEVICE_FUNC uint64_t get_random_seed() {
uint64_t rnd = ::random() ^ mach_absolute_time();
return rnd;

#elif defined __native_client__
// Same approach as for win32, except using clock_gettime
timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
int rnd1 = ::rand();
int rnd2 = ::rand();
uint64_t rnd = (rnd1 | rnd2 << 16) ^ ts.tv_nsec;
return rnd;

#else
// Augment the current time with pseudo random number generation

@ -25,6 +25,9 @@ class Barrier {
void Notify() {
unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2;
if (v != 1) {
// Clear the lowest bit (waiter flag) and check that the original state
// value was not zero. If it was zero, it means that notify was called
// more times than the original count.
eigen_plain_assert(((v + 2) & ~1) != 0);
return; // either count has not dropped to 0, or waiter is not waiting
}

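The Notify() change above relies on the packed encoding spelled out in the comments: the remaining count lives in the upper bits of state_ and the lowest bit is a waiter flag, which is why the count is decremented by 2 and the assertion masks off bit 0. A minimal usage sketch of Eigen::Barrier as the async tests below use it (the worker thread and its workload are illustrative, and the Barrier is assumed to come in via the unsupported ThreadPool module header):

#include <thread>
#include <unsupported/Eigen/CXX11/ThreadPool>

int main() {
  Eigen::Barrier done(1);          // expect exactly one Notify()
  std::thread worker([&done]() {
    // ... produce a result ...
    done.Notify();                 // drops the packed count; wakes the waiter at zero
  });
  done.Wait();                     // blocks until the count reaches zero
  worker.join();
  return 0;
}
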
@ -12,17 +12,6 @@ include_directories(../../test ../../unsupported ../../Eigen

find_package (Threads)

find_package(Xsmm)
if(XSMM_FOUND)
add_definitions("-DEIGEN_USE_LIBXSMM")
include_directories(${XSMM_INCLUDES})
link_directories(${XSMM_LIBRARIES})
set(EXTERNAL_LIBS ${EXTERNAL_LIBS} xsmm)
ei_add_property(EIGEN_TESTED_BACKENDS "Xsmm, ")
else(XSMM_FOUND)
ei_add_property(EIGEN_MISSING_BACKENDS "Xsmm, ")
endif(XSMM_FOUND)

find_package(GoogleHash)
if(GOOGLEHASH_FOUND)
add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")

@ -511,8 +511,6 @@ static void test_const_inputs()
VERIFY_IS_APPROX(mat3(1,1), mat1(1,0)*mat2(0,1) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(2,1));
}

#if !defined(EIGEN_USE_LIBXSMM)

// Apply Sqrt to all output elements.
struct SqrtOutputKernel {
template <typename Index, typename Scalar>
@ -562,9 +560,6 @@ static void test_large_contraction_with_output_kernel() {
}
}

#endif // !defined(EIGEN_USE_LIBXSMM)


EIGEN_DECLARE_TEST(cxx11_tensor_contraction)
{
CALL_SUBTEST(test_evals<ColMajor>());
@ -597,8 +592,6 @@ EIGEN_DECLARE_TEST(cxx11_tensor_contraction)
CALL_SUBTEST(test_tensor_product<RowMajor>());
CALL_SUBTEST(test_const_inputs<ColMajor>());
CALL_SUBTEST(test_const_inputs<RowMajor>());
#if !defined(EIGEN_USE_LIBXSMM)
CALL_SUBTEST(test_large_contraction_with_output_kernel<ColMajor>());
CALL_SUBTEST(test_large_contraction_with_output_kernel<RowMajor>());
#endif
}

@ -562,6 +562,68 @@ static void test_execute_reverse_rvalue(Device d)
}
}

template <typename T, int NumDims, typename Device, bool Vectorizable,
bool Tileable, int Layout>
static void test_async_execute_unary_expr(Device d)
{
static constexpr int Options = 0 | Layout;

// Pick a large enough tensor size to bypass small tensor block evaluation
// optimization.
auto dims = RandomDims<NumDims>(50 / NumDims, 100 / NumDims);

Tensor<T, NumDims, Options, Index> src(dims);
Tensor<T, NumDims, Options, Index> dst(dims);

src.setRandom();
const auto expr = src.square();

using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
using Executor = internal::TensorAsyncExecutor<const Assign, Device,
Vectorizable, Tileable>;
Eigen::Barrier done(1);
Executor::runAsync(Assign(dst, expr), d, [&done]() { done.Notify(); });
done.Wait();

for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
T square = src.coeff(i) * src.coeff(i);
VERIFY_IS_EQUAL(square, dst.coeff(i));
}
}

template <typename T, int NumDims, typename Device, bool Vectorizable,
bool Tileable, int Layout>
static void test_async_execute_binary_expr(Device d)
{
static constexpr int Options = 0 | Layout;

// Pick a large enough tensor size to bypass small tensor block evaluation
// optimization.
auto dims = RandomDims<NumDims>(50 / NumDims, 100 / NumDims);

Tensor<T, NumDims, Options, Index> lhs(dims);
Tensor<T, NumDims, Options, Index> rhs(dims);
Tensor<T, NumDims, Options, Index> dst(dims);

lhs.setRandom();
rhs.setRandom();

const auto expr = lhs + rhs;

using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
using Executor = internal::TensorAsyncExecutor<const Assign, Device,
Vectorizable, Tileable>;

Eigen::Barrier done(1);
Executor::runAsync(Assign(dst, expr), d, [&done]() { done.Notify(); });
done.Wait();

for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
T sum = lhs.coeff(i) + rhs.coeff(i);
VERIFY_IS_EQUAL(sum, dst.coeff(i));
}
}
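The two tests above follow the same pattern: build a TensorAssignOp, hand it to internal::TensorAsyncExecutor::runAsync() together with a device and a "done" callback, and block on an Eigen::Barrier until the callback fires. A condensed, self-contained sketch of that pattern, waiting on a std::future instead of a Barrier (the tensor sizes, the template flags, and the promise-based wait are illustrative, not part of the commit):

#define EIGEN_USE_THREADS
#include <future>
#include <unsupported/Eigen/CXX11/Tensor>

void async_square_example(const Eigen::ThreadPoolDevice& d) {
  Eigen::Tensor<float, 2> src(64, 64), dst(64, 64);
  src.setRandom();
  const auto expr = src.square();

  using Assign = Eigen::TensorAssignOp<decltype(dst), const decltype(expr)>;
  using Executor = Eigen::internal::TensorAsyncExecutor<const Assign,
                                                        Eigen::ThreadPoolDevice,
                                                        /*Vectorizable=*/true,
                                                        /*Tileable=*/false>;

  std::promise<void> done;
  std::future<void> done_future = done.get_future();
  Executor::runAsync(Assign(dst, expr), d, [&done]() { done.set_value(); });
  done_future.wait();  // the callback fires only after dst has been fully assigned
}
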

#ifdef EIGEN_DONT_VECTORIZE
#define VECTORIZABLE(VAL) !EIGEN_DONT_VECTORIZE && VAL
#else
@ -589,10 +651,23 @@ static void test_execute_reverse_rvalue(Device d)
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, VECTORIZABLE(true), false, RowMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, VECTORIZABLE(true), true, RowMajor>(tp_device)))

// NOTE: Currently only ThreadPoolDevice supports async expression evaluation.
#define CALL_ASYNC_SUBTEST_COMBINATIONS(PART, NAME, T, NUM_DIMS) \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, false, ColMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, true, ColMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, VECTORIZABLE(true), false, ColMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, VECTORIZABLE(true), true, ColMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, false, RowMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, false, true, RowMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, VECTORIZABLE(true), false, RowMajor>(tp_device))); \
CALL_SUBTEST_PART(PART)((NAME<T, NUM_DIMS, ThreadPoolDevice, VECTORIZABLE(true), true, RowMajor>(tp_device)))

EIGEN_DECLARE_TEST(cxx11_tensor_executor) {
Eigen::DefaultDevice default_device;
// Default device is unused in ASYNC tests.
EIGEN_UNUSED_VARIABLE(default_device);

const auto num_threads = internal::random<int>(1, 24);
const auto num_threads = internal::random<int>(20, 24);
Eigen::ThreadPool tp(num_threads);
Eigen::ThreadPoolDevice tp_device(&tp, num_threads);

@ -660,8 +735,16 @@ EIGEN_DECLARE_TEST(cxx11_tensor_executor) {
CALL_SUBTEST_COMBINATIONS(14, test_execute_reverse_rvalue, float, 4);
CALL_SUBTEST_COMBINATIONS(14, test_execute_reverse_rvalue, float, 5);

CALL_ASYNC_SUBTEST_COMBINATIONS(15, test_async_execute_unary_expr, float, 3);
CALL_ASYNC_SUBTEST_COMBINATIONS(15, test_async_execute_unary_expr, float, 4);
CALL_ASYNC_SUBTEST_COMBINATIONS(15, test_async_execute_unary_expr, float, 5);

CALL_ASYNC_SUBTEST_COMBINATIONS(16, test_async_execute_binary_expr, float, 3);
CALL_ASYNC_SUBTEST_COMBINATIONS(16, test_async_execute_binary_expr, float, 4);
CALL_ASYNC_SUBTEST_COMBINATIONS(16, test_async_execute_binary_expr, float, 5);

// Force CMake to split this test.
// EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11;12;13;14
// EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11;12;13;14;15;16
}

#undef CALL_SUBTEST_COMBINATIONS

@ -19,8 +19,8 @@ static void test_0d()
Tensor<int, 0> scalar1;
Tensor<int, 0, RowMajor> scalar2;

TensorMap<Tensor<const int, 0> > scalar3(scalar1.data());
TensorMap<Tensor<const int, 0, RowMajor> > scalar4(scalar2.data());
TensorMap<const Tensor<int, 0> > scalar3(scalar1.data());
TensorMap<const Tensor<int, 0, RowMajor> > scalar4(scalar2.data());

scalar1() = 7;
scalar2() = 13;
@ -37,8 +37,8 @@ static void test_1d()
Tensor<int, 1> vec1(6);
Tensor<int, 1, RowMajor> vec2(6);

TensorMap<Tensor<const int, 1> > vec3(vec1.data(), 6);
TensorMap<Tensor<const int, 1, RowMajor> > vec4(vec2.data(), 6);
TensorMap<const Tensor<int, 1> > vec3(vec1.data(), 6);
TensorMap<const Tensor<int, 1, RowMajor> > vec4(vec2.data(), 6);

vec1(0) = 4; vec2(0) = 0;
vec1(1) = 8; vec2(1) = 1;
@ -85,8 +85,8 @@ static void test_2d()
mat2(1,1) = 4;
mat2(1,2) = 5;

TensorMap<Tensor<const int, 2> > mat3(mat1.data(), 2, 3);
TensorMap<Tensor<const int, 2, RowMajor> > mat4(mat2.data(), 2, 3);
TensorMap<const Tensor<int, 2> > mat3(mat1.data(), 2, 3);
TensorMap<const Tensor<int, 2, RowMajor> > mat4(mat2.data(), 2, 3);

VERIFY_IS_EQUAL(mat3.rank(), 2);
VERIFY_IS_EQUAL(mat3.size(), 6);
@ -129,8 +129,8 @@ static void test_3d()
}
}

TensorMap<Tensor<const int, 3> > mat3(mat1.data(), 2, 3, 7);
TensorMap<Tensor<const int, 3, RowMajor> > mat4(mat2.data(), 2, 3, 7);
TensorMap<const Tensor<int, 3> > mat3(mat1.data(), 2, 3, 7);
TensorMap<const Tensor<int, 3, RowMajor> > mat4(mat2.data(), 2, 3, 7);

VERIFY_IS_EQUAL(mat3.rank(), 3);
VERIFY_IS_EQUAL(mat3.size(), 2*3*7);
@ -265,6 +265,53 @@ static void test_casting()
VERIFY_IS_EQUAL(sum1, 861);
}

template<typename T>
static const T& add_const(T& value) {
return value;
}

static void test_0d_const_tensor()
{
Tensor<int, 0> scalar1;
Tensor<int, 0, RowMajor> scalar2;

TensorMap<const Tensor<int, 0> > scalar3(add_const(scalar1).data());
TensorMap<const Tensor<int, 0, RowMajor> > scalar4(add_const(scalar2).data());

scalar1() = 7;
scalar2() = 13;

VERIFY_IS_EQUAL(scalar1.rank(), 0);
VERIFY_IS_EQUAL(scalar1.size(), 1);

VERIFY_IS_EQUAL(scalar3(), 7);
VERIFY_IS_EQUAL(scalar4(), 13);
}

static void test_0d_const_tensor_map()
{
Tensor<int, 0> scalar1;
Tensor<int, 0, RowMajor> scalar2;

const TensorMap<Tensor<int, 0> > scalar3(scalar1.data());
const TensorMap<Tensor<int, 0, RowMajor> > scalar4(scalar2.data());

// Although TensorMap is constant, we still can write to the underlying
// storage, because we map over non-constant Tensor.
scalar3() = 7;
scalar4() = 13;

VERIFY_IS_EQUAL(scalar1(), 7);
VERIFY_IS_EQUAL(scalar2(), 13);

// Pointer to the underlying storage is also non-const.
scalar3.data()[0] = 8;
scalar4.data()[0] = 14;

VERIFY_IS_EQUAL(scalar1(), 8);
VERIFY_IS_EQUAL(scalar2(), 14);
}
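To summarize the distinction the two tests above exercise, in a compact sketch (variable names are illustrative):

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<int, 0> t;

  // (1) TensorMap over a const Tensor type: element access is read-only.
  Eigen::TensorMap<const Eigen::Tensor<int, 0> > read_only(t.data());

  // (2) A const TensorMap over a non-const Tensor type: the map object is const,
  //     but writes through it still reach the underlying storage.
  const Eigen::TensorMap<Eigen::Tensor<int, 0> > writable(t.data());
  writable() = 7;

  return read_only() == 7 ? 0 : 1;
}
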

EIGEN_DECLARE_TEST(cxx11_tensor_map)
{
CALL_SUBTEST(test_0d());
@ -274,4 +321,7 @@ EIGEN_DECLARE_TEST(cxx11_tensor_map)

CALL_SUBTEST(test_from_tensor());
CALL_SUBTEST(test_casting());

CALL_SUBTEST(test_0d_const_tensor());
CALL_SUBTEST(test_0d_const_tensor_map());
}

@ -38,9 +38,9 @@ class TestAllocator : public Allocator {

void test_multithread_elementwise()
{
Tensor<float, 3> in1(2,3,7);
Tensor<float, 3> in2(2,3,7);
Tensor<float, 3> out(2,3,7);
Tensor<float, 3> in1(200, 30, 70);
Tensor<float, 3> in2(200, 30, 70);
Tensor<float, 3> out(200, 30, 70);

in1.setRandom();
in2.setRandom();
@ -49,15 +49,39 @@ void test_multithread_elementwise()
Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random<int>(3, 11));
out.device(thread_pool_device) = in1 + in2 * 3.14f;

for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 7; ++k) {
for (int i = 0; i < 200; ++i) {
for (int j = 0; j < 30; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i, j, k), in1(i, j, k) + in2(i, j, k) * 3.14f);
}
}
}
}

void test_async_multithread_elementwise()
{
Tensor<float, 3> in1(200, 30, 70);
Tensor<float, 3> in2(200, 30, 70);
Tensor<float, 3> out(200, 30, 70);

in1.setRandom();
in2.setRandom();

Eigen::ThreadPool tp(internal::random<int>(3, 11));
Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random<int>(3, 11));

Eigen::Barrier b(1);
out.device(thread_pool_device, [&b]() { b.Notify(); }) = in1 + in2 * 3.14f;
b.Wait();

for (int i = 0; i < 200; ++i) {
for (int j = 0; j < 30; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i, j, k), in1(i, j, k) + in2(i, j, k) * 3.14f);
}
}
}
}
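In the asynchronous form above, the second argument to device() is a "done" callback that is invoked once the expression has been evaluated into the destination; the Barrier simply turns that callback into something the test can block on before verifying results. A sketch of the same API using a std::future for the wait instead (sizes, thread count, and names are illustrative):

#define EIGEN_USE_THREADS
#include <future>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 3> in1(200, 30, 70), in2(200, 30, 70), out(200, 30, 70);
  in1.setRandom();
  in2.setRandom();

  Eigen::ThreadPool tp(4);
  Eigen::ThreadPoolDevice dev(&tp, 4);

  std::promise<void> done;
  std::future<void> done_future = done.get_future();
  // The callback is invoked after `out` has been fully evaluated.
  out.device(dev, [&done]() { done.set_value(); }) = in1 + in2 * 3.14f;
  done_future.wait();
  return 0;
}
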

void test_multithread_compound_assignment()
{
@ -306,6 +330,52 @@ static void test_multithread_contraction_with_output_kernel() {
}
}

template<int DataLayout>
void test_async_multithread_contraction_agrees_with_singlethread()
{
int contract_size = internal::random<int>(100, 500);

Tensor<float, 3, DataLayout> left(internal::random<int>(10, 40),
contract_size,
internal::random<int>(10, 40));

Tensor<float, 4, DataLayout> right(
internal::random<int>(1, 20), internal::random<int>(1, 20), contract_size,
internal::random<int>(1, 20));

left.setRandom();
right.setRandom();

// add constants to shift values away from 0 for more precision
left += left.constant(1.5f);
right += right.constant(1.5f);

typedef Tensor<float, 1>::DimensionPair DimPair;
Eigen::array<DimPair, 1> dims({{DimPair(1, 2)}});

Eigen::ThreadPool tp(internal::random<int>(2, 11));
Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random<int>(8, 32));

Tensor<float, 5, DataLayout> st_result;
st_result = left.contract(right, dims);

Tensor<float, 5, DataLayout> tp_result(st_result.dimensions());

Eigen::Barrier barrier(1);
tp_result.device(thread_pool_device, [&barrier]() { barrier.Notify(); }) =
left.contract(right, dims);
barrier.Wait();

VERIFY(dimensions_match(st_result.dimensions(), tp_result.dimensions()));
for (ptrdiff_t i = 0; i < st_result.size(); i++) {
// if both of the values are very small, then do nothing (because the test
// will fail due to numerical precision issues when values are small)
if (numext::abs(st_result.data()[i] - tp_result.data()[i]) >= 1e-4f) {
VERIFY_IS_APPROX(st_result.data()[i], tp_result.data()[i]);
}
}
}
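In the contraction above, DimPair(1, 2) pairs dimension 1 of the rank-3 left tensor with dimension 2 of the rank-4 right tensor (both of size contract_size), so the result keeps the remaining 2 + 3 dimensions and comes out rank 5, which is why st_result and tp_result are declared as Tensor<float, 5, DataLayout>. A compact sketch of the same contraction API on fixed sizes (values are illustrative):

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Contract dimension 1 of `a` with dimension 0 of `b`:
  // (2 x 5) . (5 x 3) -> (2 x 3), i.e. an ordinary matrix product.
  Eigen::Tensor<float, 2> a(2, 5), b(5, 3);
  a.setRandom();
  b.setRandom();

  typedef Eigen::Tensor<float, 1>::DimensionPair DimPair;
  Eigen::array<DimPair, 1> dims({{DimPair(1, 0)}});

  Eigen::Tensor<float, 2> c = a.contract(b, dims);
  return (c.dimension(0) == 2 && c.dimension(1) == 3) ? 0 : 1;
}
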

// We are triggering 'evalShardedByInnerDim' optimization.
template <int DataLayout>
static void test_sharded_by_inner_dim_contraction()
@ -386,6 +456,93 @@ static void test_sharded_by_inner_dim_contraction_with_output_kernel()
}
}

// We are triggering 'evalShardedByInnerDim' optimization.
template <int DataLayout>
static void test_async_sharded_by_inner_dim_contraction()
{
typedef Tensor<float, 1>::DimensionPair DimPair;

const int num_threads = internal::random<int>(4, 16);
ThreadPool threads(num_threads);
Eigen::ThreadPoolDevice device(&threads, num_threads);

Tensor<float, 2, DataLayout> t_left(2, 10000);
Tensor<float, 2, DataLayout> t_right(10000, 10);
Tensor<float, 2, DataLayout> t_result(2, 10);

t_left.setRandom();
t_right.setRandom();
// Put trash in t_result to verify contraction clears output memory.
t_result.setRandom();

// Add a little offset so that the results won't be close to zero.
t_left += t_left.constant(1.0f);
t_right += t_right.constant(1.0f);

typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
MapXf m_left(t_left.data(), 2, 10000);
MapXf m_right(t_right.data(), 10000, 10);
Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(2, 10);

// this contraction should be equivalent to a single matrix multiplication
Eigen::array<DimPair, 1> dims({{DimPair(1, 0)}});

// compute results by separate methods
Eigen::Barrier barrier(1);
t_result.device(device, [&barrier]() { barrier.Notify(); }) =
t_left.contract(t_right, dims);
barrier.Wait();

m_result = m_left * m_right;

for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) {
VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]);
}
}

// We are triggering 'evalShardedByInnerDim' optimization with output kernel.
template <int DataLayout>
static void test_async_sharded_by_inner_dim_contraction_with_output_kernel()
{
typedef Tensor<float, 1>::DimensionPair DimPair;

const int num_threads = internal::random<int>(4, 16);
ThreadPool threads(num_threads);
Eigen::ThreadPoolDevice device(&threads, num_threads);

Tensor<float, 2, DataLayout> t_left(2, 10000);
Tensor<float, 2, DataLayout> t_right(10000, 10);
Tensor<float, 2, DataLayout> t_result(2, 10);

t_left.setRandom();
t_right.setRandom();
// Put trash in t_result to verify contraction clears output memory.
t_result.setRandom();

// Add a little offset so that the results won't be close to zero.
t_left += t_left.constant(1.0f);
t_right += t_right.constant(1.0f);

typedef Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout>> MapXf;
MapXf m_left(t_left.data(), 2, 10000);
MapXf m_right(t_right.data(), 10000, 10);
Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(2, 10);

// this contraction should be equivalent to a single matrix multiplication
Eigen::array<DimPair, 1> dims({{DimPair(1, 0)}});

// compute results by separate methods
Eigen::Barrier barrier(1);
t_result.device(device, [&barrier]() { barrier.Notify(); }) =
t_left.contract(t_right, dims, SqrtOutputKernel());
barrier.Wait();
m_result = m_left * m_right;

for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) {
VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i]));
}
}

template<int DataLayout>
void test_full_contraction() {
int contract_size1 = internal::random<int>(1, 500);
@ -516,6 +673,7 @@ void test_threadpool_allocate(TestAllocator* allocator)
EIGEN_DECLARE_TEST(cxx11_tensor_thread_pool)
{
CALL_SUBTEST_1(test_multithread_elementwise());
CALL_SUBTEST_1(test_async_multithread_elementwise());
CALL_SUBTEST_1(test_multithread_compound_assignment());

CALL_SUBTEST_2(test_multithread_contraction<ColMajor>());
@ -525,11 +683,18 @@ EIGEN_DECLARE_TEST(cxx11_tensor_thread_pool)
CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread<RowMajor>());
CALL_SUBTEST_3(test_multithread_contraction_with_output_kernel<ColMajor>());
CALL_SUBTEST_3(test_multithread_contraction_with_output_kernel<RowMajor>());
CALL_SUBTEST_3(test_async_multithread_contraction_agrees_with_singlethread<ColMajor>());
CALL_SUBTEST_3(test_async_multithread_contraction_agrees_with_singlethread<RowMajor>());

// Test EvalShardedByInnerDimContext parallelization strategy.
CALL_SUBTEST_4(test_sharded_by_inner_dim_contraction<ColMajor>());
CALL_SUBTEST_4(test_sharded_by_inner_dim_contraction<RowMajor>());
CALL_SUBTEST_4(test_sharded_by_inner_dim_contraction_with_output_kernel<ColMajor>());
CALL_SUBTEST_4(test_sharded_by_inner_dim_contraction_with_output_kernel<RowMajor>());
CALL_SUBTEST_4(test_async_sharded_by_inner_dim_contraction<ColMajor>());
CALL_SUBTEST_4(test_async_sharded_by_inner_dim_contraction<RowMajor>());
CALL_SUBTEST_4(test_async_sharded_by_inner_dim_contraction_with_output_kernel<ColMajor>());
CALL_SUBTEST_4(test_async_sharded_by_inner_dim_contraction_with_output_kernel<RowMajor>());

// Exercise various cases that have been problematic in the past.
CALL_SUBTEST_5(test_contraction_corner_cases<ColMajor>());
