diff --git a/Eigen/src/Core/arch/Default/BFloat16.h b/Eigen/src/Core/arch/Default/BFloat16.h
index 88f5c9ba2..d0666effa 100644
--- a/Eigen/src/Core/arch/Default/BFloat16.h
+++ b/Eigen/src/Core/arch/Default/BFloat16.h
@@ -18,6 +18,18 @@ limitations under the License.
 
 #include "../../InternalHeaderCheck.h"
 
+#if defined(EIGEN_HAS_HIP_BF16)
+// When compiling with GPU support, the "hip_bfloat16" base class as well as
+// some other routines are defined in the GPU compiler header files
+// (hip_bfloat16.h), and they are not tagged constexpr
+// As a consequence, we get compile failures when compiling Eigen with
+// GPU support. Hence the need to disable EIGEN_CONSTEXPR when building
+// Eigen with GPU support
+  #pragma push_macro("EIGEN_CONSTEXPR")
+  #undef EIGEN_CONSTEXPR
+  #define EIGEN_CONSTEXPR
+#endif
+
 #define BF16_PACKET_FUNCTION(PACKET_F, PACKET_BF16, METHOD)        \
   template <>                                                      \
   EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED \
@@ -25,19 +37,46 @@ limitations under the License.
     return F32ToBf16(METHOD<PACKET_F>(Bf16ToF32(_x)));             \
   }
 
+// Only use HIP GPU bf16 in kernels
+#if defined(EIGEN_HAS_HIP_BF16) && defined(EIGEN_GPU_COMPILE_PHASE)
+#define EIGEN_USE_HIP_BF16
+#endif
+
 namespace Eigen {
 
 struct bfloat16;
 
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 numext::bit_cast<Eigen::bfloat16, uint16_t>(const uint16_t& src);
+
+template <>
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t numext::bit_cast<uint16_t, Eigen::bfloat16>(const Eigen::bfloat16& src);
+
 namespace bfloat16_impl {
 
+#if defined(EIGEN_USE_HIP_BF16)
+
+struct __bfloat16_raw : public hip_bfloat16 {
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw() {}
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw(hip_bfloat16 hb) : hip_bfloat16(hb) {}
+  explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw(unsigned short raw) : hip_bfloat16(raw) {}
+};
+
+#else
+
 // Make our own __bfloat16_raw definition.
 struct __bfloat16_raw {
+#if defined(EIGEN_HAS_HIP_BF16) && !defined(EIGEN_GPU_COMPILE_PHASE)
+  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw() {}
+#else
   EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw() : value(0) {}
+#endif
   explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw(unsigned short raw) : value(raw) {}
   unsigned short value;
 };
 
+#endif // defined(EIGEN_USE_HIP_BF16)
+
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(unsigned short value);
 template <bool AssumeArgumentIsNormalOrInfinityOrZero>
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff);
@@ -150,7 +189,7 @@ namespace bfloat16_impl {
 // We need to provide emulated *host-side* BF16 operators for clang.
 #pragma push_macro("EIGEN_DEVICE_FUNC")
 #undef EIGEN_DEVICE_FUNC
-#if defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_NATIVE_BF16)
+#if (defined(EIGEN_HAS_GPU_BF16) && defined(EIGEN_HAS_NATIVE_BF16))
 #define EIGEN_DEVICE_FUNC __host__
 #else // both host and device need emulated ops.
 #define EIGEN_DEVICE_FUNC __host__ __device__
@@ -179,9 +218,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, const bfloat16& b) {
   return bfloat16(float(a) / float(b));
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator - (const bfloat16& a) {
-  bfloat16 result;
-  result.value = a.value ^ 0x8000;
-  return result;
+  numext::uint16_t x = numext::bit_cast<numext::uint16_t>(a) ^ 0x8000;
+  return numext::bit_cast<bfloat16>(x);
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator += (bfloat16& a, const bfloat16& b) {
   a = bfloat16(float(a) + float(b));
@@ -248,33 +286,47 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, Index b) {
 }
 
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw truncate_to_bfloat16(const float v) {
+#if defined(EIGEN_USE_HIP_BF16)
+  return __bfloat16_raw(__bfloat16_raw::round_to_bfloat16(v, __bfloat16_raw::truncate));
+#else
   __bfloat16_raw output;
-  if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(v)) {
+  if (numext::isnan EIGEN_NOT_A_MACRO(v)) {
     output.value = std::signbit(v) ? 0xFFC0: 0x7FC0;
     return output;
   }
   output.value = static_cast<numext::uint16_t>(numext::bit_cast<numext::uint32_t>(v) >> 16);
   return output;
+#endif
 }
 
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(numext::uint16_t value) {
+#if defined(EIGEN_USE_HIP_BF16)
+  __bfloat16_raw bf;
+  bf.data = value;
+  return bf;
+#else
   return __bfloat16_raw(value);
+#endif
 }
 
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR numext::uint16_t raw_bfloat16_as_uint16(const __bfloat16_raw& bf) {
+#if defined(EIGEN_USE_HIP_BF16)
+  return bf.data;
+#else
   return bf.value;
+#endif
 }
 
 // float_to_bfloat16_rtne template specialization that does not make any
 // assumption about the value of its function argument (ff).
 template <>
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<false>(float ff) {
-#if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16))
-  // Nothing to do here
+#if defined(EIGEN_USE_HIP_BF16)
+  return __bfloat16_raw(__bfloat16_raw::round_to_bfloat16(ff));
 #else
   __bfloat16_raw output;
-  if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(ff)) {
+  if (numext::isnan EIGEN_NOT_A_MACRO(ff)) {
     // If the value is a NaN, squash it to a qNaN with msb of fraction set,
     // this makes sure after truncation we don't end up with an inf.
     //
@@ -443,8 +495,8 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<true>(float ff) {
-#if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16))
-  // Nothing to do here
+#if defined(EIGEN_USE_HIP_BF16)
+  return __bfloat16_raw(__bfloat16_raw::round_to_bfloat16(ff));
 #else
   numext::uint32_t input = numext::bit_cast<numext::uint32_t>(ff);
   __bfloat16_raw output;
@@ -459,29 +511,41 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne
 
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float bfloat16_to_float(__bfloat16_raw h) {
+#if defined(EIGEN_USE_HIP_BF16)
+  return static_cast<float>(h);
+#else
   return numext::bit_cast<float>(static_cast<numext::uint32_t>(h.value) << 16);
+#endif
 }
+
 // --- standard functions ---
 
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const bfloat16& a) {
   EIGEN_USING_STD(isinf);
+#if defined(EIGEN_USE_HIP_BF16)
+  return (isinf)(a); // Uses HIP hip_bfloat16 isinf operator
+#else
   return (isinf)(float(a));
+#endif
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const bfloat16& a) {
   EIGEN_USING_STD(isnan);
+#if defined(EIGEN_USE_HIP_BF16)
+  return (isnan)(a); // Uses HIP hip_bfloat16 isnan operator
+#else
   return (isnan)(float(a));
+#endif
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const bfloat16& a) {
   return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));
 }
 
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 abs(const bfloat16& a) {
-  bfloat16 result;
-  result.value = a.value & 0x7FFF;
-  return result;
+  numext::uint16_t x = numext::bit_cast<numext::uint16_t>(a) & 0x7FFF;
+  return numext::bit_cast<bfloat16>(x);
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 exp(const bfloat16& a) {
-  return bfloat16(::expf(float(a)));
+  return bfloat16(::expf(float(a)));
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 expm1(const bfloat16& a) {
   return bfloat16(numext::expm1(float(a)));
@@ -499,7 +563,7 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log2(const bfloat16& a) {
   return bfloat16(static_cast<float>(EIGEN_LOG2E) * ::logf(float(a)));
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sqrt(const bfloat16& a) {
-  return bfloat16(::sqrtf(float(a)));
+  return bfloat16(::sqrtf(float(a)));
 }
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 pow(const bfloat16& a, const bfloat16& b) {
   return bfloat16(::powf(float(a), float(b)));
@@ -563,6 +627,7 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (min)(const bfloat16& a, const bfloat16& b) {
   const float f1 = static_cast<float>(a);
   const float f2 = static_cast<float>(b);
   return f2 < f1 ? b : a;
 }
+
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (max)(const bfloat16& a, const bfloat16& b) {
   const float f1 = static_cast<float>(a);
   const float f2 = static_cast<float>(b);
@@ -574,6 +639,7 @@ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmin(const bfloat16& a, const bfloat16& b) {
   const float f2 = static_cast<float>(b);
   return bfloat16(::fminf(f1, f2));
 }
+
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmax(const bfloat16& a, const bfloat16& b) {
   const float f1 = static_cast<float>(a);
   const float f2 = static_cast<float>(b);
@@ -623,7 +689,6 @@ template<> struct NumTraits<Eigen::bfloat16>
   }
   EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 dummy_precision() {
     return bfloat16_impl::raw_uint16_to_bfloat16(0x3D4D);  // bfloat16(5e-2f);
-
   }
   EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 highest() {
     return bfloat16_impl::raw_uint16_to_bfloat16(0x7F7F);
@@ -641,6 +706,11 @@ template<> struct NumTraits<Eigen::bfloat16>
 
 } // namespace Eigen
 
+
+#if defined(EIGEN_HAS_HIP_BF16)
+  #pragma pop_macro("EIGEN_CONSTEXPR")
+#endif
+
 namespace Eigen {
 namespace numext {
 
@@ -664,7 +734,7 @@ bool (isfinite)(const Eigen::bfloat16& h) {
 
 template <>
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bit_cast<Eigen::bfloat16, uint16_t>(const uint16_t& src) {
-  return Eigen::bfloat16(Eigen::bfloat16_impl::raw_uint16_to_bfloat16(src));
+  return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(src);
 }
 
 template <>
@@ -686,5 +756,49 @@ struct hash<Eigen::bfloat16> {
 } // namespace std
 #endif
 
+// Add the missing shfl* intrinsics.
+// The __shfl* functions are only valid on HIP or _CUDA_ARCH_ >= 300.
+//   CUDA defines them for (__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__))
+//
+// HIP and CUDA prior to SDK 9.0 define
+//    __shfl, __shfl_up, __shfl_down, __shfl_xor for int and float
+// CUDA since 9.0 deprecates those and instead defines
+//    __shfl_sync, __shfl_up_sync, __shfl_down_sync, __shfl_xor_sync,
+//    with native support for __half and __nv_bfloat16
+//
+// Note that the following are __device__ - only functions.
+#if defined(EIGEN_HIPCC)
+
+#if defined(EIGEN_HAS_HIP_BF16)
+
+__device__ EIGEN_STRONG_INLINE Eigen::bfloat16 __shfl(Eigen::bfloat16 var, int srcLane, int width=warpSize) {
+  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+  return Eigen::numext::bit_cast<Eigen::bfloat16>(static_cast<Eigen::numext::uint16_t>(__shfl(ivar, srcLane, width)));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::bfloat16 __shfl_up(Eigen::bfloat16 var, unsigned int delta, int width=warpSize) {
+  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+  return Eigen::numext::bit_cast<Eigen::bfloat16>(static_cast<Eigen::numext::uint16_t>(__shfl_up(ivar, delta, width)));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::bfloat16 __shfl_down(Eigen::bfloat16 var, unsigned int delta, int width=warpSize) {
+  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+  return Eigen::numext::bit_cast<Eigen::bfloat16>(static_cast<Eigen::numext::uint16_t>(__shfl_down(ivar, delta, width)));
+}
+
+__device__ EIGEN_STRONG_INLINE Eigen::bfloat16 __shfl_xor(Eigen::bfloat16 var, int laneMask, int width=warpSize) {
+  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+  return Eigen::numext::bit_cast<Eigen::bfloat16>(static_cast<Eigen::numext::uint16_t>(__shfl_xor(ivar, laneMask, width)));
+}
+
+#endif // HIP
+
+#endif // __shfl*
+
+#if defined(EIGEN_HIPCC)
+EIGEN_STRONG_INLINE __device__ Eigen::bfloat16 __ldg(const Eigen::bfloat16* ptr) {
+  return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(__ldg(Eigen::numext::bit_cast<const Eigen::numext::uint16_t*>(ptr)));
+}
+#endif // __ldg
+
 #endif // EIGEN_BFLOAT16_H
diff --git a/Eigen/src/Core/util/ConfigureVectorization.h b/Eigen/src/Core/util/ConfigureVectorization.h
index 3745e4762..2e5e731ac 100644
--- a/Eigen/src/Core/util/ConfigureVectorization.h
+++ b/Eigen/src/Core/util/ConfigureVectorization.h
@@ -468,6 +468,8 @@
     #include <hip/hip_vector_types.h>
     #define EIGEN_HAS_HIP_FP16
     #include <hip/hip_fp16.h>
+    #define EIGEN_HAS_HIP_BF16
+    #include <hip/hip_bfloat16.h>
   #endif
 
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index 8aa524d54..579b34e9a 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -385,6 +385,7 @@ if (EIGEN_TEST_HIP)
   ei_add_test(cxx11_tensor_gpu)
   ei_add_test(cxx11_tensor_contract_gpu)
   ei_add_test(cxx11_tensor_of_float16_gpu)
+  ei_add_test(cxx11_tensor_of_bfloat16_gpu)
   ei_add_test(cxx11_tensor_random_gpu)
 
   unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
diff --git a/unsupported/test/cxx11_tensor_of_bfloat16_gpu.cu b/unsupported/test/cxx11_tensor_of_bfloat16_gpu.cu
new file mode 100644
index 000000000..10498e641
--- /dev/null
+++ b/unsupported/test/cxx11_tensor_of_bfloat16_gpu.cu
@@ -0,0 +1,487 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2021 Rohit Santhanam
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+
+using Eigen::Tensor;
+
+template<typename>
+void test_gpu_numext() {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int num_elem = 101;
+
+  float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  bool* d_res_bfloat16 = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
+  bool* d_res_float = (bool*)gpu_device.allocate(num_elem * sizeof(bool));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+      d_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_bfloat16(
+      d_res_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_res_float(
+      d_res_float, num_elem);
+
+  gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
+  gpu_res_float.device(gpu_device) = gpu_float.unaryExpr(Eigen::internal::scalar_isnan_op<float>());
+  // Test bfloat16 specific isnan op.
+  gpu_res_bfloat16.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().unaryExpr(Eigen::internal::scalar_isnan_op<Eigen::bfloat16>());
+
+  Tensor<bool, 1> bfloat16_prec(num_elem);
+  Tensor<bool, 1> full_prec(num_elem);
+  gpu_device.memcpyDeviceToHost(bfloat16_prec.data(), d_res_bfloat16, num_elem*sizeof(bool));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(bool));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < num_elem; ++i) {
+    VERIFY_IS_EQUAL(full_prec(i), bfloat16_prec(i));
+  }
+
+  gpu_device.deallocate(d_float);
+  gpu_device.deallocate(d_res_bfloat16);
+  gpu_device.deallocate(d_res_float);
+}
+
+
+#ifdef EIGEN_HAS_GPU_BF16
+
+template<typename>
+void test_gpu_conversion() {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int num_elem = 101;
+
+  float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  Eigen::bfloat16* d_bfloat16 = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+  float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+      d_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_bfloat16(
+      d_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_conv(
+      d_conv, num_elem);
+
+  gpu_float.device(gpu_device) = gpu_float.random();
+  gpu_bfloat16.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>();
+  gpu_conv.device(gpu_device) = gpu_bfloat16.cast<float>();
+
+  Tensor<float, 1> initial(num_elem);
+  Tensor<float, 1> final(num_elem);
+  gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float));
+
+  for (int i = 0; i < num_elem; ++i) {
+    VERIFY_IS_APPROX(static_cast<Eigen::bfloat16>(initial(i)), static_cast<Eigen::bfloat16>(final(i)));
+  }
+
+  gpu_device.deallocate(d_float);
+  gpu_device.deallocate(d_bfloat16);
+  gpu_device.deallocate(d_conv);
+}
+
+template<typename>
+void test_gpu_unary() {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int num_elem = 101;
+
+  float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_res_bfloat16 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+      d_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_bfloat16(
+      d_res_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
+      d_res_float, num_elem);
+
+  gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
+  gpu_float.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().cast<float>();
+  gpu_res_float.device(gpu_device) = gpu_float.abs();
+  gpu_res_bfloat16.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().abs().cast<float>();
+
+  Tensor<float, 1> bfloat16_prec(num_elem);
+  Tensor<float, 1> full_prec(num_elem);
+  gpu_device.memcpyDeviceToHost(bfloat16_prec.data(), d_res_bfloat16, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < num_elem; ++i) {
+    VERIFY_IS_APPROX(full_prec(i), bfloat16_prec(i));
+  }
+
+  gpu_device.deallocate(d_float);
+  gpu_device.deallocate(d_res_bfloat16);
+  gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_gpu_elementwise() {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int num_elem = 101;
+
+  float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_res_bfloat16 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(
+      d_float1, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(
+      d_float2, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_bfloat16(
+      d_res_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
+      d_res_float, num_elem);
+
+  gpu_float1.device(gpu_device) = gpu_float1.random();
+  gpu_float1.device(gpu_device) = gpu_float1.cast<Eigen::bfloat16>().cast<float>();
+  gpu_float2.device(gpu_device) = gpu_float2.random();
+  gpu_float2.device(gpu_device) = gpu_float2.cast<Eigen::bfloat16>().cast<float>();
+  gpu_res_float.device(gpu_device) = (gpu_float1 + gpu_float2) * gpu_float1;
+  gpu_res_bfloat16.device(gpu_device) = ((gpu_float1.cast<Eigen::bfloat16>() + gpu_float2.cast<Eigen::bfloat16>()) * gpu_float1.cast<Eigen::bfloat16>()).cast<float>();
+
+  Tensor<float, 1> bfloat16_prec(num_elem);
+  Tensor<float, 1> full_prec(num_elem);
+  gpu_device.memcpyDeviceToHost(bfloat16_prec.data(), d_res_bfloat16, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < num_elem; ++i) {
+    VERIFY_IS_APPROX(static_cast<Eigen::bfloat16>(full_prec(i)), static_cast<Eigen::bfloat16>(bfloat16_prec(i)));
+  }
+
+  gpu_device.deallocate(d_float1);
+  gpu_device.deallocate(d_float2);
+  gpu_device.deallocate(d_res_bfloat16);
+  gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_gpu_trancendental() {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int num_elem = 101;
+
+  float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_float3 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  Eigen::bfloat16* d_res1_bfloat16 = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res1_float = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res2_bfloat16 = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res2_float = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res3_bfloat16 = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res3_float = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(d_float1, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(d_float2, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float3(d_float3, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res1_bfloat16(d_res1_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res1_float(d_res1_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res2_bfloat16(d_res2_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res2_float(d_res2_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res3_bfloat16(d_res3_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res3_float(d_res3_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res4_bfloat16(d_res3_bfloat16, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res4_float(d_res3_float, num_elem);
+
+  gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
+  gpu_float1.device(gpu_device) = gpu_float1.cast<Eigen::bfloat16>().cast<float>();
+  gpu_float2.device(gpu_device) = gpu_float2.random() + gpu_float1.constant(0.5f);
+  gpu_float2.device(gpu_device) = gpu_float2.cast<Eigen::bfloat16>().cast<float>();
+  gpu_float3.device(gpu_device) = gpu_float3.random();
+  gpu_float3.device(gpu_device) = gpu_float3.cast<Eigen::bfloat16>().cast<float>();
+  gpu_res1_float.device(gpu_device) = gpu_float1.exp().cast<Eigen::bfloat16>();
+  gpu_res2_float.device(gpu_device) = gpu_float2.log().cast<Eigen::bfloat16>();
+  gpu_res3_float.device(gpu_device) = gpu_float3.log1p().cast<Eigen::bfloat16>();
+  gpu_res4_float.device(gpu_device) = gpu_float3.expm1().cast<Eigen::bfloat16>();
+
+  gpu_res1_bfloat16.device(gpu_device) = gpu_float1.cast<Eigen::bfloat16>();
+  gpu_res1_bfloat16.device(gpu_device) = gpu_res1_bfloat16.exp();
+
+  gpu_res2_bfloat16.device(gpu_device) = gpu_float2.cast<Eigen::bfloat16>();
+  gpu_res2_bfloat16.device(gpu_device) = gpu_res2_bfloat16.log();
+
+  gpu_res3_bfloat16.device(gpu_device) = gpu_float3.cast<Eigen::bfloat16>();
+  gpu_res3_bfloat16.device(gpu_device) = gpu_res3_bfloat16.log1p();
+
+  gpu_res3_bfloat16.device(gpu_device) = gpu_float3.cast<Eigen::bfloat16>();
+  gpu_res3_bfloat16.device(gpu_device) = gpu_res3_bfloat16.expm1();
+
+  Tensor<float, 1> input1(num_elem);
+  Tensor<Eigen::bfloat16, 1> bfloat16_prec1(num_elem);
+  Tensor<Eigen::bfloat16, 1> full_prec1(num_elem);
+  Tensor<float, 1> input2(num_elem);
+  Tensor<Eigen::bfloat16, 1> bfloat16_prec2(num_elem);
+  Tensor<Eigen::bfloat16, 1> full_prec2(num_elem);
+  Tensor<float, 1> input3(num_elem);
+  Tensor<Eigen::bfloat16, 1> bfloat16_prec3(num_elem);
+  Tensor<Eigen::bfloat16, 1> full_prec3(num_elem);
+  gpu_device.memcpyDeviceToHost(input1.data(), d_float1, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(input2.data(), d_float2, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(input3.data(), d_float3, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(bfloat16_prec1.data(), d_res1_bfloat16, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(full_prec1.data(), d_res1_float, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(bfloat16_prec2.data(), d_res2_bfloat16, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(full_prec2.data(), d_res2_float, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(bfloat16_prec3.data(), d_res3_bfloat16, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(full_prec3.data(), d_res3_float, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < num_elem; ++i) {
+    VERIFY_IS_APPROX(full_prec1(i), bfloat16_prec1(i));
+  }
+  for (int i = 0; i < num_elem; ++i) {
+    if(std::abs(input2(i)-1.f)<0.05f) // log lacks accuracy nearby 1
+      VERIFY_IS_APPROX(full_prec2(i)+Eigen::bfloat16(0.1f), bfloat16_prec2(i)+Eigen::bfloat16(0.1f));
+    else
+      VERIFY_IS_APPROX(full_prec2(i), bfloat16_prec2(i));
+  }
+  for (int i = 0; i < num_elem; ++i) {
+    VERIFY_IS_APPROX(full_prec3(i), bfloat16_prec3(i));
+  }
+  gpu_device.deallocate(d_float1);
+  gpu_device.deallocate(d_float2);
+  gpu_device.deallocate(d_float3);
+  gpu_device.deallocate(d_res1_bfloat16);
+  gpu_device.deallocate(d_res1_float);
+  gpu_device.deallocate(d_res2_bfloat16);
+  gpu_device.deallocate(d_res2_float);
+  gpu_device.deallocate(d_res3_float);
+  gpu_device.deallocate(d_res3_bfloat16);
+}
+
+template<typename>
+void test_gpu_contractions() {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int rows = 23;
+  int cols = 23;
+  int num_elem = rows*cols;
+
+  float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  Eigen::bfloat16* d_res_bfloat16 = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res_float = (Eigen::bfloat16*)gpu_device.allocate(num_elem * sizeof(Eigen::bfloat16));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float1(
+      d_float1, rows, cols);
+  Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float2(
+      d_float2, rows, cols);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 2>, Eigen::Aligned> gpu_res_bfloat16(
+      d_res_bfloat16, rows, cols);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 2>, Eigen::Aligned> gpu_res_float(
+      d_res_float, rows, cols);
+
+  gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f);
+  gpu_float2.device(gpu_device) = gpu_float2.random() - gpu_float2.constant(0.5f);
+
+  typedef Tensor<float, 2>::DimensionPair DimPair;
+  Eigen::array<DimPair, 1> dims(DimPair(1, 0));
+  gpu_res_float.device(gpu_device) = gpu_float1.contract(gpu_float2, dims).cast<Eigen::bfloat16>();
+  gpu_res_bfloat16.device(gpu_device) = gpu_float1.cast<Eigen::bfloat16>().contract(gpu_float2.cast<Eigen::bfloat16>(), dims);
+
+  Tensor<Eigen::bfloat16, 2> bfloat16_prec(rows, cols);
+  Tensor<Eigen::bfloat16, 2> full_prec(rows, cols);
+  gpu_device.memcpyDeviceToHost(bfloat16_prec.data(), d_res_bfloat16, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(Eigen::bfloat16));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < rows; ++i) {
+    for (int j = 0; j < cols; ++j) {
+      if (numext::abs(full_prec(i, j) - bfloat16_prec(i, j)) > Eigen::bfloat16(1e-2f)) {
+        VERIFY_IS_APPROX(full_prec(i, j), bfloat16_prec(i, j));
+      }
+    }
+  }
+
+  gpu_device.deallocate(d_float1);
+  gpu_device.deallocate(d_float2);
+  gpu_device.deallocate(d_res_bfloat16);
+  gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_gpu_reductions(int size1, int size2, int redux) {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int num_elem = size1*size2;
+  int result_size = (redux == 1 ? size1 : size2);
+
+  float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  Eigen::bfloat16* d_res_bfloat16 = (Eigen::bfloat16*)gpu_device.allocate(result_size * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res_float = (Eigen::bfloat16*)gpu_device.allocate(result_size * sizeof(Eigen::bfloat16));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float(
+      d_float, size1, size2);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res_bfloat16(
+      d_res_bfloat16, result_size);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 1>, Eigen::Aligned> gpu_res_float(
+      d_res_float, result_size);
+
+  gpu_float.device(gpu_device) = gpu_float.random() * 2.0f;
+
+  Eigen::array<int, 1> redux_dim = {redux};
+  gpu_res_float.device(gpu_device) = gpu_float.sum(redux_dim).cast<Eigen::bfloat16>();
+  gpu_res_bfloat16.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().sum(redux_dim);
+
+  Tensor<Eigen::bfloat16, 1> bfloat16_prec(result_size);
+  Tensor<Eigen::bfloat16, 1> full_prec(result_size);
+  gpu_device.memcpyDeviceToHost(bfloat16_prec.data(), d_res_bfloat16, result_size*sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, result_size*sizeof(Eigen::bfloat16));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < result_size; ++i) {
+    VERIFY_IS_APPROX(full_prec(i), bfloat16_prec(i));
+  }
+
+  gpu_device.deallocate(d_float);
+  gpu_device.deallocate(d_res_bfloat16);
+  gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_gpu_reductions() {
+  test_gpu_reductions<void>(13, 13, 0);
+  test_gpu_reductions<void>(13, 13, 1);
+
+  test_gpu_reductions<void>(35, 36, 0);
+  test_gpu_reductions<void>(35, 36, 1);
+
+  test_gpu_reductions<void>(36, 35, 0);
+  test_gpu_reductions<void>(36, 35, 1);
+}
+
+template<typename>
+void test_gpu_full_reductions() {
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int size = 13;
+  int num_elem = size*size;
+
+  float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  Eigen::bfloat16* d_res_bfloat16 = (Eigen::bfloat16*)gpu_device.allocate(1 * sizeof(Eigen::bfloat16));
+  Eigen::bfloat16* d_res_float = (Eigen::bfloat16*)gpu_device.allocate(1 * sizeof(Eigen::bfloat16));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 2>, Eigen::Aligned> gpu_float(
+      d_float, size, size);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 0>, Eigen::Aligned> gpu_res_bfloat16(
+      d_res_bfloat16);
+  Eigen::TensorMap<Eigen::Tensor<Eigen::bfloat16, 0>, Eigen::Aligned> gpu_res_float(
+      d_res_float);
+
+  gpu_float.device(gpu_device) = gpu_float.random();
+
+  gpu_res_float.device(gpu_device) = gpu_float.sum().cast<Eigen::bfloat16>();
+  gpu_res_bfloat16.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().sum();
+
+  Tensor<Eigen::bfloat16, 0> bfloat16_prec;
+  Tensor<Eigen::bfloat16, 0> full_prec;
+  gpu_device.memcpyDeviceToHost(bfloat16_prec.data(), d_res_bfloat16, sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::bfloat16));
+  gpu_device.synchronize();
+
+  VERIFY_IS_APPROX(full_prec(), bfloat16_prec());
+
+  gpu_res_float.device(gpu_device) = gpu_float.maximum().cast<Eigen::bfloat16>();
+  gpu_res_bfloat16.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().maximum();
+  gpu_device.memcpyDeviceToHost(bfloat16_prec.data(), d_res_bfloat16, sizeof(Eigen::bfloat16));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::bfloat16));
+  gpu_device.synchronize();
+
+  VERIFY_IS_APPROX(full_prec(), bfloat16_prec());
+
+  gpu_device.deallocate(d_float);
+  gpu_device.deallocate(d_res_bfloat16);
+  gpu_device.deallocate(d_res_float);
+}
+
+template<typename>
+void test_gpu_forced_evals() {
+
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+  int num_elem = 101;
+
+  float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_res_bfloat16_1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_res_bfloat16_2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+  float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+      d_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_bfloat16_1(
+      d_res_bfloat16_1, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Unaligned> gpu_res_bfloat16_2(
+      d_res_bfloat16_2, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
+      d_res_float, num_elem);
+
+  Eigen::array<int, 1> no_bcast;
+  no_bcast[0] = 1;
+
+  gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
+  gpu_float.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().cast<float>();
+  gpu_res_float.device(gpu_device) = gpu_float.abs();
+  gpu_res_bfloat16_1.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().abs().eval().cast<float>();
+  gpu_res_bfloat16_2.device(gpu_device) = gpu_float.cast<Eigen::bfloat16>().abs().broadcast(no_bcast).eval().cast<float>();
+
+  Tensor<float, 1> bfloat16_prec1(num_elem);
+  Tensor<float, 1> bfloat16_prec2(num_elem);
+  Tensor<float, 1> full_prec(num_elem);
+  gpu_device.memcpyDeviceToHost(bfloat16_prec1.data(), d_res_bfloat16_1, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(bfloat16_prec2.data(), d_res_bfloat16_2, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < num_elem; ++i) {
+    VERIFY_IS_APPROX(full_prec(i), bfloat16_prec1(i));
+    VERIFY_IS_APPROX(full_prec(i), bfloat16_prec2(i));
+  }
+
+  gpu_device.deallocate(d_float);
+  gpu_device.deallocate(d_res_bfloat16_1);
+  gpu_device.deallocate(d_res_bfloat16_2);
+  gpu_device.deallocate(d_res_float);
+}
+
+#endif
+
+
+EIGEN_DECLARE_TEST(cxx11_tensor_of_bfloat16_gpu)
+{
+  CALL_SUBTEST_1(test_gpu_numext<void>());
+
+// The reduction unit tests have been excluded until a working
+// implementation to expand the accumulator data type to float32
+// is available.
+// TODO: add reduction unit tests
+#ifdef EIGEN_HAS_GPU_BF16
+  CALL_SUBTEST_2(test_gpu_conversion<void>());
+  CALL_SUBTEST_3(test_gpu_unary<void>());
+  CALL_SUBTEST_4(test_gpu_elementwise<void>());
+  CALL_SUBTEST_5(test_gpu_trancendental<void>());
+  CALL_SUBTEST_6(test_gpu_contractions<void>());
+  CALL_SUBTEST_7(test_gpu_reductions<void>());
+  CALL_SUBTEST_8(test_gpu_full_reductions<void>());
+  CALL_SUBTEST_9(test_gpu_forced_evals<void>());
+#else
+  std::cout << "bfloat16 floats are not supported by this version of gpu: skipping the test" << std::endl;
+#endif
+}