From 876f392c396318f33454168db36ed54308e54e0d Mon Sep 17 00:00:00 2001
From: Deven Desai
Date: Wed, 11 Jul 2018 10:39:54 -0400
Subject: [PATCH] Updates corresponding to the latest round of PR feedback

The major changes are

1. Moving CUDA/PacketMath.h to GPU/PacketMath.h
2. Moving CUDA/MathFunctions.h to GPU/MathFunctions.h
3. Moving CUDA/CudaSpecialFunctions.h to GPU/GpuSpecialFunctions.h

   The above three changes effectively enable the Eigen "Packet" layer for the HIP platform.

4. Merging the "hip_basic" and "cuda_basic" unit tests into one ("gpu_basic")
5. Updating the "EIGEN_DEVICE_FUNC" marking in some places

The change has been tested on the HIP and CUDA platforms.
---
 Eigen/Core                                         |  17 +-
 Eigen/src/Core/MathFunctions.h                     |  10 ++
 Eigen/src/Core/arch/GPU/MathFunctions.h            |   8 +-
 Eigen/src/Core/arch/GPU/PacketMath.h               |   8 +-
 Eigen/src/Core/products/GeneralMatrixVector.h      |   8 +-
 test/CMakeLists.txt                                |   4 +-
 test/gpu_basic.cu                                  |  63 ++++----
 test/gpu_common.h                                  |  86 ++++++----
 .../src/Tensor/TensorContractionBlocking.h         |  14 ++
 .../CXX11/src/Tensor/TensorContractionGpu.h        | 147 ------------------
 .../src/Tensor/TensorGpuHipCudaDefines.h           |   5 +-
 .../src/Tensor/TensorGpuHipCudaUndefines.h         |   1 +
 .../Eigen/CXX11/src/Tensor/TensorIndexList.h       |   5 +-
 .../Eigen/CXX11/src/Tensor/TensorMorphing.h        |   5 +-
 .../Eigen/CXX11/src/Tensor/TensorReduction.h       |  11 +-
 .../CXX11/src/Tensor/TensorReductionGpu.h          |  41 +----
 unsupported/Eigen/SpecialFunctions                 |   4 +-
 .../arch/GPU/GpuSpecialFunctions.h                 |   6 +-
 18 files changed, 158 insertions(+), 285 deletions(-)

diff --git a/Eigen/Core b/Eigen/Core
index 4336de91d..647a10831 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -81,6 +81,7 @@
     // clang++ always considers constexpr functions as implicitly __host__ __device__
     #define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC
   #endif
+
 #endif
 #elif defined(EIGEN_HIPCC)
   // Do not try to vectorize on HIP
@@ -92,7 +93,7 @@
   // We need hip_runtime.h to ensure that that EIGEN_USING_STD_MATH macro
   // works properly on the device side
   #include <hip/hip_runtime.h>
-
+
   #if defined(__HIP_DEVICE_COMPILE__) && !defined(EIGEN_NO_HIP)
     // analogous to EIGEN_CUDA_ARCH, but for HIP
     #define EIGEN_HIP_DEVICE_COMPILE __HIP_DEVICE_COMPILE__
@@ -356,7 +357,7 @@
 #endif

 #if defined EIGEN_CUDACC
-  #define EIGEN_VECTORIZE_CUDA
+  #define EIGEN_VECTORIZE_GPU
   #include
   #if EIGEN_CUDACC_VER >= 70500
     #define EIGEN_HAS_CUDA_FP16
@@ -369,14 +370,20 @@
 #endif

 #if defined(EIGEN_HIPCC) && defined(EIGEN_HIP_DEVICE_COMPILE)
+
+  #define EIGEN_VECTORIZE_GPU
+  #include
+
   #define EIGEN_HAS_HIP_FP16
   #include
+
   #define HIP_PATCH_WITH_NEW_FP16 18215
   #if (HIP_VERSION_PATCH < HIP_PATCH_WITH_NEW_FP16)
     #define EIGEN_HAS_OLD_HIP_FP16
     // Old HIP implementation does not have a explicit typedef for "half2"
     typedef __half2 half2;
   #endif
+
 #endif

 #if defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)
@@ -550,9 +557,9 @@ using std::ptrdiff_t;
 #include "src/Core/arch/GPU/PacketMathHalf.h"
 #include "src/Core/arch/GPU/TypeCasting.h"

-#if defined EIGEN_VECTORIZE_CUDA
-  #include "src/Core/arch/CUDA/PacketMath.h"
-  #include "src/Core/arch/CUDA/MathFunctions.h"
+#if defined EIGEN_VECTORIZE_GPU
+  #include "src/Core/arch/GPU/PacketMath.h"
+  #include "src/Core/arch/GPU/MathFunctions.h"
 #endif

 #include "src/Core/arch/Default/Settings.h"
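The Eigen/Core hunks above make the CUDA and HIP compile paths converge on the same macros: both toolchains now define EIGEN_VECTORIZE_GPU and pull in the shared src/Core/arch/GPU/ headers. A minimal sketch of the guard pattern this enables (the translation unit, kernel name, and body below are illustrative assumptions, not part of the patch):

    // Built with either nvcc or hipcc; Eigen defines EIGEN_GPUCC for both,
    // so no CUDA- or HIP-specific guard is needed in user code.
    #define EIGEN_USE_GPU
    #include <Eigen/Core>

    #if defined(EIGEN_GPUCC)
    __global__ void scale_kernel(float* data, int n, float s) {
      int i = threadIdx.x + blockIdx.x * blockDim.x;
      if (i < n) data[i] *= s;
    }
    #endif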
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 72aa68d45..f16476a92 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -982,7 +982,12 @@
 template<>
 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y)
 {
+#if defined(EIGEN_HIPCC)
+  // no "fminl" on HIP yet
+  return (x < y) ? x : y;
+#else
   return fminl(x, y);
+#endif
 }

 template
@@ -1007,7 +1012,12 @@
 template<>
 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y)
 {
+#if defined(EIGEN_HIPCC)
+  // no "fmaxl" on HIP yet
+  return (x > y) ? x : y;
+#else
   return fmaxl(x, y);
+#endif
 }

 #endif

diff --git a/Eigen/src/Core/arch/GPU/MathFunctions.h b/Eigen/src/Core/arch/GPU/MathFunctions.h
index ff6256ce0..d2b3a2568 100644
--- a/Eigen/src/Core/arch/GPU/MathFunctions.h
+++ b/Eigen/src/Core/arch/GPU/MathFunctions.h
@@ -7,8 +7,8 @@
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

-#ifndef EIGEN_MATH_FUNCTIONS_CUDA_H
-#define EIGEN_MATH_FUNCTIONS_CUDA_H
+#ifndef EIGEN_MATH_FUNCTIONS_GPU_H
+#define EIGEN_MATH_FUNCTIONS_GPU_H

 namespace Eigen {

@@ -17,7 +17,7 @@ namespace internal {
 // Make sure this is only available when targeting a GPU: we don't want to
 // introduce conflicts between these packet_traits definitions and the ones
 // we'll use on the host side (SSE, AVX, ...)
-#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
 template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
 float4 plog(const float4& a)
 {
@@ -100,4 +100,4 @@ double2 prsqrt(const double2& a)

 } // end namespace Eigen

-#endif // EIGEN_MATH_FUNCTIONS_CUDA_H
+#endif // EIGEN_MATH_FUNCTIONS_GPU_H
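Because GPU/MathFunctions.h is now guarded by EIGEN_GPUCC rather than EIGEN_CUDACC, its float4/double2 specializations (plog, prsqrt, ...) become visible to hipcc device code as well. A hedged sketch of a device-side use (kernel name and launch details are illustrative, and EIGEN_USE_GPU is assumed to be defined):

    #define EIGEN_USE_GPU
    #include <Eigen/Core>

    #if defined(EIGEN_GPUCC)
    __global__ void packet_log(const float4* in, float4* out, int n) {
      int i = threadIdx.x + blockIdx.x * blockDim.x;
      if (i < n)
        out[i] = Eigen::internal::plog(in[i]);  // float4 overload from GPU/MathFunctions.h
    }
    #endif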
diff --git a/Eigen/src/Core/arch/GPU/PacketMath.h b/Eigen/src/Core/arch/GPU/PacketMath.h
index ab8e477f4..ddf37b9c1 100644
--- a/Eigen/src/Core/arch/GPU/PacketMath.h
+++ b/Eigen/src/Core/arch/GPU/PacketMath.h
@@ -7,8 +7,8 @@
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

-#ifndef EIGEN_PACKET_MATH_CUDA_H
-#define EIGEN_PACKET_MATH_CUDA_H
+#ifndef EIGEN_PACKET_MATH_GPU_H
+#define EIGEN_PACKET_MATH_GPU_H

 namespace Eigen {

@@ -17,7 +17,7 @@ namespace internal {
 // Make sure this is only available when targeting a GPU: we don't want to
 // introduce conflicts between these packet_traits definitions and the ones
 // we'll use on the host side (SSE, AVX, ...)
-#if defined(EIGEN_CUDACC) && defined(EIGEN_USE_GPU)
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
 template<> struct is_arithmetic { enum { value = true }; };
 template<> struct is_arithmetic { enum { value = true }; };
@@ -338,4 +338,4 @@ ptranspose(PacketBlock& kernel) {

 } // end namespace Eigen

-#endif // EIGEN_PACKET_MATH_CUDA_H
+#endif // EIGEN_PACKET_MATH_GPU_H

diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h
index b2a71bc6f..767feb99d 100644
--- a/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -48,7 +48,7 @@
 typedef typename conditional::type LhsPacket;
 typedef typename conditional::type RhsPacket;
 typedef typename conditional::type ResPacket;

-EIGEN_DONT_INLINE static void run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
   Index rows, Index cols,
   const LhsMapper& lhs,
   const RhsMapper& rhs,
@@ -57,7 +57,7 @@ EIGEN_DONT_INLINE static void run(
 };

 template
-EIGEN_DONT_INLINE void general_matrix_vector_product::run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product::run(
   Index rows, Index cols,
   const LhsMapper& alhs,
   const RhsMapper& rhs,
@@ -231,7 +231,7 @@
 typedef typename conditional::type LhsPacket;
 typedef typename conditional::type RhsPacket;
 typedef typename conditional::type ResPacket;

-EIGEN_DONT_INLINE static void run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run(
   Index rows, Index cols,
   const LhsMapper& lhs,
   const RhsMapper& rhs,
@@ -240,7 +240,7 @@ EIGEN_DONT_INLINE static void run(
 };

 template
-EIGEN_DONT_INLINE void general_matrix_vector_product::run(
+EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void general_matrix_vector_product::run(
   Index rows, Index cols,
   const LhsMapper& alhs,
   const RhsMapper& rhs,

diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index f20df119a..d312d16e4 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -399,7 +399,7 @@ if(CUDA_FOUND)
   cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR})
   set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")

-  ei_add_test(cuda_basic)
+  ei_add_test(gpu_basic)

   unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)
@@ -429,7 +429,7 @@ if (EIGEN_TEST_HIP)
     include_directories(${HIP_PATH}/include)

     set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
-    ei_add_test(hip_basic)
+    ei_add_test(gpu_basic)
     unset(EIGEN_ADD_TEST_FILENAME_EXTENSION)

 elseif (${HIP_PLATFORM} STREQUAL "nvcc")

diff --git a/test/gpu_basic.cu b/test/gpu_basic.cu
index 33e5fd119..897834dff 100644
--- a/test/gpu_basic.cu
+++ b/test/gpu_basic.cu
@@ -15,13 +15,11 @@

 #define EIGEN_TEST_NO_LONGDOUBLE
 #define EIGEN_TEST_NO_COMPLEX
-#define EIGEN_TEST_FUNC cuda_basic
+#define EIGEN_TEST_FUNC gpu_basic
 #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int

-#include
-#include
 #include "main.h"
-#include "cuda_common.h"
+#include "gpu_common.h"

 // Check that dense modules can be properly parsed by nvcc
 #include
@@ -164,40 +162,51 @@ struct matrix_inverse {
   }
 };

-void test_cuda_basic()
+void test_gpu_basic()
 {
-  ei_test_init_cuda();
+  ei_test_init_gpu();

   int nthreads = 100;
   Eigen::VectorXf in, out;

-  #ifndef __CUDA_ARCH__
+  #if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
   int data_size = nthreads * 512;
   in.setRandom(data_size);
   out.setRandom(data_size);
   #endif

-  CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise(), nthreads, in, out) );
-
-  CALL_SUBTEST( run_and_compare_to_cuda(replicate(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(replicate(), nthreads, in, out) );
-
-  CALL_SUBTEST( run_and_compare_to_cuda(redux(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(redux(), nthreads, in, out) );
-
-  CALL_SUBTEST( run_and_compare_to_cuda(prod_test(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(prod_test(), nthreads, in, out) );
-
-  CALL_SUBTEST( run_and_compare_to_cuda(diagonal(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(diagonal(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(coeff_wise(), nthreads, in, out) );

-  CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(matrix_inverse(), nthreads, in, out) );
+#if !defined(EIGEN_USE_HIP)
+  // FIXME
+  // These subtests result in a compile failure on the HIP platform
+  //
+  //  eigen-upstream/Eigen/src/Core/Replicate.h:61:65: error:
+  //           base class 'internal::dense_xpr_base, -1, -1> >::type'
+  //           (aka 'ArrayBase, -1, -1> >') has protected default constructor
+  CALL_SUBTEST( run_and_compare_to_gpu(replicate(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(replicate(), nthreads, in, out) );
+#endif

-  CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues_direct(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues_direct(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(redux(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(redux(), nthreads, in, out) );
+
+  CALL_SUBTEST( run_and_compare_to_gpu(prod_test(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(prod_test(), nthreads, in, out) );
+
+  CALL_SUBTEST( run_and_compare_to_gpu(diagonal(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(diagonal(), nthreads, in, out) );

+  CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(matrix_inverse(), nthreads, in, out) );
+
+#if !defined(EIGEN_USE_HIP)
+  // FIXME
+  // These subtests result in a linking error on the HIP platform
+  CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues_direct(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_gpu(eigenvalues(), nthreads, in, out) );
+#endif
 }

diff --git a/test/gpu_common.h b/test/gpu_common.h
index 9737693ac..3030af6dc 100644
--- a/test/gpu_common.h
+++ b/test/gpu_common.h
@@ -1,13 +1,22 @@
-#ifndef EIGEN_TEST_CUDA_COMMON_H
-#define EIGEN_TEST_CUDA_COMMON_H
+#ifndef EIGEN_TEST_GPU_COMMON_H
+#define EIGEN_TEST_GPU_COMMON_H
+
+#ifdef EIGEN_USE_HIP
+  #include
+  #include
+#else
+  #include
+  #include
+  #include
+#endif

-#include
-#include
-#include
 #include

-#ifndef __CUDACC__
+#define EIGEN_USE_GPU
+#include
+
+#if !defined(__CUDACC__) && !defined(__HIPCC__)
 dim3 threadIdx, blockDim, blockIdx;
 #endif

@@ -21,7 +30,7 @@ void run_on_cpu(const Kernel& ker, int n, const Input& in, Output& out)

 template
 __global__
-void run_on_cuda_meta_kernel(const Kernel ker, int n, const Input* in, Output* out)
+void run_on_gpu_meta_kernel(const Kernel ker, int n, const Input* in, Output* out)
 {
   int i = threadIdx.x + blockIdx.x*blockDim.x;
   if(i

-void run_on_cuda(const Kernel& ker, int n, const Input& in, Output& out)
+void run_on_gpu(const Kernel& ker, int n, const Input& in, Output& out)
 {
   typename Input::Scalar*  d_in;
   typename Output::Scalar* d_out;
   std::ptrdiff_t in_bytes  = in.size()  * sizeof(typename Input::Scalar);
   std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar);

-  cudaMalloc((void**)(&d_in),  in_bytes);
-  cudaMalloc((void**)(&d_out), out_bytes);
+  gpuMalloc((void**)(&d_in),  in_bytes);
+  gpuMalloc((void**)(&d_out), out_bytes);

-  cudaMemcpy(d_in,  in.data(),  in_bytes,  cudaMemcpyHostToDevice);
-  cudaMemcpy(d_out, out.data(), out_bytes, cudaMemcpyHostToDevice);
+  gpuMemcpy(d_in,  in.data(),  in_bytes,  gpuMemcpyHostToDevice);
+  gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice);

   // Simple and non-optimal 1D mapping assuming n is not too large
   // That's only for unit testing!
   dim3 Blocks(128);
   dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) );

-  cudaThreadSynchronize();
-  run_on_cuda_meta_kernel<<>>(ker, n, d_in, d_out);
-  cudaThreadSynchronize();
+  gpuDeviceSynchronize();
+
+#ifdef EIGEN_USE_HIP
+  hipLaunchKernelGGL(run_on_gpu_meta_kernel::type,
+                     typename std::decay::type>,
+                     dim3(Grids), dim3(Blocks), 0, 0, ker, n, d_in, d_out);
+#else
+  run_on_gpu_meta_kernel<<>>(ker, n, d_in, d_out);
+#endif
+
+  gpuDeviceSynchronize();

   // check inputs have not been modified
-  cudaMemcpy(const_cast(in.data()), d_in,  in_bytes,  cudaMemcpyDeviceToHost);
-  cudaMemcpy(out.data(),            d_out, out_bytes, cudaMemcpyDeviceToHost);
+  gpuMemcpy(const_cast(in.data()), d_in,  in_bytes,  gpuMemcpyDeviceToHost);
+  gpuMemcpy(out.data(),            d_out, out_bytes, gpuMemcpyDeviceToHost);

-  cudaFree(d_in);
-  cudaFree(d_out);
+  gpuFree(d_in);
+  gpuFree(d_out);
 }

 template
-void run_and_compare_to_cuda(const Kernel& ker, int n, const Input& in, Output& out)
+void run_and_compare_to_gpu(const Kernel& ker, int n, const Input& in, Output& out)
 {
-  Input  in_ref,  in_cuda;
-  Output out_ref, out_cuda;
-  #ifndef __CUDA_ARCH__
-  in_ref  = in_cuda  = in;
-  out_ref = out_cuda = out;
+  Input  in_ref,  in_gpu;
+  Output out_ref, out_gpu;
+  #if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
+  in_ref  = in_gpu  = in;
+  out_ref = out_gpu = out;
   #endif
   run_on_cpu (ker, n, in_ref, out_ref);
-  run_on_cuda(ker, n, in_cuda, out_cuda);
-  #ifndef __CUDA_ARCH__
-  VERIFY_IS_APPROX(in_ref,  in_cuda);
-  VERIFY_IS_APPROX(out_ref, out_cuda);
+  run_on_gpu(ker, n, in_gpu, out_gpu);
+  #if !defined(__CUDA_ARCH__) && !defined(__HIP_DEVICE_COMPILE__)
+  VERIFY_IS_APPROX(in_ref,  in_gpu);
+  VERIFY_IS_APPROX(out_ref, out_gpu);
   #endif
 }

-void ei_test_init_cuda()
+void ei_test_init_gpu()
 {
   int device = 0;
-  cudaDeviceProp deviceProp;
-  cudaGetDeviceProperties(&deviceProp, device);
-  std::cout << "CUDA device info:\n";
+  gpuDeviceProp_t deviceProp;
+  gpuGetDeviceProperties(&deviceProp, device);
+  std::cout << "GPU device info:\n";
   std::cout << " name: " << deviceProp.name << "\n";
   std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
   std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
@@ -98,4 +116,4 @@ void ei_test_init_cuda()
   std::cout << " computeMode: " << deviceProp.computeMode << "\n";
 }

-#endif // EIGEN_TEST_CUDA_COMMON_H
+#endif // EIGEN_TEST_GPU_COMMON_H

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
index a1e292108..8c1af1da8 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
@@ -28,6 +28,20 @@ class TensorContractionBlocking {
   typedef typename LhsMapper::Scalar LhsScalar;
   typedef typename RhsMapper::Scalar RhsScalar;

+  /*
+    adding EIGEN_DEVICE_FUNC unconditionally to 'TensorContractionBlocking' constructor in `TensorContractionBlocking.h`
+    requires adding EIGEN_DEVICE_FUNC to `computeProductBlockingSizes` in `GeneralBlockPanelKernel.h`
+    which in turn, requires adding EIGEN_DEVICE_FUNC to `evaluateProductBlockingSizesHeuristic` in `GeneralBlockPanelKernel.h`
+    which in turn, requires adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
+    (else HIPCC will error out)
+
+    However adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h`
+    results in NVCC erroring out with the following error
+
+    ../Eigen/src/Core/products/GeneralBlockPanelKernel.h(57): error #2901:
+       dynamic initialization is not supported for function-scope static variables within a __device__/__global__ function
+  */
+
   #if !defined(EIGEN_HIPCC)
   EIGEN_DEVICE_FUNC
   #endif

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
index 238754424..a4f92ee44 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h
@@ -546,45 +546,6 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
     results[i].x = results[i].y = results[i].z = results[i].w = 0;
   }

-#if defined(EIGEN_HIPCC)
-
-#define prefetch_lhs(reg, row, col) \
-  if (!CHECK_LHS_BOUNDARY) { \
-    if (col < k_size) { \
-      reg.x =lhs(row + 0, col); \
-      reg.y =lhs(row + 1, col); \
-      reg.z =lhs(row + 2, col); \
-      reg.w =lhs(row + 3, col); \
-    } \
-  } else { \
-    if (col < k_size) { \
-      if (row + 3 < m_size) { \
-        reg.x =lhs(row + 0, col); \
-        reg.y =lhs(row + 1, col); \
-        reg.z =lhs(row + 2, col); \
-        reg.w =lhs(row + 3, col); \
-      } else if (row + 2 < m_size) { \
-        reg.x =lhs(row + 0, col); \
-        reg.y =lhs(row + 1, col); \
-        reg.z =lhs(row + 2, col); \
-      } else if (row + 1 < m_size) { \
-        reg.x =lhs(row + 0, col); \
-        reg.y =lhs(row + 1, col); \
-      } else if (row < m_size) { \
-        reg.x =lhs(row + 0, col); \
-      } \
-    } \
-  } \
-
-#define prefetch_rhs_hipcc(reg, row, col) \
-    reg.x =rhs(row + 0, col); \
-    reg.y =rhs(row + 1, col); \
-    reg.z =rhs(row + 2, col); \
-    reg.w =rhs(row + 3, col); \
-
-
-#else
-
 #define prefetch_lhs(reg, row, col) \
   if (!CHECK_LHS_BOUNDARY) { \
     if (col < k_size) { \
@@ -607,19 +568,12 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
     } \
   } \

-#endif
-
 Index lhs_vert = base_m+threadIdx.x*4;

 for (Index k = 0; k < k_size; k += 16) {
-#if defined(EIGEN_HIPCC)
-  lhs_pf0 = make_float4(0, 0, 0, 0);
-  rhs_pf0 = make_float4(0, 0, 0, 0);
-#else
   lhs_pf0 = internal::pset1(0);
   rhs_pf0 = internal::pset1(0);
-#endif

   Index lhs_horiz = threadIdx.y+k;
   prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz)
@@ -630,11 +584,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
   if (!CHECK_RHS_BOUNDARY) {
     if ((rhs_vert + 3) < k_size) {
       // just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
-      prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-#else
       rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0);
-#endif
     } else if (rhs_vert + 2 < k_size) {
       // just CHECK_RHS_BOUNDARY
       rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -649,11 +599,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
   } else {
     if (rhs_horiz0 < n_size) {
       if ((rhs_vert + 3) < k_size) {
-#if defined(EIGEN_HIPCC)
-        prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-#else
         rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0);
-#endif
       } else if ((rhs_vert + 2) < k_size) {
         rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
         rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
@@ -753,10 +699,6 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
 #undef prefetch_lhs
 #undef add_vals

-#if defined(EIGEN_HIPCC)
-#undef prefetch_rhs_hipcc
-#endif
-
   Index horiz_base = threadIdx.y*4+base_n;
   if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {
     for (int i = 0; i < 4; i++) {
@@ -845,33 +787,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
     results[i].x = results[i].y = results[i].z = results[i].w = 0;
   }

-#if defined(EIGEN_HIPCC)
-
-#define prefetch_lhs_hipcc(reg, row, col) \
-    reg.x =lhs(row + 0, col); \
-    reg.y =lhs(row + 1, col); \
-    reg.z =lhs(row + 2, col); \
-    reg.w =lhs(row + 3, col);
-
-#define prefetch_rhs_hipcc(reg, row, col) \
-    reg.x =rhs(row + 0, col); \
-    reg.y =rhs(row + 1, col); \
-    reg.z =rhs(row + 2, col); \
-    reg.w =rhs(row + 3, col);
-
-#endif
-
 Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32;

 for (Index k = 0; k < k_size; k += 32) {
-#if defined(EIGEN_HIPCC)
-  lhs_pf0 = make_float4(0, 0, 0, 0);
-  lhs_pf1 = make_float4(0, 0, 0, 0);
-  lhs_pf2 = make_float4(0, 0, 0, 0);
-  lhs_pf3 = make_float4(0, 0, 0, 0);
-
-  rhs_pf0 = make_float4(0, 0, 0, 0);
-  rhs_pf1 = make_float4(0, 0, 0, 0);
-#else
   lhs_pf0 = internal::pset1(0);
   lhs_pf1 = internal::pset1(0);
   lhs_pf2 = internal::pset1(0);
@@ -879,85 +796,40 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,

   rhs_pf0 = internal::pset1(0);
   rhs_pf1 = internal::pset1(0);
-#endif

   if (!CHECK_LHS_BOUNDARY) {
     if ((threadIdx.y/4+k+24) < k_size) {
-#if defined(EIGEN_HIPCC)
-      prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-      prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-      prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
-      prefetch_lhs_hipcc(lhs_pf3, lhs_vert, (threadIdx.y/4+k+24))
-#else
       lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
       lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8));
       lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16));
       lhs_pf3 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+24));
-#endif
     } else if ((threadIdx.y/4+k+16) < k_size) {
-#if defined(EIGEN_HIPCC)
-      prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-      prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-      prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
-#else
       lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
       lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8));
       lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16));
-#endif
     } else if ((threadIdx.y/4+k+8) < k_size) {
-#if defined(EIGEN_HIPCC)
-      prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-      prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-#else
       lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
       lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8));
-#endif
     } else if ((threadIdx.y/4+k) < k_size) {
-#if defined(EIGEN_HIPCC)
-      prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-#else
       lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
-#endif
     }
   } else {
     // just CHECK_LHS_BOUNDARY
     if (lhs_vert + 3 < m_size) {
       if ((threadIdx.y/4+k+24) < k_size) {
-#if defined(EIGEN_HIPCC)
-        prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-        prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-        prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
-        prefetch_lhs_hipcc(lhs_pf3, lhs_vert, (threadIdx.y/4+k+24))
-#else
         lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
         lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8));
         lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16));
        lhs_pf3 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+24));
-#endif
       } else if ((threadIdx.y/4+k+16) < k_size) {
-#if defined(EIGEN_HIPCC)
-        prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-        prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-        prefetch_lhs_hipcc(lhs_pf2, lhs_vert, (threadIdx.y/4+k+16))
-#else
         lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
         lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8));
         lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16));
-#endif
       } else if ((threadIdx.y/4+k+8) < k_size) {
-#if defined(EIGEN_HIPCC)
-        prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-        prefetch_lhs_hipcc(lhs_pf1, lhs_vert, (threadIdx.y/4+k+8))
-#else
         lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
         lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8));
-#endif
       } else if ((threadIdx.y/4+k) < k_size) {
-#if defined(EIGEN_HIPCC)
-        prefetch_lhs_hipcc(lhs_pf0, lhs_vert, (threadIdx.y/4+k))
-#else
         lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k));
-#endif
       }
     } else if (lhs_vert + 2 < m_size) {
       if ((threadIdx.y/4+k+24) < k_size) {
@@ -1046,13 +918,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
   if (!CHECK_RHS_BOUNDARY) {
     if ((rhs_vert + 3) < k_size) {
       // just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
-      prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-      prefetch_rhs_hipcc(rhs_pf1, rhs_vert, rhs_horiz1)
-#else
       rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0);
       rhs_pf1 = rhs.template loadPacket(rhs_vert, rhs_horiz1);
-#endif
     } else if (rhs_vert + 2 < k_size) {
       // just CHECK_RHS_BOUNDARY
       rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1074,13 +941,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
     if (rhs_horiz1 < n_size) {
       if ((rhs_vert + 3) < k_size) {
         // just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
-        prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-        prefetch_rhs_hipcc(rhs_pf1, rhs_vert, rhs_horiz1)
-#else
         rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0);
         rhs_pf1 = rhs.template loadPacket(rhs_vert, rhs_horiz1);
-#endif
       } else if (rhs_vert + 2 < k_size) {
         // just CHECK_RHS_BOUNDARY
         rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
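Stepping back to the run_on_gpu() change in test/gpu_common.h earlier in this patch: on HIP the kernel is launched through hipLaunchKernelGGL, while the CUDA build keeps the <<<...>>> syntax. A hedged sketch of that dual-launch pattern in isolation (the kernel, its arguments, and the helper name are illustrative, not part of the patch):

    template <typename Kernel>
    __global__ void meta_kernel(Kernel ker, int n, const float* in, float* out) {
      int i = threadIdx.x + blockIdx.x * blockDim.x;
      if (i < n) ker(i, in, out);
    }

    template <typename Kernel>
    void launch(const Kernel& ker, int n, const float* d_in, float* d_out) {
      dim3 blocks(128), grids((n + 127) / 128);
    #ifdef EIGEN_USE_HIP
      // HIP spells the launch as a function call; the two zeros are the
      // dynamic shared-memory size and the stream.
      hipLaunchKernelGGL(HIP_KERNEL_NAME(meta_kernel<Kernel>),
                         grids, blocks, 0, 0, ker, n, d_in, d_out);
    #else
      meta_kernel<Kernel><<<grids, blocks>>>(ker, n, d_in, d_out);
    #endif
    }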
@@ -1101,11 +963,7 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
   } else if (rhs_horiz0 < n_size) {
     if ((rhs_vert + 3) < k_size) {
       // just CHECK_RHS_BOUNDARY
-#if defined(EIGEN_HIPCC)
-      prefetch_rhs_hipcc(rhs_pf0, rhs_vert, rhs_horiz0)
-#else
       rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0);
-#endif
     } else if ((rhs_vert + 2) < k_size) {
       // just CHECK_RHS_BOUNDARY
       rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -1213,11 +1071,6 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
     __syncthreads();
   } // end loop over k

-#if defined(EIGEN_HIPCC)
-#undef prefetch_lhs_hipcc
-#undef prefetch_rhs_hipcc
-#endif
-
   __syncthreads();
   Index horiz_base = (threadIdx.y/4)*8+base_n;
   if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) {

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
index f009ae855..9966955f7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h
@@ -32,8 +32,7 @@
 #define gpuGetDeviceCount hipGetDeviceCount
 #define gpuGetErrorString hipGetErrorString
 #define gpuGetDeviceProperties hipGetDeviceProperties
-// FIXME : use hipStreamDefault instead of 0x00
-#define gpuStreamDefault 0x00
+#define gpuStreamDefault hipStreamDefault
 #define gpuGetDevice hipGetDevice
 #define gpuSetDevice hipSetDevice
 #define gpuMalloc hipMalloc
@@ -47,6 +46,7 @@
 #define gpuSharedMemConfig hipSharedMemConfig
 #define gpuDeviceSetSharedMemConfig hipDeviceSetSharedMemConfig
 #define gpuStreamSynchronize hipStreamSynchronize
+#define gpuDeviceSynchronize hipDeviceSynchronize
 #define gpuMemcpy hipMemcpy

 #else
@@ -73,6 +73,7 @@
 #define gpuSharedMemConfig cudaSharedMemConfig
 #define gpuDeviceSetSharedMemConfig cudaDeviceSetSharedMemConfig
 #define gpuStreamSynchronize cudaStreamSynchronize
+#define gpuDeviceSynchronize cudaDeviceSynchronize
 #define gpuMemcpy cudaMemcpy

 #endif

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
index 9bc0708ed..db394bcbb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h
@@ -32,6 +32,7 @@
 #undef gpuSharedMemConfig
 #undef gpuDeviceSetSharedMemConfig
 #undef gpuStreamSynchronize
+#undef gpuDeviceSynchronize
 #undef gpuMemcpy

 #undef EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
index 835efbf72..8810d78cf 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
@@ -351,10 +351,7 @@ struct IndexPairList : internal::IndexTuple {
 namespace internal {

 template
-  #if defined(EIGEN_HIPCC)
-  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
-  #endif
-  size_t array_prod(const IndexList& sizes) {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t array_prod(const IndexList& sizes) {
   size_t result = 1;
   for (int i = 0; i < array_size >::value; ++i) {
     result *= sizes[i];

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
index 2a979845b..cda49f8fe 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -859,10 +859,7 @@ struct TensorEvaluator,
   EIGEN_STRONG_INLINE
   #if !defined(EIGEN_HIPCC)
+  // Marking this as EIGEN_DEVICE_FUNC for HIPCC requires also doing the same for all the functions
+  // being called within here, which then leads to proliferation of EIGEN_DEVICE_FUNC markings, one
+  // of which will eventually result in an NVCC error
   EIGEN_DEVICE_FUNC
   #endif
   bool evalSubExprsIfNeeded(typename MakePointer_::Type data) {
@@ -778,17 +781,9 @@ struct TensorEvaluator,
   // Indexed by reduced dimensions.
   array m_reducedDims;

-#if defined(EIGEN_HIPCC)
- public:
-#endif
-
   // Evaluator for the input expression.
   TensorEvaluator m_impl;

-#if defined(EIGEN_HIPCC)
- private:
-#endif
-
   // Operation to apply for computing the reduction.
   Op m_reducer;

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
index ca854d670..a691e530a 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
@@ -169,7 +169,9 @@ __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num
     #pragma unroll
     for (int offset = warpSize/2; offset > 0; offset /= 2) {
     #if defined(EIGEN_HIPCC)
-      // XXX use std::is_floating_point to determine the type of accum
+      // use std::is_floating_point to determine the type of accum
+      // This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambiguous" error
+      // and list the float and int versions of __shfl_down as the candidate functions.
       if (std::is_floating_point::value) {
         reducer.reduce(__shfl_down(static_cast(accum), offset, warpSize), &accum);
       } else {
@@ -238,20 +240,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,

   // Initialize the output value if it wasn't initialized by the ReductionInitKernel
-#if defined(EIGEN_HIPCC)
-
-  if (gridDim.x == 1 && first_index == 0) {
-    if (num_coeffs % 2 != 0) {
-      half last = input.m_impl.coeff(num_coeffs-1);
-      *scratch = __halves2half2(last, reducer.initialize());
-    } else {
-      *scratch = reducer.template initializePacket();
-    }
-    __syncthreads();
-  }
-
-#else
-
   if (gridDim.x == 1) {
     if (first_index == 0) {
       if (num_coeffs % 2 != 0) {
@@ -264,8 +252,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
       __syncthreads();
     }

-#endif
-
   half2 accum = reducer.template initializePacket();
   const Index max_iter = numext::mini((num_coeffs - first_index) / 2, NumPerThread*BlockSize / 2);
   for (Index i = 0; i < max_iter; i += BlockSize) {
@@ -295,17 +281,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
     atomicReduce(scratch, accum, reducer);
   }

-#if defined(EIGEN_HIPCC)
-  __syncthreads();
-
-  if (gridDim.x == 1 && first_index == 0) {
-    half tmp = __low2half(*scratch);
-    reducer.reduce(__high2half(*scratch), &tmp);
-    *output = tmp;
-  }
-
-#else
-
   if (gridDim.x == 1) {
     __syncthreads();
     if (first_index == 0) {
@@ -314,8 +289,6 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,
       *output = tmp;
     }
   }
-
-#endif
 }

 template
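The TensorReductionGpu.h hunks above and below keep a HIP-only dispatch on std::is_floating_point because, for double accumulators, hipcc reports the call to __shfl_down as ambiguous between its float and int overloads. A hedged sketch of that dispatch factored into a helper (the helper name and the cast-to-float policy are illustrative, not part of the patch):

    #include <type_traits>

    #if defined(EIGEN_HIPCC)
    // Warp/wavefront shuffle-reduce step: floating-point values are shuffled as
    // float to sidestep the ambiguous double overload of __shfl_down under hipcc.
    template <typename Type, typename Reducer>
    __device__ inline void shfl_down_reduce(Reducer& reducer, Type& accum, int offset) {
      if (std::is_floating_point<Type>::value) {
        reducer.reduce(static_cast<Type>(__shfl_down(static_cast<float>(accum), offset, warpSize)), &accum);
      } else {
        reducer.reduce(__shfl_down(accum, offset, warpSize), &accum);
      }
    }
    #endif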
@@ -485,7 +458,9 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu
       #pragma unroll
       for (int offset = warpSize/2; offset > 0; offset /= 2) {
       #if defined(EIGEN_HIPCC)
-        // XXX use std::is_floating_point to determine the type of reduced_val
+        // use std::is_floating_point to determine the type of reduced_val
+        // This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambiguous" error
+        // and list the float and int versions of __shfl_down as the candidate functions.
         if (std::is_floating_point::value) {
           reducer.reduce(__shfl_down(static_cast(reduced_val), offset), &reduced_val);
         } else {
           reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val);
         }
@@ -847,8 +822,4 @@ struct OuterReducer {
 } // end namespace internal
 } // end namespace Eigen

-#if defined(EIGEN_HIPCC)
-#undef warpSize
-#endif
-
 #endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H

diff --git a/unsupported/Eigen/SpecialFunctions b/unsupported/Eigen/SpecialFunctions
index 9441ba8f5..44fd99b43 100644
--- a/unsupported/Eigen/SpecialFunctions
+++ b/unsupported/Eigen/SpecialFunctions
@@ -53,8 +53,8 @@ namespace Eigen {
 #include "src/SpecialFunctions/SpecialFunctionsFunctors.h"
 #include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h"

-#if defined EIGEN_VECTORIZE_CUDA
-  #include "src/SpecialFunctions/arch/CUDA/CudaSpecialFunctions.h"
+#if defined EIGEN_VECTORIZE_GPU
+  #include "src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h"
 #endif

 namespace Eigen {

diff --git a/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h b/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
index ad011e305..40abcee3a 100644
--- a/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
+++ b/unsupported/Eigen/src/SpecialFunctions/arch/GPU/GpuSpecialFunctions.h
@@ -7,8 +7,8 @@
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

-#ifndef EIGEN_CUDA_SPECIALFUNCTIONS_H
-#define EIGEN_CUDA_SPECIALFUNCTIONS_H
+#ifndef EIGEN_GPU_SPECIALFUNCTIONS_H
+#define EIGEN_GPU_SPECIALFUNCTIONS_H

 namespace Eigen {

@@ -223,4 +223,4 @@ pi1e(const double2& x) {

 } // end namespace Eigen

-#endif // EIGEN_CUDA_SPECIALFUNCTIONS_H
+#endif // EIGEN_GPU_SPECIALFUNCTIONS_H
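With hip_basic and cuda_basic merged into the single gpu_basic test, adding a new device-side check only requires a functor plus one CALL_SUBTEST line in test_gpu_basic(); the same source then builds with either nvcc or hipcc through the gpu_common.h harness. A hedged sketch of such an addition (the functor, its (index, in, out) call signature, and the chosen types are assumptions modeled on the existing subtests, not part of this patch):

    // Hypothetical extra subtest for test/gpu_basic.cu
    template<typename T>
    struct saxpy_like {
      EIGEN_DEVICE_FUNC
      void operator()(int i, const typename T::Scalar* in, typename T::Scalar* out) const
      {
        using namespace Eigen;
        T x1(in + i), x2(in + i + 1);                   // small fixed-size vectors read from the input buffer
        Map<T> res(out + i * T::MaxSizeAtCompileTime);  // each thread writes to its own output slot
        res = x1 * typename T::Scalar(2) + x2;
      }
    };

    // ...and inside test_gpu_basic():
    //   CALL_SUBTEST( run_and_compare_to_gpu(saxpy_like<Eigen::Vector3f>(), nthreads, in, out) );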