// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX

#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU

#include "main.h"

#include <iostream>
#include <unsupported/Eigen/CXX11/Tensor>

using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;

template <int DataLayout>
void test_gpu_contraction(int m_size, int k_size, int n_size) {
  Tensor<float, 2, DataLayout> t_left(m_size, k_size);
  Tensor<float, 2, DataLayout> t_right(k_size, n_size);
  Tensor<float, 2, DataLayout> t_result(m_size, n_size);
  Tensor<float, 2, DataLayout> t_result_gpu(m_size, n_size);
  Eigen::array<DimPair, 1> dims{DimPair(1, 0)};

  t_left.setRandom();
  t_right.setRandom();

  std::size_t t_left_bytes = t_left.size() * sizeof(float);
  std::size_t t_right_bytes = t_right.size() * sizeof(float);
  std::size_t t_result_bytes = t_result.size() * sizeof(float);

  float* d_t_left;
  float* d_t_right;
  float* d_t_result;

  gpuMalloc((void**)(&d_t_left), t_left_bytes);
  gpuMalloc((void**)(&d_t_right), t_right_bytes);
  gpuMalloc((void**)(&d_t_result), t_result_bytes);

  gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);
  gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice);

  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_t_left(d_t_left, Eigen::array<int, 2>{m_size, k_size});
  Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_t_right(d_t_right, Eigen::array<int, 2>{k_size, n_size});
  Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_t_result(d_t_result, Eigen::array<int, 2>{m_size, n_size});

  // Contract over the inner dimension (a matrix product) on the device and
  // on the host, then compare the results element-wise.
  gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
  t_result = t_left.contract(t_right, dims);

  gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);

  for (DenseIndex i = 0; i < t_result.size(); i++) {
    if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
      continue;
    }
    if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
      continue;
    }
    std::cout << "mismatch detected at index " << i << ": " << t_result(i) << " vs " << t_result_gpu(i) << std::endl;
    assert(false);
  }

  gpuFree((void*)d_t_left);
  gpuFree((void*)d_t_right);
  gpuFree((void*)d_t_result);
}

template <int DataLayout>
void test_scalar(int m_size, int k_size, int n_size) {
  std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
  // with these dimensions, the output has 300 * 140 elements, which is
  // more than 30 * 1024, which is the number of threads in blocks on
  // a 15 SM GK110 GPU
  Tensor<float, 2, DataLayout> t_left(m_size, k_size);
  Tensor<float, 2, DataLayout> t_right(k_size, n_size);
  Tensor<float, 0, DataLayout> t_result;
  Tensor<float, 0, DataLayout> t_result_gpu;
  Eigen::array<DimPair, 2> dims{DimPair(0, 0), DimPair(1, 1)};

  t_left.setRandom();
  t_right.setRandom();

  std::size_t t_left_bytes = t_left.size() * sizeof(float);
  std::size_t t_right_bytes = t_right.size() * sizeof(float);
  std::size_t t_result_bytes = sizeof(float);

  float* d_t_left;
  float* d_t_right;
  float* d_t_result;

  gpuMalloc((void**)(&d_t_left), t_left_bytes);
  gpuMalloc((void**)(&d_t_right), t_right_bytes);
  gpuMalloc((void**)(&d_t_result), t_result_bytes);

  gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice);
  gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice);

  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);

  Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_t_left(d_t_left, m_size, k_size);
  Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_t_right(d_t_right, k_size, n_size);
  Eigen::TensorMap<Eigen::Tensor<float, 0, DataLayout> > gpu_t_result(d_t_result);
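  // Contracting over both index pairs is a full reduction, so the result is a
  // rank-0 tensor holding a single scalar; the host-side contraction computed
  // next serves as the reference value.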
  gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims);
  t_result = t_left.contract(t_right, dims);

  gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost);
  if (fabs(t_result() - t_result_gpu()) > 1e-4f &&
      !Eigen::internal::isApprox(t_result(), t_result_gpu(), 1e-4f)) {
    std::cout << "mismatch detected: " << t_result() << " vs " << t_result_gpu() << std::endl;
    assert(false);
  }

  gpuFree((void*)d_t_left);
  gpuFree((void*)d_t_right);
  gpuFree((void*)d_t_result);
}

// Sweep the m dimension over [32, 256) for both layouts.
template <int DataLayout>
void test_gpu_contraction_m() {
  for (int k = 32; k < 256; k++) {
    test_gpu_contraction<ColMajor>(k, 128, 128);
    test_gpu_contraction<RowMajor>(k, 128, 128);
  }
}

// Sweep the k (contraction) dimension over [32, 256) for both layouts.
template <int DataLayout>
void test_gpu_contraction_k() {
  for (int k = 32; k < 256; k++) {
    test_gpu_contraction<ColMajor>(128, k, 128);
    test_gpu_contraction<RowMajor>(128, k, 128);
  }
}

// Sweep the n dimension over [32, 256) for both layouts.
template <int DataLayout>
void test_gpu_contraction_n() {
  for (int k = 32; k < 256; k++) {
    test_gpu_contraction<ColMajor>(128, 128, k);
    test_gpu_contraction<RowMajor>(128, 128, k);
  }
}

template <int DataLayout>
void test_gpu_contraction_sizes() {
  int m_sizes[3][5] = {{31, 39, 63, 64, 65}, {127, 129, 255, 257, 511}, {512, 513, 1023, 1024, 1025}};

  int n_sizes[3][5] = {{31, 39, 63, 64, 65}, {127, 129, 255, 257, 511}, {512, 513, 1023, 1024, 1025}};

  int k_sizes[3][6] = {{31, 39, 63, 64, 65, 95}, {96, 127, 129, 255, 257, 511}, {512, 513, 725, 1023, 1024, 1025}};

  // Some selection of specific cases.
  // - m changes rows each iteration
  // - n changes rows every 3 iterations
  // - k changes rows every 9 iterations
  // - within a row, advance one column each iteration
  const int m_cols = 5;
  const int n_cols = 5;
  const int k_cols = 6;
  int m_offset = 0;
  int n_offset = 1;
  int k_offset = 2;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int l = 0; l < 3; ++l) {
        int m = m_sizes[l][m_offset];
        int n = n_sizes[j][n_offset];
        int k = k_sizes[i][k_offset];
        test_gpu_contraction<DataLayout>(m, n, k);
        n_offset = (n_offset + 1) % n_cols;
        k_offset = (k_offset + 1) % k_cols;
      }
      m_offset = (m_offset + 1) % m_cols;
      if (j < 2) {
        n_offset = (n_offset + n_cols - 3) % n_cols;  // Rewind 3.
      }
    }
    k_offset = (k_offset + 2 * k_cols - 9) % k_cols;  // Rewind 9.
  }
}

EIGEN_DECLARE_TEST(cxx11_tensor_contract_gpu) {
  CALL_SUBTEST_1(test_gpu_contraction<ColMajor>(128, 128, 128));
  CALL_SUBTEST_1(test_gpu_contraction<RowMajor>(128, 128, 128));

  CALL_SUBTEST_1(test_scalar<ColMajor>(128, 128, 128));
  CALL_SUBTEST_1(test_scalar<RowMajor>(128, 128, 128));

  CALL_SUBTEST_2(test_gpu_contraction_m<ColMajor>());
  CALL_SUBTEST_3(test_gpu_contraction_m<RowMajor>());

  CALL_SUBTEST_4(test_gpu_contraction_k<ColMajor>());
  CALL_SUBTEST_5(test_gpu_contraction_k<RowMajor>());

  CALL_SUBTEST_6(test_gpu_contraction_n<ColMajor>());
  CALL_SUBTEST_7(test_gpu_contraction_n<RowMajor>());

#if !defined(EIGEN_USE_HIP)
  // disable these subtests for HIP
  CALL_SUBTEST_8(test_gpu_contraction_sizes<ColMajor>());
  CALL_SUBTEST_9(test_gpu_contraction_sizes<RowMajor>());
#endif
}