mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-06-04 02:33:59 +08:00)

[SYCL-2020] Add test to validate SYCL in Eigen core.

commit 24d15e086f, parent d4ae542ed1
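For orientation, the pattern exercised by the new test is the use of Eigen's fixed-size types inside plain SYCL 2020 kernels over USM pointers. A minimal standalone sketch of that pattern (illustrative only, not part of this commit; it assumes a SYCL 2020 compiler such as DPC++ and a working default device):

    // Hedged sketch: Eigen fixed-size types used inside a plain SYCL 2020
    // kernel through USM pointers. Not part of the committed files.
    #include <sycl/sycl.hpp>
    #include <Eigen/Dense>

    int main() {
      sycl::queue q{sycl::default_selector_v};

      float* in = sycl::malloc_shared<float>(4, q);
      float* out = sycl::malloc_shared<float>(4, q);
      for (int i = 0; i < 4; ++i) in[i] = static_cast<float>(i + 1);

      q.single_task([=] {
         // Device-side Eigen expression over raw USM pointers.
         Eigen::Map<const Eigen::Vector4f> x(in);
         Eigen::Map<Eigen::Vector4f> y(out);
         y = 2.0f * x;
       }).wait();

      // out now holds {2, 4, 6, 8}.
      sycl::free(in, q);
      sycl::free(out, q);
      return 0;
    }

The test added below wraps exactly this round trip, then repeats the computation on the host and compares the two results.
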

@@ -496,6 +496,59 @@ if(EIGEN_BUILD_DOC)
  add_subdirectory(doc EXCLUDE_FROM_ALL)
endif()

# add SYCL
option(EIGEN_TEST_SYCL "Add Sycl support." OFF)
if(EIGEN_TEST_SYCL)
  option(EIGEN_SYCL_DPCPP "Use the DPCPP Sycl implementation (DPCPP is default SYCL-Compiler)." ON)
  option(EIGEN_SYCL_TRISYCL "Use the triSYCL Sycl implementation." OFF)
  option(EIGEN_SYCL_ComputeCpp "Use the ComputeCPP Sycl implementation." OFF)

  # Building options
  # https://developer.codeplay.com/products/computecpp/ce/2.11.0/guides/eigen-overview/options-for-building-eigen-sycl
  option(EIGEN_SYCL_USE_DEFAULT_SELECTOR "Use sycl default selector to select the preferred device." OFF)
  option(EIGEN_SYCL_NO_LOCAL_MEM "Build for devices without dedicated shared memory." OFF)
  option(EIGEN_SYCL_LOCAL_MEM "Allow the use of local memory (enabled by default)." ON)
  option(EIGEN_SYCL_LOCAL_THREAD_DIM0 "Set work group size for dimension 0." 16)
  option(EIGEN_SYCL_LOCAL_THREAD_DIM1 "Set work group size for dimension 1." 16)
  option(EIGEN_SYCL_ASYNC_EXECUTION "Allow asynchronous execution (enabled by default)." ON)
  option(EIGEN_SYCL_DISABLE_SKINNY "Disable optimization for tall/skinny matrices." OFF)
  option(EIGEN_SYCL_DISABLE_DOUBLE_BUFFER "Disable double buffer." OFF)
  option(EIGEN_SYCL_DISABLE_SCALAR "Disable scalar contraction." OFF)
  option(EIGEN_SYCL_DISABLE_GEMV "Disable GEMV and create a single kernel to calculate contraction instead." OFF)

  set(EIGEN_SYCL ON)
  set(CMAKE_CXX_STANDARD 17)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations -Wno-shorten-64-to-32 -Wno-cast-align")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-copy-with-user-provided-copy -Wno-unused-variable")
  set (CMAKE_MODULE_PATH "${CMAKE_ROOT}/Modules" "cmake/Modules/" "${CMAKE_MODULE_PATH}")
  find_package(Threads REQUIRED)
  if(EIGEN_SYCL_TRISYCL)
    message(STATUS "Using triSYCL")
    include(FindTriSYCL)
  elseif(EIGEN_SYCL_ComputeCpp)
    message(STATUS "Using ComputeCPP SYCL")
    include(FindComputeCpp)
    set(COMPUTECPP_DRIVER_DEFAULT_VALUE OFF)
    if (NOT MSVC)
      set(COMPUTECPP_DRIVER_DEFAULT_VALUE ON)
    endif()
    option(COMPUTECPP_USE_COMPILER_DRIVER
      "Use ComputeCpp driver instead of a 2 steps compilation"
      ${COMPUTECPP_DRIVER_DEFAULT_VALUE}
    )
  else() #Default SYCL compiler is DPCPP (EIGEN_SYCL_DPCPP)
    set(DPCPP_SYCL_TARGET "spir64" CACHE STRING "Default target for Intel CPU/GPU")
    message(STATUS "Using DPCPP")
    find_package(DPCPP)
    add_definitions(-DSYCL_COMPILER_IS_DPCPP)
  endif(EIGEN_SYCL_TRISYCL)
  if(EIGEN_DONT_VECTORIZE_SYCL)
    message(STATUS "Disabling SYCL vectorization in tests/examples")
    # When disabling SYCL vectorization, also disable Eigen default vectorization
    add_definitions(-DEIGEN_DONT_VECTORIZE=1)
    add_definitions(-DEIGEN_DONT_VECTORIZE_SYCL=1)
  endif()
endif()

cmake_dependent_option(BUILD_TESTING "Enable creation of tests." ON "PROJECT_IS_TOP_LEVEL" OFF)
option(EIGEN_BUILD_TESTING "Enable creation of Eigen tests." ${BUILD_TESTING})

@@ -522,45 +575,6 @@ else()
  add_subdirectory(lapack EXCLUDE_FROM_ALL)
endif()

# add SYCL
option(EIGEN_TEST_SYCL "Add Sycl support." OFF)
if(EIGEN_TEST_SYCL)
  option(EIGEN_SYCL_DPCPP "Use the DPCPP Sycl implementation (DPCPP is default SYCL-Compiler)." ON)
  option(EIGEN_SYCL_TRISYCL "Use the triSYCL Sycl implementation." OFF)
  option(EIGEN_SYCL_ComputeCpp "Use the DPCPP Sycl implementation." OFF)
  set(CMAKE_CXX_STANDARD 17)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations -Wno-shorten-64-to-32 -Wno-cast-align")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-copy-with-user-provided-copy -Wno-unused-variable")
  set (CMAKE_MODULE_PATH "${CMAKE_ROOT}/Modules" "cmake/Modules/" "${CMAKE_MODULE_PATH}")
  find_package(Threads REQUIRED)
  if(EIGEN_SYCL_TRISYCL)
    message(STATUS "Using triSYCL")
    include(FindTriSYCL)
  elseif(EIGEN_SYCL_ComputeCpp)
    message(STATUS "Using ComputeCPP SYCL")
    include(FindComputeCpp)
    set(COMPUTECPP_DRIVER_DEFAULT_VALUE OFF)
    if (NOT MSVC)
      set(COMPUTECPP_DRIVER_DEFAULT_VALUE ON)
    endif()
    option(COMPUTECPP_USE_COMPILER_DRIVER
      "Use ComputeCpp driver instead of a 2 steps compilation"
      ${COMPUTECPP_DRIVER_DEFAULT_VALUE}
    )
  else() #Default SYCL compiler is DPCPP (EIGEN_SYCL_DPCPP)
    set(DPCPP_SYCL_TARGET "spir64" CACHE STRING "Defualt target for Intel CPU/GPU")
    message(STATUS "Using DPCPP")
    find_package(DPCPP)
    add_definitions(-DSYCL_COMPILER_IS_DPCPP)
  endif(EIGEN_SYCL_TRISYCL)
  if(EIGEN_DONT_VECTORIZE_SYCL)
    message(STATUS "Disabling SYCL vectorization in tests/examples")
    # When disabling SYCL vectorization, also disable Eigen default vectorization
    add_definitions(-DEIGEN_DONT_VECTORIZE=1)
    add_definitions(-DEIGEN_DONT_VECTORIZE_SYCL=1)
  endif()
endif()

add_subdirectory(unsupported)

add_subdirectory(demos EXCLUDE_FROM_ALL)

@@ -368,8 +368,10 @@ macro(ei_testing_print_summary)
  if(EIGEN_TEST_SYCL)
    if(EIGEN_SYCL_TRISYCL)
      message(STATUS "SYCL: ON (using triSYCL)")
    else()
    elseif(EIGEN_SYCL_ComputeCpp)
      message(STATUS "SYCL: ON (using computeCPP)")
    elseif(EIGEN_SYCL_DPCPP)
      message(STATUS "SYCL: ON (using DPCPP)")
    endif()
  else()
    message(STATUS "SYCL: OFF")

cmake/SyclConfigureTesting.cmake (new file, 64 lines)
@@ -0,0 +1,64 @@
set(CMAKE_CXX_STANDARD 17)
# Forward CMake options as preprocessor definitions
if(EIGEN_SYCL_USE_DEFAULT_SELECTOR)
  add_definitions(-DEIGEN_SYCL_USE_DEFAULT_SELECTOR=${EIGEN_SYCL_USE_DEFAULT_SELECTOR})
endif()
if(EIGEN_SYCL_NO_LOCAL_MEM)
  add_definitions(-DEIGEN_SYCL_NO_LOCAL_MEM=${EIGEN_SYCL_NO_LOCAL_MEM})
endif()
if(EIGEN_SYCL_LOCAL_MEM)
  add_definitions(-DEIGEN_SYCL_LOCAL_MEM=${EIGEN_SYCL_LOCAL_MEM})
endif()
if(EIGEN_SYCL_MAX_GLOBAL_RANGE)
  add_definitions(-DEIGEN_SYCL_MAX_GLOBAL_RANGE=${EIGEN_SYCL_MAX_GLOBAL_RANGE})
endif()
if(EIGEN_SYCL_LOCAL_THREAD_DIM0)
  add_definitions(-DEIGEN_SYCL_LOCAL_THREAD_DIM0=${EIGEN_SYCL_LOCAL_THREAD_DIM0})
endif()
if(EIGEN_SYCL_LOCAL_THREAD_DIM1)
  add_definitions(-DEIGEN_SYCL_LOCAL_THREAD_DIM1=${EIGEN_SYCL_LOCAL_THREAD_DIM1})
endif()
if(EIGEN_SYCL_REG_M)
  add_definitions(-DEIGEN_SYCL_REG_M=${EIGEN_SYCL_REG_M})
endif()
if(EIGEN_SYCL_REG_N)
  add_definitions(-DEIGEN_SYCL_REG_N=${EIGEN_SYCL_REG_N})
endif()
if(EIGEN_SYCL_ASYNC_EXECUTION)
  add_definitions(-DEIGEN_SYCL_ASYNC_EXECUTION=${EIGEN_SYCL_ASYNC_EXECUTION})
endif()
if(EIGEN_SYCL_DISABLE_SKINNY)
  add_definitions(-DEIGEN_SYCL_DISABLE_SKINNY=${EIGEN_SYCL_DISABLE_SKINNY})
endif()
if(EIGEN_SYCL_DISABLE_DOUBLE_BUFFER)
  add_definitions(-DEIGEN_SYCL_DISABLE_DOUBLE_BUFFER=${EIGEN_SYCL_DISABLE_DOUBLE_BUFFER})
endif()
if(EIGEN_SYCL_DISABLE_SCALAR)
  add_definitions(-DEIGEN_SYCL_DISABLE_SCALAR=${EIGEN_SYCL_DISABLE_SCALAR})
endif()
if(EIGEN_SYCL_DISABLE_GEMV)
  add_definitions(-DEIGEN_SYCL_DISABLE_GEMV=${EIGEN_SYCL_DISABLE_GEMV})
endif()
if(EIGEN_SYCL_DISABLE_ARM_GPU_CACHE_OPTIMISATION)
  add_definitions(-DEIGEN_SYCL_DISABLE_ARM_GPU_CACHE_OPTIMISATION=${EIGEN_SYCL_DISABLE_ARM_GPU_CACHE_OPTIMISATION})
endif()

if(EIGEN_SYCL_ComputeCpp)
  if(MSVC)
    list(APPEND COMPUTECPP_USER_FLAGS -DWIN32)
  else()
    list(APPEND COMPUTECPP_USER_FLAGS -Wall)
  endif()
  # The following flags are not supported by Clang and can cause warnings
  # if used with -Werror so they are removed here.
  if(COMPUTECPP_USE_COMPILER_DRIVER)
    set(CMAKE_CXX_COMPILER ${ComputeCpp_DEVICE_COMPILER_EXECUTABLE})
    string(REPLACE "-Wlogical-op" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
    string(REPLACE "-Wno-psabi" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
  endif()
  list(APPEND COMPUTECPP_USER_FLAGS
    -DEIGEN_NO_ASSERTION_CHECKING=1
    -no-serial-memop
    -Xclang
    -cl-mad-enable)
endif(EIGEN_SYCL_ComputeCpp)
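
The definitions forwarded above become ordinary preprocessor macros in every test translation unit. As a hedged illustration of how such macros are typically consumed (the macro names mirror the CMake options; the function itself is hypothetical and not Eigen's actual internals):

    // Hypothetical helper: reports which of the forwarded SYCL test
    // definitions were present at compile time.
    #include <cstdio>

    void print_sycl_test_config() {
    #if defined(EIGEN_SYCL_NO_LOCAL_MEM)
      std::printf("local-memory kernels disabled\n");
    #elif defined(EIGEN_SYCL_LOCAL_MEM)
      std::printf("local-memory kernels enabled\n");
    #endif
    #if defined(EIGEN_SYCL_LOCAL_THREAD_DIM0) && defined(EIGEN_SYCL_LOCAL_THREAD_DIM1)
      std::printf("work-group dimensions overridden\n");
    #endif
    #if defined(EIGEN_SYCL_DISABLE_SKINNY)
      std::printf("tall/skinny contraction optimization disabled\n");
    #endif
    }
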

@@ -477,6 +477,14 @@ if (EIGEN_TEST_HIP)
  endif()
endif()

if(EIGEN_TEST_SYCL)
  set(EIGEN_SYCL ON)
  include(SyclConfigureTesting)

  ei_add_test(sycl_basic)
  set(EIGEN_SYCL OFF)
endif()

cmake_dependent_option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF "EIGEN_BUILD_DOC" OFF)
if(EIGEN_TEST_BUILD_DOCUMENTATION)
  add_dependencies(buildtests doc)

test/sycl_basic.cpp (new file, 382 lines)
@@ -0,0 +1,382 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2023
// Alejandro Acosta Codeplay Software Ltd.
// Contact: <eigen@codeplay.com>
// Copyright (C) 2015-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int

#define EIGEN_USE_SYCL
#include "main.h"

#include <Eigen/Dense>

template <bool verifyNan = false, bool singleTask = false, typename Operation, typename Input, typename Output>
void run_and_verify(Operation& ope, size_t num_elements, const Input& in, Output& out) {
  Output out_gpu, out_cpu;
  out_gpu = out_cpu = out;
  auto queue = sycl::queue{sycl::default_selector_v};

  auto in_size_bytes = sizeof(typename Input::Scalar) * in.size();
  auto out_size_bytes = sizeof(typename Output::Scalar) * out.size();
  auto in_d = sycl::malloc_device<typename Input::Scalar>(in.size(), queue);
  auto out_d = sycl::malloc_device<typename Output::Scalar>(out.size(), queue);

  queue.memcpy(in_d, in.data(), in_size_bytes).wait();
  queue.memcpy(out_d, out.data(), out_size_bytes).wait();

  if constexpr (singleTask) {
    queue.single_task([=]() { ope(in_d, out_d); }).wait();
  } else {
    queue
        .parallel_for(sycl::range{num_elements},
                      [=](sycl::id<1> idx) {
                        auto id = idx[0];
                        ope(id, in_d, out_d);
                      })
        .wait();
  }

  queue.memcpy(out_gpu.data(), out_d, out_size_bytes).wait();

  sycl::free(in_d, queue);
  sycl::free(out_d, queue);

  queue.throw_asynchronous();

  // Run on CPU and compare the output
  if constexpr (singleTask == 1) {
    ope(in.data(), out_cpu.data());
  } else {
    for (size_t i = 0; i < num_elements; ++i) {
      ope(i, in.data(), out_cpu.data());
    }
  }
  if constexpr (verifyNan) {
    VERIFY_IS_CWISE_APPROX(out_gpu, out_cpu);
  } else {
    VERIFY_IS_APPROX(out_gpu, out_cpu);
  }
}
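
run_and_verify is the dispatcher used by all of the tests below: the same callable runs once per element on the device (or once in total when singleTask is set), is then replayed on the host, and the two outputs are compared. A hypothetical minimal caller, following the same pattern as the tests that follow (the function name and the doubling operation are illustrative, not part of the committed file):

    // Hypothetical example: verify a trivial per-element operation with the
    // helper above. The caller supplies buffers sized as in the test driver
    // at the bottom of this file.
    void example_scale_test(size_t num_elements, const Eigen::VectorXf& in, Eigen::VectorXf& out) {
      auto operation = [](size_t i, const float* in, float* out) {
        out[i] = 2.0f * in[i];  // runs on the device, then again on the host
      };
      run_and_verify(operation, num_elements, in, out);
    }

The verifyNan template parameter switches the final comparison to VERIFY_IS_CWISE_APPROX, which is what the NaN-producing tests below (complex square root, numeric limits) request.
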

template <typename DataType, typename Input, typename Output>
void test_coeff_wise(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    DataType x1(in + i);
    DataType x2(in + i + 1);
    DataType x3(in + i + 2);
    Map<DataType> res(out + i * DataType::MaxSizeAtCompileTime);

    res.array() += (in[0] * x1 + x2).array() * x3.array();
  };

  run_and_verify(operation, num_elements, in, out);
}

template <typename DataType, typename Input, typename Output>
void test_complex_sqrt(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    using namespace Eigen;
    typedef typename DataType::Scalar ComplexType;
    typedef typename DataType::Scalar::value_type ValueType;
    const int num_special_inputs = 18;

    if (i == 0) {
      const ValueType nan = std::numeric_limits<ValueType>::quiet_NaN();
      typedef Eigen::Vector<ComplexType, num_special_inputs> SpecialInputs;
      SpecialInputs special_in;
      special_in.setZero();
      int idx = 0;
      special_in[idx++] = ComplexType(0, 0);
      special_in[idx++] = ComplexType(-0, 0);
      special_in[idx++] = ComplexType(0, -0);
      special_in[idx++] = ComplexType(-0, -0);
      const ValueType inf = std::numeric_limits<ValueType>::infinity();
      special_in[idx++] = ComplexType(1.0, inf);
      special_in[idx++] = ComplexType(nan, inf);
      special_in[idx++] = ComplexType(1.0, -inf);
      special_in[idx++] = ComplexType(nan, -inf);
      special_in[idx++] = ComplexType(-inf, 1.0);
      special_in[idx++] = ComplexType(inf, 1.0);
      special_in[idx++] = ComplexType(-inf, -1.0);
      special_in[idx++] = ComplexType(inf, -1.0);
      special_in[idx++] = ComplexType(-inf, nan);
      special_in[idx++] = ComplexType(inf, nan);
      special_in[idx++] = ComplexType(1.0, nan);
      special_in[idx++] = ComplexType(nan, 1.0);
      special_in[idx++] = ComplexType(nan, -1.0);
      special_in[idx++] = ComplexType(nan, nan);

      Map<SpecialInputs> special_out(out);
      special_out = special_in.cwiseSqrt();
    }

    DataType x1(in + i);
    Map<DataType> res(out + num_special_inputs + i * DataType::MaxSizeAtCompileTime);
    res = x1.cwiseSqrt();
  };
  run_and_verify<true>(operation, num_elements, in, out);
}

template <typename DataType, typename Input, typename Output>
void test_complex_operators(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    using namespace Eigen;
    typedef typename DataType::Scalar ComplexType;
    typedef typename DataType::Scalar::value_type ValueType;
    const int num_scalar_operators = 24;
    const int num_vector_operators = 23; // no unary + operator.
    size_t out_idx = i * (num_scalar_operators + num_vector_operators * DataType::MaxSizeAtCompileTime);

    // Scalar operators.
    const ComplexType a = in[i];
    const ComplexType b = in[i + 1];

    out[out_idx++] = +a;
    out[out_idx++] = -a;

    out[out_idx++] = a + b;
    out[out_idx++] = a + numext::real(b);
    out[out_idx++] = numext::real(a) + b;
    out[out_idx++] = a - b;
    out[out_idx++] = a - numext::real(b);
    out[out_idx++] = numext::real(a) - b;
    out[out_idx++] = a * b;
    out[out_idx++] = a * numext::real(b);
    out[out_idx++] = numext::real(a) * b;
    out[out_idx++] = a / b;
    out[out_idx++] = a / numext::real(b);
    out[out_idx++] = numext::real(a) / b;

    out[out_idx] = a;
    out[out_idx++] += b;
    out[out_idx] = a;
    out[out_idx++] -= b;
    out[out_idx] = a;
    out[out_idx++] *= b;
    out[out_idx] = a;
    out[out_idx++] /= b;

    const ComplexType true_value = ComplexType(ValueType(1), ValueType(0));
    const ComplexType false_value = ComplexType(ValueType(0), ValueType(0));
    out[out_idx++] = (a == b ? true_value : false_value);
    out[out_idx++] = (a == numext::real(b) ? true_value : false_value);
    out[out_idx++] = (numext::real(a) == b ? true_value : false_value);
    out[out_idx++] = (a != b ? true_value : false_value);
    out[out_idx++] = (a != numext::real(b) ? true_value : false_value);
    out[out_idx++] = (numext::real(a) != b ? true_value : false_value);

    // Vector versions.
    DataType x1(in + i);
    DataType x2(in + i + 1);
    const int res_size = DataType::MaxSizeAtCompileTime * num_scalar_operators;
    const int size = DataType::MaxSizeAtCompileTime;
    int block_idx = 0;

    Map<VectorX<ComplexType>> res(out + out_idx, res_size);
    res.segment(block_idx, size) = -x1;
    block_idx += size;

    res.segment(block_idx, size) = x1 + x2;
    block_idx += size;
    res.segment(block_idx, size) = x1 + x2.real();
    block_idx += size;
    res.segment(block_idx, size) = x1.real() + x2;
    block_idx += size;
    res.segment(block_idx, size) = x1 - x2;
    block_idx += size;
    res.segment(block_idx, size) = x1 - x2.real();
    block_idx += size;
    res.segment(block_idx, size) = x1.real() - x2;
    block_idx += size;
    res.segment(block_idx, size) = x1.array() * x2.array();
    block_idx += size;
    res.segment(block_idx, size) = x1.array() * x2.real().array();
    block_idx += size;
    res.segment(block_idx, size) = x1.real().array() * x2.array();
    block_idx += size;
    res.segment(block_idx, size) = x1.array() / x2.array();
    block_idx += size;
    res.segment(block_idx, size) = x1.array() / x2.real().array();
    block_idx += size;
    res.segment(block_idx, size) = x1.real().array() / x2.array();
    block_idx += size;

    res.segment(block_idx, size) = x1;
    res.segment(block_idx, size) += x2;
    block_idx += size;
    res.segment(block_idx, size) = x1;
    res.segment(block_idx, size) -= x2;
    block_idx += size;
    res.segment(block_idx, size) = x1;
    res.segment(block_idx, size).array() *= x2.array();
    block_idx += size;
    res.segment(block_idx, size) = x1;
    res.segment(block_idx, size).array() /= x2.array();
    block_idx += size;

    const DataType true_vector = DataType::Constant(true_value);
    const DataType false_vector = DataType::Constant(false_value);
    res.segment(block_idx, size) = (x1 == x2 ? true_vector : false_vector);
    block_idx += size;
    res.segment(block_idx, size) = (x1 == x2.real() ? true_vector : false_vector);
    block_idx += size;
    // res.segment(block_idx, size) = (x1.real() == x2) ? true_vector : false_vector;
    // block_idx += size;
    res.segment(block_idx, size) = (x1 != x2 ? true_vector : false_vector);
    block_idx += size;
    res.segment(block_idx, size) = (x1 != x2.real() ? true_vector : false_vector);
    block_idx += size;
    // res.segment(block_idx, size) = (x1.real() != x2 ? true_vector : false_vector);
    // block_idx += size;
  };
  run_and_verify<true>(operation, num_elements, in, out);
}

template <typename DataType, typename Input, typename Output>
void test_redux(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    using namespace Eigen;
    int N = 10;
    DataType x1(in + i);
    out[i * N + 0] = x1.minCoeff();
    out[i * N + 1] = x1.maxCoeff();
    out[i * N + 2] = x1.sum();
    out[i * N + 3] = x1.prod();
    out[i * N + 4] = x1.matrix().squaredNorm();
    out[i * N + 5] = x1.matrix().norm();
    out[i * N + 6] = x1.colwise().sum().maxCoeff();
    out[i * N + 7] = x1.rowwise().maxCoeff().sum();
    out[i * N + 8] = x1.matrix().colwise().squaredNorm().sum();
  };
  run_and_verify(operation, num_elements, in, out);
}

template <typename DataType, typename Input, typename Output>
void test_replicate(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    using namespace Eigen;
    DataType x1(in + i);
    int step = x1.size() * 4;
    int stride = 3 * step;

    typedef Map<Array<typename DataType::Scalar, Dynamic, Dynamic>> MapType;
    MapType(out + i * stride + 0 * step, x1.rows() * 2, x1.cols() * 2) = x1.replicate(2, 2);
    MapType(out + i * stride + 1 * step, x1.rows() * 3, x1.cols()) = in[i] * x1.colwise().replicate(3);
    MapType(out + i * stride + 2 * step, x1.rows(), x1.cols() * 3) = in[i] * x1.rowwise().replicate(3);
  };
  run_and_verify(operation, num_elements, in, out);
}

template <typename DataType1, typename DataType2, typename Input, typename Output>
void test_product(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType1::Scalar* in, typename DataType1::Scalar* out) {
    using namespace Eigen;
    typedef Matrix<typename DataType1::Scalar, DataType1::RowsAtCompileTime, DataType2::ColsAtCompileTime> DataType3;
    DataType1 x1(in + i);
    DataType2 x2(in + i + 1);
    Map<DataType3> res(out + i * DataType3::MaxSizeAtCompileTime);
    res += in[i] * x1 * x2;
  };
  run_and_verify(operation, num_elements, in, out);
}

template <typename DataType1, typename DataType2, typename Input, typename Output>
void test_diagonal(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType1::Scalar* in, typename DataType1::Scalar* out) {
    using namespace Eigen;
    DataType1 x1(in + i);
    Map<DataType2> res(out + i * DataType2::MaxSizeAtCompileTime);
    res += x1.diagonal();
  };
  run_and_verify(operation, num_elements, in, out);
}

template <typename DataType, typename Input, typename Output>
void test_eigenvalues_direct(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    using namespace Eigen;
    typedef Matrix<typename DataType::Scalar, DataType::RowsAtCompileTime, 1> Vec;
    DataType M(in + i);
    Map<Vec> res(out + i * Vec::MaxSizeAtCompileTime);
    DataType A = M * M.adjoint();
    SelfAdjointEigenSolver<DataType> eig;
    eig.computeDirect(A);
    res = eig.eigenvalues();
  };
  run_and_verify(operation, num_elements, in, out);
}

template <typename DataType, typename Input, typename Output>
void test_matrix_inverse(size_t num_elements, const Input& in, Output& out) {
  auto operation = [](size_t i, const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    using namespace Eigen;
    DataType M(in + i);
    Map<DataType> res(out + i * DataType::MaxSizeAtCompileTime);
    res = M.inverse();
  };
  run_and_verify(operation, num_elements, in, out);
}

template <typename DataType, typename Input, typename Output>
void test_numeric_limits(const Input& in, Output& out) {
  auto operation = [](const typename DataType::Scalar* in, typename DataType::Scalar* out) {
    EIGEN_UNUSED_VARIABLE(in)
    out[0] = numext::numeric_limits<float>::epsilon();
    out[1] = (numext::numeric_limits<float>::max)();
    out[2] = (numext::numeric_limits<float>::min)();
    out[3] = numext::numeric_limits<float>::infinity();
    out[4] = numext::numeric_limits<float>::quiet_NaN();
  };
  run_and_verify<true, true>(operation, 1, in, out);
}

EIGEN_DECLARE_TEST(sycl_basic) {
  Eigen::VectorXf in, out;
  Eigen::VectorXcf cfin, cfout;

  constexpr size_t num_elements = 100;
  constexpr size_t data_size = num_elements * 512;
  in.setRandom(data_size);
  out.setConstant(data_size, -1);
  cfin.setRandom(data_size);
  cfout.setConstant(data_size, -1);

  CALL_SUBTEST(test_coeff_wise<Vector3f>(num_elements, in, out));
  CALL_SUBTEST(test_coeff_wise<Array44f>(num_elements, in, out));

  CALL_SUBTEST(test_complex_operators<Vector3cf>(num_elements, cfin, cfout));
  CALL_SUBTEST(test_complex_sqrt<Vector3cf>(num_elements, cfin, cfout));

  CALL_SUBTEST(test_redux<Array4f>(num_elements, in, out));
  CALL_SUBTEST(test_redux<Matrix3f>(num_elements, in, out));

  CALL_SUBTEST(test_replicate<Array4f>(num_elements, in, out));
  CALL_SUBTEST(test_replicate<Array33f>(num_elements, in, out));

  auto test_prod_mm = [&]() { test_product<Matrix3f, Matrix3f>(num_elements, in, out); };
  auto test_prod_mv = [&]() { test_product<Matrix4f, Vector4f>(num_elements, in, out); };
  CALL_SUBTEST(test_prod_mm());
  CALL_SUBTEST(test_prod_mv());

  auto test_diagonal_mv3f = [&]() { test_diagonal<Matrix3f, Vector3f>(num_elements, in, out); };
  auto test_diagonal_mv4f = [&]() { test_diagonal<Matrix4f, Vector4f>(num_elements, in, out); };
  CALL_SUBTEST(test_diagonal_mv3f());
  CALL_SUBTEST(test_diagonal_mv4f());

  CALL_SUBTEST(test_eigenvalues_direct<Matrix3f>(num_elements, in, out));
  CALL_SUBTEST(test_eigenvalues_direct<Matrix2f>(num_elements, in, out));

  CALL_SUBTEST(test_matrix_inverse<Matrix2f>(num_elements, in, out));
  CALL_SUBTEST(test_matrix_inverse<Matrix3f>(num_elements, in, out));
  CALL_SUBTEST(test_matrix_inverse<Matrix4f>(num_elements, in, out));

  CALL_SUBTEST(test_numeric_limits<Vector3f>(in, out));
}

@@ -122,73 +122,7 @@ ei_add_test(special_packetmath "-DEIGEN_FAST_MATH=1")

if(EIGEN_TEST_SYCL)
  set(EIGEN_SYCL ON)
  set(CMAKE_CXX_STANDARD 17)
  # Forward CMake options as preprocessor definitions
  if(EIGEN_SYCL_USE_DEFAULT_SELECTOR)
    add_definitions(-DEIGEN_SYCL_USE_DEFAULT_SELECTOR=${EIGEN_SYCL_USE_DEFAULT_SELECTOR})
  endif()
  if(EIGEN_SYCL_NO_LOCAL_MEM)
    add_definitions(-DEIGEN_SYCL_NO_LOCAL_MEM=${EIGEN_SYCL_NO_LOCAL_MEM})
  endif()
  if(EIGEN_SYCL_LOCAL_MEM)
    add_definitions(-DEIGEN_SYCL_LOCAL_MEM=${EIGEN_SYCL_LOCAL_MEM})
  endif()
  if(EIGEN_SYCL_MAX_GLOBAL_RANGE)
    add_definitions(-DEIGEN_SYCL_MAX_GLOBAL_RANGE=${EIGEN_SYCL_MAX_GLOBAL_RANGE})
  endif()
  if(EIGEN_SYCL_LOCAL_THREAD_DIM0)
    add_definitions(-DEIGEN_SYCL_LOCAL_THREAD_DIM0=${EIGEN_SYCL_LOCAL_THREAD_DIM0})
  endif()
  if(EIGEN_SYCL_LOCAL_THREAD_DIM1)
    add_definitions(-DEIGEN_SYCL_LOCAL_THREAD_DIM1=${EIGEN_SYCL_LOCAL_THREAD_DIM1})
  endif()
  if(EIGEN_SYCL_REG_M)
    add_definitions(-DEIGEN_SYCL_REG_M=${EIGEN_SYCL_REG_M})
  endif()
  if(EIGEN_SYCL_REG_N)
    add_definitions(-DEIGEN_SYCL_REG_N=${EIGEN_SYCL_REG_N})
  endif()
  if(EIGEN_SYCL_ASYNC_EXECUTION)
    add_definitions(-DEIGEN_SYCL_ASYNC_EXECUTION=${EIGEN_SYCL_ASYNC_EXECUTION})
  endif()
  if(EIGEN_SYCL_DISABLE_SKINNY)
    add_definitions(-DEIGEN_SYCL_DISABLE_SKINNY=${EIGEN_SYCL_DISABLE_SKINNY})
  endif()
  if(EIGEN_SYCL_DISABLE_DOUBLE_BUFFER)
    add_definitions(-DEIGEN_SYCL_DISABLE_DOUBLE_BUFFER=${EIGEN_SYCL_DISABLE_DOUBLE_BUFFER})
  endif()
  if(EIGEN_SYCL_DISABLE_RANK1)
    add_definitions(-DEIGEN_SYCL_DISABLE_RANK1=${EIGEN_SYCL_DISABLE_RANK1})
  endif()
  if(EIGEN_SYCL_DISABLE_SCALAR)
    add_definitions(-DEIGEN_SYCL_DISABLE_SCALAR=${EIGEN_SYCL_DISABLE_SCALAR})
  endif()
  if(EIGEN_SYCL_DISABLE_GEMV)
    add_definitions(-DEIGEN_SYCL_DISABLE_GEMV=${EIGEN_SYCL_DISABLE_GEMV})
  endif()
  if(EIGEN_SYCL_DISABLE_ARM_GPU_CACHE_OPTIMISATION)
    add_definitions(-DEIGEN_SYCL_DISABLE_ARM_GPU_CACHE_OPTIMISATION=${EIGEN_SYCL_DISABLE_ARM_GPU_CACHE_OPTIMISATION})
  endif()

  if(EIGEN_SYCL_ComputeCpp)
    if(MSVC)
      list(APPEND COMPUTECPP_USER_FLAGS -DWIN32)
    else()
      list(APPEND COMPUTECPP_USER_FLAGS -Wall)
    endif()
    # The following flags are not supported by Clang and can cause warnings
    # if used with -Werror so they are removed here.
    if(COMPUTECPP_USE_COMPILER_DRIVER)
      set(CMAKE_CXX_COMPILER ${ComputeCpp_DEVICE_COMPILER_EXECUTABLE})
      string(REPLACE "-Wlogical-op" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
      string(REPLACE "-Wno-psabi" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
    endif()
    list(APPEND COMPUTECPP_USER_FLAGS
      -DEIGEN_NO_ASSERTION_CHECKING=1
      -no-serial-memop
      -Xclang
      -cl-mad-enable)
  endif(EIGEN_SYCL_ComputeCpp)
  include(SyclConfigureTesting)

  ei_add_test(cxx11_tensor_sycl)
  ei_add_test(cxx11_tensor_image_op_sycl)