Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-08-10 18:59:01 +08:00)

Merged in benoitsteiner/opencl (pull request PR-246)

Improved support for OpenCL

commit db3903498d

Eigen/Core (10 lines changed)
@@ -14,16 +14,6 @@
// first thing Eigen does: stop the compiler from committing suicide
#include "src/Core/util/DisableStupidWarnings.h"

/// This will no longer be needed after the next release of the computecppCE
#ifdef EIGEN_USE_SYCL
#undef min
#undef max
#undef isnan
#undef isinf
#undef isfinite
#include <SYCL/sycl.hpp>
#endif

// Handle NVCC/CUDA/SYCL
#if defined(__CUDACC__) || defined(__SYCL_DEVICE_ONLY__)
// Do not try asserts on CUDA and SYCL!
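The #undef block removed above guarded against function-like min/max/isnan-style macros leaking in from platform or C headers, which breaks <SYCL/sycl.hpp> and std:: templates alike. A minimal illustration of that failure mode and the workaround, assuming a hypothetical leaked macro (not from Eigen or ComputeCpp):

#include <algorithm>

// Hypothetical leaked macro, e.g. from <windows.h> built without NOMINMAX:
#define max(a, b) (((a) > (b)) ? (a) : (b))

int main() {
  // int bad = std::max(1, 2);    // fails to parse: 'max(' triggers the macro
  int ok1 = (std::max)(1, 2);     // parentheses suppress macro expansion
#undef max                        // the workaround the removed block applied
  int ok2 = std::max(3, 4);       // fine once the macro is gone
  return ok1 + ok2;
}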
cmake/EigenTesting.cmake

@@ -1,23 +1,23 @@
macro(ei_add_property prop value)
  get_property(previous GLOBAL PROPERTY ${prop})
  if ((NOT previous) OR (previous STREQUAL ""))
    set_property(GLOBAL PROPERTY ${prop} "${value}")
  else()
    set_property(GLOBAL PROPERTY ${prop} "${previous} ${value}")
  endif()
endmacro(ei_add_property)

#internal. See documentation of ei_add_test for details.
macro(ei_add_test_internal testname testname_with_suffix)
  set(targetname ${testname_with_suffix})

  if(EIGEN_ADD_TEST_FILENAME_EXTENSION)
    set(filename ${testname}.${EIGEN_ADD_TEST_FILENAME_EXTENSION})
  else()
    set(filename ${testname}.cpp)
  endif()

  if(EIGEN_ADD_TEST_FILENAME_EXTENSION STREQUAL cu)
    if(EIGEN_TEST_CUDA_CLANG)
      set_source_files_properties(${filename} PROPERTIES LANGUAGE CXX)

@@ -42,7 +42,7 @@ macro(ei_add_test_internal testname testname_with_suffix)
  else()
    add_executable(${targetname} ${filename})
  endif()

  if (targetname MATCHES "^eigen2_")
    add_dependencies(eigen2_buildtests ${targetname})
  else()

@@ -56,20 +56,20 @@ macro(ei_add_test_internal testname testname_with_suffix)
      ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_DEBUG_ASSERTS=1")
    endif(EIGEN_DEBUG_ASSERTS)
  endif(EIGEN_NO_ASSERTION_CHECKING)

  ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_MAX_SIZE=${EIGEN_TEST_MAX_SIZE}")

  ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_FUNC=${testname}")

  if(MSVC)
    ei_add_target_property(${targetname} COMPILE_FLAGS "/bigobj")
  endif()

  # let the user pass flags.
  if(${ARGC} GREATER 2)
    ei_add_target_property(${targetname} COMPILE_FLAGS "${ARGV2}")
  endif(${ARGC} GREATER 2)

  if(EIGEN_TEST_CUSTOM_CXX_FLAGS)
    ei_add_target_property(${targetname} COMPILE_FLAGS "${EIGEN_TEST_CUSTOM_CXX_FLAGS}")
  endif()
@@ -95,12 +95,12 @@ macro(ei_add_test_internal testname testname_with_suffix)
      # notice: no double quotes around ${libs_to_link} here. It may be a list.
      target_link_libraries(${targetname} ${libs_to_link})
    endif()
  endif()

  add_test(${testname_with_suffix} "${targetname}")

  # Specify target and test labels according to EIGEN_CURRENT_SUBPROJECT
  get_property(current_subproject GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT)
  if ((current_subproject) AND (NOT (current_subproject STREQUAL "")))
    set_property(TARGET ${targetname} PROPERTY LABELS "Build${current_subproject}")
    add_dependencies("Build${current_subproject}" ${targetname})

@@ -128,14 +128,14 @@ macro(ei_add_test_internal_sycl testname testname_with_suffix)
    OUTPUT ${include_file}
    COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${host_file}\\\"" > ${include_file}
    COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${bc_file}.sycl\\\"" >> ${include_file}
    DEPENDS ${filename}
    DEPENDS ${filename} ${bc_file}.sycl
    COMMENT "Building ComputeCpp integration header file ${include_file}"
  )
  # Add a custom target for the generated integration header
  add_custom_target(${testname}_integration_header_woho DEPENDS ${include_file})
  add_custom_target(${testname}_integration_header_sycl DEPENDS ${include_file})

  add_executable(${targetname} ${include_file})
  add_dependencies(${targetname} ${testname}_integration_header_woho)
  add_dependencies(${targetname} ${testname}_integration_header_sycl)
  add_sycl_to_target(${targetname} ${filename} ${CMAKE_CURRENT_BINARY_DIR})

  if (targetname MATCHES "^eigen2_")
@@ -258,7 +258,7 @@ macro(ei_add_test testname)
  else()
    set(filename ${testname}.cpp)
  endif()

  file(READ "${filename}" test_source)
  set(parts 0)
  string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"

@@ -379,7 +379,7 @@ macro(ei_testing_print_summary)
  elseif(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION)
    message(STATUS "Explicit vectorization disabled (alignment kept enabled)")
  else()

  message(STATUS "Maximal matrix/vector size: ${EIGEN_TEST_MAX_SIZE}")

  if(EIGEN_TEST_SSE2)

@@ -453,13 +453,13 @@ macro(ei_testing_print_summary)
  else()
    message(STATUS "ARMv8 NEON: Using architecture defaults")
  endif()

  if(EIGEN_TEST_ZVECTOR)
    message(STATUS "S390X ZVECTOR: ON")
  else()
    message(STATUS "S390X ZVECTOR: Using architecture defaults")
  endif()

  if(EIGEN_TEST_CXX11)
    message(STATUS "C++11: ON")
  else()

@@ -505,7 +505,7 @@ macro(ei_init_testing)
  set_property(GLOBAL PROPERTY EIGEN_FAILTEST_FAILURE_COUNT "0")
  set_property(GLOBAL PROPERTY EIGEN_FAILTEST_COUNT "0")

  # uncomment anytime you change the ei_get_compilerver_from_cxx_version_string macro
  # ei_test_get_compilerver_from_cxx_version_string()
endmacro(ei_init_testing)
@@ -514,22 +514,22 @@ macro(ei_set_sitename)
  # if the sitename is not yet set, try to set it
  if(NOT ${SITE} OR ${SITE} STREQUAL "")
    set(eigen_computername $ENV{COMPUTERNAME})
    set(eigen_hostname $ENV{HOSTNAME})
    if(eigen_hostname)
      set(SITE ${eigen_hostname})
    elseif(eigen_computername)
      set(SITE ${eigen_computername})
    endif()
  endif()
  # in case it is already set, enforce lower case
  if(SITE)
    string(TOLOWER ${SITE} SITE)
  endif()
endmacro(ei_set_sitename)

macro(ei_get_compilerver VAR)
  if(MSVC)
    # on windows system, we use a modified CMake script
    include(EigenDetermineVSServicePack)
    EigenDetermineVSServicePack( my_service_pack )

@@ -541,20 +541,20 @@ macro(ei_get_compilerver VAR)
  else()
    # on all other system we rely on ${CMAKE_CXX_COMPILER}
    # supporting a "--version" or "/version" flag

    if(WIN32 AND ${CMAKE_CXX_COMPILER_ID} EQUAL "Intel")
      set(EIGEN_CXX_FLAG_VERSION "/version")
    else()
      set(EIGEN_CXX_FLAG_VERSION "--version")
    endif()

    execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${EIGEN_CXX_FLAG_VERSION}
                    OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
    string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string})

    ei_get_compilerver_from_cxx_version_string("${eigen_cxx_compiler_version_string}" CNAME CVER)
    set(${VAR} "${CNAME}-${CVER}")

  endif()
endmacro(ei_get_compilerver)
@@ -563,13 +563,13 @@ endmacro(ei_get_compilerver)
# the testing macro call in ei_init_testing() of the EigenTesting.cmake file.
# See also the ei_test_get_compilerver_from_cxx_version_string macro at the end of the file
macro(ei_get_compilerver_from_cxx_version_string VERSTRING CNAME CVER)
  # extract possible compiler names
  string(REGEX MATCH "g\\+\\+" ei_has_gpp ${VERSTRING})
  string(REGEX MATCH "llvm|LLVM" ei_has_llvm ${VERSTRING})
  string(REGEX MATCH "gcc|GCC" ei_has_gcc ${VERSTRING})
  string(REGEX MATCH "icpc|ICC" ei_has_icpc ${VERSTRING})
  string(REGEX MATCH "clang|CLANG" ei_has_clang ${VERSTRING})

  # combine them
  if((ei_has_llvm) AND (ei_has_gpp OR ei_has_gcc))
    set(${CNAME} "llvm-g++")

@@ -584,7 +584,7 @@ macro(ei_get_compilerver_from_cxx_version_string VERSTRING CNAME CVER)
  else()
    set(${CNAME} "_")
  endif()

  # extract possible version numbers
  # first try to extract 3 isolated numbers:
  string(REGEX MATCH " [0-9]+\\.[0-9]+\\.[0-9]+" eicver ${VERSTRING})

@@ -602,9 +602,9 @@ macro(ei_get_compilerver_from_cxx_version_string VERSTRING CNAME CVER)
      endif()
    endif()
  endif()

  string(REGEX REPLACE ".(.*)" "\\1" ${CVER} ${eicver})

endmacro(ei_get_compilerver_from_cxx_version_string)

macro(ei_get_cxxflags VAR)

@@ -633,30 +633,30 @@ macro(ei_get_cxxflags VAR)
  elseif(EIGEN_TEST_SSE3)
    set(${VAR} SSE3)
  elseif(EIGEN_TEST_SSE2 OR IS_64BIT_ENV)
    set(${VAR} SSE2)
  endif()

  if(EIGEN_TEST_OPENMP)
    if (${VAR} STREQUAL "")
      set(${VAR} OMP)
    else()
      set(${VAR} ${${VAR}}-OMP)
    endif()
  endif()

  if(EIGEN_DEFAULT_TO_ROW_MAJOR)
    if (${VAR} STREQUAL "")
      set(${VAR} ROW)
    else()
      set(${VAR} ${${VAR}}-ROWMAJ)
    endif()
  endif()
endmacro(ei_get_cxxflags)

macro(ei_set_build_string)
  ei_get_compilerver(LOCAL_COMPILER_VERSION)
  ei_get_cxxflags(LOCAL_COMPILER_FLAGS)

  include(EigenDetermineOSVersion)
  DetermineOSVersion(OS_VERSION)

@@ -672,11 +672,11 @@ macro(ei_set_build_string)
  else()
    set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-64bit)
  endif()

  if(EIGEN_TEST_CXX11)
    set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-cxx11)
  endif()

  if(EIGEN_BUILD_STRING_SUFFIX)
    set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-${EIGEN_BUILD_STRING_SUFFIX})
  endif()
cmake/FindComputeCpp.cmake

@@ -1,6 +1,21 @@
#.rst:
# FindComputeCpp
#---------------
#
#   Copyright 2016 Codeplay Software Ltd.
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use these files except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

#########################
#   FindComputeCpp.cmake

@@ -8,6 +23,11 @@
#
#  Tools for finding and building with ComputeCpp.
#
#  User must define COMPUTECPP_PACKAGE_ROOT_DIR pointing to the ComputeCpp
#  installation.
#
#  Latest version of this file can be found at:
#    https://github.com/codeplaysoftware/computecpp-sdk

# Require CMake version 3.2.2 or higher
cmake_minimum_required(VERSION 3.2.2)

@@ -32,7 +52,6 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    message(FATAL_ERROR
      "host compiler - Not found! (clang version must be at least 3.6)")
  else()
    set(COMPUTECPP_DISABLE_GCC_DUAL_ABI "True")
    message(STATUS "host compiler - clang ${CMAKE_CXX_COMPILER_VERSION}")
  endif()
else()

@@ -48,11 +67,12 @@ mark_as_advanced(COMPUTECPP_64_BIT_CODE)
# Find OpenCL package
find_package(OpenCL REQUIRED)

# Find ComputeCpp package
if(EXISTS ${COMPUTECPP_PACKAGE_ROOT_DIR})
  message(STATUS "ComputeCpp package - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
# Find ComputeCpp packagee
if(NOT COMPUTECPP_PACKAGE_ROOT_DIR)
  message(FATAL_ERROR
    "ComputeCpp package - Not found! (please set COMPUTECPP_PACKAGE_ROOT_DIR")
else()
  message(FATAL_ERROR "ComputeCpp package - Not found! (please set COMPUTECPP_PACKAGE_ROOT_DIR) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
  message(STATUS "ComputeCpp package - Found")
endif()
option(COMPUTECPP_PACKAGE_ROOT_DIR "Path to the ComputeCpp Package")

@@ -61,9 +81,9 @@ find_program(COMPUTECPP_DEVICE_COMPILER compute++ PATHS
  ${COMPUTECPP_PACKAGE_ROOT_DIR} PATH_SUFFIXES bin)
if (EXISTS ${COMPUTECPP_DEVICE_COMPILER})
  mark_as_advanced(COMPUTECPP_DEVICE_COMPILER)
  message(STATUS "compute++ - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
  message(STATUS "compute++ - Found")
else()
  message(FATAL_ERROR "compute++ - Not found! (${COMPUTECPP_DEVICE_COMPILER}) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
  message(FATAL_ERROR "compute++ - Not found! (${COMPUTECPP_DEVICE_COMPILER})")
endif()

# Obtain the path to computecpp_info

@@ -71,9 +91,9 @@ find_program(COMPUTECPP_INFO_TOOL computecpp_info PATHS
  ${COMPUTECPP_PACKAGE_ROOT_DIR} PATH_SUFFIXES bin)
if (EXISTS ${COMPUTECPP_INFO_TOOL})
  mark_as_advanced(${COMPUTECPP_INFO_TOOL})
  message(STATUS "computecpp_info - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
  message(STATUS "computecpp_info - Found")
else()
  message(FATAL_ERROR "computecpp_info - Not found! (${COMPUTECPP_INFO_TOOL}) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
  message(FATAL_ERROR "computecpp_info - Not found! (${COMPUTECPP_INFO_TOOL})")
endif()

# Obtain the path to the ComputeCpp runtime library

@@ -85,15 +105,15 @@ if (EXISTS ${COMPUTECPP_RUNTIME_LIBRARY})
  mark_as_advanced(COMPUTECPP_RUNTIME_LIBRARY)
  message(STATUS "libComputeCpp.so - Found")
else()
  message(FATAL_ERROR "libComputeCpp.so - Not found! (${COMPUTECPP_PACKAGE_ROOT_DIR})")
  message(FATAL_ERROR "libComputeCpp.so - Not found!")
endif()

# Obtain the ComputeCpp include directory
set(COMPUTECPP_INCLUDE_DIRECTORY ${COMPUTECPP_PACKAGE_ROOT_DIR}/include/)
if (NOT EXISTS ${COMPUTECPP_INCLUDE_DIRECTORY})
  message(FATAL_ERROR "ComputeCpp includes - Not found! (${COMPUTECPP_PACKAGE_ROOT_DIR}/include/)")
  message(FATAL_ERROR "ComputeCpp includes - Not found!")
else()
  message(STATUS "ComputeCpp includes - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
  message(STATUS "ComputeCpp includes - Found")
endif()

# Obtain the package version

@@ -144,7 +164,7 @@ endif()
#
#  targetName : Name of the target.
#  sourceFile : Source file to be compiled.
#  binaryDir : Intermediate output directory for the integration header.
#  binaryDir : Intermediate directory to output the integration header.
#
function(__build_spir targetName sourceFile binaryDir)

@@ -176,12 +196,13 @@ function(__build_spir targetName sourceFile binaryDir)
    OUTPUT ${outputSyclFile}
    COMMAND ${COMPUTECPP_DEVICE_COMPILER}
            ${COMPUTECPP_DEVICE_COMPILER_FLAGS}
            -I${COMPUTECPP_INCLUDE_DIRECTORY}
            -isystem ${COMPUTECPP_INCLUDE_DIRECTORY}
            ${COMPUTECPP_PLATFORM_SPECIFIC_ARGS}
            ${device_compiler_includes}
            -o ${outputSyclFile}
            -c ${CMAKE_CURRENT_SOURCE_DIR}/${sourceFile}
    DEPENDS ${sourceFile}
    WORKING_DIRECTORY ${binaryDir}
    COMMENT "Building ComputeCpp integration header file ${outputSyclFile}")

  # Add a custom target for the generated integration header

@@ -190,10 +211,6 @@ function(__build_spir targetName sourceFile binaryDir)
  # Add a dependency on the integration header
  add_dependencies(${targetName} ${targetName}_integration_header)

  # Force inclusion of the integration header for the host compiler
  #set(compileFlags -include ${include_file} "-Wall")
  target_compile_options(${targetName} PUBLIC ${compileFlags})

  # Set the host compiler C++ standard to C++11
  set_property(TARGET ${targetName} PROPERTY CXX_STANDARD 11)

@@ -210,11 +227,11 @@ endfunction()
#######################
#
#  Adds a SYCL compilation custom command associated with an existing
#  target and sets a dependency on that new command.
#  target and sets a dependancy on that new command.
#
#  targetName : Name of the target to add a SYCL to.
#  sourceFile : Source file to be compiled for SYCL.
#  binaryDir : Intermediate output directory for the integration header.
#  binaryDir : Intermediate directory to output the integration header.
#
function(add_sycl_to_target targetName sourceFile binaryDir)
unsupported/Eigen/CXX11/Tensor

@@ -13,6 +13,18 @@

#include "../../../Eigen/Core"

#ifdef EIGEN_USE_SYCL
#undef min
#undef max
#undef isnan
#undef isinf
#undef isfinite
#include <SYCL/sycl.hpp>
#include <map>
#include <memory>
#include <utility>
#endif

#include <Eigen/src/Core/util/DisableStupidWarnings.h>

#include "../SpecialFunctions"

@@ -69,10 +81,6 @@ typedef unsigned __int64 uint64_t;
#endif
#endif

#ifdef EIGEN_USE_SYCL
#include <SYCL/sycl.hpp>
#endif

#include "src/Tensor/TensorMacros.h"
#include "src/Tensor/TensorForwardDeclarations.h"
#include "src/Tensor/TensorMeta.h"

@@ -81,7 +89,6 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorDeviceDefault.h"
#include "src/Tensor/TensorDeviceThreadPool.h"
#include "src/Tensor/TensorDeviceCuda.h"
#include "src/Tensor/TensorSycl.h"
#include "src/Tensor/TensorDeviceSycl.h"
#include "src/Tensor/TensorIndexList.h"
#include "src/Tensor/TensorDimensionList.h"

@@ -128,6 +135,7 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorAssign.h"
#include "src/Tensor/TensorScan.h"

#include "src/Tensor/TensorSycl.h"
#include "src/Tensor/TensorExecutor.h"
#include "src/Tensor/TensorDevice.h"
unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h

@@ -1,12 +1,11 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Mehdi Goli    Codeplay Software Ltd.
// Ralph Potter  Codeplay Software Ltd.
// Luke Iwanski  Codeplay Software Ltd.
// Cummins Chris PhD student at The University of Edinburgh.
// Contact: <eigen@codeplay.com>
// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>

//
// This Source Code Form is subject to the terms of the Mozilla

@@ -17,106 +16,107 @@
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H

namespace Eigen {
/// \struct BufferT is used to specialise add_sycl_buffer function for
// two types of buffer we have. When the MapAllocator is true, we create the
// sycl buffer with MapAllocator.
/// We have to const_cast the input pointer in order to work around the fact
/// that sycl does not accept map allocator for const pointer.
template <typename T, bool MapAllocator>
struct BufferT {
  using Type = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>;
  static inline void add_sycl_buffer(
      const T *ptr, size_t num_bytes,
      std::map<const void *, std::shared_ptr<void>> &buffer_map) {
    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(
        ptr, std::shared_ptr<void>(std::make_shared<Type>(
                 Type(const_cast<T *>(ptr), cl::sycl::range<1>(num_bytes))))));
  }
};

/// specialisation of the \ref BufferT when the MapAllocator is false. In this
/// case we only create the device-only buffer.
template <typename T>
struct BufferT<T, false> {
  using Type = cl::sycl::buffer<T, 1>;
  static inline void add_sycl_buffer(
      const T *ptr, size_t num_bytes,
      std::map<const void *, std::shared_ptr<void>> &buffer_map) {
    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(
        ptr, std::shared_ptr<void>(
                 std::make_shared<Type>(Type(cl::sycl::range<1>(num_bytes))))));
  }
};

struct SyclDevice {
  /// class members
  /// sycl queue
  cl::sycl::queue &m_queue;
  mutable cl::sycl::queue m_queue;
  /// std::map is the container used to make sure that we create only one buffer
  /// per pointer. The lifespan of the buffer
  /// now depends on the lifespan of SyclDevice. If a non-read-only pointer is
  /// needed to be accessed on the host we should manually deallocate it.
  /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
  /// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
  mutable std::map<const void *, std::shared_ptr<void>> buffer_map;

  SyclDevice(cl::sycl::queue &q) : m_queue(q) {}
  /// creating device by using selector
  template<typename dev_Selector> SyclDevice(dev_Selector s)
      :
#ifdef EIGEN_EXCEPTIONS
        m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
          for (const auto& e : l) {
            try {
              std::rethrow_exception(e);
            } catch (cl::sycl::exception e) {
              std::cout << e.what() << std::endl;
            }
          }
        }))
#else
        m_queue(cl::sycl::queue(s))
#endif
  {}
  // destructor
  ~SyclDevice() { deallocate_all(); }

  template <typename T>
  void deallocate(const T *p) const {
  template <typename T> void deallocate(T *p) const {
    auto it = buffer_map.find(p);
    if (it != buffer_map.end()) {
      buffer_map.erase(it);
      internal::aligned_free(p);
    }
  }
  void deallocate_all() const { buffer_map.clear(); }
  void deallocate_all() const {
    std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
    while (it!=buffer_map.end()) {
      auto p=it->first;
      buffer_map.erase(it);
      internal::aligned_free(const_cast<void*>(p));
      it=buffer_map.begin();
    }
    buffer_map.clear();
  }

  /// creation of sycl accessor for a buffer. This function first tries to find
  /// the buffer in the buffer_map.
  /// If found it gets the accessor from it, if not, the function then adds an
  /// entry by creating a sycl buffer
  /// for that particular pointer.
  template <cl::sycl::access::mode AcMd, bool MapAllocator, typename T>
  inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
  get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh,
                    const T *ptr) const {
    auto it = buffer_map.find(ptr);
    if (it == buffer_map.end()) {
      BufferT<T, MapAllocator>::add_sycl_buffer(ptr, num_bytes, buffer_map);
    }
    return (
        ((typename BufferT<T, MapAllocator>::Type *)(buffer_map.at(ptr).get()))
            ->template get_access<AcMd>(cgh));
  /// the buffer in the buffer_map. If found it gets the accessor from it, if not,
  /// the function then adds an entry by creating a sycl buffer for that particular pointer.
  template <cl::sycl::access::mode AcMd, typename T> inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
  get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
    return (get_sycl_buffer<T>(num_bytes, ptr)->template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
  }

  template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
    using Type = cl::sycl::buffer<T, 1>;
    std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
        [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
    (static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
    return ret;
  }

  template <typename T> inline cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
    return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(ptr, num_bytes).first->second.get());
  }

  /// allocating memory on the cpu
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
    return internal::aligned_malloc(num_bytes);
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
    return internal::aligned_malloc(8);
  }

  // some runtime conditions that can be applied here
  bool isDeviceSuitable() const { return true; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void *buffer) const {
    internal::aligned_free(buffer);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src,
                                                    size_t n) const {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
    ::memcpy(dst, src, n);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(
      void *dst, const void *src, size_t n) const {
    memcpy(dst, src, n);

  template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
    auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
    memcpy(host_acc.get_pointer(), src, n);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(
      void *dst, const void *src, size_t n) const {
    memcpy(dst, src, n);
  /// with the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon.
  template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
    auto it = buffer_map.find(src);
    if (it != buffer_map.end()) {
      auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::host_buffer>();
      memcpy(dst,host_acc.get_pointer(), n);
    } else{
      eigen_assert("no device memory found. The memory might be destroyed before creation");
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c,
                                                    size_t n) const {

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
    ::memset(buffer, c, n);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
    return 1;
  }
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
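For orientation, here is a usage sketch of the SyclDevice defined above, modelled loosely on Eigen's SYCL tensor tests from the same period. The choice of selector and the explicit copy-back through memcpyDeviceToHost are illustrative assumptions, not part of this commit:

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>
#include <vector>

int main() {
  cl::sycl::gpu_selector selector;          // any SYCL device selector works
  Eigen::SyclDevice sycl_device(selector);  // owns the queue and the buffer_map

  Eigen::Tensor<float, 1> in(100), out(100);
  in.setRandom();

  // TensorMap wraps a raw host pointer; the device lazily registers a SYCL
  // buffer for it in buffer_map on first use.
  Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_in(in.data(), 100);
  Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_out(out.data(), 100);

  gpu_out.device(sycl_device) = gpu_in * 2.0f;  // evaluated on the SYCL device

  // Read the result back via the host-accessor path shown above (size in bytes).
  std::vector<float> result(100);
  sycl_device.memcpyDeviceToHost(result.data(), out.data(), 100 * sizeof(float));
  return 0;
}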
unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h

@@ -47,13 +47,13 @@ struct traits<TensorEvalToOp<XprType, MakePointer_> >
template<typename XprType, template <class> class MakePointer_>
struct eval<TensorEvalToOp<XprType, MakePointer_>, Eigen::Dense>
{
  typedef const TensorEvalToOp<XprType>& type;
  typedef const TensorEvalToOp<XprType, MakePointer_>& type;
};

template<typename XprType, template <class> class MakePointer_>
struct nested<TensorEvalToOp<XprType, MakePointer_>, 1, typename eval<TensorEvalToOp<XprType, MakePointer_> >::type>
{
  typedef TensorEvalToOp<XprType> type;
  typedef TensorEvalToOp<XprType, MakePointer_> type;
};

}  // end namespace internal
unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h

@@ -33,7 +33,7 @@ template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType> class TensorCwiseTernaryOp;
template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp;
template<typename Op, typename Dims, typename XprType> class TensorReductionOp;
template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ = MakePointer > class TensorReductionOp;
template<typename XprType> class TensorIndexTupleOp;
template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp;
template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
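This defaulted template-template parameter is the whole compatibility story: call sites that never mention MakePointer_ keep resolving to plain host pointers, while the SYCL code paths can rebind the pointer type. A self-contained model of the pattern — all names here are hypothetical stand-ins, not Eigen API:

template <class T> struct MakePointer { typedef T* Type; };

// Stand-in for a rebinder that would yield a device address-space pointer.
template <class T> struct MakeTaggedPointer { typedef T* Type; };

// The default keeps existing instantiations such as Reduction<int> valid,
// exactly as TensorReductionOp stays source-compatible above.
template <typename Op, template <class> class MakePointer_ = MakePointer>
struct Reduction {
  typename MakePointer_<float>::Type result;  // float* with the default
};

int main() {
  Reduction<int> host_side;                       // uses the default policy
  Reduction<int, MakeTaggedPointer> device_side;  // rebound pointer policy
  host_side.result = device_side.result = nullptr;
  return 0;
}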
unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h

@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd <eigen@codeplay.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed

@@ -20,8 +21,8 @@ namespace Eigen {
 */

namespace internal {
template<typename Op, typename Dims, typename XprType>
struct traits<TensorReductionOp<Op, Dims, XprType> >
template<typename Op, typename Dims, typename XprType,template <class> class MakePointer_ >
struct traits<TensorReductionOp<Op, Dims, XprType, MakePointer_> >
 : traits<XprType>
{
  typedef traits<XprType> XprTraits;

@@ -31,18 +32,24 @@ struct traits<TensorReductionOp<Op, Dims, XprType> >
  typedef typename XprType::Nested Nested;
  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
  static const int Layout = XprTraits::Layout;

  template <class T> struct MakePointer {
    // Intermediate typedef to workaround MSVC issue.
    typedef MakePointer_<T> MakePointerT;
    typedef typename MakePointerT::Type Type;
  };
};

template<typename Op, typename Dims, typename XprType>
struct eval<TensorReductionOp<Op, Dims, XprType>, Eigen::Dense>
template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
struct eval<TensorReductionOp<Op, Dims, XprType, MakePointer_>, Eigen::Dense>
{
  typedef const TensorReductionOp<Op, Dims, XprType>& type;
  typedef const TensorReductionOp<Op, Dims, XprType, MakePointer_>& type;
};

template<typename Op, typename Dims, typename XprType>
struct nested<TensorReductionOp<Op, Dims, XprType>, 1, typename eval<TensorReductionOp<Op, Dims, XprType> >::type>
template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
struct nested<TensorReductionOp<Op, Dims, XprType, MakePointer_>, 1, typename eval<TensorReductionOp<Op, Dims, XprType, MakePointer_> >::type>
{
  typedef TensorReductionOp<Op, Dims, XprType> type;
  typedef TensorReductionOp<Op, Dims, XprType, MakePointer_> type;
};

@@ -339,8 +346,8 @@ __global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnTy
}  // end namespace internal

template <typename Op, typename Dims, typename XprType>
class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>, ReadOnlyAccessors> {
template <typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType, MakePointer_>, ReadOnlyAccessors> {
  public:
    typedef typename Eigen::internal::traits<TensorReductionOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;

@@ -371,18 +378,19 @@ class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>

// Eval as rvalue
template<typename Op, typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
{
  typedef TensorReductionOp<Op, Dims, ArgType> XprType;
  typedef TensorReductionOp<Op, Dims, ArgType, MakePointer_> XprType;
  typedef typename XprType::Index Index;
  typedef ArgType ChildType;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
  static const int NumInputDims = internal::array_size<InputDimensions>::value;
  static const int NumReducedDims = internal::array_size<Dims>::value;
  static const int NumOutputDims = NumInputDims - NumReducedDims;
  typedef typename internal::conditional<NumOutputDims==0, Sizes<>, DSizes<Index, NumOutputDims> >::type Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device> Self;
  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Self;
  static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
  typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

@@ -401,7 +409,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
  static const bool RunningFullReduction = (NumOutputDims==0);

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device)
      : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device), m_xpr_dims(op.dims())
  {
    EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),

@@ -471,25 +479,35 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(CoeffReturnType* data) {
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
    m_impl.evalSubExprsIfNeeded(NULL);

    // Use the FullReducer if possible.
    if (RunningFullReduction &&
    if ((RunningFullReduction && RunningOnSycl) ||(RunningFullReduction &&
        internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
        ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
         !RunningOnGPU)) {
         !RunningOnGPU))) {
      bool need_assign = false;
      if (!data) {
        m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
        data = m_result;
        need_assign = true;
      }

      Op reducer(m_reducer);
      internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
      return need_assign;
    }
    else if(RunningOnSycl){
      const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
      const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
      if (!data) {
        data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
        m_result = data;
      }
      Op reducer(m_reducer);
      internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
      return (m_result != NULL);
    }

    // Attempt to use an optimized reduction.
    else if (RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) {

@@ -572,7 +590,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    if ((RunningFullReduction || RunningOnGPU) && m_result) {
    if ((RunningOnSycl || RunningFullReduction || RunningOnGPU) && m_result) {
      return *(m_result + index);
    }
    Op reducer(m_reducer);

@@ -644,7 +662,14 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
      }
    }

  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
  EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return m_result; }
  /// required by sycl in order to extract the accessor
  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
  /// added for sycl in order to construct the buffer from the sycl device
  const Device& device() const{return m_device;}
  /// added for sycl in order to re-construct the reduction eval on the device for the sub-kernel
  const Dims& xprDims() const {return m_xpr_dims;}

  private:
  template <int, typename, typename> friend struct internal::GenericDimReducer;

@@ -737,12 +762,18 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
  // For full reductions
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
  static const bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
  static const bool RunningOnSycl = false;
#elif defined(EIGEN_USE_SYCL)
  static const bool RunningOnSycl = internal::is_same<typename internal::remove_all<Device>::type, Eigen::SyclDevice>::value;
  static const bool RunningOnGPU = false;
#else
  static const bool RunningOnGPU = false;
  static const bool RunningOnSycl = false;
#endif
  CoeffReturnType* m_result;
  typename MakePointer_<CoeffReturnType>::Type m_result;

  const Device& m_device;
  const Dims& m_xpr_dims;
};

}  // end namespace Eigen
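RunningOnSycl and RunningOnGPU make the backend choice a compile-time constant per evaluator instantiation, so the branches in evalSubExprsIfNeeded and coeff cost nothing at runtime. A minimal sketch of the idiom using standard type traits — the device types below are stand-ins, not Eigen's:

#include <cstdio>
#include <type_traits>

struct CpuDevice {};
struct SyclLikeDevice {};  // stand-in for Eigen::SyclDevice

template <typename Device>
struct Evaluator {
  // Fixed at instantiation time, mirroring the RunningOnSycl flag above.
  static const bool RunningOnSycl =
      std::is_same<typename std::remove_cv<Device>::type, SyclLikeDevice>::value;

  void eval() const {
    if (RunningOnSycl)             // trivially folded by the compiler
      std::puts("SYCL reduction path");
    else
      std::puts("default reduction path");
  }
};

int main() {
  Evaluator<CpuDevice>().eval();       // prints the default path
  Evaluator<SyclLikeDevice>().eval();  // prints the SYCL path
  return 0;
}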
unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h (new file, 242 lines)
@@ -0,0 +1,242 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Mehdi Goli    Codeplay Software Ltd.
// Ralph Potter  Codeplay Software Ltd.
// Luke Iwanski  Codeplay Software Ltd.
// Contact: <eigen@codeplay.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

/*****************************************************************
 * TensorSyclPlaceHolderExpr.h
 *
 * \brief:
 *  This is the specialisation of the placeholder expression based on the
 * operation type
 *
 *****************************************************************/

#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP

namespace Eigen {
namespace internal {

template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
template<typename BufferTOut, typename BufferTIn>
static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
  do {
    auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable {
      cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
                              cl::sycl::range<1>{std::min(length, local)}};
      /* Two accessors are used: one to the buffer that is being reduced,
       * and a second to local memory, used to store intermediate data. */
      auto aI =
          bufI.template get_access<cl::sycl::access::mode::read_write>(h);
      auto aOut =
          bufOut->template get_access<cl::sycl::access::mode::discard_write>(h);
      cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
                         cl::sycl::access::target::local>
          scratch(cl::sycl::range<1>(local), h);

      /* The parallel_for invocation chosen is the variant with an nd_item
       * parameter, since the code requires barriers for correctness. */
      h.parallel_for<KernelName>(
          r, [aOut, aI, scratch, local, length](cl::sycl::nd_item<1> id) {
            size_t globalid = id.get_global(0);
            size_t localid = id.get_local(0);
            /* All threads collectively read from global memory into local.
             * The barrier ensures all threads' IO is resolved before
             * execution continues (strictly speaking, all threads within
             * a single work-group - there is no co-ordination between
             * work-groups, only work-items). */
            if (globalid < length) {
              scratch[localid] = aI[globalid];
            }
            id.barrier(cl::sycl::access::fence_space::local_space);

            /* Apply the reduction operation between the current local
             * id and the one on the other half of the vector. */
            if (globalid < length) {
              int min = (length < local) ? length : local;
              for (size_t offset = min / 2; offset > 0; offset /= 2) {
                if (localid < offset) {
                  scratch[localid] += scratch[localid + offset];
                }
                id.barrier(cl::sycl::access::fence_space::local_space);
              }
              /* The final result will be stored in local id 0. */
              if (localid == 0) {
                aI[id.get_group(0)] = scratch[localid];
                if((length<=local) && globalid ==0){
                  aOut[globalid]=scratch[localid];
                }
              }
            }
          });
    };
    dev.m_queue.submit(f);
    dev.m_queue.throw_asynchronous();

    /* At this point, you could queue::wait_and_throw() to ensure that
     * errors are caught quickly. However, this would likely impact
     * performance negatively. */
    length = length / local;

  } while (length > 1);

}

};
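Stripped of the accessor plumbing, the kernel above is a textbook work-group tree reduction: each round halves the active range and folds the upper half into the lower half, with a barrier between rounds. A sequential model of the same index arithmetic, assuming a power-of-two input for brevity:

#include <cstdio>
#include <vector>

// Host-side model of one work-group's rounds: after each pass, element i
// holds scratch[i] + scratch[i + offset]; on a device a barrier separates
// the passes (the id.barrier(...) calls in the kernel above).
float workgroup_reduce(std::vector<float> scratch) {
  for (std::size_t offset = scratch.size() / 2; offset > 0; offset /= 2)
    for (std::size_t i = 0; i < offset; ++i)  // "localid < offset" in the kernel
      scratch[i] += scratch[i + offset];
  return scratch[0];  // what the kernel writes out for local id 0
}

int main() {
  std::vector<float> v{1, 2, 3, 4, 5, 6, 7, 8};
  std::printf("%g\n", workgroup_reduce(v));  // prints 36
  return 0;
}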
/// For now let's start with a full reducer
/// Self is useless here because in expression construction we are going to treat reduction as a leaf node.
/// We want to take the reduction child, build a construction from it and apply the full reducer function on it. The FullReducer applies the
/// reduction operation on the child of the reduction. Once it is done the reduction is an empty shell and can be thrown away and treated as
// a leaf node.
template <typename Self, typename Op, bool Vectorizable>
struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {

  typedef typename Self::CoeffReturnType CoeffReturnType;
  static const bool HasOptimizedImplementation = false;

  static void run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output) {
    typedef const typename Self::ChildType HostExpr; /// this is the child of reduction
    typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
    auto functors = TensorSycl::internal::extractFunctors(self.impl());
    int red_factor =256; /// initial reduction. If the size is less than red_factor we only create one thread.
    size_t inputSize =self.impl().dimensions().TotalSize();
    size_t rng = inputSize/red_factor; // the total number of threads initially is half the size of the input
    size_t remaining = inputSize% red_factor;
    if(rng ==0) {
      red_factor=1;
    };
    size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
    size_t GRange=std::max((size_t )1, rng);

    // convert global range to power of 2 for reduction
    GRange--;
    GRange |= GRange >> 1;
    GRange |= GRange >> 2;
    GRange |= GRange >> 4;
    GRange |= GRange >> 8;
    GRange |= GRange >> 16;
#if __x86_64__ || __ppc64__ || _WIN64
    GRange |= GRange >> 32;
#endif
    GRange++;
    size_t outTileSize = tileSize;
    /// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
    if (GRange < outTileSize) outTileSize=GRange;
    // getting final out buffer at the moment the created buffer is true because there is no need for assign
    auto out_buffer =dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
    /// creating the shared memory for calculating reduction.
    /// This one is used to collect all the reduced values of shared memory as we don't have a global barrier on the GPU. Once it is saved we can
    /// recursively apply the reduction on it in order to reduce the whole.
    auto temp_global_buffer =cl::sycl::buffer<CoeffReturnType, 1>(cl::sycl::range<1>(GRange));
    typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
    Dims dims= self.xprDims();
    Op functor = reducer;
    dev.m_queue.submit([&](cl::sycl::handler &cgh) {
      // create a tuple of accessors from Evaluator
      auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
      auto tmp_global_accessor = temp_global_buffer. template get_access<cl::sycl::access::mode::read_write, cl::sycl::access::target::global_buffer>(cgh);

      cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(outTileSize)), [=](cl::sycl::nd_item<1> itemID) {
        typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
        auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
        /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviours:
        /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leaf node to pass the
        /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator, the former is created here.
        const auto device_self_expr= TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
        /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to the run function. The difference is
        /// the device_evaluator is detectable and recognisable on the device.
        auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
        /// const cast added as a naive solution to solve the qualifier drop error
        auto globalid=itemID.get_global_linear_id();

        if(globalid<rng)
          tmp_global_accessor.get_pointer()[globalid]=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*globalid, red_factor, const_cast<Op&>(functor));
        else
          tmp_global_accessor.get_pointer()[globalid]=static_cast<CoeffReturnType>(0);

        if(remaining!=0 && globalid==0 )
          // this will add the rest of the input buffer when the input size is not divisible by red_factor.
          tmp_global_accessor.get_pointer()[globalid]+=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*(rng), remaining, const_cast<Op&>(functor));
      });
    });
    dev.m_queue.throw_asynchronous();

    /// This is used to recursively reduce the tmp value to an element of 1;
    syclGenericBufferReducer<CoeffReturnType,HostExpr>::run(out_buffer, temp_global_buffer,dev, GRange, outTileSize);
  }

};
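The GRange bit-twiddling in the reducer above rounds the launch range up to the next power of two, which the halving scheme needs in order to terminate cleanly. The same trick as a standalone helper, with a worked case:

#include <cassert>
#include <cstddef>

// Smear the highest set bit of n-1 rightwards, then add one:
// for n = 300, 299 = 0b100101011 -> 0b111111111 (511) -> 512.
std::size_t next_pow2(std::size_t n) {
  n--;
  n |= n >> 1;  n |= n >> 2;  n |= n >> 4;
  n |= n >> 8;  n |= n >> 16;
#if defined(__x86_64__) || defined(__ppc64__) || defined(_WIN64)
  n |= n >> 32;  // only needed when size_t is 64 bits wide
#endif
  return n + 1;
}

int main() {
  assert(next_pow2(300) == 512);
  assert(next_pow2(512) == 512);  // powers of two map to themselves
  return 0;
}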
template <typename Self, typename Op>
struct InnerReducer<Self, Op, const Eigen::SyclDevice> {

  typedef typename Self::CoeffReturnType CoeffReturnType;
  static const bool HasOptimizedImplementation = false;

  static bool run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output, typename Self::Index , typename Self::Index num_coeffs_to_preserve) {
    typedef const typename Self::ChildType HostExpr; /// this is the child of reduction
    typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
    auto functors = TensorSycl::internal::extractFunctors(self.impl());

    size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;

    size_t GRange=num_coeffs_to_preserve;
    if (tileSize>GRange) tileSize=GRange;
    else if(GRange>tileSize){
      size_t xMode = GRange % tileSize;
      if (xMode != 0) GRange += (tileSize - xMode);
    }
    // getting final out buffer at the moment the created buffer is true because there is no need for assign
    /// creating the shared memory for calculating reduction.
    /// This one is used to collect all the reduced values of shared memory as we don't have a global barrier on the GPU. Once it is saved we can
    /// recursively apply the reduction on it in order to reduce the whole.
    typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
    Dims dims= self.xprDims();
    Op functor = reducer;

    dev.m_queue.submit([&](cl::sycl::handler &cgh) {
      // create a tuple of accessors from Evaluator
      auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
      auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write>(num_coeffs_to_preserve,cgh, output);

      cgh.parallel_for<Self>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
        typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
        auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
        /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviours:
        /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leaf node to pass the
        /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator, the former is created here.
        const auto device_self_expr= TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
        /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to the run function. The difference is
        /// the device_evaluator is detectable and recognisable on the device.
        typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeiceSelf;
        auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
        /// const cast added as a naive solution to solve the qualifier drop error
        auto globalid=itemID.get_global_linear_id();
        if (globalid< static_cast<size_t>(num_coeffs_to_preserve)) {
          typename DeiceSelf::CoeffReturnType accum = functor.initialize();
          GenericDimReducer<DeiceSelf::NumReducedDims-1, DeiceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid),const_cast<Op&>(functor), &accum);
          functor.finalize(accum);
          output_accessor.get_pointer()[globalid]= accum;
        }
      });
    });
    dev.m_queue.throw_asynchronous();
    return false;
  }
};
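InnerReducer sizes its launch differently from FullReducer: instead of a power of two, GRange is padded up to a multiple of the work-group size so that every preserved output coefficient gets a work-item. The xMode adjustment above as a standalone helper, under the same assumptions:

#include <cassert>
#include <cstddef>

// Pad n up to the next multiple of tile (a no-op when tile already divides n).
std::size_t round_up_to_tile(std::size_t n, std::size_t tile) {
  const std::size_t rem = n % tile;  // "xMode" in the code above
  return rem == 0 ? n : n + (tile - rem);
}

int main() {
  assert(round_up_to_tile(1000, 128) == 1024);  // 1000 % 128 = 104 -> +24
  assert(round_up_to_tile(1024, 128) == 1024);
  return 0;
}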
} // end namespace internal
} // namespace Eigen

#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h

@@ -22,6 +22,13 @@ struct MakeGlobalPointer {
  typedef typename cl::sycl::global_ptr<T>::pointer_t Type;
};

// global pointer to set different attribute state for a class
template <class T>
struct MakeLocalPointer {
  typedef typename cl::sycl::local_ptr<T>::pointer_t Type;
};

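MakeGlobalPointer and MakeLocalPointer are the concrete policies that the MakePointer_ parameter threads through the expression tree: one member declaration can resolve to a host pointer, a global_ptr or a local_ptr depending on which rebinder is supplied. A compilable model of the mechanism without SYCL — the pointer types are simplified stand-ins, so all three collapse to T* here:

#include <cstdio>

template <class T> struct MakeHostPointer { typedef T* Type; };
// Stand-ins for cl::sycl::global_ptr<T>::pointer_t and local_ptr<T>::pointer_t,
// which on a real device carry address-space qualifiers.
template <class T> struct MakeGlobalPtr { typedef T* Type; };
template <class T> struct MakeLocalPtr  { typedef T* Type; };

// One class template, several storage flavours, chosen by the policy.
template <class T, template <class> class MakePtr>
struct Storage {
  typename MakePtr<T>::Type data;
};

int main() {
  Storage<float, MakeHostPointer> host{nullptr};
  Storage<float, MakeGlobalPtr>   global{nullptr};
  Storage<float, MakeLocalPtr>    local{nullptr};
  std::printf("%p %p %p\n", (void*)host.data, (void*)global.data, (void*)local.data);
  return 0;
}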
namespace Eigen {
namespace TensorSycl {
namespace internal {

@@ -43,9 +50,7 @@ template<typename T> struct GetType<false, T>{
// tuple construction
#include "TensorSyclTuple.h"

// This file contains the PlaceHolder that replaces the actual data
#include "TensorSyclPlaceHolder.h"

// counting number of leaf at compile time
#include "TensorSyclLeafCount.h"

// The index PlaceHolder takes the actual expression and replaces the actual

@@ -57,9 +62,6 @@ template<typename T> struct GetType<false, T>{
// creation of an accessor tuple from a tuple of SYCL buffers
#include "TensorSyclExtractAccessor.h"

// actual data extraction using accessors
//#include "GetDeviceData.h"

// this is used to change the address space type in tensor map for GPU
#include "TensorSyclConvertToDeviceExpression.h"

@@ -70,6 +72,9 @@ template<typename T> struct GetType<false, T>{
// this is used to construct the expression on the device
#include "TensorSyclExprConstructor.h"

/// this is used for extracting tensor reduction
#include "TensorReductionSycl.h"

// kernel execution using fusion
#include "TensorSyclRun.h"
unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h

@@ -102,6 +102,18 @@ KERNELBROKERCONVERT(, false, TensorForcedEvalOp)
KERNELBROKERCONVERT(const, true, TensorEvalToOp)
KERNELBROKERCONVERT(, false, TensorEvalToOp)
#undef KERNELBROKERCONVERT

/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorReductionOp
#define KERNELBROKERCONVERTREDUCTION(CVQual)\
template <typename OP, typename Dim, typename subExpr, template <class> class MakePointer_>\
struct ConvertToDeviceExpression<CVQual TensorReductionOp<OP, Dim, subExpr, MakePointer_> > {\
  typedef CVQual TensorReductionOp<OP, Dim, typename ConvertToDeviceExpression<subExpr>::Type, MakeGlobalPointer> Type;\
};

KERNELBROKERCONVERTREDUCTION(const)
KERNELBROKERCONVERTREDUCTION()
#undef KERNELBROKERCONVERTREDUCTION

} // namespace internal
} // namespace TensorSycl
} // namespace Eigen
@ -33,8 +33,7 @@ struct EvalToLHSConstructor {
EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
};

/// \struct ExprConstructor is used to reconstruct the expression on the device
/// and
/// \struct ExprConstructor is used to reconstruct the expression on the device and
/// recreate the expression with MakeGlobalPointer containing the device address
/// space for the TensorMap pointers used in eval function.
/// It receives the original expression type, the functor of the node, the tuple
@ -49,7 +48,7 @@ struct ExprConstructor;
template <typename Scalar_, int Options_, int Options2_, int Options3_, int NumIndices_, typename IndexType_,\
template <class> class MakePointer_, size_t N, typename... Params>\
struct ExprConstructor< CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer>,\
CVQual Eigen::internal::PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\
Type expr;\
template <typename FuncDetector>\
@ -187,7 +186,7 @@ EVALTO()
#define FORCEDEVAL(CVQual)\
template <typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
struct ExprConstructor<CVQual TensorForcedEvalOp<OrigExpr, MakeGlobalPointer>,\
CVQual Eigen::internal::PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
CVQual PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
typedef CVQual TensorMap<Tensor<typename TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::Scalar,\
TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, 0, typename TensorForcedEvalOp<DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
Type expr;\
@ -200,14 +199,41 @@ FORCEDEVAL(const)
FORCEDEVAL()
#undef FORCEDEVAL

template <bool Conds, size_t X , size_t Y > struct ValueCondition {
  static const size_t Res = X;
};
template<size_t X, size_t Y> struct ValueCondition<false, X , Y> {
  static const size_t Res = Y;
};

/// specialisation of the \ref ExprConstructor struct when the node type is TensorReductionOp
#define SYCLREDUCTIONEXPR(CVQual)\
template <typename OP, typename Dim, typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
struct ExprConstructor<CVQual TensorReductionOp<OP, Dim, OrigExpr, MakeGlobalPointer>,\
CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dim, DevExpr>, N>, Params...> {\
static const size_t NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
typedef CVQual TensorMap<Tensor<typename TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::Scalar,\
NumIndices, 0, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
};

SYCLREDUCTIONEXPR(const)
SYCLREDUCTIONEXPR()
#undef SYCLREDUCTIONEXPR

/// template deduction for \ref ExprConstructor struct
template <typename OrigExpr, typename IndexExpr, typename FuncD, typename... Params>
auto createDeviceExpression(FuncD &funcD, const utility::tuple::Tuple<Params...> &t)
-> decltype(ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t)) {
  return ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t);
}
}
}
} // namespace Eigen

} /// namespace TensorSycl
} /// namespace internal
} /// namespace Eigen


#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXPR_CONSTRUCTOR_HPP
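ValueCondition above is a compile-time value select (a value analogue of std::conditional), used so a rank-0 reduction result can still be viewed through a one-dimensional TensorMap. A quick check of the behaviour:

#include <cstddef>
template <bool Conds, std::size_t X, std::size_t Y> struct ValueCondition {
  static const std::size_t Res = X;
};
template <std::size_t X, std::size_t Y> struct ValueCondition<false, X, Y> {
  static const std::size_t Res = Y;
};
// NumDimensions == 0 (a scalar result) is mapped to rank 1, otherwise kept:
static_assert(ValueCondition<true, 1, 4>::Res == 1, "scalar case promoted to rank 1");
static_assert(ValueCondition<false, 1, 4>::Res == 4, "ranked case unchanged");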
@ -57,9 +57,9 @@ struct AccessorConstructor{
return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)));
}
template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
-> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM, true,
-> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM,
typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, true, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
}
};

@ -73,14 +73,12 @@ struct ExtractAccessor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> >
}
};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// TensorCwiseNullaryOp, TensorCwiseUnaryOp and TensorBroadcastingOp
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseNullaryOp, TensorCwiseUnaryOp and TensorBroadcastingOp
template <template<class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<UnaryCategory<OP, RHSExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> > {};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// const TensorCwiseBinaryOp
/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorCwiseBinaryOp
template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> eval)
@ -88,9 +86,7 @@ struct ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr
return AccessorConstructor::getTuple(cgh, eval.left_impl(), eval.right_impl());
}
};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// TensorCwiseBinaryOp
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseBinaryOp
template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >{};
@ -105,8 +101,7 @@ struct ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2E
}
};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// TensorCwiseTernaryOp
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseTernaryOp
template <template<class, class, class, class> class TernaryCategory, typename OP, typename Arg1Expr, typename Arg2Expr, typename Arg3Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >{};
@ -127,8 +122,7 @@ template <typename IfExpr, typename ThenExpr, typename ElseExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >{};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// const TensorAssignOp
/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorAssignOp
template <typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> eval)
@ -137,14 +131,12 @@ struct ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, D
}
};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// TensorAssignOp
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorAssignOp
template <typename LHSExpr, typename RHSExpr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorAssignOp<LHSExpr, RHSExpr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> >{};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// const TensorMap
/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorMap
#define TENSORMAPEXPR(CVQual, ACCType)\
template <typename PlainObjectType, int Options_, typename Dev>\
struct ExtractAccessor<TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> > {\
@ -157,8 +149,7 @@ TENSORMAPEXPR(const, cl::sycl::access::mode::read)
TENSORMAPEXPR(, cl::sycl::access::mode::read_write)
#undef TENSORMAPEXPR

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// const TensorForcedEvalOp
/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorForcedEvalOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> eval)
@ -167,14 +158,12 @@ struct ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> > {
}
};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// TensorForcedEvalOp
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorForcedEvalOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorForcedEvalOp<Expr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> >{};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// const TensorEvalToOp
/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorEvalToOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<const TensorEvalToOp<Expr>, Dev> eval)
@ -183,19 +172,33 @@ struct ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> > {
}
};

/// specialisation of the \ref ExtractAccessor struct when the node type is
/// TensorEvalToOp
/// specialisation of the \ref ExtractAccessor struct when the node type is TensorEvalToOp
template <typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorEvalToOp<Expr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> >{};

/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorReductionOp
template <typename OP, typename Dim, typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> > {
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> eval)
-> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
}
};

/// specialisation of the \ref ExtractAccessor struct when the node type is TensorReductionOp
template <typename OP, typename Dim, typename Expr, typename Dev>
struct ExtractAccessor<TensorEvaluator<TensorReductionOp<OP, Dim, Expr>, Dev> >
: ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> >{};

/// template deduction for \ref ExtractAccessor
template <typename Evaluator>
auto createTupleOfAccessors(cl::sycl::handler& cgh, const Evaluator& expr)
-> decltype(ExtractAccessor<Evaluator>::getTuple(cgh, expr)) {
return ExtractAccessor<Evaluator>::getTuple(cgh, expr);
}
}
}
}

} /// namespace TensorSycl
} /// namespace internal
} /// namespace Eigen
#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_ACCESSOR_HPP
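ExtractAccessor walks the evaluator tree and requests one SYCL accessor per leaf buffer from the command-group handler. A standalone sketch of the underlying SYCL pattern being automated, assuming a SYCL 1.2-style implementation with the ComputeCpp-era header used elsewhere in this patch:

#include <SYCL/sycl.hpp>
int main() {
  float data[4] = {1.f, 2.f, 3.f, 4.f};
  cl::sycl::queue q;
  {
    cl::sycl::buffer<float, 1> buf(data, cl::sycl::range<1>(4));
    q.submit([&](cl::sycl::handler& cgh) {
      // one accessor per leaf buffer, requested inside the command group
      auto acc = buf.get_access<cl::sycl::access::mode::read_write>(cgh);
      cgh.parallel_for<class scale_kernel>(cl::sycl::range<1>(4),
          [=](cl::sycl::id<1> i) { acc[i] *= 2.0f; });
    });
  } // buffer destruction synchronises the result back into `data`
  return data[0] == 2.f ? 0 : 1;
}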
@ -141,7 +141,30 @@ template <typename RHSExpr, typename Dev>
struct FunctorExtractor<TensorEvaluator<TensorEvalToOp<RHSExpr>, Dev> >
: FunctorExtractor<TensorEvaluator<const TensorEvalToOp<RHSExpr>, Dev> > {};

template<typename Dim, size_t NumOutputDim> struct DimConstr {
  template<typename InDim>
  static inline Dim getDim(InDim dims) { return dims; }
};

template<typename Dim> struct DimConstr<Dim, 0> {
  template<typename InDim>
  static inline Dim getDim(InDim dims) { return Dim(dims.TotalSize()); }
};

template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
struct FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{
  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Evaluator;
  typedef typename Eigen::internal::conditional<Evaluator::NumOutputDims==0, DSizes<typename Evaluator::Index, 1>, typename Evaluator::Dimensions >::type Dimensions;
  const Dimensions m_dimensions;
  const Dimensions& dimensions() const { return m_dimensions; }
  FunctorExtractor(const TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>& expr)
  : m_dimensions(DimConstr<Dimensions, Evaluator::NumOutputDims>::getDim(expr.dimensions())) {}
};


template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
struct FunctorExtractor<TensorEvaluator<TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>
: FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{};
/// template deduction function for FunctorExtractor
template <typename Evaluator>
auto inline extractFunctors(const Evaluator& evaluator)-> FunctorExtractor<Evaluator> {
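DimConstr handles the rank-0 corner case: a full reduction has no output dimensions, so its size object is re-expressed as a one-element 1-D DSizes. A minimal sketch of the same trick with a stand-in size type (Sizes1 is illustrative, not a patch type):

#include <cstddef>
struct Sizes1 {               // stand-in for DSizes<Index, 1>
  std::size_t n;
  explicit Sizes1(std::size_t n_) : n(n_) {}
  std::size_t TotalSize() const { return n; }
};
template <typename Dim, std::size_t NumOutputDim> struct DimConstr {
  template <typename InDim> static Dim getDim(InDim d) { return d; }                 // rank kept
};
template <typename Dim> struct DimConstr<Dim, 0> {
  template <typename InDim> static Dim getDim(InDim d) { return Dim(d.TotalSize()); } // scalar -> 1-D
};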
@ -43,8 +43,7 @@ struct CategoryCount<Arg,Args...>{
static const size_t Count = LeafCount<Arg>::Count + CategoryCount<Args...>::Count;
};

/// specialisation of the \ref LeafCount struct when the node type is const
/// TensorMap
/// specialisation of the \ref LeafCount struct when the node type is const TensorMap
template <typename PlainObjectType, int Options_, template <class> class MakePointer_>
struct LeafCount<const TensorMap<PlainObjectType, Options_, MakePointer_> > {
static const size_t Count =1;
@ -61,18 +60,15 @@ struct LeafCount<const CategoryExpr<OP, RHSExpr...> >: CategoryCount<RHSExpr...>
template <template <class, class...> class CategoryExpr, typename OP, typename... RHSExpr>
struct LeafCount<CategoryExpr<OP, RHSExpr...> > :LeafCount<const CategoryExpr<OP, RHSExpr...> >{};

/// specialisation of the \ref LeafCount struct when the node type is
/// const TensorSelectOp is an exception
/// specialisation of the \ref LeafCount struct when the node type is const TensorSelectOp is an exception
template <typename IfExpr, typename ThenExpr, typename ElseExpr>
struct LeafCount<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr> > : CategoryCount<IfExpr, ThenExpr, ElseExpr> {};
/// specialisation of the \ref LeafCount struct when the node type is
/// TensorSelectOp
/// specialisation of the \ref LeafCount struct when the node type is TensorSelectOp
template <typename IfExpr, typename ThenExpr, typename ElseExpr>
struct LeafCount<TensorSelectOp<IfExpr, ThenExpr, ElseExpr> >: LeafCount<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr> > {};


/// specialisation of the \ref LeafCount struct when the node type is const
/// TensorAssignOp
/// specialisation of the \ref LeafCount struct when the node type is const TensorAssignOp
template <typename LHSExpr, typename RHSExpr>
struct LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >: CategoryCount<LHSExpr,RHSExpr> {};

@ -81,31 +77,38 @@ struct LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >: CategoryCount<LHSExpr
template <typename LHSExpr, typename RHSExpr>
struct LeafCount<TensorAssignOp<LHSExpr, RHSExpr> > :LeafCount<const TensorAssignOp<LHSExpr, RHSExpr> >{};

/// specialisation of the \ref LeafCount struct when the node type is const
/// TensorForcedEvalOp
/// specialisation of the \ref LeafCount struct when the node type is const TensorForcedEvalOp
template <typename Expr>
struct LeafCount<const TensorForcedEvalOp<Expr> > {
static const size_t Count =1;
};

/// specialisation of the \ref LeafCount struct when the node type is
/// TensorForcedEvalOp
/// specialisation of the \ref LeafCount struct when the node type is TensorForcedEvalOp
template <typename Expr>
struct LeafCount<TensorForcedEvalOp<Expr> >: LeafCount<const TensorForcedEvalOp<Expr> > {};

/// specialisation of the \ref LeafCount struct when the node type is const
/// TensorEvalToOp
/// specialisation of the \ref LeafCount struct when the node type is const TensorEvalToOp
template <typename Expr>
struct LeafCount<const TensorEvalToOp<Expr> > {
static const size_t Count = 1 + CategoryCount<Expr>::Count;
};

/// specialisation of the \ref LeafCount struct when the node type is
/// TensorEvalToOp
/// specialisation of the \ref LeafCount struct when the node type is const TensorReductionOp
template <typename OP, typename Dim, typename Expr>
struct LeafCount<const TensorReductionOp<OP, Dim, Expr> > {
static const size_t Count =1;
};

/// specialisation of the \ref LeafCount struct when the node type is TensorReductionOp
template <typename OP, typename Dim, typename Expr>
struct LeafCount<TensorReductionOp<OP, Dim, Expr> >: LeafCount<const TensorReductionOp<OP, Dim, Expr> >{};

/// specialisation of the \ref LeafCount struct when the node type is TensorEvalToOp
template <typename Expr>
struct LeafCount<TensorEvalToOp<Expr> >: LeafCount<const TensorEvalToOp<Expr> >{};
}
}
} // namespace Eigen

} /// namespace TensorSycl
} /// namespace internal
} /// namespace Eigen

#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_LEAF_COUNT_HPP
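LeafCount is a straightforward compile-time fold: leaves (TensorMap, forced-eval, and reduction nodes) count as one, and interior nodes sum their children. A self-contained sketch of the same recursion (Plus is an illustrative node type, not from the patch):

#include <cstddef>
template <typename T> struct LeafCount { static const std::size_t Count = 1; }; // a leaf
template <typename L, typename R> struct Plus {};                               // interior node
template <typename L, typename R> struct LeafCount<Plus<L, R>> {
  static const std::size_t Count = LeafCount<L>::Count + LeafCount<R>::Count;
};
static_assert(LeafCount<Plus<int, Plus<float, double>>>::Count == 3, "three leaves");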
@ -1,99 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Mehdi Goli Codeplay Software Ltd.
// Ralph Potter Codeplay Software Ltd.
// Luke Iwanski Codeplay Software Ltd.
// Contact: <eigen@codeplay.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

/*****************************************************************
 * TensorSyclPlaceHolder.h
 *
 * \brief:
 *  The PlaceHolder expressions are nothing but containers preserving
 * the order of the actual data in the tuple of SYCL buffers.
 *
 *****************************************************************/

#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_HPP
#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_HPP

namespace Eigen {
namespace internal {
/// \struct PlaceHolder
/// \brief PlaceHolder is used to replace the \ref TensorMap in the expression
/// tree.
/// PlaceHolder contains the order of the leaf node in the expression tree.
template <typename Scalar, size_t N>
struct PlaceHolder {
  static constexpr size_t I = N;
  typedef Scalar Type;
};

/// \brief specialisation of the PlaceHolder node for const TensorMap
#define TENSORMAPPLACEHOLDER(CVQual)\
template <typename PlainObjectType, int Options_, template <class> class MakePointer_, size_t N>\
struct PlaceHolder<CVQual TensorMap<PlainObjectType, Options_, MakePointer_>, N> {\
  static const size_t I = N;\
  typedef CVQual TensorMap<PlainObjectType, Options_, MakePointer_> Type;\
  typedef typename Type::Self Self;\
  typedef typename Type::Base Base;\
  typedef typename Type::Nested Nested;\
  typedef typename Type::StorageKind StorageKind;\
  typedef typename Type::Index Index;\
  typedef typename Type::Scalar Scalar;\
  typedef typename Type::RealScalar RealScalar;\
  typedef typename Type::CoeffReturnType CoeffReturnType;\
};

TENSORMAPPLACEHOLDER(const)
TENSORMAPPLACEHOLDER()
#undef TENSORMAPPLACEHOLDER

/// \brief specialisation of the PlaceHolder node for TensorForcedEvalOp. The
/// TensorForcedEvalOp acts as a leaf node for its parent node.
#define TENSORFORCEDEVALPLACEHOLDER(CVQual)\
template <typename Expression, size_t N>\
struct PlaceHolder<CVQual TensorForcedEvalOp<Expression>, N> {\
  static const size_t I = N;\
  typedef CVQual TensorForcedEvalOp<Expression> Type;\
  typedef typename Type::Nested Nested;\
  typedef typename Type::StorageKind StorageKind;\
  typedef typename Type::Index Index;\
  typedef typename Type::Scalar Scalar;\
  typedef typename Type::Packet Packet;\
  typedef typename Type::RealScalar RealScalar;\
  typedef typename Type::CoeffReturnType CoeffReturnType;\
  typedef typename Type::PacketReturnType PacketReturnType;\
};

TENSORFORCEDEVALPLACEHOLDER(const)
TENSORFORCEDEVALPLACEHOLDER()
#undef TENSORFORCEDEVALPLACEHOLDER

template <typename PlainObjectType, int Options_, template <class> class Makepointer_, size_t N>
struct traits<PlaceHolder<const TensorMap<PlainObjectType, Options_, Makepointer_>, N> >: public traits<PlainObjectType> {
  typedef traits<PlainObjectType> BaseTraits;
  typedef typename BaseTraits::Scalar Scalar;
  typedef typename BaseTraits::StorageKind StorageKind;
  typedef typename BaseTraits::Index Index;
  static const int NumDimensions = BaseTraits::NumDimensions;
  static const int Layout = BaseTraits::Layout;
  enum {
    Options = Options_,
    Flags = BaseTraits::Flags,
  };
};

template <typename PlainObjectType, int Options_, template <class> class Makepointer_, size_t N>
struct traits<PlaceHolder<TensorMap<PlainObjectType, Options_, Makepointer_>, N> >
: traits<PlaceHolder<const TensorMap<PlainObjectType, Options_, Makepointer_>, N> > {};

} // end namespace internal
} // end namespace Eigen

#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_HPP
@ -25,6 +25,17 @@
namespace Eigen {
namespace TensorSycl {
namespace internal {

/// \struct PlaceHolder
/// \brief PlaceHolder is used to replace the \ref TensorMap in the expression
/// tree.
/// PlaceHolder contains the order of the leaf node in the expression tree.
template <typename Scalar, size_t N>
struct PlaceHolder {
  static constexpr size_t I = N;
  typedef Scalar Type;
};

/// \struct PlaceHolderExpression
/// \brief it is used to create the PlaceHolder expression. The PlaceHolder
/// expression is a copy of the expression type in which the TensorMap of the expression has
@ -113,7 +124,7 @@ ASSIGNEXPR()
#define TENSORMAPEXPR(CVQual)\
template <typename Scalar_, int Options_, int Options2_, int NumIndices_, typename IndexType_, template <class> class MakePointer_, size_t N>\
struct PlaceHolderExpression< CVQual TensorMap< Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> {\
typedef CVQual Eigen::internal::PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> Type;\
typedef CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> Type;\
};

TENSORMAPEXPR(const)
@ -125,7 +136,7 @@ TENSORMAPEXPR()
#define FORCEDEVAL(CVQual)\
template <typename Expr, size_t N>\
struct PlaceHolderExpression<CVQual TensorForcedEvalOp<Expr>, N> {\
typedef CVQual Eigen::internal::PlaceHolder<CVQual TensorForcedEvalOp<Expr>, N> Type;\
typedef CVQual PlaceHolder<CVQual TensorForcedEvalOp<Expr>, N> Type;\
};

FORCEDEVAL(const)
@ -144,6 +155,18 @@ EVALTO(const)
EVALTO()
#undef EVALTO


/// specialisation of the \ref PlaceHolderExpression when the node is
/// TensorReductionOp
#define SYCLREDUCTION(CVQual)\
template <typename OP, typename Dims, typename Expr, size_t N>\
struct PlaceHolderExpression<CVQual TensorReductionOp<OP, Dims, Expr>, N>{\
typedef CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dims,Expr>, N> Type;\
};
SYCLREDUCTION(const)
SYCLREDUCTION()
#undef SYCLREDUCTION

/// template deduction for \ref PlaceHolderExpression struct
template <typename Expr>
struct createPlaceHolderExpression {
@ -151,8 +174,8 @@ struct createPlaceHolderExpression {
typedef typename PlaceHolderExpression<Expr, TotalLeaves - 1>::Type Type;
};

}
}
} // internal
} // TensorSycl
} // namespace Eigen

#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_PLACEHOLDER_EXPR_HPP
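Each placeholder carries the index of its leaf, which is also the leaf's slot in the accessor tuple; createPlaceHolderExpression starts the numbering at TotalLeaves - 1 at the root. A compile-time sketch of that contract (the two-leaf layout shown is an illustrative assumption, not taken from the patch):

#include <cstddef>
template <typename Scalar, std::size_t N>
struct PlaceHolder { static constexpr std::size_t I = N; typedef Scalar Type; };

// A two-leaf expression hands out indices 1 and 0:
typedef PlaceHolder<float, 1> LhsLeaf;
typedef PlaceHolder<float, 0> RhsLeaf;
static_assert(LhsLeaf::I == 1 && RhsLeaf::I == 0, "indices match tuple slots");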
@ -37,20 +37,20 @@ void run(Expr &expr, Dev &dev) {
typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
auto functors = internal::extractFunctors(evaluator);

size_t tileSize =dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>()/2;
dev.m_queue.submit([&](cl::sycl::handler &cgh) {

// create a tuple of accessors from Evaluator
auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
const auto range = utility::tuple::get<0>(tuple_of_accessors).get_range()[0];

size_t outTileSize = range;
if (range > 64) outTileSize = 64;
size_t yMode = range % outTileSize;
int yRange = static_cast<int>(range);
if (yMode != 0) yRange += (outTileSize - yMode);

size_t GRange=range;
if (tileSize>GRange) tileSize=GRange;
else if(GRange>tileSize){
size_t xMode = GRange % tileSize;
if (xMode != 0) GRange += (tileSize - xMode);
}
// run the kernel
cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(yRange), cl::sycl::range<1>(outTileSize)), [=](cl::sycl::nd_item<1> itemID) {
cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
typedef typename internal::ConvertToDeviceExpression<Expr>::Type DevExpr;
auto device_expr =internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
auto device_evaluator = Eigen::TensorEvaluator<decltype(device_expr.expr), Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
@ -61,6 +61,7 @@ void run(Expr &expr, Dev &dev) {
});
dev.m_queue.throw_asynchronous();
}

evaluator.cleanup();
}
} // namespace TensorSycl
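The new launch logic rounds the global range up to a multiple of the work-group size, since a SYCL nd_range requires the global size to be divisible by the local size; that is also why device kernels guard writes with a `globalid < n` check. A small sketch of the arithmetic:

#include <cassert>
#include <cstddef>

// Round `range` up to the next multiple of `tile`, mirroring the GRange logic.
inline std::size_t round_up_to_tile(std::size_t range, std::size_t tile) {
  std::size_t rem = range % tile;
  return rem == 0 ? range : range + (tile - rem);
}

int main() {
  assert(round_up_to_tile(100, 64) == 128); // 28 padded work-items
  assert(round_up_to_tile(128, 64) == 128); // already aligned
  // the padded items fail the `globalid < 100` guard and write nothing
}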
@ -1,6 +1,6 @@
# generate split test header file only if it does not yet exist
# in order to prevent a rebuild every time cmake is configured
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
foreach(i RANGE 1 999)
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h
@ -16,11 +16,11 @@ endif()
set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported")
add_custom_target(BuildUnsupported)

include_directories(../../test ../../unsupported ../../Eigen
${CMAKE_CURRENT_BINARY_DIR}/../../test)

find_package (Threads)


find_package(GoogleHash)
if(GOOGLEHASH_FOUND)
add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
@ -134,7 +134,7 @@ ei_add_test(cxx11_tensor_roundings)
ei_add_test(cxx11_tensor_layout_swap)
ei_add_test(cxx11_tensor_io)
if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
# This test requires __uint128_t which is only available on 64bit systems
ei_add_test(cxx11_tensor_uint128)
endif()
endif()
@ -145,6 +145,7 @@ if(EIGEN_TEST_CXX11)
ei_add_test_sycl(cxx11_tensor_forced_eval_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_broadcast_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11")
endif(EIGEN_TEST_SYCL)
# It should be safe to always run these tests as there is some fallback code for
# older compilers that don't support cxx11.
@ -25,55 +25,50 @@ using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;

// Types used in tests:
using TestTensor = Tensor<float, 3>;
using TestTensorMap = TensorMap<Tensor<float, 3>>;
static void test_broadcast_sycl(){
static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){

cl::sycl::gpu_selector s;
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
std::rethrow_exception(e);
} catch (cl::sycl::exception e) {
std::cout << e.what() << std::endl;
// BROADCAST test:
array<int, 4> in_range = {{2, 3, 5, 7}};
array<int, 4> broadcasts = {{2, 3, 1, 4}};
array<int, 4> out_range;  // = in_range * broadcasts
for (size_t i = 0; i < out_range.size(); ++i)
out_range[i] = in_range[i] * broadcasts[i];

Tensor<float, 4> input(in_range);
Tensor<float, 4> out(out_range);

for (size_t i = 0; i < in_range.size(); ++i)
VERIFY_IS_EQUAL(out.dimension(i), out_range[i]);


for (int i = 0; i < input.size(); ++i)
input(i) = static_cast<float>(i);

float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));

TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range);
TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));

for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 9; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 28; ++l) {
VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l));
}
}
}
});
SyclDevice sycl_device(q);
// BROADCAST test:
array<int, 4> in_range = {{2, 3, 5, 7}};
array<int, in_range.size()> broadcasts = {{2, 3, 1, 4}};
array<int, in_range.size()> out_range;  // = in_range * broadcasts
for (size_t i = 0; i < out_range.size(); ++i)
out_range[i] = in_range[i] * broadcasts[i];

Tensor<float, in_range.size()> input(in_range);
Tensor<float, out_range.size()> output(out_range);

for (int i = 0; i < input.size(); ++i)
input(i) = static_cast<float>(i);

TensorMap<decltype(input)> gpu_in(input.data(), in_range);
TensorMap<decltype(output)> gpu_out(output.data(), out_range);
gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
sycl_device.deallocate(output.data());

for (size_t i = 0; i < in_range.size(); ++i)
VERIFY_IS_EQUAL(output.dimension(i), out_range[i]);

for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 9; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 28; ++l) {
VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), output(i,j,k,l));
}
}
}
}
printf("Broadcast Test Passed\n");
}
printf("Broadcast Test Passed\n");
sycl_device.deallocate(gpu_in_data);
sycl_device.deallocate(gpu_out_data);
}

void test_cxx11_tensor_broadcast_sycl() {
CALL_SUBTEST(test_broadcast_sycl());
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_broadcast_sycl(sycl_device));
}
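The verification loop above relies on the broadcast indexing rule: output element (i,j,k,l) equals input element (i%2, j%3, k%5, l%7). A tiny host-only illustration of the same rule in two dimensions, no SYCL device required:

#include <unsupported/Eigen/CXX11/Tensor>
int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setValues({{0.f, 1.f, 2.f}, {3.f, 4.f, 5.f}});
  Eigen::array<int, 2> bcast{{2, 2}};              // replicate each dim twice
  Eigen::Tensor<float, 2> r = t.broadcast(bcast);  // 4 x 6 result
  return (r(3, 5) == t(3 % 2, 5 % 3)) ? 0 : 1;     // broadcast indexing rule
}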
@ -20,20 +20,12 @@
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>

void test_device_sycl() {
cl::sycl::gpu_selector s;
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
std::rethrow_exception(e);
} catch (cl::sycl::exception e) {
std::cout << e.what() << std::endl;
}
}
});
Eigen::SyclDevice sycl_device(q);
printf("Hello from ComputeCpp: Device Exists\n");
void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
std::cout << "Hello from ComputeCpp: the requested device exists and the device name is : "
<< sycl_device.m_queue.get_device().template get_info<cl::sycl::info::device::name>() << std::endl;
}
void test_cxx11_tensor_device_sycl() {
CALL_SUBTEST(test_device_sycl());
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_device_sycl(sycl_device));
}
@ -22,18 +22,7 @@

using Eigen::Tensor;

void test_forced_eval_sycl() {
cl::sycl::gpu_selector s;
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
std::rethrow_exception(e);
} catch (cl::sycl::exception e) {
std::cout << e.what() << std::endl;
}
}
});
SyclDevice sycl_device(q);
void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {

int sizeDim1 = 100;
int sizeDim2 = 200;
@ -43,17 +32,22 @@ void test_forced_eval_sycl() {
Eigen::Tensor<float, 3> in2(tensorRange);
Eigen::Tensor<float, 3> out(tensorRange);

float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));

in1 = in1.random() + in1.constant(10.0f);
in2 = in2.random() + in2.constant(10.0f);

// creating TensorMap from tensor
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(in1.data(), tensorRange);
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(in2.data(), tensorRange);
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(out.data(), tensorRange);

// creating TensorMap from tensor
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float));
/// c=(a+b)*b
gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
sycl_device.deallocate(out.data());
gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -62,7 +56,15 @@ void test_forced_eval_sycl() {
}
}
}
printf("(a+b)*b Test Passed\n");
printf("(a+b)*b Test Passed\n");
sycl_device.deallocate(gpu_in1_data);
sycl_device.deallocate(gpu_in2_data);
sycl_device.deallocate(gpu_out_data);

}

void test_cxx11_tensor_forced_eval_sycl() { CALL_SUBTEST(test_forced_eval_sycl()); }
void test_cxx11_tensor_forced_eval_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_forced_eval_sycl(sycl_device));
}
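The `.eval()` in `(gpu_in1 + gpu_in2).eval() * gpu_in2` forces the sum to be materialized into a temporary before the multiply; on SYCL this is what routes the subtree through TensorForcedEvalOp. A host-only illustration of the same operator:

#include <unsupported/Eigen/CXX11/Tensor>
int main() {
  Eigen::Tensor<float, 1> a(3), b(3);
  a.setConstant(1.f);
  b.setConstant(2.f);
  // (a + b) is evaluated once into a temporary, then multiplied by b.
  Eigen::Tensor<float, 1> c = (a + b).eval() * b;
  return (c(0) == 6.f) ? 0 : 1;
}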
138
unsupported/test/cxx11_tensor_reduction_sycl.cpp
Normal file
@ -0,0 +1,138 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015
// Mehdi Goli Codeplay Software Ltd.
// Ralph Potter Codeplay Software Ltd.
// Luke Iwanski Codeplay Software Ltd.
// Contact: <eigen@codeplay.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_reduction_sycl
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_SYCL

#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>



static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) {

const int num_rows = 452;
const int num_cols = 765;
array<int, 2> tensorRange = {{num_rows, num_cols}};

Tensor<float, 2> in(tensorRange);
Tensor<float, 0> full_redux;
Tensor<float, 0> full_redux_gpu;

in.setRandom();

full_redux = in.sum();

float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
float* gpu_out_data =(float*)sycl_device.allocate(sizeof(float));

TensorMap<Tensor<float, 2> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<float, 0> > out_gpu(gpu_out_data);

sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
out_gpu.device(sycl_device) = in_gpu.sum();
sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float));
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux_gpu(), full_redux());

sycl_device.deallocate(gpu_in_data);
sycl_device.deallocate(gpu_out_data);
}

static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) {

int dim_x = 145;
int dim_y = 1;
int dim_z = 67;

array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
Eigen::array<int, 1> red_axis;
red_axis[0] = 0;
array<int, 2> reduced_tensorRange = {{dim_y, dim_z}};

Tensor<float, 3> in(tensorRange);
Tensor<float, 2> redux(reduced_tensorRange);
Tensor<float, 2> redux_gpu(reduced_tensorRange);

in.setRandom();

redux= in.sum(red_axis);

float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));

TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);

sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));

// Check that the CPU and GPU reductions return the same result.
for(int j=0; j<reduced_tensorRange[0]; j++ )
for(int k=0; k<reduced_tensorRange[1]; k++ )
VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));

sycl_device.deallocate(gpu_in_data);
sycl_device.deallocate(gpu_out_data);
}

static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) {

int dim_x = 567;
int dim_y = 1;
int dim_z = 47;

array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
Eigen::array<int, 1> red_axis;
red_axis[0] = 2;
array<int, 2> reduced_tensorRange = {{dim_x, dim_y}};

Tensor<float, 3> in(tensorRange);
Tensor<float, 2> redux(reduced_tensorRange);
Tensor<float, 2> redux_gpu(reduced_tensorRange);

in.setRandom();

redux= in.sum(red_axis);

float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));

TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);

sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
// Check that the CPU and GPU reductions return the same result.
for(int j=0; j<reduced_tensorRange[0]; j++ )
for(int k=0; k<reduced_tensorRange[1]; k++ )
VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));

sycl_device.deallocate(gpu_in_data);
sycl_device.deallocate(gpu_out_data);

}

void test_cxx11_tensor_reduction_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST((test_full_reductions_sycl(sycl_device)));
CALL_SUBTEST((test_first_dim_reductions_sycl(sycl_device)));
CALL_SUBTEST((test_last_dim_reductions_sycl(sycl_device)));

}
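These tests exercise the three reduction shapes the new SYCL path supports: a full reduction to a scalar, and partial reductions over the first and last axes. A host-side illustration of the shape arithmetic being verified, runnable without a SYCL device:

#include <unsupported/Eigen/CXX11/Tensor>
int main() {
  Eigen::Tensor<float, 3> t(4, 2, 3);
  t.setRandom();
  Eigen::array<int, 1> axis0{{0}};
  Eigen::array<int, 1> axis2{{2}};
  Eigen::Tensor<float, 2> r0 = t.sum(axis0); // shape (2, 3): first dim reduced
  Eigen::Tensor<float, 2> r2 = t.sum(axis2); // shape (4, 2): last dim reduced
  return (r0.dimension(0) == 2 && r2.dimension(1) == 2) ? 0 : 1;
}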
@ -27,42 +27,33 @@ using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;

// Types used in tests:
using TestTensor = Tensor<float, 3>;
using TestTensorMap = TensorMap<Tensor<float, 3>>;

void test_sycl_cpu() {
cl::sycl::gpu_selector s;
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
std::rethrow_exception(e);
} catch (cl::sycl::exception e) {
std::cout << e.what() << std::endl;
}
}
});
SyclDevice sycl_device(q);
void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {

int sizeDim1 = 100;
int sizeDim2 = 100;
int sizeDim3 = 100;
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
TestTensor in1(tensorRange);
TestTensor in2(tensorRange);
TestTensor in3(tensorRange);
TestTensor out(tensorRange);
in1 = in1.random();
Tensor<float, 3> in1(tensorRange);
Tensor<float, 3> in2(tensorRange);
Tensor<float, 3> in3(tensorRange);
Tensor<float, 3> out(tensorRange);

in2 = in2.random();
in3 = in3.random();
TestTensorMap gpu_in1(in1.data(), tensorRange);
TestTensorMap gpu_in2(in2.data(), tensorRange);
TestTensorMap gpu_in3(in3.data(), tensorRange);
TestTensorMap gpu_out(out.data(), tensorRange);

float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.dimensions().TotalSize()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));

TensorMap<Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
TensorMap<Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
TensorMap<Tensor<float, 3>> gpu_in3(gpu_in3_data, tensorRange);
TensorMap<Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);

/// a=1.2f
gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f);
sycl_device.deallocate(in1.data());
sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -74,7 +65,7 @@ void test_sycl_cpu() {

/// a=b*1.2f
gpu_out.device(sycl_device) = gpu_in1 * 1.2f;
sycl_device.deallocate(out.data());
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -86,8 +77,9 @@ void test_sycl_cpu() {
printf("a=b*1.2f Test Passed\n");

/// c=a*b
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(float));
gpu_out.device(sycl_device) = gpu_in1 * gpu_in2;
sycl_device.deallocate(out.data());
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -101,7 +93,7 @@ void test_sycl_cpu() {

/// c=a+b
gpu_out.device(sycl_device) = gpu_in1 + gpu_in2;
sycl_device.deallocate(out.data());
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -115,7 +107,7 @@ void test_sycl_cpu() {

/// c=a*a
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1;
sycl_device.deallocate(out.data());
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -125,12 +117,11 @@ void test_sycl_cpu() {
}
}
}

printf("c= a*a Test Passed\n");

//a*3.14f + b*2.7f
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f);
sycl_device.deallocate(out.data());
sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -143,8 +134,9 @@ void test_sycl_cpu() {
printf("a*3.14f + b*2.7f Test Passed\n");

///d= (a>0.5? b:c)
sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.dimensions().TotalSize())*sizeof(float));
gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3);
sycl_device.deallocate(out.data());
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -155,8 +147,13 @@ void test_sycl_cpu() {
}
}
printf("d= (a>0.5? b:c) Test Passed\n");

sycl_device.deallocate(gpu_in1_data);
sycl_device.deallocate(gpu_in2_data);
sycl_device.deallocate(gpu_in3_data);
sycl_device.deallocate(gpu_out_data);
}
void test_cxx11_tensor_sycl() {
CALL_SUBTEST(test_sycl_cpu());
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_sycl_cpu(sycl_device));
}