From d57430dd73ab2f88aa5e45c370f6ab91103ff18a Mon Sep 17 00:00:00 2001
From: Mehdi Goli
Date: Tue, 8 Nov 2016 17:08:02 +0000
Subject: [PATCH] Converting all sycl buffers to uninitialised device-only
 buffers; adding memcpyHostToDevice and memcpyDeviceToHost to SyclDevice;
 modifying all examples to obey the new rules; moving sycl queue creation
 into the device, as Benoit suggested; removing the sycl-specific condition
 for returning m_result in TensorReduction.h, as Benoit suggested.

---
 cmake/EigenTesting.cmake                           |  94 +++++------
 cmake/FindComputeCpp.cmake                         |  57 ++++---
 .../Eigen/CXX11/src/Tensor/TensorDeviceSycl.h      | 115 +++++++-------
 .../Eigen/CXX11/src/Tensor/TensorReduction.h       |   8 +-
 .../CXX11/src/Tensor/TensorReductionSycl.h         |  10 +-
 .../src/Tensor/TensorSyclExtractAccessor.h         |  22 +--
 .../test/cxx11_tensor_broadcast_sycl.cpp           |  85 +++++-----
 unsupported/test/cxx11_tensor_device_sycl.cpp      |  20 +--
 .../test/cxx11_tensor_forced_eval_sycl.cpp         |  44 +++---
 .../test/cxx11_tensor_reduction_sycl.cpp           | 147 ++++++++----------
 unsupported/test/cxx11_tensor_sycl.cpp             |  67 ++++----
 11 files changed, 328 insertions(+), 341 deletions(-)

diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index 9ebd06a04..047e96462 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -1,23 +1,23 @@
 macro(ei_add_property prop value)
-  get_property(previous GLOBAL PROPERTY ${prop}) 
+  get_property(previous GLOBAL PROPERTY ${prop})
   if ((NOT previous) OR (previous STREQUAL ""))
     set_property(GLOBAL PROPERTY ${prop} "${value}")
   else()
     set_property(GLOBAL PROPERTY ${prop} "${previous} ${value}")
-  endif() 
+  endif()
 endmacro(ei_add_property)
 
 #internal. See documentation of ei_add_test for details.
 macro(ei_add_test_internal testname testname_with_suffix)
   set(targetname ${testname_with_suffix})
-  
+
   if(EIGEN_ADD_TEST_FILENAME_EXTENSION)
     set(filename ${testname}.${EIGEN_ADD_TEST_FILENAME_EXTENSION})
   else()
     set(filename ${testname}.cpp)
   endif()
-  
+
   if(EIGEN_ADD_TEST_FILENAME_EXTENSION STREQUAL cu)
     if(EIGEN_TEST_CUDA_CLANG)
       set_source_files_properties(${filename} PROPERTIES LANGUAGE CXX)
@@ -42,7 +42,7 @@ macro(ei_add_test_internal testname testname_with_suffix)
     else()
       add_executable(${targetname} ${filename})
     endif()
-    
+
     if (targetname MATCHES "^eigen2_")
       add_dependencies(eigen2_buildtests ${targetname})
     else()
@@ -56,20 +56,20 @@ macro(ei_add_test_internal testname testname_with_suffix)
       ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_DEBUG_ASSERTS=1")
     endif(EIGEN_DEBUG_ASSERTS)
   endif(EIGEN_NO_ASSERTION_CHECKING)
-  
+
   ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_MAX_SIZE=${EIGEN_TEST_MAX_SIZE}")
 
   ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_FUNC=${testname}")
-  
+
   if(MSVC)
     ei_add_target_property(${targetname} COMPILE_FLAGS "/bigobj")
-  endif() 
+  endif()
 
   # let the user pass flags.
   if(${ARGC} GREATER 2)
     ei_add_target_property(${targetname} COMPILE_FLAGS "${ARGV2}")
   endif(${ARGC} GREATER 2)
-  
+
   if(EIGEN_TEST_CUSTOM_CXX_FLAGS)
     ei_add_target_property(${targetname} COMPILE_FLAGS "${EIGEN_TEST_CUSTOM_CXX_FLAGS}")
   endif()
@@ -95,12 +95,12 @@ macro(ei_add_test_internal testname testname_with_suffix)
       # notice: no double quotes around ${libs_to_link} here. It may be a list.
target_link_libraries(${targetname} ${libs_to_link}) endif() - endif() + endif() add_test(${testname_with_suffix} "${targetname}") - + # Specify target and test labels accoirding to EIGEN_CURRENT_SUBPROJECT - get_property(current_subproject GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT) + get_property(current_subproject GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT) if ((current_subproject) AND (NOT (current_subproject STREQUAL ""))) set_property(TARGET ${targetname} PROPERTY LABELS "Build${current_subproject}") add_dependencies("Build${current_subproject}" ${targetname}) @@ -128,14 +128,14 @@ macro(ei_add_test_internal_sycl testname testname_with_suffix) OUTPUT ${include_file} COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${host_file}\\\"" > ${include_file} COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${bc_file}.sycl\\\"" >> ${include_file} - DEPENDS ${filename} + DEPENDS ${filename} ${bc_file}.sycl COMMENT "Building ComputeCpp integration header file ${include_file}" ) # Add a custom target for the generated integration header - add_custom_target(${testname}_integration_header_woho DEPENDS ${include_file}) + add_custom_target(${testname}_integration_header_sycl DEPENDS ${include_file}) add_executable(${targetname} ${include_file}) - add_dependencies(${targetname} ${testname}_integration_header_woho) + add_dependencies(${targetname} ${testname}_integration_header_sycl) add_sycl_to_target(${targetname} ${filename} ${CMAKE_CURRENT_BINARY_DIR}) if (targetname MATCHES "^eigen2_") @@ -258,7 +258,7 @@ macro(ei_add_test testname) else() set(filename ${testname}.cpp) endif() - + file(READ "${filename}" test_source) set(parts 0) string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+" @@ -379,7 +379,7 @@ macro(ei_testing_print_summary) elseif(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION) message(STATUS "Explicit vectorization disabled (alignment kept enabled)") else() - + message(STATUS "Maximal matrix/vector size: ${EIGEN_TEST_MAX_SIZE}") if(EIGEN_TEST_SSE2) @@ -453,13 +453,13 @@ macro(ei_testing_print_summary) else() message(STATUS "ARMv8 NEON: Using architecture defaults") endif() - + if(EIGEN_TEST_ZVECTOR) message(STATUS "S390X ZVECTOR: ON") else() message(STATUS "S390X ZVECTOR: Using architecture defaults") endif() - + if(EIGEN_TEST_CXX11) message(STATUS "C++11: ON") else() @@ -505,7 +505,7 @@ macro(ei_init_testing) set_property(GLOBAL PROPERTY EIGEN_FAILTEST_FAILURE_COUNT "0") set_property(GLOBAL PROPERTY EIGEN_FAILTEST_COUNT "0") - + # uncomment anytime you change the ei_get_compilerver_from_cxx_version_string macro # ei_test_get_compilerver_from_cxx_version_string() endmacro(ei_init_testing) @@ -514,22 +514,22 @@ macro(ei_set_sitename) # if the sitename is not yet set, try to set it if(NOT ${SITE} OR ${SITE} STREQUAL "") set(eigen_computername $ENV{COMPUTERNAME}) - set(eigen_hostname $ENV{HOSTNAME}) + set(eigen_hostname $ENV{HOSTNAME}) if(eigen_hostname) set(SITE ${eigen_hostname}) - elseif(eigen_computername) - set(SITE ${eigen_computername}) + elseif(eigen_computername) + set(SITE ${eigen_computername}) endif() endif() # in case it is already set, enforce lower case if(SITE) string(TOLOWER ${SITE} SITE) - endif() + endif() endmacro(ei_set_sitename) macro(ei_get_compilerver VAR) if(MSVC) - # on windows system, we use a modified CMake script + # on windows system, we use a modified CMake script include(EigenDetermineVSServicePack) EigenDetermineVSServicePack( my_service_pack ) @@ -541,20 +541,20 @@ macro(ei_get_compilerver VAR) else() # on all other system we rely 
on ${CMAKE_CXX_COMPILER} # supporting a "--version" or "/version" flag - + if(WIN32 AND ${CMAKE_CXX_COMPILER_ID} EQUAL "Intel") set(EIGEN_CXX_FLAG_VERSION "/version") else() set(EIGEN_CXX_FLAG_VERSION "--version") endif() - + execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${EIGEN_CXX_FLAG_VERSION} OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE) string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string}) - + ei_get_compilerver_from_cxx_version_string("${eigen_cxx_compiler_version_string}" CNAME CVER) set(${VAR} "${CNAME}-${CVER}") - + endif() endmacro(ei_get_compilerver) @@ -563,13 +563,13 @@ endmacro(ei_get_compilerver) # the testing macro call in ei_init_testing() of the EigenTesting.cmake file. # See also the ei_test_get_compilerver_from_cxx_version_string macro at the end of the file macro(ei_get_compilerver_from_cxx_version_string VERSTRING CNAME CVER) - # extract possible compiler names + # extract possible compiler names string(REGEX MATCH "g\\+\\+" ei_has_gpp ${VERSTRING}) string(REGEX MATCH "llvm|LLVM" ei_has_llvm ${VERSTRING}) string(REGEX MATCH "gcc|GCC" ei_has_gcc ${VERSTRING}) string(REGEX MATCH "icpc|ICC" ei_has_icpc ${VERSTRING}) string(REGEX MATCH "clang|CLANG" ei_has_clang ${VERSTRING}) - + # combine them if((ei_has_llvm) AND (ei_has_gpp OR ei_has_gcc)) set(${CNAME} "llvm-g++") @@ -584,7 +584,7 @@ macro(ei_get_compilerver_from_cxx_version_string VERSTRING CNAME CVER) else() set(${CNAME} "_") endif() - + # extract possible version numbers # first try to extract 3 isolated numbers: string(REGEX MATCH " [0-9]+\\.[0-9]+\\.[0-9]+" eicver ${VERSTRING}) @@ -602,9 +602,9 @@ macro(ei_get_compilerver_from_cxx_version_string VERSTRING CNAME CVER) endif() endif() endif() - + string(REGEX REPLACE ".(.*)" "\\1" ${CVER} ${eicver}) - + endmacro(ei_get_compilerver_from_cxx_version_string) macro(ei_get_cxxflags VAR) @@ -633,30 +633,30 @@ macro(ei_get_cxxflags VAR) elseif(EIGEN_TEST_SSE3) set(${VAR} SSE3) elseif(EIGEN_TEST_SSE2 OR IS_64BIT_ENV) - set(${VAR} SSE2) + set(${VAR} SSE2) endif() if(EIGEN_TEST_OPENMP) if (${VAR} STREQUAL "") - set(${VAR} OMP) - else() - set(${VAR} ${${VAR}}-OMP) - endif() + set(${VAR} OMP) + else() + set(${VAR} ${${VAR}}-OMP) endif() - + endif() + if(EIGEN_DEFAULT_TO_ROW_MAJOR) if (${VAR} STREQUAL "") - set(${VAR} ROW) - else() - set(${VAR} ${${VAR}}-ROWMAJ) - endif() + set(${VAR} ROW) + else() + set(${VAR} ${${VAR}}-ROWMAJ) + endif() endif() endmacro(ei_get_cxxflags) macro(ei_set_build_string) ei_get_compilerver(LOCAL_COMPILER_VERSION) ei_get_cxxflags(LOCAL_COMPILER_FLAGS) - + include(EigenDetermineOSVersion) DetermineOSVersion(OS_VERSION) @@ -672,11 +672,11 @@ macro(ei_set_build_string) else() set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-64bit) endif() - + if(EIGEN_TEST_CXX11) set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-cxx11) endif() - + if(EIGEN_BUILD_STRING_SUFFIX) set(TMP_BUILD_STRING ${TMP_BUILD_STRING}-${EIGEN_BUILD_STRING_SUFFIX}) endif() diff --git a/cmake/FindComputeCpp.cmake b/cmake/FindComputeCpp.cmake index 3aab5b833..07ebed61b 100644 --- a/cmake/FindComputeCpp.cmake +++ b/cmake/FindComputeCpp.cmake @@ -1,6 +1,21 @@ #.rst: # FindComputeCpp #--------------- +# +# Copyright 2016 Codeplay Software Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use these files except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 #########################
 #  FindComputeCpp.cmake
@@ -8,6 +23,11 @@
 #
 #  Tools for finding and building with ComputeCpp.
 #
+#  User must define COMPUTECPP_PACKAGE_ROOT_DIR pointing to the ComputeCpp
+#  installation.
+#
+#  Latest version of this file can be found at:
+#    https://github.com/codeplaysoftware/computecpp-sdk
 
 # Require CMake version 3.2.2 or higher
 cmake_minimum_required(VERSION 3.2.2)
@@ -32,7 +52,6 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
     message(FATAL_ERROR
       "host compiler - Not found! (clang version must be at least 3.6)")
   else()
-    set(COMPUTECPP_DISABLE_GCC_DUAL_ABI "True")
     message(STATUS "host compiler - clang ${CMAKE_CXX_COMPILER_VERSION}")
   endif()
 else()
@@ -48,11 +67,12 @@ mark_as_advanced(COMPUTECPP_64_BIT_CODE)
 # Find OpenCL package
 find_package(OpenCL REQUIRED)
 
-# Find ComputeCpp package
-if(EXISTS ${COMPUTECPP_PACKAGE_ROOT_DIR})
-  message(STATUS "ComputeCpp package - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+# Find ComputeCpp package
+if(NOT COMPUTECPP_PACKAGE_ROOT_DIR)
+  message(FATAL_ERROR
+    "ComputeCpp package - Not found! (please set COMPUTECPP_PACKAGE_ROOT_DIR)")
 else()
-  message(FATAL_ERROR "ComputeCpp package - Not found! (please set COMPUTECPP_PACKAGE_ROOT_DIR) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "ComputeCpp package - Found")
 endif()
 option(COMPUTECPP_PACKAGE_ROOT_DIR "Path to the ComputeCpp Package")
@@ -61,9 +81,9 @@ find_program(COMPUTECPP_DEVICE_COMPILER compute++ PATHS ${COMPUTECPP_PACKAGE_ROOT_DIR}
   PATH_SUFFIXES bin)
 if (EXISTS ${COMPUTECPP_DEVICE_COMPILER})
   mark_as_advanced(COMPUTECPP_DEVICE_COMPILER)
-  message(STATUS "compute++ - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "compute++ - Found")
 else()
-  message(FATAL_ERROR "compute++ - Not found! (${COMPUTECPP_DEVICE_COMPILER}) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(FATAL_ERROR "compute++ - Not found! (${COMPUTECPP_DEVICE_COMPILER})")
 endif()
 
 # Obtain the path to computecpp_info
@@ -71,9 +91,9 @@ find_program(COMPUTECPP_INFO_TOOL computecpp_info PATHS ${COMPUTECPP_PACKAGE_ROOT_DIR}
   PATH_SUFFIXES bin)
 if (EXISTS ${COMPUTECPP_INFO_TOOL})
   mark_as_advanced(${COMPUTECPP_INFO_TOOL})
-  message(STATUS "computecpp_info - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "computecpp_info - Found")
 else()
-  message(FATAL_ERROR "computecpp_info - Not found! (${COMPUTECPP_INFO_TOOL}) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(FATAL_ERROR "computecpp_info - Not found! (${COMPUTECPP_INFO_TOOL})")
 endif()
 
 # Obtain the path to the ComputeCpp runtime library
@@ -85,15 +105,15 @@ if (EXISTS ${COMPUTECPP_RUNTIME_LIBRARY})
   mark_as_advanced(COMPUTECPP_RUNTIME_LIBRARY)
   message(STATUS "libComputeCpp.so - Found")
 else()
-  message(FATAL_ERROR "libComputeCpp.so - Not found! (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(FATAL_ERROR "libComputeCpp.so - Not found!")
 endif()
 
 # Obtain the ComputeCpp include directory
 set(COMPUTECPP_INCLUDE_DIRECTORY ${COMPUTECPP_PACKAGE_ROOT_DIR}/include/)
 if (NOT EXISTS ${COMPUTECPP_INCLUDE_DIRECTORY})
-  message(FATAL_ERROR "ComputeCpp includes - Not found! (${COMPUTECPP_PACKAGE_ROOT_DIR}/include/)")
+  message(FATAL_ERROR "ComputeCpp includes - Not found!")
 else()
-  message(STATUS "ComputeCpp includes - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "ComputeCpp includes - Found")
 endif()
 
 # Obtain the package version
@@ -144,7 +164,7 @@ endif()
 #
 #  targetName : Name of the target.
 #  sourceFile : Source file to be compiled.
-#  binaryDir : Intermediate output directory for the integration header.
+#  binaryDir : Intermediate directory to output the integration header.
 #
 function(__build_spir targetName sourceFile binaryDir)
@@ -176,12 +196,13 @@ function(__build_spir targetName sourceFile binaryDir)
     OUTPUT ${outputSyclFile}
     COMMAND ${COMPUTECPP_DEVICE_COMPILER}
             ${COMPUTECPP_DEVICE_COMPILER_FLAGS}
-            -I${COMPUTECPP_INCLUDE_DIRECTORY}
+            -isystem ${COMPUTECPP_INCLUDE_DIRECTORY}
             ${COMPUTECPP_PLATFORM_SPECIFIC_ARGS}
            ${device_compiler_includes}
            -o ${outputSyclFile}
            -c ${CMAKE_CURRENT_SOURCE_DIR}/${sourceFile}
    DEPENDS ${sourceFile}
+    WORKING_DIRECTORY ${binaryDir}
    COMMENT "Building ComputeCpp integration header file ${outputSyclFile}")
 
   # Add a custom target for the generated integration header
@@ -190,10 +211,6 @@
   # Add a dependency on the integration header
   add_dependencies(${targetName} ${targetName}_integration_header)
 
-  # Force inclusion of the integration header for the host compiler
-  #set(compileFlags -include ${include_file} "-Wall")
-  target_compile_options(${targetName} PUBLIC ${compileFlags})
-
   # Set the host compiler C++ standard to C++11
   set_property(TARGET ${targetName} PROPERTY CXX_STANDARD 11)
 
@@ -210,11 +227,11 @@ endfunction()
 #######################
 #
 #  Adds a SYCL compilation custom command associated with an existing
-#  target and sets a dependency on that new command.
+#  target and sets a dependency on that new command.
 #
 #  targetName : Name of the target to add a SYCL to.
 #  sourceFile : Source file to be compiled for SYCL.
-#  binaryDir : Intermediate output directory for the integration header.
+#  binaryDir : Intermediate directory to output the integration header.
 #
 function(add_sycl_to_target targetName sourceFile binaryDir)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index 4231a11ff..8333301ea 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -16,95 +16,93 @@
 #define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
 
 namespace Eigen {
-/// \struct BufferT is used to specialise add_sycl_buffer function for
-// two types of buffer we have. When the MapAllocator is true, we create the
-// sycl buffer with MapAllocator.
-/// We have to const_cast the input pointer in order to work around the fact
-/// that sycl does not accept map allocator for const pointer.
-template <typename T, bool MapAllocator>
-struct BufferT {
-  using Type = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>;
-  static inline void add_sycl_buffer(const T *ptr, size_t num_bytes,std::map<const void *, std::shared_ptr<void>> &buffer_map) {
-    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(const_cast<T*>(ptr), cl::sycl::range<1>(num_bytes))))));
-  }
-};
-
-/// specialisation of the \ref BufferT when the MapAllocator is false. In this
-/// case we only create the device-only buffer.
-template <typename T>
-struct BufferT<T, false> {
-  using Type = cl::sycl::buffer<T, 1>;
-  static inline void add_sycl_buffer(const T *ptr, size_t num_bytes, std::map<const void *, std::shared_ptr<void>> &buffer_map) {
-    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(cl::sycl::range<1>(num_bytes))))));
-  }
-};
-
 struct SyclDevice {
   /// class members
   /// sycl queue
-  cl::sycl::queue &m_queue;
+  mutable cl::sycl::queue m_queue;
   /// std::map is the container used to make sure that we create only one buffer
-  /// per pointer. The lifespan of the buffer
-  /// now depends on the lifespan of SyclDevice. If a non-read-only pointer is
-  /// needed to be accessed on the host we should manually deallocate it.
+  /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
+  /// If a non-read-only pointer needs to be accessed on the host we should manually deallocate it.
   mutable std::map<const void *, std::shared_ptr<void>> buffer_map;
-
-  SyclDevice(cl::sycl::queue &q) : m_queue(q) {}
+  /// creating device by using selector
+  template<typename dev_Selector> SyclDevice(dev_Selector s)
+  :m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
+    for (const auto& e : l) {
+      try {
+        std::rethrow_exception(e);
+      } catch (cl::sycl::exception e) {
+        std::cout << e.what() << std::endl;
+      }
+    }
+  })) {}
   // destructor
   ~SyclDevice() { deallocate_all(); }
 
-  template <typename T>
-  void deallocate(const T *p) const {
+  template <typename T> void deallocate(T *p) const {
     auto it = buffer_map.find(p);
     if (it != buffer_map.end()) {
       buffer_map.erase(it);
+      internal::aligned_free(p);
     }
   }
-  void deallocate_all() const { buffer_map.clear(); }
+  void deallocate_all() const {
+    std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
+    while (it!=buffer_map.end()) {
+      auto p=it->first;
+      buffer_map.erase(it);
+      internal::aligned_free(const_cast<void*>(p));
+      it=buffer_map.begin();
+    }
+    buffer_map.clear();
+  }
 
   /// creation of sycl accessor for a buffer. This function first tries to find
-  /// the buffer in the buffer_map.
-  /// If found it gets the accessor from it, if not, the function then adds an
-  /// entry by creating a sycl buffer
-  /// for that particular pointer.
-  template <cl::sycl::access::mode AcMd, bool MapAllocator, typename T>
-  inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
+  /// the buffer in the buffer_map. If found it gets the accessor from it, if not,
+  /// the function then adds an entry by creating a sycl buffer for that particular pointer.
+  template <cl::sycl::access::mode AcMd, typename T> inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
   get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
-    return (get_sycl_buffer<MapAllocator>(num_bytes, ptr).template get_access<AcMd>(cgh));
+    return (get_sycl_buffer(num_bytes, ptr)->template get_access<AcMd>(cgh));
   }
 
-template <bool MapAllocator, typename T>
-  inline typename BufferT<T, MapAllocator>::Type
-  get_sycl_buffer(size_t num_bytes,const T * ptr) const {
-    if(MapAllocator && !ptr){
-      eigen_assert("pointer with map_Allocator cannot be null. Please initialise the input pointer"); }
-    auto it = buffer_map.find(ptr);
-    if (it == buffer_map.end()) {
-      BufferT<T, MapAllocator>::add_sycl_buffer(ptr, num_bytes, buffer_map);
-    }
-    return (*((typename BufferT<T, MapAllocator>::Type*)((buffer_map.at(ptr).get()))));
+  template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
+    using Type = cl::sycl::buffer<T, 1>;
+    std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
+    [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
+    (static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
+    return ret;
+  }
+
+  template <typename T> inline cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
+    return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(ptr, num_bytes).first->second.get());
   }
 
   /// allocating memory on the cpu
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
-    return internal::aligned_malloc(num_bytes);
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
+    return internal::aligned_malloc(8);
   }
 
   // some runtime conditions that can be applied here
   bool isDeviceSuitable() const { return true; }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void *buffer) const {
-    internal::aligned_free(buffer);
-  }
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
     ::memcpy(dst, src, n);
   }
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void *dst, const void *src, size_t n) const {
-    memcpy(dst, src, n);
+
+  template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
+    auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write>();
+    memcpy(host_acc.get_pointer(), src, n);
   }
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const void *src, size_t n) const {
-    memcpy(dst, src, n);
+  /// with the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon.
+  template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
+    auto it = buffer_map.find(src);
+    if (it != buffer_map.end()) {
+      auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read>();
+      memcpy(dst,host_acc.get_pointer(), n);
+    } else{
+      eigen_assert("no device memory found. The memory might have been deallocated before this copy");
+    }
   }
+
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
     ::memset(buffer, c, n);
   }
@@ -112,6 +110,7 @@
     return 1;
   }
 };
+
 }  // end namespace Eigen
 
 #endif  // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 367bccf63..f731bf17e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -662,13 +662,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
     }
   }
 
-  /// required by sycl in order to extract the output accessor
-#ifndef EIGEN_USE_SYCL
-  EIGEN_DEVICE_FUNC typename MakePointer_::Type data() const { return NULL; }
-#else
-  EIGEN_DEVICE_FUNC typename MakePointer_::Type data() const {
-    return m_result; }
-#endif
+  EIGEN_DEVICE_FUNC typename MakePointer_::Type data() const { return m_result; }
   /// required by sycl in order to extract the accessor
   const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
   /// added for sycl in order to construct the buffer from the sycl device
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
index 1c89132db..3daecb045 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -27,9 +27,9 @@ namespace internal {
 
 template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
 template<typename BufferTOut, typename BufferTIn>
-static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
   do {
-    auto f = [length, local, &bufOut, &bufI](cl::sycl::handler& h) mutable {
+    auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable {
       cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
                               cl::sycl::range<1>{std::min(length, local)}};
       /* Two accessors are used: one to the buffer that is being reduced,
@@ -37,7 +37,7 @@ static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
       auto aI = bufI.template get_access<cl::sycl::access::mode::read_write>(h);
       auto aOut =
-          bufOut.template get_access<cl::sycl::access::mode::discard_write>(h);
+          bufOut->template get_access<cl::sycl::access::mode::discard_write>(h);
       cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
                          cl::sycl::access::target::local>
           scratch(cl::sycl::range<1>(local), h);
@@ -134,7 +134,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
 /// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
   if (GRange < outTileSize) outTileSize=GRange;
   // getting final out buffer at the moment the created buffer is true because there is no need for assign
-  auto out_buffer =dev.template get_sycl_buffer<true, typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
+  auto out_buffer =dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
   /// creating the shared memory for calculating reduction.
   /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
   /// recursively apply reduction on it in order to reduce the whole.
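
Usage sketch (not part of the patch): with this change a device-side buffer starts out uninitialised, so inputs must be staged explicitly with memcpyHostToDevice and results fetched with memcpyDeviceToHost. The code below mirrors the updated reduction test further down; it assumes the post-patch SyclDevice and a SYCL implementation such as ComputeCpp, and the selector, sizes and names are illustrative only.

    #define EIGEN_USE_SYCL
    #include <unsupported/Eigen/CXX11/Tensor>

    void full_reduction_example() {
      cl::sycl::gpu_selector s;          // any SYCL device selector works here
      Eigen::SyclDevice sycl_device(s);  // the queue is now created inside the device

      Eigen::array<int, 2> range = {{452, 765}};
      Eigen::Tensor<float, 2> in(range);
      Eigen::Tensor<float, 0> full_redux;
      in.setRandom();

      // allocate() only registers a host-side key; the device-only
      // cl::sycl::buffer is created lazily and never syncs back on destruction.
      float* gpu_in  = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
      float* gpu_out = static_cast<float*>(sycl_device.allocate(sizeof(float)));
      Eigen::TensorMap<Eigen::Tensor<float, 2>> in_gpu(gpu_in, range);
      Eigen::TensorMap<Eigen::Tensor<float, 0>> out_gpu(gpu_out);

      // explicit staging is now mandatory: the device buffer is uninitialised
      sycl_device.memcpyHostToDevice(gpu_in, in.data(), in.dimensions().TotalSize()*sizeof(float));
      out_gpu.device(sycl_device) = in_gpu.sum();
      sycl_device.memcpyDeviceToHost(full_redux.data(), gpu_out, sizeof(float));

      sycl_device.deallocate(gpu_in);
      sycl_device.deallocate(gpu_out);
    }
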
@@ -208,7 +208,7 @@ struct InnerReducer { dev.m_queue.submit([&](cl::sycl::handler &cgh) { // create a tuple of accessors from Evaluator auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl()); - auto output_accessor = dev.template get_sycl_accessor(num_coeffs_to_preserve,cgh, output); + auto output_accessor = dev.template get_sycl_accessor(num_coeffs_to_preserve,cgh, output); cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) { typedef typename TensorSycl::internal::ConvertToDeviceExpression::Type DevExpr; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h index 3af5f8cfc..b1da6858e 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h @@ -56,10 +56,10 @@ struct AccessorConstructor{ -> decltype(utility::tuple::append(ExtractAccessor::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor::getTuple(cgh, eval2), ExtractAccessor::getTuple(cgh, eval3)))) { return utility::tuple::append(ExtractAccessor::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor::getTuple(cgh, eval2), ExtractAccessor::getTuple(cgh, eval3))); } - template< cl::sycl::access::mode AcM, bool MapAllocator, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval) - -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval) + -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){ - return utility::tuple::make_tuple(eval.device().template get_sycl_accessor::type>(eval.dimensions().TotalSize(), cgh,eval.data())); + return utility::tuple::make_tuple(eval.device().template get_sycl_accessor::type>(eval.dimensions().TotalSize(), cgh,eval.data())); } }; @@ -141,8 +141,8 @@ struct ExtractAccessor, Dev> > template \ struct ExtractAccessor, Dev> > {\ static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator, Dev> eval)\ - -> decltype(AccessorConstructor::template getAccessor(cgh, eval)){\ - return AccessorConstructor::template getAccessor(cgh, eval);\ + -> decltype(AccessorConstructor::template getAccessor(cgh, eval)){\ + return AccessorConstructor::template getAccessor(cgh, eval);\ }\ }; TENSORMAPEXPR(const, cl::sycl::access::mode::read) @@ -153,8 +153,8 @@ TENSORMAPEXPR(, cl::sycl::access::mode::read_write) template struct ExtractAccessor, Dev> > { static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator, Dev> eval) - -> decltype(AccessorConstructor::template getAccessor(cgh, eval)){ - return AccessorConstructor::template getAccessor(cgh, eval); + -> decltype(AccessorConstructor::template getAccessor(cgh, eval)){ + return AccessorConstructor::template getAccessor(cgh, eval); } }; @@ -167,8 +167,8 @@ struct ExtractAccessor, Dev> > template struct ExtractAccessor, Dev> > { static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator, Dev> eval) - -> decltype(utility::tuple::append(AccessorConstructor::template getAccessor(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){ - return utility::tuple::append(AccessorConstructor::template getAccessor(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl())); + -> decltype(utility::tuple::append(AccessorConstructor::template getAccessor(cgh, 
eval), AccessorConstructor::getTuple(cgh, eval.impl()))){ + return utility::tuple::append(AccessorConstructor::template getAccessor(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl())); } }; @@ -181,8 +181,8 @@ struct ExtractAccessor, Dev> > template struct ExtractAccessor, Dev> > { static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator, Dev> eval) - -> decltype(AccessorConstructor::template getAccessor(cgh, eval)){ - return AccessorConstructor::template getAccessor(cgh, eval); + -> decltype(AccessorConstructor::template getAccessor(cgh, eval)){ + return AccessorConstructor::template getAccessor(cgh, eval); } }; diff --git a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp index ecebf7d68..7201bfe37 100644 --- a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp +++ b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp @@ -25,55 +25,50 @@ using Eigen::SyclDevice; using Eigen::Tensor; using Eigen::TensorMap; -// Types used in tests: -using TestTensor = Tensor; -using TestTensorMap = TensorMap>; -static void test_broadcast_sycl(){ +static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){ - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; + // BROADCAST test: + array in_range = {{2, 3, 5, 7}}; + array broadcasts = {{2, 3, 1, 4}}; + array out_range; // = in_range * broadcasts + for (size_t i = 0; i < out_range.size(); ++i) + out_range[i] = in_range[i] * broadcasts[i]; + + Tensor input(in_range); + Tensor out(out_range); + + for (size_t i = 0; i < in_range.size(); ++i) + VERIFY_IS_EQUAL(out.dimension(i), out_range[i]); + + + for (int i = 0; i < input.size(); ++i) + input(i) = static_cast(i); + + float * gpu_in_data = static_cast(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float))); + float * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); + + TensorMap> gpu_in(gpu_in_data, in_range); + TensorMap> gpu_out(gpu_out_data, out_range); + sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float)); + gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); + + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 28; ++l) { + VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l)); + } } } - }); - SyclDevice sycl_device(q); - // BROADCAST test: - array in_range = {{2, 3, 5, 7}}; - array broadcasts = {{2, 3, 1, 4}}; - array out_range; // = in_range * broadcasts - for (size_t i = 0; i < out_range.size(); ++i) - out_range[i] = in_range[i] * broadcasts[i]; - - Tensor input(in_range); - Tensor output(out_range); - - for (int i = 0; i < input.size(); ++i) - input(i) = static_cast(i); - - TensorMap gpu_in(input.data(), in_range); - TensorMap gpu_out(output.data(), out_range); - gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); - sycl_device.deallocate(output.data()); - - for (size_t i = 0; i < in_range.size(); ++i) - VERIFY_IS_EQUAL(output.dimension(i), out_range[i]); - - for (int i = 0; i < 4; ++i) { - for (int j = 0; j < 9; ++j) { - for (int k = 0; k < 5; ++k) { - for (int l = 0; l < 28; ++l) { - VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), output(i,j,k,l)); - } - } - } - } - 
printf("Broadcast Test Passed\n");
+  }
+  printf("Broadcast Test Passed\n");
+  sycl_device.deallocate(gpu_in_data);
+  sycl_device.deallocate(gpu_out_data);
 }
 
 void test_cxx11_tensor_broadcast_sycl() {
-  CALL_SUBTEST(test_broadcast_sycl());
+  cl::sycl::gpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_broadcast_sycl(sycl_device));
 }
diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp
index f54fc8786..7f79753c5 100644
--- a/unsupported/test/cxx11_tensor_device_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_device_sycl.cpp
@@ -20,20 +20,12 @@
 #include "main.h"
 #include <unsupported/Eigen/CXX11/Tensor>
 
-void test_device_sycl() {
-  cl::sycl::gpu_selector s;
-  cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
-    for (const auto& e : l) {
-      try {
-        std::rethrow_exception(e);
-      } catch (cl::sycl::exception e) {
-        std::cout << e.what() << std::endl;
-      }
-    }
-  });
-  Eigen::SyclDevice sycl_device(q);
-  printf("Helo from ComputeCpp: Device Exists\n");
+void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
+  std::cout << "Hello from ComputeCpp: the requested device exists and the device name is : "
+            << sycl_device.m_queue.get_device(). template get_info<cl::sycl::info::device::name>() << std::endl;
 }
 void test_cxx11_tensor_device_sycl() {
-  CALL_SUBTEST(test_device_sycl());
+  cl::sycl::gpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_device_sycl(sycl_device));
 }
diff --git a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
--- a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp
-void test_forced_eval_sycl() {
+void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
   int sizeDim1 = 100;
   int sizeDim2 = 200;
   int sizeDim3 = 200;
   Eigen::array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
   Eigen::Tensor<float, 3> in1(tensorRange);
   Eigen::Tensor<float, 3> in2(tensorRange);
   Eigen::Tensor<float, 3> out(tensorRange);
 
+  float * gpu_in1_data  = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
+  float * gpu_in2_data  = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
+  float * gpu_out_data =  static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+
   in1 = in1.random() + in1.constant(10.0f);
   in2 = in2.random() + in2.constant(10.0f);
 
-  // creating TensorMap from tensor
-  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(in1.data(), tensorRange);
-  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(in2.data(), tensorRange);
-  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(out.data(), tensorRange);
-
+  // creating TensorMap from tensor
+  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
+  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
+  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
+  sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float));
+  sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float));
   /// c=(a+b)*b
-  gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
-  sycl_device.deallocate(out.data());
+  gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -62,7 +56,15 @@ void test_forced_eval_sycl() {
       }
     }
   }
-  printf("(a+b)*b Test Passed\n");
+  printf("(a+b)*b Test Passed\n");
+  sycl_device.deallocate(gpu_in1_data);
+  sycl_device.deallocate(gpu_in2_data);
+  sycl_device.deallocate(gpu_out_data);
+
 }
 
-void test_cxx11_tensor_forced_eval_sycl() { CALL_SUBTEST(test_forced_eval_sycl()); }
+void test_cxx11_tensor_forced_eval_sycl() {
+  cl::sycl::gpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_forced_eval_sycl(sycl_device));
+}
diff --git a/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
index bd09744a6..a9ef82907 100644
--- a/unsupported/test/cxx11_tensor_reduction_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_reduction_sycl.cpp
@@ -22,126 +22,117 @@
 
-static void test_full_reductions_sycl() {
-
-  cl::sycl::gpu_selector s;
-  cl::sycl::queue
q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - Eigen::SyclDevice sycl_device(q); +static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) { const int num_rows = 452; const int num_cols = 765; array tensorRange = {{num_rows, num_cols}}; Tensor in(tensorRange); + Tensor full_redux; + Tensor full_redux_gpu; + in.setRandom(); - Tensor full_redux; - Tensor full_redux_g; full_redux = in.sum(); - float* out_data = (float*)sycl_device.allocate(sizeof(float)); - TensorMap > in_gpu(in.data(), tensorRange); - TensorMap > full_redux_gpu(out_data); - full_redux_gpu.device(sycl_device) = in_gpu.sum(); - sycl_device.deallocate(out_data); + + float* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); + float* gpu_out_data =(float*)sycl_device.allocate(sizeof(float)); + + TensorMap > in_gpu(gpu_in_data, tensorRange); + TensorMap > out_gpu(gpu_out_data); + + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + out_gpu.device(sycl_device) = in_gpu.sum(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float)); // Check that the CPU and GPU reductions return the same result. VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); } - -static void test_first_dim_reductions_sycl() { - - - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - Eigen::SyclDevice sycl_device(q); +static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) { int dim_x = 145; int dim_y = 1; int dim_z = 67; array tensorRange = {{dim_x, dim_y, dim_z}}; - - Tensor in(tensorRange); - in.setRandom(); Eigen::array red_axis; red_axis[0] = 0; - Tensor redux = in.sum(red_axis); array reduced_tensorRange = {{dim_y, dim_z}}; - Tensor redux_g(reduced_tensorRange); - TensorMap > in_gpu(in.data(), tensorRange); - float* out_data = (float*)sycl_device.allocate(dim_y*dim_z*sizeof(float)); - TensorMap > redux_gpu(out_data, dim_y, dim_z ); - redux_gpu.device(sycl_device) = in_gpu.sum(red_axis); - sycl_device.deallocate(out_data); + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); + + in.setRandom(); + + redux= in.sum(red_axis); + + float* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); + float* gpu_out_data = static_cast(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float))); + + TensorMap > in_gpu(gpu_in_data, tensorRange); + TensorMap > out_gpu(gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + out_gpu.device(sycl_device) = in_gpu.sum(red_axis); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float)); + // Check that the CPU and GPU reductions return the same result. 
- for(int j=0; j tensorRange = {{dim_x, dim_y, dim_z}}; - - Tensor in(tensorRange); - in.setRandom(); Eigen::array red_axis; red_axis[0] = 2; - Tensor redux = in.sum(red_axis); array reduced_tensorRange = {{dim_x, dim_y}}; - Tensor redux_g(reduced_tensorRange); - TensorMap > in_gpu(in.data(), tensorRange); - float* out_data = (float*)sycl_device.allocate(dim_x*dim_y*sizeof(float)); - TensorMap > redux_gpu(out_data, dim_x, dim_y ); - redux_gpu.device(sycl_device) = in_gpu.sum(red_axis); - sycl_device.deallocate(out_data); + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); + + in.setRandom(); + + redux= in.sum(red_axis); + + float* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); + float* gpu_out_data = static_cast(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float))); + + TensorMap > in_gpu(gpu_in_data, tensorRange); + TensorMap > out_gpu(gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + out_gpu.device(sycl_device) = in_gpu.sum(red_axis); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float)); // Check that the CPU and GPU reductions return the same result. - for(int j=0; j; -using TestTensorMap = TensorMap>; - -void test_sycl_cpu() { - cl::sycl::gpu_selector s; - cl::sycl::queue q(s, [=](cl::sycl::exception_list l) { - for (const auto& e : l) { - try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; - } - } - }); - SyclDevice sycl_device(q); +void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) { int sizeDim1 = 100; int sizeDim2 = 100; int sizeDim3 = 100; array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; - TestTensor in1(tensorRange); - TestTensor in2(tensorRange); - TestTensor in3(tensorRange); - TestTensor out(tensorRange); - in1 = in1.random(); + Tensor in1(tensorRange); + Tensor in2(tensorRange); + Tensor in3(tensorRange); + Tensor out(tensorRange); + in2 = in2.random(); in3 = in3.random(); - TestTensorMap gpu_in1(in1.data(), tensorRange); - TestTensorMap gpu_in2(in2.data(), tensorRange); - TestTensorMap gpu_in3(in3.data(), tensorRange); - TestTensorMap gpu_out(out.data(), tensorRange); + + float * gpu_in1_data = static_cast(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float))); + float * gpu_in2_data = static_cast(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float))); + float * gpu_in3_data = static_cast(sycl_device.allocate(in3.dimensions().TotalSize()*sizeof(float))); + float * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); + + TensorMap> gpu_in1(gpu_in1_data, tensorRange); + TensorMap> gpu_in2(gpu_in2_data, tensorRange); + TensorMap> gpu_in3(gpu_in3_data, tensorRange); + TensorMap> gpu_out(gpu_out_data, tensorRange); /// a=1.2f gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f); - sycl_device.deallocate(in1.data()); + sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -74,7 +65,7 @@ void test_sycl_cpu() { /// a=b*1.2f gpu_out.device(sycl_device) = gpu_in1 * 1.2f; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < 
sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -86,8 +77,9 @@ void test_sycl_cpu() { printf("a=b*1.2f Test Passed\n"); /// c=a*b + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(float)); gpu_out.device(sycl_device) = gpu_in1 * gpu_in2; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -101,7 +93,7 @@ void test_sycl_cpu() { /// c=a+b gpu_out.device(sycl_device) = gpu_in1 + gpu_in2; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -115,7 +107,7 @@ void test_sycl_cpu() { /// c=a*a gpu_out.device(sycl_device) = gpu_in1 * gpu_in1; - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -125,12 +117,11 @@ void test_sycl_cpu() { } } } - printf("c= a*a Test Passed\n"); //a*3.14f + b*2.7f gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f); - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -143,8 +134,9 @@ void test_sycl_cpu() { printf("a*3.14f + b*2.7f Test Passed\n"); ///d= (a>0.5? b:c) + sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.dimensions().TotalSize())*sizeof(float)); gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3); - sycl_device.deallocate(out.data()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -155,8 +147,13 @@ void test_sycl_cpu() { } } printf("d= (a>0.5? b:c) Test Passed\n"); - + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); + sycl_device.deallocate(gpu_in3_data); + sycl_device.deallocate(gpu_out_data); } void test_cxx11_tensor_sycl() { - CALL_SUBTEST(test_sycl_cpu()); + cl::sycl::gpu_selector s; + Eigen::SyclDevice sycl_device(s); + CALL_SUBTEST(test_sycl_cpu(sycl_device)); }
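
A note on the ownership scheme used by the new SyclDevice (not part of the patch): add_sycl_buffer keeps buffers of arbitrary element type in a single std::map<const void*, std::shared_ptr<void>> by pairing a type-erased shared_ptr with a deleter that remembers the concrete buffer type. The stand-alone sketch below distills that pattern; DeviceBuffer, add_buffer and main are hypothetical stand-ins (cl::sycl::buffer<T, 1> and SyclDevice::add_sycl_buffer in the real code).

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <memory>

    // Stand-in for cl::sycl::buffer<T, 1>; it only reports its lifetime.
    template <typename T> struct DeviceBuffer {
      explicit DeviceBuffer(std::size_t n) : size(n) { std::printf("create buffer of %zu elements\n", n); }
      ~DeviceBuffer() { std::printf("destroy buffer of %zu elements\n", size); }
      std::size_t size;
    };

    std::map<const void*, std::shared_ptr<void>> buffer_map;

    template <typename T>
    void add_buffer(const T* key, std::size_t num_bytes) {
      using Type = DeviceBuffer<T>;
      // The lambda deleter captures the concrete type at insertion time, so
      // erasing the entry (or clearing the map) destroys the buffer correctly
      // even though the map only stores shared_ptr<void>.
      buffer_map.insert({key, std::shared_ptr<void>(
          new Type(num_bytes / sizeof(T)),
          [](void* mem) { delete static_cast<Type*>(mem); })});
    }

    int main() {
      float host_key[4];
      add_buffer(host_key, sizeof(host_key));
      buffer_map.clear();  // ~DeviceBuffer<float> runs here via the stored deleter
    }

This is why deallocate_all() in the patch can release every outstanding buffer without knowing any element types.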