Mirror of https://gitlab.com/libeigen/eigen.git
Converting all SYCL buffers to uninitialised, device-only buffers; adding memcpyHostToDevice and memcpyDeviceToHost to SyclDevice; modifying all examples to obey the new rules; moving SYCL queue creation into the device, per Benoit's suggestion; removing the SYCL-specific condition for returning m_result in TensorReduction.h, per Benoit's suggestion.
parent dad177be01
commit d57430dd73
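For readers following the diff below, the host/device flow that every example now has to follow looks roughly like this. A minimal sketch, assuming a SYCL-enabled Eigen build (EIGEN_USE_SYCL) and a ComputeCpp-style toolchain; the tensor shape and the gpu_in/gpu_out names are illustrative only, but the allocate/memcpyHostToDevice/memcpyDeviceToHost/deallocate calls are the ones this commit introduces:

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // The queue is now created inside SyclDevice from a selector,
  // instead of being built by the caller and passed in.
  cl::sycl::gpu_selector s;
  Eigen::SyclDevice sycl_device(s);

  Eigen::array<int, 1> range = {{64}};
  Eigen::Tensor<float, 1> in(range), out(range);
  in.setRandom();

  // Device-side storage: uninitialised, device-only buffers keyed by pointer.
  float* gpu_in  = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize() * sizeof(float)));
  float* gpu_out = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize() * sizeof(float)));
  Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_in_map(gpu_in, range);
  Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_out_map(gpu_out, range);

  // Explicit staging replaces the old implicit map_allocator behaviour.
  sycl_device.memcpyHostToDevice(gpu_in, in.data(), in.dimensions().TotalSize() * sizeof(float));
  gpu_out_map.device(sycl_device) = gpu_in_map * 2.0f;
  sycl_device.memcpyDeviceToHost(out.data(), gpu_out, out.dimensions().TotalSize() * sizeof(float));

  sycl_device.deallocate(gpu_in);
  sycl_device.deallocate(gpu_out);
}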
@@ -128,14 +128,14 @@ macro(ei_add_test_internal_sycl testname testname_with_suffix)
       OUTPUT ${include_file}
       COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${host_file}\\\"" > ${include_file}
       COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${bc_file}.sycl\\\"" >> ${include_file}
-      DEPENDS ${filename}
+      DEPENDS ${filename} ${bc_file}.sycl
       COMMENT "Building ComputeCpp integration header file ${include_file}"
     )
     # Add a custom target for the generated integration header
-    add_custom_target(${testname}_integration_header_woho DEPENDS ${include_file})
+    add_custom_target(${testname}_integration_header_sycl DEPENDS ${include_file})

     add_executable(${targetname} ${include_file})
-    add_dependencies(${targetname} ${testname}_integration_header_woho)
+    add_dependencies(${targetname} ${testname}_integration_header_sycl)
     add_sycl_to_target(${targetname} ${filename} ${CMAKE_CURRENT_BINARY_DIR})

   if (targetname MATCHES "^eigen2_")
@@ -514,11 +514,11 @@ macro(ei_set_sitename)
   # if the sitename is not yet set, try to set it
   if(NOT ${SITE} OR ${SITE} STREQUAL "")
     set(eigen_computername $ENV{COMPUTERNAME})
     set(eigen_hostname $ENV{HOSTNAME})
     if(eigen_hostname)
       set(SITE ${eigen_hostname})
     elseif(eigen_computername)
       set(SITE ${eigen_computername})
     endif()
   endif()
   # in case it is already set, enforce lower case
@@ -638,18 +638,18 @@ macro(ei_get_cxxflags VAR)

   if(EIGEN_TEST_OPENMP)
     if (${VAR} STREQUAL "")
       set(${VAR} OMP)
     else()
       set(${VAR} ${${VAR}}-OMP)
     endif()
   endif()

   if(EIGEN_DEFAULT_TO_ROW_MAJOR)
     if (${VAR} STREQUAL "")
       set(${VAR} ROW)
     else()
       set(${VAR} ${${VAR}}-ROWMAJ)
     endif()
   endif()
 endmacro(ei_get_cxxflags)

@@ -1,6 +1,21 @@
 #.rst:
 # FindComputeCpp
 #---------------
+#
+#  Copyright 2016 Codeplay Software Ltd.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use these files except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.

 #########################
 # FindComputeCpp.cmake
@@ -8,6 +23,11 @@
 #
 # Tools for finding and building with ComputeCpp.
 #
+#  User must define COMPUTECPP_PACKAGE_ROOT_DIR pointing to the ComputeCpp
+#  installation.
+#
+#  Latest version of this file can be found at:
+#    https://github.com/codeplaysoftware/computecpp-sdk

 # Require CMake version 3.2.2 or higher
 cmake_minimum_required(VERSION 3.2.2)
@@ -32,7 +52,6 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
     message(FATAL_ERROR
       "host compiler - Not found! (clang version must be at least 3.6)")
   else()
-    set(COMPUTECPP_DISABLE_GCC_DUAL_ABI "True")
     message(STATUS "host compiler - clang ${CMAKE_CXX_COMPILER_VERSION}")
   endif()
 else()
@@ -48,11 +67,12 @@ mark_as_advanced(COMPUTECPP_64_BIT_CODE)
 # Find OpenCL package
 find_package(OpenCL REQUIRED)

-# Find ComputeCpp package
-if(EXISTS ${COMPUTECPP_PACKAGE_ROOT_DIR})
-  message(STATUS "ComputeCpp package - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+# Find ComputeCpp packagee
+if(NOT COMPUTECPP_PACKAGE_ROOT_DIR)
+  message(FATAL_ERROR
+    "ComputeCpp package - Not found! (please set COMPUTECPP_PACKAGE_ROOT_DIR")
 else()
-  message(FATAL_ERROR "ComputeCpp package - Not found! (please set COMPUTECPP_PACKAGE_ROOT_DIR) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "ComputeCpp package - Found")
 endif()
 option(COMPUTECPP_PACKAGE_ROOT_DIR "Path to the ComputeCpp Package")

@@ -61,9 +81,9 @@ find_program(COMPUTECPP_DEVICE_COMPILER compute++ PATHS
   ${COMPUTECPP_PACKAGE_ROOT_DIR} PATH_SUFFIXES bin)
 if (EXISTS ${COMPUTECPP_DEVICE_COMPILER})
   mark_as_advanced(COMPUTECPP_DEVICE_COMPILER)
-  message(STATUS "compute++ - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "compute++ - Found")
 else()
-  message(FATAL_ERROR "compute++ - Not found! (${COMPUTECPP_DEVICE_COMPILER}) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(FATAL_ERROR "compute++ - Not found! (${COMPUTECPP_DEVICE_COMPILER})")
 endif()

 # Obtain the path to computecpp_info
@@ -71,9 +91,9 @@ find_program(COMPUTECPP_INFO_TOOL computecpp_info PATHS
   ${COMPUTECPP_PACKAGE_ROOT_DIR} PATH_SUFFIXES bin)
 if (EXISTS ${COMPUTECPP_INFO_TOOL})
   mark_as_advanced(${COMPUTECPP_INFO_TOOL})
-  message(STATUS "computecpp_info - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "computecpp_info - Found")
 else()
-  message(FATAL_ERROR "computecpp_info - Not found! (${COMPUTECPP_INFO_TOOL}) (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(FATAL_ERROR "computecpp_info - Not found! (${COMPUTECPP_INFO_TOOL})")
 endif()

 # Obtain the path to the ComputeCpp runtime library
@@ -85,15 +105,15 @@ if (EXISTS ${COMPUTECPP_RUNTIME_LIBRARY})
   mark_as_advanced(COMPUTECPP_RUNTIME_LIBRARY)
   message(STATUS "libComputeCpp.so - Found")
 else()
-  message(FATAL_ERROR "libComputeCpp.so - Not found! (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(FATAL_ERROR "libComputeCpp.so - Not found!")
 endif()

 # Obtain the ComputeCpp include directory
 set(COMPUTECPP_INCLUDE_DIRECTORY ${COMPUTECPP_PACKAGE_ROOT_DIR}/include/)
 if (NOT EXISTS ${COMPUTECPP_INCLUDE_DIRECTORY})
-  message(FATAL_ERROR "ComputeCpp includes - Not found! (${COMPUTECPP_PACKAGE_ROOT_DIR}/include/)")
+  message(FATAL_ERROR "ComputeCpp includes - Not found!")
 else()
-  message(STATUS "ComputeCpp includes - Found (${COMPUTECPP_PACKAGE_ROOT_DIR})")
+  message(STATUS "ComputeCpp includes - Found")
 endif()

 # Obtain the package version
@@ -144,7 +164,7 @@ endif()
 #
 #  targetName : Name of the target.
 #  sourceFile : Source file to be compiled.
-#  binaryDir : Intermediate output directory for the integration header.
+#  binaryDir : Intermediate directory to output the integration header.
 #
 function(__build_spir targetName sourceFile binaryDir)

@@ -176,12 +196,13 @@ function(__build_spir targetName sourceFile binaryDir)
     OUTPUT ${outputSyclFile}
     COMMAND ${COMPUTECPP_DEVICE_COMPILER}
             ${COMPUTECPP_DEVICE_COMPILER_FLAGS}
-            -I${COMPUTECPP_INCLUDE_DIRECTORY}
+            -isystem ${COMPUTECPP_INCLUDE_DIRECTORY}
            ${COMPUTECPP_PLATFORM_SPECIFIC_ARGS}
            ${device_compiler_includes}
            -o ${outputSyclFile}
            -c ${CMAKE_CURRENT_SOURCE_DIR}/${sourceFile}
    DEPENDS ${sourceFile}
+   WORKING_DIRECTORY ${binaryDir}
    COMMENT "Building ComputeCpp integration header file ${outputSyclFile}")

  # Add a custom target for the generated integration header
@@ -190,10 +211,6 @@ function(__build_spir targetName sourceFile binaryDir)
   # Add a dependency on the integration header
   add_dependencies(${targetName} ${targetName}_integration_header)

-  # Force inclusion of the integration header for the host compiler
-  #set(compileFlags -include ${include_file} "-Wall")
-  target_compile_options(${targetName} PUBLIC ${compileFlags})
-
   # Set the host compiler C++ standard to C++11
   set_property(TARGET ${targetName} PROPERTY CXX_STANDARD 11)

@@ -210,11 +227,11 @@ endfunction()
 #######################
 #
 #  Adds a SYCL compilation custom command associated with an existing
-#  target and sets a dependency on that new command.
+#  target and sets a dependancy on that new command.
 #
 #  targetName : Name of the target to add a SYCL to.
 #  sourceFile : Source file to be compiled for SYCL.
-#  binaryDir : Intermediate output directory for the integration header.
+#  binaryDir : Intermediate directory to output the integration header.
 #
 function(add_sycl_to_target targetName sourceFile binaryDir)

@ -16,95 +16,93 @@
|
|||||||
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
|
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
|
||||||
|
|
||||||
namespace Eigen {
|
namespace Eigen {
|
||||||
/// \struct BufferT is used to specialise add_sycl_buffer function for
|
|
||||||
// two types of buffer we have. When the MapAllocator is true, we create the
|
|
||||||
// sycl buffer with MapAllocator.
|
|
||||||
/// We have to const_cast the input pointer in order to work around the fact
|
|
||||||
/// that sycl does not accept map allocator for const pointer.
|
|
||||||
template <typename T, bool MapAllocator>
|
|
||||||
struct BufferT {
|
|
||||||
using Type = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>;
|
|
||||||
static inline void add_sycl_buffer(const T *ptr, size_t num_bytes,std::map<const void *, std::shared_ptr<void>> &buffer_map) {
|
|
||||||
buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(const_cast<T *>(ptr), cl::sycl::range<1>(num_bytes))))));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
/// specialisation of the \ref BufferT when the MapAllocator is false. In this
|
|
||||||
/// case we only create the device-only buffer.
|
|
||||||
template <typename T>
|
|
||||||
struct BufferT<T, false> {
|
|
||||||
using Type = cl::sycl::buffer<T, 1>;
|
|
||||||
static inline void add_sycl_buffer(const T *ptr, size_t num_bytes, std::map<const void *, std::shared_ptr<void>> &buffer_map) {
|
|
||||||
buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(cl::sycl::range<1>(num_bytes))))));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct SyclDevice {
|
struct SyclDevice {
|
||||||
/// class members
|
/// class members
|
||||||
/// sycl queue
|
/// sycl queue
|
||||||
cl::sycl::queue &m_queue;
|
mutable cl::sycl::queue m_queue;
|
||||||
/// std::map is the container used to make sure that we create only one buffer
|
/// std::map is the container used to make sure that we create only one buffer
|
||||||
/// per pointer. The lifespan of the buffer
|
/// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
|
||||||
/// now depends on the lifespan of SyclDevice. If a non-read-only pointer is
|
/// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
|
||||||
/// needed to be accessed on the host we should manually deallocate it.
|
|
||||||
mutable std::map<const void *, std::shared_ptr<void>> buffer_map;
|
mutable std::map<const void *, std::shared_ptr<void>> buffer_map;
|
||||||
|
/// creating device by using selector
|
||||||
SyclDevice(cl::sycl::queue &q) : m_queue(q) {}
|
template<typename dev_Selector> SyclDevice(dev_Selector s)
|
||||||
|
:m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
|
||||||
|
for (const auto& e : l) {
|
||||||
|
try {
|
||||||
|
std::rethrow_exception(e);
|
||||||
|
} catch (cl::sycl::exception e) {
|
||||||
|
std::cout << e.what() << std::endl;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})) {}
|
||||||
// destructor
|
// destructor
|
||||||
~SyclDevice() { deallocate_all(); }
|
~SyclDevice() { deallocate_all(); }
|
||||||
|
|
||||||
template <typename T>
|
template <typename T> void deallocate(T *p) const {
|
||||||
void deallocate(const T *p) const {
|
|
||||||
auto it = buffer_map.find(p);
|
auto it = buffer_map.find(p);
|
||||||
if (it != buffer_map.end()) {
|
if (it != buffer_map.end()) {
|
||||||
buffer_map.erase(it);
|
buffer_map.erase(it);
|
||||||
|
internal::aligned_free(p);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
void deallocate_all() const { buffer_map.clear(); }
|
void deallocate_all() const {
|
||||||
|
std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
|
||||||
|
while (it!=buffer_map.end()) {
|
||||||
|
auto p=it->first;
|
||||||
|
buffer_map.erase(it);
|
||||||
|
internal::aligned_free(const_cast<void*>(p));
|
||||||
|
it=buffer_map.begin();
|
||||||
|
}
|
||||||
|
buffer_map.clear();
|
||||||
|
}
|
||||||
|
|
||||||
/// creation of sycl accessor for a buffer. This function first tries to find
|
/// creation of sycl accessor for a buffer. This function first tries to find
|
||||||
/// the buffer in the buffer_map.
|
/// the buffer in the buffer_map. If found it gets the accessor from it, if not,
|
||||||
/// If found it gets the accessor from it, if not, the function then adds an
|
///the function then adds an entry by creating a sycl buffer for that particular pointer.
|
||||||
/// entry by creating a sycl buffer
|
template <cl::sycl::access::mode AcMd, typename T> inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
|
||||||
/// for that particular pointer.
|
|
||||||
template <cl::sycl::access::mode AcMd, bool MapAllocator, typename T>
|
|
||||||
inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
|
|
||||||
get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
|
get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
|
||||||
return (get_sycl_buffer<MapAllocator,T>(num_bytes, ptr).template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
|
return (get_sycl_buffer<T>(num_bytes, ptr)->template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
|
||||||
}
|
}
|
||||||
|
|
||||||
template <bool MapAllocator, typename T>
|
template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
|
||||||
inline typename BufferT<T, MapAllocator>::Type
|
using Type = cl::sycl::buffer<T, 1>;
|
||||||
get_sycl_buffer(size_t num_bytes,const T * ptr) const {
|
std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
|
||||||
if(MapAllocator && !ptr){
|
[](void *dataMem) { delete static_cast<Type*>(dataMem); })));
|
||||||
eigen_assert("pointer with map_Allocator cannot be null. Please initialise the input pointer"); }
|
(static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
|
||||||
auto it = buffer_map.find(ptr);
|
return ret;
|
||||||
if (it == buffer_map.end()) {
|
}
|
||||||
BufferT<T, MapAllocator>::add_sycl_buffer(ptr, num_bytes, buffer_map);
|
|
||||||
}
|
template <typename T> inline cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
|
||||||
return (*((typename BufferT<T, MapAllocator>::Type*)((buffer_map.at(ptr).get()))));
|
return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(ptr, num_bytes).first->second.get());
|
||||||
}
|
}
|
||||||
|
|
||||||
/// allocating memory on the cpu
|
/// allocating memory on the cpu
|
||||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
|
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
|
||||||
return internal::aligned_malloc(num_bytes);
|
return internal::aligned_malloc(8);
|
||||||
}
|
}
|
||||||
|
|
||||||
// some runtime conditions that can be applied here
|
// some runtime conditions that can be applied here
|
||||||
bool isDeviceSuitable() const { return true; }
|
bool isDeviceSuitable() const { return true; }
|
||||||
|
|
||||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void *buffer) const {
|
|
||||||
internal::aligned_free(buffer);
|
|
||||||
}
|
|
||||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
|
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
|
||||||
::memcpy(dst, src, n);
|
::memcpy(dst, src, n);
|
||||||
}
|
}
|
||||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void *dst, const void *src, size_t n) const {
|
|
||||||
memcpy(dst, src, n);
|
template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
|
||||||
|
auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
|
||||||
|
memcpy(host_acc.get_pointer(), src, n);
|
||||||
}
|
}
|
||||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const void *src, size_t n) const {
|
/// whith the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon.
|
||||||
memcpy(dst, src, n);
|
template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
|
||||||
|
auto it = buffer_map.find(src);
|
||||||
|
if (it != buffer_map.end()) {
|
||||||
|
auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::host_buffer>();
|
||||||
|
memcpy(dst,host_acc.get_pointer(), n);
|
||||||
|
} else{
|
||||||
|
eigen_assert("no device memory found. The memory might be destroyed before creation");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
|
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
|
||||||
::memset(buffer, c, n);
|
::memset(buffer, c, n);
|
||||||
}
|
}
|
||||||
@@ -112,6 +110,7 @@ template <bool MapAllocator, typename T>
     return 1;
   }
 };
+
 } // end namespace Eigen

 #endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
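The new SyclDevice above keys every device-only buffer by its host pointer in a std::map<const void *, std::shared_ptr<void>>, using a type-erased shared_ptr whose captured deleter still knows the concrete buffer type; set_final_data(nullptr) additionally stops the buffer from copying back on destruction. A freestanding sketch of just that ownership technique, with a hypothetical DeviceBuffer standing in for cl::sycl::buffer so it runs without a SYCL toolchain:

#include <cstdio>
#include <map>
#include <memory>

// Stand-in for cl::sycl::buffer<T, 1>: any type with a non-trivial destructor.
template <typename T>
struct DeviceBuffer {
  size_t bytes;
  explicit DeviceBuffer(size_t n) : bytes(n) { std::printf("create %zu bytes\n", n); }
  ~DeviceBuffer() { std::printf("destroy %zu bytes\n", bytes); }
};

std::map<const void*, std::shared_ptr<void>> buffer_map;

// Mirrors add_sycl_buffer: at most one buffer per host pointer. The
// shared_ptr<void> erases T, but the captured deleter still runs
// ~DeviceBuffer<T>() when the map entry goes away.
template <typename T>
DeviceBuffer<T>* add_buffer(const T* ptr, size_t num_bytes) {
  auto ret = buffer_map.insert({ptr, std::shared_ptr<void>(
      new DeviceBuffer<T>(num_bytes),
      [](void* p) { delete static_cast<DeviceBuffer<T>*>(p); })});
  return static_cast<DeviceBuffer<T>*>(ret.first->second.get());
}

int main() {
  float host[4] = {0};
  add_buffer(host, sizeof(host));  // creates the buffer
  add_buffer(host, sizeof(host));  // second call finds the existing entry
  buffer_map.clear();              // deleter destroys the typed buffer
}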
@ -662,13 +662,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// required by sycl in order to extract the output accessor
|
EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return m_result; }
|
||||||
#ifndef EIGEN_USE_SYCL
|
|
||||||
EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return NULL; }
|
|
||||||
#else
|
|
||||||
EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const {
|
|
||||||
return m_result; }
|
|
||||||
#endif
|
|
||||||
/// required by sycl in order to extract the accessor
|
/// required by sycl in order to extract the accessor
|
||||||
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
|
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
|
||||||
/// added for sycl in order to construct the buffer from the sycl device
|
/// added for sycl in order to construct the buffer from the sycl device
|
||||||
|
@ -27,9 +27,9 @@ namespace internal {
|
|||||||
|
|
||||||
template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
|
template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
|
||||||
template<typename BufferTOut, typename BufferTIn>
|
template<typename BufferTOut, typename BufferTIn>
|
||||||
static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
|
static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
|
||||||
do {
|
do {
|
||||||
auto f = [length, local, &bufOut, &bufI](cl::sycl::handler& h) mutable {
|
auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable {
|
||||||
cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
|
cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
|
||||||
cl::sycl::range<1>{std::min(length, local)}};
|
cl::sycl::range<1>{std::min(length, local)}};
|
||||||
/* Two accessors are used: one to the buffer that is being reduced,
|
/* Two accessors are used: one to the buffer that is being reduced,
|
||||||
@ -37,7 +37,7 @@ static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
|
|||||||
auto aI =
|
auto aI =
|
||||||
bufI.template get_access<cl::sycl::access::mode::read_write>(h);
|
bufI.template get_access<cl::sycl::access::mode::read_write>(h);
|
||||||
auto aOut =
|
auto aOut =
|
||||||
bufOut.template get_access<cl::sycl::access::mode::discard_write>(h);
|
bufOut->template get_access<cl::sycl::access::mode::discard_write>(h);
|
||||||
cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
|
cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
|
||||||
cl::sycl::access::target::local>
|
cl::sycl::access::target::local>
|
||||||
scratch(cl::sycl::range<1>(local), h);
|
scratch(cl::sycl::range<1>(local), h);
|
||||||
@ -134,7 +134,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
|
|||||||
/// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
|
/// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
|
||||||
if (GRange < outTileSize) outTileSize=GRange;
|
if (GRange < outTileSize) outTileSize=GRange;
|
||||||
// getting final out buffer at the moment the created buffer is true because there is no need for assign
|
// getting final out buffer at the moment the created buffer is true because there is no need for assign
|
||||||
auto out_buffer =dev.template get_sycl_buffer<true, typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
|
auto out_buffer =dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
|
||||||
/// creating the shared memory for calculating reduction.
|
/// creating the shared memory for calculating reduction.
|
||||||
/// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
|
/// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
|
||||||
/// recursively apply reduction on it in order to reduce the whole.
|
/// recursively apply reduction on it in order to reduce the whole.
|
||||||
@ -208,7 +208,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
|
|||||||
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
|
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
|
||||||
// create a tuple of accessors from Evaluator
|
// create a tuple of accessors from Evaluator
|
||||||
auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
|
auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
|
||||||
auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write, true>(num_coeffs_to_preserve,cgh, output);
|
auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write>(num_coeffs_to_preserve,cgh, output);
|
||||||
|
|
||||||
cgh.parallel_for<Self>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
|
cgh.parallel_for<Self>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
|
||||||
typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
|
typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
|
||||||
|
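The reducers above all follow the same SYCL idiom: accessors are requested from buffers inside each command group and captured by the kernel lambda, which is exactly what get_sycl_accessor and createTupleOfAccessors do. A freestanding sketch of that per-command-group pattern, assuming any SYCL 1.2-style implementation (the queue, buffer, and kernel name here are illustrative, not Eigen's); note the buffer is created uninitialised, matching the device-only buffers this commit introduces:

#include <CL/sycl.hpp>

int main() {
  cl::sycl::queue q{cl::sycl::default_selector{}};
  // Uninitialised device buffer: no host pointer, no write-back on destruction.
  cl::sycl::buffer<float, 1> buf{cl::sycl::range<1>(64)};
  q.submit([&](cl::sycl::handler& cgh) {
    // Accessors are created per command group, then captured by the kernel.
    auto acc = buf.get_access<cl::sycl::access::mode::discard_write>(cgh);
    cgh.parallel_for<class init_kernel>(cl::sycl::range<1>(64),
        [=](cl::sycl::id<1> i) { acc[i] = static_cast<float>(i[0]); });
  });
  q.wait();
}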
@ -56,10 +56,10 @@ struct AccessorConstructor{
|
|||||||
-> decltype(utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)))) {
|
-> decltype(utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)))) {
|
||||||
return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)));
|
return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)));
|
||||||
}
|
}
|
||||||
template< cl::sycl::access::mode AcM, bool MapAllocator, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
|
template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
|
||||||
-> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM, MapAllocator,
|
-> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM,
|
||||||
typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
|
typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
|
||||||
return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, MapAllocator, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
|
return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -141,8 +141,8 @@ struct ExtractAccessor<TensorEvaluator<TensorAssignOp<LHSExpr, RHSExpr>, Dev> >
|
|||||||
template <typename PlainObjectType, int Options_, typename Dev>\
|
template <typename PlainObjectType, int Options_, typename Dev>\
|
||||||
struct ExtractAccessor<TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> > {\
|
struct ExtractAccessor<TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> > {\
|
||||||
static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> eval)\
|
static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_>, Dev> eval)\
|
||||||
-> decltype(AccessorConstructor::template getAccessor<ACCType, true>(cgh, eval)){\
|
-> decltype(AccessorConstructor::template getAccessor<ACCType>(cgh, eval)){\
|
||||||
return AccessorConstructor::template getAccessor<ACCType, true>(cgh, eval);\
|
return AccessorConstructor::template getAccessor<ACCType>(cgh, eval);\
|
||||||
}\
|
}\
|
||||||
};
|
};
|
||||||
TENSORMAPEXPR(const, cl::sycl::access::mode::read)
|
TENSORMAPEXPR(const, cl::sycl::access::mode::read)
|
||||||
@ -153,8 +153,8 @@ TENSORMAPEXPR(, cl::sycl::access::mode::read_write)
|
|||||||
template <typename Expr, typename Dev>
|
template <typename Expr, typename Dev>
|
||||||
struct ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> > {
|
struct ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> > {
|
||||||
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> eval)
|
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> eval)
|
||||||
-> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval)){
|
-> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
|
||||||
return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval);
|
return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -167,8 +167,8 @@ struct ExtractAccessor<TensorEvaluator<TensorForcedEvalOp<Expr>, Dev> >
|
|||||||
template <typename Expr, typename Dev>
|
template <typename Expr, typename Dev>
|
||||||
struct ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> > {
|
struct ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> > {
|
||||||
static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<const TensorEvalToOp<Expr>, Dev> eval)
|
static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<const TensorEvalToOp<Expr>, Dev> eval)
|
||||||
-> decltype(utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write, false>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){
|
-> decltype(utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){
|
||||||
return utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write, false>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()));
|
return utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -181,8 +181,8 @@ struct ExtractAccessor<TensorEvaluator<TensorEvalToOp<Expr>, Dev> >
|
|||||||
template <typename OP, typename Dim, typename Expr, typename Dev>
|
template <typename OP, typename Dim, typename Expr, typename Dev>
|
||||||
struct ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> > {
|
struct ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> > {
|
||||||
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> eval)
|
static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> eval)
|
||||||
-> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval)){
|
-> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
|
||||||
return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read, false>(cgh, eval);
|
return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -25,55 +25,50 @@ using Eigen::SyclDevice;
 using Eigen::Tensor;
 using Eigen::TensorMap;

-// Types used in tests:
-using TestTensor = Tensor<float, 3>;
-using TestTensorMap = TensorMap<Tensor<float, 3>>;
-static void test_broadcast_sycl(){
-
-  cl::sycl::gpu_selector s;
-  cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
-    for (const auto& e : l) {
-      try {
-        std::rethrow_exception(e);
-      } catch (cl::sycl::exception e) {
-        std::cout << e.what() << std::endl;
-      }
-    }
-  });
-  SyclDevice sycl_device(q);
+static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){
+
   // BROADCAST test:
-  array<int, 4> in_range = {{2, 3, 5, 7}};
-  array<int, in_range.size()> broadcasts = {{2, 3, 1, 4}};
-  array<int, in_range.size()> out_range; // = in_range * broadcasts
+  array<int, 4> in_range = {{2, 3, 5, 7}};
+  array<int, 4> broadcasts = {{2, 3, 1, 4}};
+  array<int, 4> out_range; // = in_range * broadcasts
   for (size_t i = 0; i < out_range.size(); ++i)
     out_range[i] = in_range[i] * broadcasts[i];

-  Tensor<float, in_range.size()> input(in_range);
-  Tensor<float, out_range.size()> output(out_range);
+  Tensor<float, 4> input(in_range);
+  Tensor<float, 4> out(out_range);
+
+  for (size_t i = 0; i < in_range.size(); ++i)
+    VERIFY_IS_EQUAL(out.dimension(i), out_range[i]);

   for (int i = 0; i < input.size(); ++i)
     input(i) = static_cast<float>(i);

-  TensorMap<decltype(input)> gpu_in(input.data(), in_range);
-  TensorMap<decltype(output)> gpu_out(output.data(), out_range);
-  gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
-  sycl_device.deallocate(output.data());
-
-  for (size_t i = 0; i < in_range.size(); ++i)
-    VERIFY_IS_EQUAL(output.dimension(i), out_range[i]);
+  float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
+  float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+
+  TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range);
+  TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
+  sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
+  gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));

   for (int i = 0; i < 4; ++i) {
     for (int j = 0; j < 9; ++j) {
       for (int k = 0; k < 5; ++k) {
         for (int l = 0; l < 28; ++l) {
-          VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), output(i,j,k,l));
+          VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l));
         }
       }
     }
   }
   printf("Broadcast Test Passed\n");
+  sycl_device.deallocate(gpu_in_data);
+  sycl_device.deallocate(gpu_out_data);
 }

 void test_cxx11_tensor_broadcast_sycl() {
-  CALL_SUBTEST(test_broadcast_sycl());
+  cl::sycl::gpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_broadcast_sycl(sycl_device));
 }
@@ -20,20 +20,12 @@
 #include "main.h"
 #include <unsupported/Eigen/CXX11/Tensor>

-void test_device_sycl() {
-  cl::sycl::gpu_selector s;
-  cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
-    for (const auto& e : l) {
-      try {
-        std::rethrow_exception(e);
-      } catch (cl::sycl::exception e) {
-        std::cout << e.what() << std::endl;
-      }
-    }
-  });
-  Eigen::SyclDevice sycl_device(q);
-  printf("Helo from ComputeCpp: Device Exists\n");
+void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
+  std::cout <<"Helo from ComputeCpp: the requested device exists and the device name is : "
+  << sycl_device.m_queue.get_device(). template get_info<cl::sycl::info::device::name>() <<std::endl;;
 }
 void test_cxx11_tensor_device_sycl() {
-  CALL_SUBTEST(test_device_sycl());
+  cl::sycl::gpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_device_sycl(sycl_device));
 }
@ -22,18 +22,7 @@
|
|||||||
|
|
||||||
using Eigen::Tensor;
|
using Eigen::Tensor;
|
||||||
|
|
||||||
void test_forced_eval_sycl() {
|
void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
|
||||||
cl::sycl::gpu_selector s;
|
|
||||||
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
|
|
||||||
for (const auto& e : l) {
|
|
||||||
try {
|
|
||||||
std::rethrow_exception(e);
|
|
||||||
} catch (cl::sycl::exception e) {
|
|
||||||
std::cout << e.what() << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
SyclDevice sycl_device(q);
|
|
||||||
|
|
||||||
int sizeDim1 = 100;
|
int sizeDim1 = 100;
|
||||||
int sizeDim2 = 200;
|
int sizeDim2 = 200;
|
||||||
@@ -43,17 +32,22 @@ void test_forced_eval_sycl()
   Eigen::Tensor<float, 3> in2(tensorRange);
   Eigen::Tensor<float, 3> out(tensorRange);

+  float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
+  float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
+  float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
+
   in1 = in1.random() + in1.constant(10.0f);
   in2 = in2.random() + in2.constant(10.0f);

   // creating TensorMap from tensor
-  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(in1.data(), tensorRange);
-  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(in2.data(), tensorRange);
-  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(out.data(), tensorRange);
+  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
+  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
+  Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
+  sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float));
+  sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float));
   /// c=(a+b)*b
   gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
-  sycl_device.deallocate(out.data());
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -62,7 +56,15 @@ void test_forced_eval_sycl()
       }
     }
   }
   printf("(a+b)*b Test Passed\n");
+  sycl_device.deallocate(gpu_in1_data);
+  sycl_device.deallocate(gpu_in2_data);
+  sycl_device.deallocate(gpu_out_data);
 }

-void test_cxx11_tensor_forced_eval_sycl() { CALL_SUBTEST(test_forced_eval_sycl()); }
+void test_cxx11_tensor_forced_eval_sycl() {
+  cl::sycl::gpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_forced_eval_sycl(sycl_device));
+}
@ -22,126 +22,117 @@
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
static void test_full_reductions_sycl() {
|
static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
|
||||||
|
|
||||||
|
|
||||||
cl::sycl::gpu_selector s;
|
|
||||||
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
|
|
||||||
for (const auto& e : l) {
|
|
||||||
try {
|
|
||||||
std::rethrow_exception(e);
|
|
||||||
} catch (cl::sycl::exception e) {
|
|
||||||
std::cout << e.what() << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
Eigen::SyclDevice sycl_device(q);
|
|
||||||
|
|
||||||
const int num_rows = 452;
|
const int num_rows = 452;
|
||||||
const int num_cols = 765;
|
const int num_cols = 765;
|
||||||
array<int, 2> tensorRange = {{num_rows, num_cols}};
|
array<int, 2> tensorRange = {{num_rows, num_cols}};
|
||||||
|
|
||||||
Tensor<float, 2> in(tensorRange);
|
Tensor<float, 2> in(tensorRange);
|
||||||
|
Tensor<float, 0> full_redux;
|
||||||
|
Tensor<float, 0> full_redux_gpu;
|
||||||
|
|
||||||
in.setRandom();
|
in.setRandom();
|
||||||
|
|
||||||
Tensor<float, 0> full_redux;
|
|
||||||
Tensor<float, 0> full_redux_g;
|
|
||||||
full_redux = in.sum();
|
full_redux = in.sum();
|
||||||
float* out_data = (float*)sycl_device.allocate(sizeof(float));
|
|
||||||
TensorMap<Tensor<float, 2> > in_gpu(in.data(), tensorRange);
|
float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
|
||||||
TensorMap<Tensor<float, 0> > full_redux_gpu(out_data);
|
float* gpu_out_data =(float*)sycl_device.allocate(sizeof(float));
|
||||||
full_redux_gpu.device(sycl_device) = in_gpu.sum();
|
|
||||||
sycl_device.deallocate(out_data);
|
TensorMap<Tensor<float, 2> > in_gpu(gpu_in_data, tensorRange);
|
||||||
|
TensorMap<Tensor<float, 0> > out_gpu(gpu_out_data);
|
||||||
|
|
||||||
|
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
|
||||||
|
out_gpu.device(sycl_device) = in_gpu.sum();
|
||||||
|
sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float));
|
||||||
// Check that the CPU and GPU reductions return the same result.
|
// Check that the CPU and GPU reductions return the same result.
|
||||||
VERIFY_IS_APPROX(full_redux_gpu(), full_redux());
|
VERIFY_IS_APPROX(full_redux_gpu(), full_redux());
|
||||||
|
|
||||||
|
sycl_device.deallocate(gpu_in_data);
|
||||||
|
sycl_device.deallocate(gpu_out_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
|
||||||
static void test_first_dim_reductions_sycl() {
|
|
||||||
|
|
||||||
|
|
||||||
cl::sycl::gpu_selector s;
|
|
||||||
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
|
|
||||||
for (const auto& e : l) {
|
|
||||||
try {
|
|
||||||
std::rethrow_exception(e);
|
|
||||||
} catch (cl::sycl::exception e) {
|
|
||||||
std::cout << e.what() << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
Eigen::SyclDevice sycl_device(q);
|
|
||||||
|
|
||||||
int dim_x = 145;
|
int dim_x = 145;
|
||||||
int dim_y = 1;
|
int dim_y = 1;
|
||||||
int dim_z = 67;
|
int dim_z = 67;
|
||||||
|
|
||||||
array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
|
array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
|
||||||
|
|
||||||
Tensor<float, 3> in(tensorRange);
|
|
||||||
in.setRandom();
|
|
||||||
Eigen::array<int, 1> red_axis;
|
Eigen::array<int, 1> red_axis;
|
||||||
red_axis[0] = 0;
|
red_axis[0] = 0;
|
||||||
Tensor<float, 2> redux = in.sum(red_axis);
|
|
||||||
array<int, 2> reduced_tensorRange = {{dim_y, dim_z}};
|
array<int, 2> reduced_tensorRange = {{dim_y, dim_z}};
|
||||||
Tensor<float, 2> redux_g(reduced_tensorRange);
|
|
||||||
TensorMap<Tensor<float, 3> > in_gpu(in.data(), tensorRange);
|
|
||||||
float* out_data = (float*)sycl_device.allocate(dim_y*dim_z*sizeof(float));
|
|
||||||
TensorMap<Tensor<float, 2> > redux_gpu(out_data, dim_y, dim_z );
|
|
||||||
redux_gpu.device(sycl_device) = in_gpu.sum(red_axis);
|
|
||||||
|
|
||||||
sycl_device.deallocate(out_data);
|
Tensor<float, 3> in(tensorRange);
|
||||||
|
Tensor<float, 2> redux(reduced_tensorRange);
|
||||||
|
Tensor<float, 2> redux_gpu(reduced_tensorRange);
|
||||||
|
|
||||||
|
in.setRandom();
|
||||||
|
|
||||||
|
redux= in.sum(red_axis);
|
||||||
|
|
||||||
|
float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
|
||||||
|
float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
|
||||||
|
|
||||||
|
TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
|
||||||
|
TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
|
||||||
|
|
||||||
|
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
|
||||||
|
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
|
||||||
|
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
|
||||||
|
|
||||||
// Check that the CPU and GPU reductions return the same result.
|
// Check that the CPU and GPU reductions return the same result.
|
||||||
for(int j=0; j<dim_y; j++ )
|
for(int j=0; j<reduced_tensorRange[0]; j++ )
|
||||||
for(int k=0; k<dim_z; k++ )
|
for(int k=0; k<reduced_tensorRange[1]; k++ )
|
||||||
VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
|
VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
|
||||||
|
|
||||||
|
sycl_device.deallocate(gpu_in_data);
|
||||||
|
sycl_device.deallocate(gpu_out_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) {
|
||||||
static void test_last_dim_reductions_sycl() {
|
|
||||||
|
|
||||||
|
|
||||||
cl::sycl::gpu_selector s;
|
|
||||||
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
|
|
||||||
for (const auto& e : l) {
|
|
||||||
try {
|
|
||||||
std::rethrow_exception(e);
|
|
||||||
} catch (cl::sycl::exception e) {
|
|
||||||
std::cout << e.what() << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
Eigen::SyclDevice sycl_device(q);
|
|
||||||
|
|
||||||
int dim_x = 567;
|
int dim_x = 567;
|
||||||
int dim_y = 1;
|
int dim_y = 1;
|
||||||
int dim_z = 47;
|
int dim_z = 47;
|
||||||
|
|
||||||
array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
|
array<int, 3> tensorRange = {{dim_x, dim_y, dim_z}};
|
||||||
|
|
||||||
Tensor<float, 3> in(tensorRange);
|
|
||||||
in.setRandom();
|
|
||||||
Eigen::array<int, 1> red_axis;
|
Eigen::array<int, 1> red_axis;
|
||||||
red_axis[0] = 2;
|
red_axis[0] = 2;
|
||||||
Tensor<float, 2> redux = in.sum(red_axis);
|
|
||||||
array<int, 2> reduced_tensorRange = {{dim_x, dim_y}};
|
array<int, 2> reduced_tensorRange = {{dim_x, dim_y}};
|
||||||
Tensor<float, 2> redux_g(reduced_tensorRange);
|
|
||||||
TensorMap<Tensor<float, 3> > in_gpu(in.data(), tensorRange);
|
|
||||||
float* out_data = (float*)sycl_device.allocate(dim_x*dim_y*sizeof(float));
|
|
||||||
TensorMap<Tensor<float, 2> > redux_gpu(out_data, dim_x, dim_y );
|
|
||||||
redux_gpu.device(sycl_device) = in_gpu.sum(red_axis);
|
|
||||||
|
|
||||||
sycl_device.deallocate(out_data);
|
Tensor<float, 3> in(tensorRange);
|
||||||
|
Tensor<float, 2> redux(reduced_tensorRange);
|
||||||
|
Tensor<float, 2> redux_gpu(reduced_tensorRange);
|
||||||
|
|
||||||
|
in.setRandom();
|
||||||
|
|
||||||
|
redux= in.sum(red_axis);
|
||||||
|
|
||||||
|
float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
|
||||||
|
float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
|
||||||
|
|
||||||
|
TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
|
||||||
|
TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
|
||||||
|
|
||||||
|
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
|
||||||
|
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
|
||||||
|
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
|
||||||
// Check that the CPU and GPU reductions return the same result.
|
// Check that the CPU and GPU reductions return the same result.
|
||||||
for(int j=0; j<dim_x; j++ )
|
for(int j=0; j<reduced_tensorRange[0]; j++ )
|
||||||
for(int k=0; k<dim_y; k++ )
|
for(int k=0; k<reduced_tensorRange[1]; k++ )
|
||||||
VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
|
VERIFY_IS_APPROX(redux_gpu(j,k), redux(j,k));
|
||||||
|
|
||||||
|
sycl_device.deallocate(gpu_in_data);
|
||||||
|
sycl_device.deallocate(gpu_out_data);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void test_cxx11_tensor_reduction_sycl() {
|
void test_cxx11_tensor_reduction_sycl() {
|
||||||
CALL_SUBTEST((test_full_reductions_sycl()));
|
cl::sycl::gpu_selector s;
|
||||||
CALL_SUBTEST((test_first_dim_reductions_sycl()));
|
Eigen::SyclDevice sycl_device(s);
|
||||||
CALL_SUBTEST((test_last_dim_reductions_sycl()));
|
CALL_SUBTEST((test_full_reductions_sycl(sycl_device)));
|
||||||
|
CALL_SUBTEST((test_first_dim_reductions_sycl(sycl_device)));
|
||||||
|
CALL_SUBTEST((test_last_dim_reductions_sycl(sycl_device)));
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -27,42 +27,33 @@ using Eigen::SyclDevice;
|
|||||||
using Eigen::Tensor;
|
using Eigen::Tensor;
|
||||||
using Eigen::TensorMap;
|
using Eigen::TensorMap;
|
||||||
|
|
||||||
// Types used in tests:
|
void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
|
||||||
using TestTensor = Tensor<float, 3>;
|
|
||||||
using TestTensorMap = TensorMap<Tensor<float, 3>>;
|
|
||||||
|
|
||||||
void test_sycl_cpu() {
|
|
||||||
cl::sycl::gpu_selector s;
|
|
||||||
cl::sycl::queue q(s, [=](cl::sycl::exception_list l) {
|
|
||||||
for (const auto& e : l) {
|
|
||||||
try {
|
|
||||||
std::rethrow_exception(e);
|
|
||||||
} catch (cl::sycl::exception e) {
|
|
||||||
std::cout << e.what() << std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
SyclDevice sycl_device(q);
|
|
||||||
|
|
||||||
int sizeDim1 = 100;
|
int sizeDim1 = 100;
|
||||||
int sizeDim2 = 100;
|
int sizeDim2 = 100;
|
||||||
int sizeDim3 = 100;
|
int sizeDim3 = 100;
|
||||||
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
|
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
|
||||||
TestTensor in1(tensorRange);
|
Tensor<float, 3> in1(tensorRange);
|
||||||
TestTensor in2(tensorRange);
|
Tensor<float, 3> in2(tensorRange);
|
||||||
TestTensor in3(tensorRange);
|
Tensor<float, 3> in3(tensorRange);
|
||||||
TestTensor out(tensorRange);
|
Tensor<float, 3> out(tensorRange);
|
||||||
in1 = in1.random();
|
|
||||||
in2 = in2.random();
|
in2 = in2.random();
|
||||||
in3 = in3.random();
|
in3 = in3.random();
|
||||||
TestTensorMap gpu_in1(in1.data(), tensorRange);
|
|
||||||
TestTensorMap gpu_in2(in2.data(), tensorRange);
|
float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
|
||||||
TestTensorMap gpu_in3(in3.data(), tensorRange);
|
float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
|
||||||
TestTensorMap gpu_out(out.data(), tensorRange);
|
float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.dimensions().TotalSize()*sizeof(float)));
|
||||||
|
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
|
||||||
|
|
||||||
|
TensorMap<Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
|
||||||
|
TensorMap<Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
|
||||||
|
TensorMap<Tensor<float, 3>> gpu_in3(gpu_in3_data, tensorRange);
|
||||||
|
TensorMap<Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
|
||||||
|
|
||||||
/// a=1.2f
|
/// a=1.2f
|
||||||
gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f);
|
gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f);
|
||||||
sycl_device.deallocate(in1.data());
|
sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(float));
|
||||||
for (int i = 0; i < sizeDim1; ++i) {
|
for (int i = 0; i < sizeDim1; ++i) {
|
||||||
for (int j = 0; j < sizeDim2; ++j) {
|
for (int j = 0; j < sizeDim2; ++j) {
|
||||||
for (int k = 0; k < sizeDim3; ++k) {
|
for (int k = 0; k < sizeDim3; ++k) {
|
||||||
@@ -74,7 +65,7 @@ void test_sycl_cpu() {

   /// a=b*1.2f
   gpu_out.device(sycl_device) = gpu_in1 * 1.2f;
-  sycl_device.deallocate(out.data());
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -86,8 +77,9 @@ void test_sycl_cpu() {
   printf("a=b*1.2f Test Passed\n");

   /// c=a*b
+  sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(float));
   gpu_out.device(sycl_device) = gpu_in1 * gpu_in2;
-  sycl_device.deallocate(out.data());
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -101,7 +93,7 @@ void test_sycl_cpu() {

   /// c=a+b
   gpu_out.device(sycl_device) = gpu_in1 + gpu_in2;
-  sycl_device.deallocate(out.data());
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -115,7 +107,7 @@ void test_sycl_cpu() {

   /// c=a*a
   gpu_out.device(sycl_device) = gpu_in1 * gpu_in1;
-  sycl_device.deallocate(out.data());
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -125,12 +117,11 @@ void test_sycl_cpu() {
       }
     }
   }

   printf("c= a*a Test Passed\n");

   //a*3.14f + b*2.7f
   gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f);
-  sycl_device.deallocate(out.data());
+  sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -143,8 +134,9 @@ void test_sycl_cpu() {
   printf("a*3.14f + b*2.7f Test Passed\n");

   ///d= (a>0.5? b:c)
+  sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.dimensions().TotalSize())*sizeof(float));
   gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3);
-  sycl_device.deallocate(out.data());
+  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
   for (int i = 0; i < sizeDim1; ++i) {
     for (int j = 0; j < sizeDim2; ++j) {
       for (int k = 0; k < sizeDim3; ++k) {
@@ -155,8 +147,13 @@ void test_sycl_cpu() {
       }
     }
   }
   printf("d= (a>0.5? b:c) Test Passed\n");
+  sycl_device.deallocate(gpu_in1_data);
+  sycl_device.deallocate(gpu_in2_data);
+  sycl_device.deallocate(gpu_in3_data);
+  sycl_device.deallocate(gpu_out_data);
 }
 void test_cxx11_tensor_sycl() {
-  CALL_SUBTEST(test_sycl_cpu());
+  cl::sycl::gpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_sycl_cpu(sycl_device));
 }