diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index 2be1a5ad6..844cec199 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -44,14 +44,14 @@ struct SyclDevice {
   // destructor
   ~SyclDevice() { deallocate_all(); }
 
-  template <typename T> void deallocate(T *p) const {
+  template <typename T> EIGEN_STRONG_INLINE void deallocate(T *p) const {
     auto it = buffer_map.find(p);
     if (it != buffer_map.end()) {
       buffer_map.erase(it);
       internal::aligned_free(p);
     }
   }
-  void deallocate_all() const {
+  EIGEN_STRONG_INLINE void deallocate_all() const {
     std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
     while (it!=buffer_map.end()) {
       auto p=it->first;
@@ -88,23 +88,23 @@ struct SyclDevice {
   }
 
   /// allocating memory on the cpu
-  void *allocate(size_t) const {
+  EIGEN_STRONG_INLINE void *allocate(size_t) const {
     return internal::aligned_malloc(8);
   }
 
   // some runtime conditions that can be applied here
-  bool isDeviceSuitable() const { return true; }
+  EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; }
 
-  void memcpy(void *dst, const void *src, size_t n) const {
+  EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
     ::memcpy(dst, src, n);
   }
 
-  template <typename T> void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
+  template <typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
     auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
     memcpy(host_acc.get_pointer(), src, n);
   }
 
-  inline void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
+  EIGEN_STRONG_INLINE void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
     tileSize =m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
     rng = n;
     if (rng==0) rng=1;
@@ -116,7 +116,7 @@ struct SyclDevice {
     }
   }
 
-  template <typename T> void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
+  template <typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
     auto it = buffer_map.find(src);
     if (it != buffer_map.end()) {
       size_t rng, GRange, tileSize;
@@ -141,7 +141,7 @@ struct SyclDevice {
     }
   }
 
-  template <typename T> void memset(T *buff, int c, size_t n) const {
+  template <typename T> EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const {
     size_t rng, GRange, tileSize;
     parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
 
@@ -158,7 +158,7 @@ struct SyclDevice {
     });
     m_queue.throw_asynchronous();
   }
-  int majorDeviceVersion() const {
+  EIGEN_STRONG_INLINE int majorDeviceVersion() const {
    return 1;
   }
 };
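
The @@ -88 hunk above shows parallel_for_setup only up to `if (rng==0) rng=1;`, so the code that actually derives GRange is not visible here. The sketch below is a hedged reconstruction of the intent, not verbatim Eigen code: the name parallel_for_setup_sketch, the max_wg_size parameter, and everything after the visible lines are assumptions, following the usual idiom of rounding a global range up to a multiple of the work-group size.

    #include <cstddef>
    #include <iostream>

    // Hedged sketch of SyclDevice::parallel_for_setup's three outputs. The lines
    // up to "if (rng == 0) rng = 1;" mirror the diff above; the GRange rounding
    // that follows is an assumption (round up to a multiple of tileSize), since
    // the hunk cuts off before it.
    void parallel_for_setup_sketch(std::size_t n, std::size_t max_wg_size,
                                   std::size_t &tileSize, std::size_t &rng,
                                   std::size_t &GRange) {
      tileSize = max_wg_size / 2;  // half of max_work_group_size, as in the diff
      rng = n;
      if (rng == 0) rng = 1;       // never set up an empty range
      GRange = rng;
      if (tileSize > GRange) {
        tileSize = GRange;                         // a tile cannot exceed the range
      } else if (GRange % tileSize != 0) {
        GRange += tileSize - (GRange % tileSize);  // round up to a tileSize multiple
      }
    }

    int main() {
      std::size_t tileSize, rng, GRange;
      parallel_for_setup_sketch(1000, 256, tileSize, rng, GRange);
      std::cout << tileSize << ' ' << rng << ' ' << GRange << '\n';  // 128 1000 1024
    }

The padding direction matters: in SYCL, as in OpenCL, the global size of an nd_range must be divisible by the local size, so the range is rounded up rather than truncated, and kernels typically guard the overshoot with a check of the global id against rng.
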
diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp
index 820bc88d0..584fa8026 100644
--- a/unsupported/test/cxx11_tensor_device_sycl.cpp
+++ b/unsupported/test/cxx11_tensor_device_sycl.cpp
@@ -29,11 +29,11 @@ void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
   array<int, 1> tensorRange = {{sizeDim1}};
   Tensor<int, 1> in(tensorRange);
   Tensor<int, 1> in1(tensorRange);
-  memset(in1.data(), 1,in1.dimensions().TotalSize()*sizeof(int));
-  int * gpu_in_data = static_cast<int*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(int)));
-  sycl_device.memset(gpu_in_data, 1,in.dimensions().TotalSize()*sizeof(int) );
-  sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.dimensions().TotalSize()*sizeof(int) );
-  for (int i=0; i<in.dimensions().TotalSize(); i++)
+  memset(in1.data(), 1,in1.size()*sizeof(int));
+  int * gpu_in_data = static_cast<int*>(sycl_device.allocate(in.size()*sizeof(int)));
+  sycl_device.memset(gpu_in_data, 1,in.size()*sizeof(int) );
+  sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(int) );
+  for (int i=0; i<in.size(); i++)
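
Taken together, the test exercises the allocate/memset/memcpyDeviceToHost/deallocate surface in its updated form. The following is a minimal host-side usage sketch of that API: the roundtrip() name and the size 100 are illustrative, not part of the patch, and it assumes an Eigen build with SYCL support enabled.

    #define EIGEN_USE_SYCL
    #include <cstddef>
    #include <unsupported/Eigen/CXX11/Tensor>

    // Hedged usage sketch for the SyclDevice API touched by this patch,
    // mirroring the test above.
    void roundtrip(const Eigen::SyclDevice &sycl_device) {
      Eigen::Tensor<int, 1> host(100);
      const std::size_t bytes = host.size() * sizeof(int);  // size(), per the test change

      int *gpu_data = static_cast<int *>(sycl_device.allocate(bytes));
      sycl_device.memset(gpu_data, 1, bytes);        // fills every byte with 0x01
      sycl_device.memcpyDeviceToHost(host.data(), gpu_data, bytes);
      sycl_device.deallocate(gpu_data);              // also drops the buffer_map entry
    }

Note that both the host ::memset in the test and the device-side sycl_device.memset set bytes, not elements, so each int compared by the test ends up holding 0x01010101 on both sides, which is why the element-wise comparison of in and in1 passes.
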