diff --git a/Eigen/Core b/Eigen/Core index 0ee7e9f43..a9a082782 100644 --- a/Eigen/Core +++ b/Eigen/Core @@ -43,10 +43,12 @@ #else #define EIGEN_DEVICE_FUNC #endif - #else #define EIGEN_DEVICE_FUNC +#endif +#if defined(EIGEN_USE_SYCL) + #define EIGEN_DONT_VECTORIZE #endif // When compiling CUDA device code with NVCC, pull in math functions from the @@ -283,6 +285,15 @@ #include #endif +#if defined(__SYCL_DEVICE_ONLY__) + #undef min + #undef max + #undef isnan + #undef isinf + #undef isfinite + #include +#endif + /** \brief Namespace containing all symbols from the %Eigen library. */ namespace Eigen { diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h index 8d47fb8a4..7dfbc92d5 100644 --- a/Eigen/src/Core/MathFunctions.h +++ b/Eigen/src/Core/MathFunctions.h @@ -413,7 +413,7 @@ inline NewType cast(const OldType& x) static inline Scalar run(const Scalar& x) { EIGEN_STATIC_ASSERT((!NumTraits::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) - using std::round; + EIGEN_USING_STD_MATH(round); return round(x); } }; @@ -640,7 +640,7 @@ template struct random_default_impl { static inline Scalar run(const Scalar& x, const Scalar& y) - { + { typedef typename conditional::IsSigned,std::ptrdiff_t,std::size_t>::type ScalarX; if(y EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log1p(const float &x) { return ::log1pf(x); } @@ -969,10 +974,24 @@ inline typename internal::pow_impl::result_type pow(const Scala return internal::pow_impl::run(x, y); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float pow(float x, float y) { return cl::sycl::pow(x, y); } +EIGEN_ALWAYS_INLINE double pow(double x, double y) { return cl::sycl::pow(x, y); } +#endif // defined(__SYCL_DEVICE_ONLY__) + template EIGEN_DEVICE_FUNC bool (isnan) (const T &x) { return internal::isnan_impl(x); } template EIGEN_DEVICE_FUNC bool (isinf) (const T &x) { return internal::isinf_impl(x); } template EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float isnan(float x) { return cl::sycl::isnan(x); } +EIGEN_ALWAYS_INLINE double isnan(double x) { return cl::sycl::isnan(x); } +EIGEN_ALWAYS_INLINE float isinf(float x) { return cl::sycl::isinf(x); } +EIGEN_ALWAYS_INLINE double isinf(double x) { return cl::sycl::isinf(x); } +EIGEN_ALWAYS_INLINE float isfinite(float x) { return cl::sycl::isfinite(x); } +EIGEN_ALWAYS_INLINE double isfinite(double x) { return cl::sycl::isfinite(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + template EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x) @@ -980,6 +999,11 @@ inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x) return EIGEN_MATHFUNC_IMPL(round, Scalar)::run(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float round(float x) { return cl::sycl::round(x); } +EIGEN_ALWAYS_INLINE double round(double x) { return cl::sycl::round(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + template EIGEN_DEVICE_FUNC T (floor)(const T& x) @@ -988,6 +1012,11 @@ T (floor)(const T& x) return floor(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float floor(float x) { return cl::sycl::floor(x); } +EIGEN_ALWAYS_INLINE double floor(double x) { return cl::sycl::floor(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float floor(const float &x) { return ::floorf(x); } @@ -1004,6 +1033,11 @@ T (ceil)(const T& x) return ceil(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE 
float ceil(float x) { return cl::sycl::ceil(x); } +EIGEN_ALWAYS_INLINE double ceil(double x) { return cl::sycl::ceil(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float ceil(const float &x) { return ::ceilf(x); } @@ -1044,6 +1078,11 @@ T sqrt(const T &x) return sqrt(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float sqrt(float x) { return cl::sycl::sqrt(x); } +EIGEN_ALWAYS_INLINE double sqrt(double x) { return cl::sycl::sqrt(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T log(const T &x) { @@ -1051,6 +1090,12 @@ T log(const T &x) { return log(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float log(float x) { return cl::sycl::log(x); } +EIGEN_ALWAYS_INLINE double log(double x) { return cl::sycl::log(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log(const float &x) { return ::logf(x); } @@ -1066,6 +1111,11 @@ typename NumTraits::Real abs(const T &x) { return abs(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float abs(float x) { return cl::sycl::fabs(x); } +EIGEN_ALWAYS_INLINE double abs(double x) { return cl::sycl::fabs(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float abs(const float &x) { return ::fabsf(x); } @@ -1091,6 +1141,11 @@ T exp(const T &x) { return exp(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float exp(float x) { return cl::sycl::exp(x); } +EIGEN_ALWAYS_INLINE double exp(double x) { return cl::sycl::exp(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float exp(const float &x) { return ::expf(x); } @@ -1106,6 +1161,11 @@ T cos(const T &x) { return cos(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float cos(float x) { return cl::sycl::cos(x); } +EIGEN_ALWAYS_INLINE double cos(double x) { return cl::sycl::cos(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cos(const float &x) { return ::cosf(x); } @@ -1121,6 +1181,11 @@ T sin(const T &x) { return sin(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float sin(float x) { return cl::sycl::sin(x); } +EIGEN_ALWAYS_INLINE double sin(double x) { return cl::sycl::sin(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sin(const float &x) { return ::sinf(x); } @@ -1136,6 +1201,11 @@ T tan(const T &x) { return tan(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float tan(float x) { return cl::sycl::tan(x); } +EIGEN_ALWAYS_INLINE double tan(double x) { return cl::sycl::tan(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tan(const float &x) { return ::tanf(x); } @@ -1151,6 +1221,11 @@ T acos(const T &x) { return acos(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float acos(float x) { return cl::sycl::acos(x); } +EIGEN_ALWAYS_INLINE double acos(double x) { return cl::sycl::acos(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float acos(const float &x) { return ::acosf(x); } @@ -1166,6 +1241,11 @@ T asin(const T &x) { return asin(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float asin(float 
x) { return cl::sycl::asin(x); } +EIGEN_ALWAYS_INLINE double asin(double x) { return cl::sycl::asin(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float asin(const float &x) { return ::asinf(x); } @@ -1181,6 +1261,11 @@ T atan(const T &x) { return atan(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float atan(float x) { return cl::sycl::atan(x); } +EIGEN_ALWAYS_INLINE double atan(double x) { return cl::sycl::atan(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float atan(const float &x) { return ::atanf(x); } @@ -1197,6 +1282,11 @@ T cosh(const T &x) { return cosh(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float cosh(float x) { return cl::sycl::cosh(x); } +EIGEN_ALWAYS_INLINE double cosh(double x) { return cl::sycl::cosh(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cosh(const float &x) { return ::coshf(x); } @@ -1212,6 +1302,11 @@ T sinh(const T &x) { return sinh(x); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float sinh(float x) { return cl::sycl::sinh(x); } +EIGEN_ALWAYS_INLINE double sinh(double x) { return cl::sycl::sinh(x); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sinh(const float &x) { return ::sinhf(x); } @@ -1227,7 +1322,10 @@ T tanh(const T &x) { return tanh(x); } -#if (!defined(__CUDACC__)) && EIGEN_FAST_MATH +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float tanh(float x) { return cl::sycl::tanh(x); } +EIGEN_ALWAYS_INLINE double tanh(double x) { return cl::sycl::tanh(x); } +#elif (!defined(__CUDACC__)) && EIGEN_FAST_MATH EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tanh(float x) { return internal::generic_fast_tanh_float(x); } #endif @@ -1247,6 +1345,11 @@ T fmod(const T& a, const T& b) { return fmod(a, b); } +#if defined(__SYCL_DEVICE_ONLY__) +EIGEN_ALWAYS_INLINE float fmod(float x, float y) { return cl::sycl::fmod(x, y); } +EIGEN_ALWAYS_INLINE double fmod(double x, double y) { return cl::sycl::fmod(x, y); } +#endif // defined(__SYCL_DEVICE_ONLY__) + #ifdef __CUDACC__ template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE @@ -1389,13 +1492,13 @@ template<> struct random_impl template<> struct scalar_fuzzy_impl { typedef bool RealScalar; - + template EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) { return !x; } - + EIGEN_DEVICE_FUNC static inline bool isApprox(bool x, bool y, bool) { @@ -1407,10 +1510,10 @@ template<> struct scalar_fuzzy_impl { return (!x) || y; } - + }; - + } // end namespace internal } // end namespace Eigen diff --git a/Eigen/src/Core/functors/UnaryFunctors.h b/Eigen/src/Core/functors/UnaryFunctors.h index 2e6a00ffd..9d4d3eece 100644 --- a/Eigen/src/Core/functors/UnaryFunctors.h +++ b/Eigen/src/Core/functors/UnaryFunctors.h @@ -678,7 +678,13 @@ struct functor_traits > template struct scalar_isnan_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_isnan_op) typedef bool result_type; - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isnan)(a); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { +#if defined(__SYCL_DEVICE_ONLY__) + return numext::isnan(a); +#else + return (numext::isnan)(a); +#endif + } }; template struct functor_traits > @@ -696,7 +702,13 @@ struct functor_traits > template struct 
scalar_isinf_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_isinf_op) typedef bool result_type; - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isinf)(a); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { +#if defined(__SYCL_DEVICE_ONLY__) + return numext::isinf(a); +#else + return (numext::isinf)(a); +#endif + } }; template struct functor_traits > @@ -714,7 +726,13 @@ struct functor_traits > template struct scalar_isfinite_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_isfinite_op) typedef bool result_type; - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isfinite)(a); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { +#if defined(__SYCL_DEVICE_ONLY__) + return numext::isfinite(a); +#else + return (numext::isfinite)(a); +#endif + } }; template struct functor_traits > diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor index 7ecb4c74d..e41b67c56 100644 --- a/unsupported/Eigen/CXX11/Tensor +++ b/unsupported/Eigen/CXX11/Tensor @@ -13,7 +13,7 @@ #include "../../../Eigen/Core" -#ifdef EIGEN_USE_SYCL +#if defined(EIGEN_USE_SYCL) #undef min #undef max #undef isnan diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h index 7f0f16de3..3fe0219ac 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h @@ -16,27 +16,33 @@ #define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H namespace Eigen { -struct SyclDevice { - /// class members: - /// sycl queue - mutable cl::sycl::queue m_queue; +#define ConvertToActualTypeSycl(T, buf_acc) reinterpret_cast::pointer_t>((&(*buf_acc.get_pointer()))) + +struct QueueInterface { + /// class members: + bool exception_caught_ = false; /// std::map is the container used to make sure that we create only one buffer /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice. /// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it. - mutable std::map> buffer_map; - + mutable std::map> buffer_map; + /// sycl queue + mutable cl::sycl::queue m_queue; /// creating device by using selector - template explicit SyclDevice(dev_Selector s): + /// SyclStreamDevice is not owned. it is the caller's responsibility to destroy it. + template explicit QueueInterface(dev_Selector s): #ifdef EIGEN_EXCEPTIONS - m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) { + m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) { for (const auto& e : l) { try { - std::rethrow_exception(e); - } catch (cl::sycl::exception e) { - std::cout << e.what() << std::endl; + if (e) { + exception_caught_ = true; + std::rethrow_exception(e); } + } catch (cl::sycl::exception e) { + std::cerr << e.what() << std::endl; + } } })) #else @@ -44,63 +50,119 @@ struct SyclDevice { #endif {} - // destructor - ~SyclDevice() { deallocate_all(); } + /// creating device by using selector + /// SyclStreamDevice is not owned. it is the caller's responsibility to destroy it. 
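For reference, the cl::sycl::* forwards added to Eigen::numext earlier in this patch (pow, isnan/isinf/isfinite, round, floor, ceil, sqrt, log, abs, exp, the trigonometric and hyperbolic functions, fmod) rely on nothing more than ordinary overload resolution: a plain non-template float/double overload beats the generic template<typename T> fallback for exact argument types, which is the same mechanism the existing __CUDACC__ specialisations use. A minimal standalone sketch of that mechanism, with the illustrative names numext_sketch and device_sqrt standing in for Eigen::numext and cl::sycl::sqrt:

#include <cmath>
#include <iostream>

namespace numext_sketch {

// Generic fallback, as in Eigen::numext: handles any scalar type.
template <typename T>
T sqrt(const T& x) { return std::sqrt(x); }

// Stand-in for the device builtin (cl::sycl::sqrt in the patch).
inline float device_sqrt(float x) { return std::sqrt(x); }

// Non-template overload: preferred over the template for an exact float
// argument, so device code transparently ends up calling the builtin.
inline float sqrt(float x) { return device_sqrt(x); }

} // namespace numext_sketch

int main() {
  std::cout << numext_sketch::sqrt(2.0f) << "\n"; // float overload -> device_sqrt
  std::cout << numext_sketch::sqrt(2.0)  << "\n"; // generic template, T = double
}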
+ explicit QueueInterface(cl::sycl::device d): +#ifdef EIGEN_EXCEPTIONS + m_queue(cl::sycl::queue(d, [&](cl::sycl::exception_list l) { + for (const auto& e : l) { + try { + if (e) { + exception_caught_ = true; + std::rethrow_exception(e); + } + } catch (cl::sycl::exception e) { + std::cerr << e.what() << std::endl; + } + } + })) +#else + m_queue(cl::sycl::queue(d)) +#endif + {} + + + /// Allocating device pointer. This pointer is actually an 8 bytes host pointer used as key to access the sycl device buffer. + /// The reason is that we cannot use device buffer as a pointer as a m_data in Eigen leafNode expressions. So we create a key + /// pointer to be used in Eigen expression construction. When we convert the Eigen construction into the sycl construction we + /// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer. + /// The device pointer would be deleted by calling deallocate function. + EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + auto buf = cl::sycl::buffer(cl::sycl::range<1>(num_bytes)); + auto ptr =buf.get_access().get_pointer(); + buf.set_final_data(nullptr); + buffer_map.insert(std::pair>(ptr,buf)); + return static_cast(ptr); + } /// This is used to deallocate the device pointer. p is used as a key inside /// the map to find the device buffer and delete it. - template EIGEN_STRONG_INLINE void deallocate(T *p) const { - auto it = buffer_map.find(p); + EIGEN_STRONG_INLINE void deallocate(const void *p) const { + auto it = buffer_map.find(static_cast(p)); if (it != buffer_map.end()) { buffer_map.erase(it); - internal::aligned_free(p); } } - /// This is called by the SyclDevice destructor to release all allocated memory if the user didn't already do so. - /// We also free the host pointer that we have dedicated as a key to accessing the device buffer. - EIGEN_STRONG_INLINE void deallocate_all() const { - std::map>::iterator it=buffer_map.begin(); - while (it!=buffer_map.end()) { - auto p=it->first; - buffer_map.erase(it); - internal::aligned_free(const_cast(p)); - it=buffer_map.begin(); + EIGEN_STRONG_INLINE std::map>::iterator find_buffer(const void* ptr) const { + auto it1 = buffer_map.find(static_cast(ptr)); + if (it1 != buffer_map.end()){ + return it1; } - buffer_map.clear(); + else{ + for(std::map>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){ + auto size = it->second.get_size(); + if((it->first < (static_cast(ptr))) && ((static_cast(ptr)) < (it->first + size)) ) return it; + } + } + //eigen_assert("No sycl buffer found. Make sure that you have allocated memory for your buffer by calling allocate function in SyclDevice"); + std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling allocate function in SyclDevice"<< std::endl; + abort(); + //return buffer_map.end(); } + // This function checks if the runtime recorded an error for the + // underlying stream device. 
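The allocator above never returns real device memory: allocate builds a type-erased SYCL buffer of num_bytes bytes, registers it in buffer_map, and hands back the host pointer purely as a key for later lookups (ConvertToActualTypeSycl then reinterprets the raw accessor pointer as the scalar type inside kernels). find_buffer must therefore also resolve interior pointers, i.e. key plus a byte offset as produced by slicing, to the buffer that contains them. A host-only sketch of that lookup, with a plain std::map of sizes standing in for the map of SYCL buffers (RegionMap and find_region are illustrative names):

#include <cassert>
#include <cstdint>
#include <map>

// Key: pointer handed out by allocate(); value: size in bytes of that region.
using RegionMap = std::map<const std::uint8_t*, std::size_t>;

// Mirror of find_buffer(): exact key match first, otherwise scan for the
// region containing the interior pointer; the real code aborts on failure.
RegionMap::const_iterator find_region(const RegionMap& regions, const void* ptr) {
  const auto* p = static_cast<const std::uint8_t*>(ptr);
  auto it = regions.find(p);
  if (it != regions.end()) return it;
  for (it = regions.begin(); it != regions.end(); ++it)
    if (it->first < p && p < it->first + it->second) return it;
  return regions.end();
}

int main() {
  std::uint8_t a[256], b[64];
  RegionMap regions{{a, sizeof(a)}, {b, sizeof(b)}};
  assert(find_region(regions, a) != regions.end());   // exact key
  assert(find_region(regions, a + 100)->first == a);  // interior pointer
  assert(find_region(regions, b + 10)->first == b);
}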
+ EIGEN_STRONG_INLINE bool ok() const { + return !exception_caught_; + } + // destructor + ~QueueInterface() { buffer_map.clear(); } +}; + +template class MemCopyFunctor { + public: + typedef cl::sycl::accessor read_accessor; + typedef cl::sycl::accessor write_accessor; + MemCopyFunctor(read_accessor src_acc, write_accessor dst_acc, size_t rng, size_t i, size_t offset): m_src_acc(src_acc), m_dst_acc(dst_acc), m_rng(rng), m_i(i), m_offset(offset) {} + void operator()(cl::sycl::nd_item<1> itemID) { + auto src_ptr = ConvertToActualTypeSycl(T, m_src_acc); + auto dst_ptr = ConvertToActualTypeSycl(T, m_dst_acc); + auto globalid = itemID.get_global_linear_id(); + if (globalid < m_rng) { + dst_ptr[globalid + m_i] = src_ptr[globalid + m_offset]; + } + } + private: + read_accessor m_src_acc; + write_accessor m_dst_acc; + size_t m_rng; + size_t m_i; + size_t m_offset; +}; + +struct SyclDevice { + // class member. + QueueInterface* m_queue_stream; + /// QueueInterface is not owned. it is the caller's responsibility to destroy it. + explicit SyclDevice(QueueInterface* queue_stream) : m_queue_stream(queue_stream){} + /// Creation of sycl accessor for a buffer. This function first tries to find /// the buffer in the buffer_map. If found it gets the accessor from it, if not, /// the function then adds an entry by creating a sycl buffer for that particular pointer. - template EIGEN_STRONG_INLINE cl::sycl::accessor - get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const { - return (get_sycl_buffer(num_bytes, ptr)->template get_access(cgh)); - } - - /// Inserting a new sycl buffer. For every allocated device pointer only one buffer would be created. The buffer type is a device- only buffer. - /// The key pointer used to access the device buffer(the device pointer(ptr) ) must be initialised by the allocate function. - template EIGEN_STRONG_INLINE std::pair>::iterator,bool> add_sycl_buffer(size_t num_bytes, const T *ptr) const { - using Type = cl::sycl::buffer; - std::pair>::iterator,bool> ret; - if(ptr!=nullptr){ - ret= buffer_map.insert(std::pair>(ptr, std::shared_ptr(new Type(cl::sycl::range<1>(num_bytes)), - [](void *dataMem) { delete static_cast(dataMem); }))); - (static_cast(ret.first->second.get()))->set_final_data(nullptr); - } else { - eigen_assert("The device memory is not allocated. Please call allocate on the device!!"); - } - return ret; + template EIGEN_STRONG_INLINE cl::sycl::accessor + get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const void* ptr) const { + return (get_sycl_buffer(num_bytes, ptr).template get_access(cgh)); } /// Accessing the created sycl device buffer for the device pointer - template EIGEN_STRONG_INLINE cl::sycl::buffer* get_sycl_buffer(size_t num_bytes,const T * ptr) const { - return static_cast*>(add_sycl_buffer(num_bytes, ptr).first->second.get()); + EIGEN_STRONG_INLINE cl::sycl::buffer& get_sycl_buffer(size_t , const void * ptr) const { + return m_queue_stream->find_buffer(ptr)->second; } /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels EIGEN_STRONG_INLINE void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const { - tileSize =m_queue.get_device(). template get_info()/2; + tileSize =sycl_queue().get_device(). template get_info()/2; rng = n; if (rng==0) rng=1; GRange=rng; @@ -110,58 +172,35 @@ struct SyclDevice { if (xMode != 0) GRange += (tileSize - xMode); } } - - /// Allocating device pointer. 
This pointer is actually an 8 bytes host pointer used as key to access the sycl device buffer. - /// The reason is that we cannot use device buffer as a pointer as a m_data in Eigen leafNode expressions. So we create a key - /// pointer to be used in Eigen expression construction. When we convert the Eigen construction into the sycl construction we - /// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer. - /// The device pointer would be deleted by calling deallocate function. - EIGEN_STRONG_INLINE void *allocate(size_t) const { - return internal::aligned_malloc(8); + /// allocate device memory + EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const { + return m_queue_stream->allocate(num_bytes); } + /// deallocate device memory + EIGEN_STRONG_INLINE void deallocate(const void *p) const { + m_queue_stream->deallocate(p); + } // some runtime conditions that can be applied here EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; } - template EIGEN_STRONG_INLINE std::map>::iterator find_nearest(const T* ptr) const { - auto it1 = buffer_map.find(ptr); - if (it1 != buffer_map.end()){ - return it1; - } - else{ - for(std::map>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){ - auto size = ((cl::sycl::buffer*)it->second.get())->get_size(); - if((static_cast(it->first) < ptr) && (ptr < (static_cast(it->first)) + size)) return it; - } - } - return buffer_map.end(); - } /// the memcpy function template EIGEN_STRONG_INLINE void memcpy(void *dst, const T *src, size_t n) const { - auto it1 = find_nearest(src); - auto it2 = find_nearest(static_cast(dst)); - if ((it1 != buffer_map.end()) && (it2!=buffer_map.end())) { - auto offset= (src - (static_cast(it1->first))); - auto i= ((static_cast(dst)) - const_cast((static_cast(it2->first)))); - size_t rng, GRange, tileSize; - parallel_for_setup(n/sizeof(T), tileSize, rng, GRange); - m_queue.submit([&](cl::sycl::handler &cgh) { - auto src_acc =((cl::sycl::buffer*)it1->second.get())-> template get_access(cgh); - auto dst_acc =((cl::sycl::buffer*)it2->second.get())-> template get_access(cgh); - typedef decltype(src_acc) DevToDev; - cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) { - auto globalid=itemID.get_global_linear_id(); - if (globalid< rng) { - dst_acc[globalid+i ]=src_acc[globalid+offset]; - } - }); - }); - m_queue.throw_asynchronous(); - } else{ - eigen_assert("no source or destination device memory found."); - } - //::memcpy(dst, src, n); + auto it1 = m_queue_stream->find_buffer((void*)src); + auto it2 = m_queue_stream->find_buffer(dst); + auto offset= (static_cast(static_cast(src))) - it1->first; + auto i= (static_cast(dst)) - it2->first; + offset/=sizeof(T); + i/=sizeof(T); + size_t rng, GRange, tileSize; + parallel_for_setup(n/sizeof(T), tileSize, rng, GRange); + sycl_queue().submit([&](cl::sycl::handler &cgh) { + auto src_acc =it1->second.template get_access(cgh); + auto dst_acc =it2->second.template get_access(cgh); + cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor(src_acc, dst_acc, rng, 0, offset)); + }); + sycl_queue().throw_asynchronous(); } /// The memcpyHostToDevice is used to copy the device only pointer to a host pointer. Using the device @@ -170,8 +209,7 @@ struct SyclDevice { /// buffer to host. Then we use the memcpy to copy the data to the host accessor. 
The first time that /// this buffer is accessed, the data will be copied to the device. template EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const { - - auto host_acc= get_sycl_buffer(n, dst)-> template get_access(); + auto host_acc= get_sycl_buffer(n, dst). template get_access(); ::memcpy(host_acc.get_pointer(), src, n); } /// The memcpyDeviceToHost is used to copy the data from host to device. Here, in order to avoid double copying the data. We create a sycl @@ -180,57 +218,53 @@ struct SyclDevice { /// buffer with map_allocator on the gpu in parallel. At the end of the function call the destination buffer would be destroyed and the data /// would be available on the dst pointer using fast copy technique (map_allocator). In this case we can make sure that we copy the data back /// to the cpu only once per function call. - template EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const { - auto it = find_nearest(src); - auto offset = src- (static_cast(it->first)); - if (it != buffer_map.end()) { + template EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const T *src, size_t n) const { + auto it = m_queue_stream->find_buffer(src); + auto offset =static_cast(static_cast(src))- it->first; + offset/=sizeof(T); size_t rng, GRange, tileSize; parallel_for_setup(n/sizeof(T), tileSize, rng, GRange); // Assuming that the dst is the start of the destination pointer - auto dest_buf = cl::sycl::buffer>(dst, cl::sycl::range<1>(rng)); - typedef decltype(dest_buf) SYCLDTOH; - m_queue.submit([&](cl::sycl::handler &cgh) { - auto src_acc= (static_cast*>(it->second.get()))-> template get_access(cgh); + auto dest_buf = cl::sycl::buffer >(static_cast(dst), cl::sycl::range<1>(rng*sizeof(T))); + sycl_queue().submit([&](cl::sycl::handler &cgh) { + auto src_acc= it->second.template get_access(cgh); auto dst_acc =dest_buf.template get_access(cgh); - cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) { + cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor(src_acc, dst_acc, rng, 0, offset)); + }); + sycl_queue().throw_asynchronous(); + } + /// returning the sycl queue + EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue_stream->m_queue;} + /// Here is the implementation of memset function on sycl. + template EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const { + size_t rng, GRange, tileSize; + parallel_for_setup(n/sizeof(T), tileSize, rng, GRange); + sycl_queue().submit([&](cl::sycl::handler &cgh) { + auto buf_acc =get_sycl_buffer(n, static_cast(static_cast(buff))). 
template get_access(cgh); + cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) { auto globalid=itemID.get_global_linear_id(); - if (globalid< dst_acc.get_size()) { - dst_acc[globalid] = src_acc[globalid + offset]; + if (globalid< buf_acc.get_size()) { + for(size_t i=0; i EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const { - size_t rng, GRange, tileSize; - parallel_for_setup(n/sizeof(T), tileSize, rng, GRange); - m_queue.submit([&](cl::sycl::handler &cgh) { - auto buf_acc =get_sycl_buffer(n, buff)-> template get_access(cgh); - cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) { - auto globalid=itemID.get_global_linear_id(); - auto buf_ptr= reinterpret_cast::pointer_t>((&(*buf_acc.get_pointer()))); - if (globalid< buf_acc.get_size()) { - for(size_t i=0; iok(); } - /// There is no need to synchronise the stream in sycl as it is automatically handled by sycl runtime scheduler. - EIGEN_STRONG_INLINE void synchronize() const {} }; + } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h index db23bd7b0..f293869ee 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h @@ -27,7 +27,7 @@ namespace internal { template struct syclGenericBufferReducer{ template -static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){ +static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){ do { auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable { cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)}, @@ -37,7 +37,7 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de auto aI = bufI.template get_access(h); auto aOut = - bufOut->template get_access(h); + bufOut.template get_access(h); cl::sycl::accessor scratch(cl::sycl::range<1>(local), h); @@ -61,7 +61,7 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de /* Apply the reduction operation between the current local * id and the one on the other half of the vector. */ if (globalid < length) { - int min = (length < local) ? length : local; + auto min = (length < local) ? length : local; for (size_t offset = min / 2; offset > 0; offset /= 2) { if (localid < offset) { scratch[localid] += scratch[localid + offset]; @@ -72,14 +72,15 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de if (localid == 0) { aI[id.get_group(0)] = scratch[localid]; if((length<=local) && globalid ==0){ - aOut[globalid]=scratch[localid]; + auto aOutPtr = ConvertToActualTypeSycl(CoeffReturnType, aOut); + aOutPtr[0]=scratch[0]; } } } }); }; - dev.m_queue.submit(f); - dev.m_queue.throw_asynchronous(); + dev.sycl_queue().submit(f); + dev.sycl_queue().throw_asynchronous(); /* At this point, you could queue::wait_and_throw() to ensure that * errors are caught quickly. However, this would likely impact @@ -116,7 +117,7 @@ struct FullReducer { if(rng ==0) { red_factor=1; }; - size_t tileSize =dev.m_queue.get_device(). template get_info()/2; + size_t tileSize =dev.sycl_queue().get_device(). 
template get_info()/2; size_t GRange=std::max((size_t )1, rng); // convert global range to power of 2 for redecution @@ -134,7 +135,9 @@ struct FullReducer { /// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one. if (GRange < outTileSize) outTileSize=GRange; // getting final out buffer at the moment the created buffer is true because there is no need for assign - auto out_buffer =dev.template get_sycl_buffer::type>(self.dimensions().TotalSize(), output); +// auto out_buffer =dev.template get_sycl_buffer::type>(self.dimensions().TotalSize(), output); + auto out_buffer =dev.get_sycl_buffer(self.dimensions().TotalSize(), output); + /// creating the shared memory for calculating reduction. /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can /// recursively apply reduction on it in order to reduce the whole. @@ -142,7 +145,7 @@ struct FullReducer { typedef typename Eigen::internal::remove_all::type Dims; Dims dims= self.xprDims(); Op functor = reducer; - dev.m_queue.submit([&](cl::sycl::handler &cgh) { + dev.sycl_queue().submit([&](cl::sycl::handler &cgh) { // create a tuple of accessors from Evaluator auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl()); auto tmp_global_accessor = temp_global_buffer. template get_access(cgh); @@ -161,16 +164,16 @@ struct FullReducer { auto globalid=itemID.get_global_linear_id(); if(globalid::reduce(device_self_evaluator, red_factor*globalid, red_factor, const_cast(functor)); + tmp_global_accessor.get_pointer()[globalid]=InnerMostDimReducer::reduce(device_self_evaluator, static_cast(red_factor*globalid), red_factor, const_cast(functor)); else tmp_global_accessor.get_pointer()[globalid]=static_cast(0); if(remaining!=0 && globalid==0 ) // this will add the rest of input buffer when the input size is not devidable to red_factor. - tmp_global_accessor.get_pointer()[globalid]+=InnerMostDimReducer::reduce(device_self_evaluator, red_factor*(rng), remaining, const_cast(functor)); + tmp_global_accessor.get_pointer()[0]+=InnerMostDimReducer::reduce(device_self_evaluator, static_cast(red_factor*(rng)), static_cast(remaining), const_cast(functor)); }); }); - dev.m_queue.throw_asynchronous(); + dev.sycl_queue().throw_asynchronous(); /// This is used to recursively reduce the tmp value to an element of 1; syclGenericBufferReducer::run(out_buffer, temp_global_buffer,dev, GRange, outTileSize); @@ -198,7 +201,7 @@ struct InnerReducer { Dims dims= self.xprDims(); Op functor = reducer; - dev.m_queue.submit([&](cl::sycl::handler &cgh) { + dev.sycl_queue().submit([&](cl::sycl::handler &cgh) { // create a tuple of accessors from Evaluator auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl()); auto output_accessor = dev.template get_sycl_accessor(num_coeffs_to_preserve,cgh, output); @@ -212,19 +215,20 @@ struct InnerReducer { const auto device_self_expr= TensorReductionOp(device_expr.expr, dims, functor); /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is /// the device_evaluator is detectable and recognisable on the device. 
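The full reduction is staged: the kernel above folds red_factor consecutive coefficients per work-item into a temporary global buffer (zero-filling out-of-range slots and adding the reduction of the non-divisible tail into element 0), and syclGenericBufferReducer then repeatedly reduces that buffer work-group by work-group until a single value remains. A serial model of that second stage, where each pass collapses every block of `local` partial results into one (the in-kernel tree reduction in local scratch memory produces the same result as the inner accumulation loop here); reduce_in_passes is an illustrative name:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Each pass reduces every block of `local` values to one partial sum; passes
// repeat until a single value is left, as in syclGenericBufferReducer::run.
float reduce_in_passes(std::vector<float> data, std::size_t local) {
  while (data.size() > 1) {
    std::vector<float> partial((data.size() + local - 1) / local, 0.0f);
    for (std::size_t g = 0; g < partial.size(); ++g) {
      std::size_t end = std::min((g + 1) * local, data.size());
      for (std::size_t i = g * local; i < end; ++i)
        partial[g] += data[i];
    }
    data.swap(partial); // next pass reduces the partial sums
  }
  return data[0];
}

int main() {
  std::vector<float> v(1000, 1.0f);
  std::printf("%g\n", reduce_in_passes(v, 64)); // prints 1000
}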
- typedef Eigen::TensorEvaluator DeiceSelf; + typedef Eigen::TensorEvaluator DeviceSelf; auto device_self_evaluator = Eigen::TensorEvaluator(device_self_expr, Eigen::DefaultDevice()); + auto output_accessor_ptr =ConvertToActualTypeSycl(typename DeviceSelf::CoeffReturnType, output_accessor); /// const cast added as a naive solution to solve the qualifier drop error auto globalid=itemID.get_global_linear_id(); if (globalid< range) { - typename DeiceSelf::CoeffReturnType accum = functor.initialize(); - GenericDimReducer::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid),const_cast(functor), &accum); + typename DeviceSelf::CoeffReturnType accum = functor.initialize(); + GenericDimReducer::reduce(device_self_evaluator, device_self_evaluator.firstInput(static_cast(globalid)),const_cast(functor), &accum); functor.finalize(accum); - output_accessor.get_pointer()[globalid]= accum; + output_accessor_ptr[globalid]= accum; } }); }); - dev.m_queue.throw_asynchronous(); + dev.sycl_queue().throw_asynchronous(); return false; } }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h index c3152513c..d7551d94f 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h @@ -30,7 +30,8 @@ namespace internal { template struct EvalToLHSConstructor { PtrType expr; - EvalToLHSConstructor(const utility::tuple::Tuple &t): expr((&(*(utility::tuple::get(t).get_pointer())))) {} + EvalToLHSConstructor(const utility::tuple::Tuple &t) : expr(ConvertToActualTypeSycl(typename Eigen::internal::remove_all::type, utility::tuple::get(t))) {} + //EvalToLHSConstructor(const utility::tuple::Tuple &t): expr((&(*(utility::tuple::get(t).get_pointer())))) {} }; /// \struct ExprConstructor is used to reconstruct the expression on the device and @@ -53,9 +54,11 @@ CVQual PlaceHolder, N>, Params...>{ Type expr;\ template \ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple &t)\ - : expr(Type((&(*(utility::tuple::get(t).get_pointer()))), fd.dimensions())) {}\ + : expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get(t)), fd.dimensions())){}\ }; +//: expr(Type((&(*(utility::tuple::get(t).get_pointer()))), fd.dimensions())) {} + TENSORMAP(const) TENSORMAP() @@ -163,7 +166,7 @@ struct ExprConstructor, CVQual ASSIGN() #undef ASSIGN /// specialisation of the \ref ExprConstructor struct when the node type is -/// TensorEvalToOp +/// TensorEvalToOp /// 0 here is the output number in the buffer #define EVALTO(CVQual)\ template \ struct ExprConstructor, CVQual TensorEvalToOp, Params...> {\ @@ -189,12 +192,13 @@ template \ struct ExprConstructor,\ CVQual PlaceHolder, N>, Params...> {\ typedef CVQual TensorMap::Scalar,\ - TensorForcedEvalOp::NumDimensions, 0, typename TensorForcedEvalOp::Index>, 0, MakeGlobalPointer> Type;\ + TensorForcedEvalOp::NumDimensions, Eigen::internal::traits>::Layout, typename TensorForcedEvalOp::Index>, Eigen::internal::traits>::Layout, MakeGlobalPointer> Type;\ Type expr;\ template \ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple &t)\ - : expr(Type((&(*(utility::tuple::get(t).get_pointer()))), fd.dimensions())) {}\ + : expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get(t)), fd.dimensions())) {}\ }; +//: expr(Type((&(*(utility::tuple::get(t).get_pointer()))), fd.dimensions())) {} FORCEDEVAL(const) FORCEDEVAL() @@ -214,12 +218,13 @@ struct ExprConstructor, 
N>, Params...> {\ static const size_t NumIndices= ValueCondition< TensorReductionOp::NumDimensions==0, 1, TensorReductionOp::NumDimensions >::Res;\ typedef CVQual TensorMap::Scalar,\ - NumIndices, 0, typename TensorReductionOp::Index>, 0, MakeGlobalPointer> Type;\ + NumIndices, Eigen::internal::traits>::Layout, typename TensorReductionOp::Index>, Eigen::internal::traits>::Layout, MakeGlobalPointer> Type;\ Type expr;\ template \ ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple &t)\ - : expr(Type((&(*(utility::tuple::get(t).get_pointer()))), fd.dimensions())) {}\ + :expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get(t)), fd.dimensions())) {}\ }; +//: expr(Type((&(*(utility::tuple::get(t).get_pointer()))), fd.dimensions())) {} SYCLREDUCTIONEXPR(const) SYCLREDUCTIONEXPR() diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h index 461aef128..94a1452ec 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h @@ -57,9 +57,8 @@ struct AccessorConstructor{ return utility::tuple::append(ExtractAccessor::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor::getTuple(cgh, eval2), ExtractAccessor::getTuple(cgh, eval3))); } template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval) - -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){ - return utility::tuple::make_tuple(eval.device().template get_sycl_accessor::type>(eval.dimensions().TotalSize(), cgh,eval.data())); + -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor(eval.dimensions().TotalSize(), cgh,eval.data()))){ + return utility::tuple::make_tuple(eval.device().template get_sycl_accessor(eval.dimensions().TotalSize(), cgh,eval.data())); } }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h index ef56391ff..382f0cb50 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h @@ -148,7 +148,7 @@ template template struct DimConstr { template - static inline Dim getDim(InDim dims ) {return Dim(dims.TotalSize());} + static inline Dim getDim(InDim dims ) {return Dim(static_cast(dims.TotalSize()));} }; template class MakePointer_, typename Device> diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h index 724eebd83..5742592de 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclRun.h @@ -37,11 +37,11 @@ void run(Expr &expr, Dev &dev) { typedef typename internal::createPlaceHolderExpression::Type PlaceHolderExpr; auto functors = internal::extractFunctors(evaluator); - dev.m_queue.submit([&](cl::sycl::handler &cgh) { + dev.sycl_queue().submit([&](cl::sycl::handler &cgh) { // create a tuple of accessors from Evaluator auto tuple_of_accessors = internal::createTupleOfAccessors(cgh, evaluator); size_t range, GRange, tileSize; - dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0], tileSize, range, GRange); + dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0]/sizeof(typename Expr::Scalar), tileSize, range, GRange); 
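The /sizeof(typename Expr::Scalar) scaling just above, and the ConvertToActualTypeSycl casts threaded through the expression constructors, both follow from the same change: every allocation is now a type-erased byte buffer, so accessor ranges come back in bytes and must be rescaled to coefficient counts, and the raw accessor pointer has to be reinterpreted as the scalar type before it is indexed inside a kernel. The idea, reduced to ordinary pointers with no SYCL types involved:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

int main() {
  // The allocator only knows a size in bytes, like the byte-buffer map.
  std::size_t range_in_bytes = 16 * sizeof(float);
  void* raw = std::malloc(range_in_bytes);

  // ConvertToActualTypeSycl, conceptually: view the bytes as Scalar*, and
  // convert byte ranges back to element counts before indexing.
  float* typed = static_cast<float*>(raw);
  std::size_t n = range_in_bytes / sizeof(float);

  for (std::size_t i = 0; i < n; ++i) typed[i] = static_cast<float>(i);
  std::printf("%zu coefficients, last = %g\n", n, typed[n - 1]);
  std::free(raw);
}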
// run the kernel cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) { @@ -49,11 +49,11 @@ void run(Expr &expr, Dev &dev) { auto device_expr =internal::createDeviceExpression(functors, tuple_of_accessors); auto device_evaluator = Eigen::TensorEvaluator(device_expr.expr, Eigen::DefaultDevice()); if (itemID.get_global_linear_id() < range) { - device_evaluator.evalScalar(static_cast(itemID.get_global_linear_id())); + device_evaluator.evalScalar(static_cast(itemID.get_global_linear_id())); } }); }); - dev.m_queue.throw_asynchronous(); + dev.sycl_queue().throw_asynchronous(); } evaluator.cleanup(); diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index f988cb465..471826746 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -147,6 +147,7 @@ if(EIGEN_TEST_CXX11) ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11") ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11") ei_add_test_sycl(cxx11_tensor_morphing_sycl "-std=c++11") + ei_add_test_sycl(cxx11_tensor_builtins_sycl "-std=c++11") endif(EIGEN_TEST_SYCL) # It should be safe to always run these tests as there is some fallback code for # older compiler that don't support cxx11. diff --git a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp index 02aa4c636..3dbb8d553 100644 --- a/unsupported/test/cxx11_tensor_broadcast_sycl.cpp +++ b/unsupported/test/cxx11_tensor_broadcast_sycl.cpp @@ -14,7 +14,7 @@ #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_broadcast_sycl -#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t #define EIGEN_USE_SYCL #include "main.h" @@ -25,38 +25,47 @@ using Eigen::SyclDevice; using Eigen::Tensor; using Eigen::TensorMap; +template static void test_broadcast_sycl_fixed(const Eigen::SyclDevice &sycl_device){ // BROADCAST test: - array in_range = {{2, 3, 5, 7}}; - array broadcasts = {{2, 3, 1, 4}}; - array out_range; // = in_range * broadcasts + IndexType inDim1=2; + IndexType inDim2=3; + IndexType inDim3=5; + IndexType inDim4=7; + IndexType bDim1=2; + IndexType bDim2=3; + IndexType bDim3=1; + IndexType bDim4=4; + array in_range = {{inDim1, inDim2, inDim3, inDim4}}; + array broadcasts = {{bDim1, bDim2, bDim3, bDim4}}; + array out_range; // = in_range * broadcasts for (size_t i = 0; i < out_range.size(); ++i) out_range[i] = in_range[i] * broadcasts[i]; - Tensor input(in_range); - Tensor out(out_range); + Tensor input(in_range); + Tensor out(out_range); for (size_t i = 0; i < in_range.size(); ++i) VERIFY_IS_EQUAL(out.dimension(i), out_range[i]); - for (int i = 0; i < input.size(); ++i) - input(i) = static_cast(i); + for (IndexType i = 0; i < input.size(); ++i) + input(i) = static_cast(i); - float * gpu_in_data = static_cast(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float))); - float * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); + DataType * gpu_in_data = static_cast(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); - TensorMap>> gpu_in(gpu_in_data, in_range); - TensorMap> gpu_out(gpu_out_data, out_range); - sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float)); + TensorMap, DataLayout, IndexType>> 
gpu_in(gpu_in_data, in_range); + TensorMap> gpu_out(gpu_out_data, out_range); + sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType)); gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); - for (int i = 0; i < 4; ++i) { - for (int j = 0; j < 9; ++j) { - for (int k = 0; k < 5; ++k) { - for (int l = 0; l < 28; ++l) { + for (IndexType i = 0; i < inDim1*bDim1; ++i) { + for (IndexType j = 0; j < inDim2*bDim2; ++j) { + for (IndexType k = 0; k < inDim3*bDim3; ++k) { + for (IndexType l = 0; l < inDim4*bDim4; ++l) { VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l)); } } @@ -67,40 +76,48 @@ static void test_broadcast_sycl_fixed(const Eigen::SyclDevice &sycl_device){ sycl_device.deallocate(gpu_out_data); } - +template static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){ // BROADCAST test: - array in_range = {{2, 3, 5, 7}}; - array broadcasts = {{2, 3, 1, 4}}; - array out_range; // = in_range * broadcasts + IndexType inDim1=2; + IndexType inDim2=3; + IndexType inDim3=5; + IndexType inDim4=7; + IndexType bDim1=2; + IndexType bDim2=3; + IndexType bDim3=1; + IndexType bDim4=4; + array in_range = {{inDim1, inDim2, inDim3, inDim4}}; + array broadcasts = {{bDim1, bDim2, bDim3, bDim4}}; + array out_range; // = in_range * broadcasts for (size_t i = 0; i < out_range.size(); ++i) out_range[i] = in_range[i] * broadcasts[i]; - Tensor input(in_range); - Tensor out(out_range); + Tensor input(in_range); + Tensor out(out_range); for (size_t i = 0; i < in_range.size(); ++i) VERIFY_IS_EQUAL(out.dimension(i), out_range[i]); - for (int i = 0; i < input.size(); ++i) - input(i) = static_cast(i); + for (IndexType i = 0; i < input.size(); ++i) + input(i) = static_cast(i); - float * gpu_in_data = static_cast(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float))); - float * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); + DataType * gpu_in_data = static_cast(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); - TensorMap> gpu_in(gpu_in_data, in_range); - TensorMap> gpu_out(gpu_out_data, out_range); - sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float)); + TensorMap> gpu_in(gpu_in_data, in_range); + TensorMap> gpu_out(gpu_out_data, out_range); + sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType)); gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); - for (int i = 0; i < 4; ++i) { - for (int j = 0; j < 9; ++j) { - for (int k = 0; k < 5; ++k) { - for (int l = 0; l < 28; ++l) { - VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l)); + for (IndexType i = 0; i < inDim1*bDim1; ++i) { + for (IndexType j = 0; j < inDim2*bDim2; ++j) { + for (IndexType k = 0; k < inDim3*bDim3; ++k) { + for (IndexType l = 0; l < inDim4*bDim4; ++l) { + VERIFY_IS_APPROX(input(i%inDim1,j%inDim2,k%inDim3,l%inDim4), out(i,j,k,l)); } } } @@ -110,10 +127,24 @@ 
static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){ sycl_device.deallocate(gpu_out_data); } +template void sycl_broadcast_test_per_device(const cl::sycl::device& d){ + std::cout << "Running on " << d.template get_info() << std::endl; + QueueInterface queueInterface(d); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + + test_broadcast_sycl_fixed(sycl_device); + test_broadcast_sycl(sycl_device); + test_broadcast_sycl_fixed(sycl_device); + test_broadcast_sycl(sycl_device); + + test_broadcast_sycl_fixed(sycl_device); + test_broadcast_sycl(sycl_device); + test_broadcast_sycl_fixed(sycl_device); + test_broadcast_sycl(sycl_device); +} void test_cxx11_tensor_broadcast_sycl() { - cl::sycl::gpu_selector s; - Eigen::SyclDevice sycl_device(s); - CALL_SUBTEST(test_broadcast_sycl_fixed(sycl_device)); - CALL_SUBTEST(test_broadcast_sycl(sycl_device)); + for (const auto& device : cl::sycl::device::get_devices()) { + CALL_SUBTEST(sycl_broadcast_test_per_device(device)); + } } diff --git a/unsupported/test/cxx11_tensor_builtins_sycl.cpp b/unsupported/test/cxx11_tensor_builtins_sycl.cpp new file mode 100644 index 000000000..26cea18a6 --- /dev/null +++ b/unsupported/test/cxx11_tensor_builtins_sycl.cpp @@ -0,0 +1,148 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX +#define EIGEN_TEST_FUNC cxx11_tensor_builtins_sycl +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +namespace std { +template T rsqrt(T x) { return 1 / std::sqrt(x); } +template T square(T x) { return x * x; } +template T cube(T x) { return x * x * x; } +template T inverse(T x) { return 1 / x; } +} + +#define TEST_UNARY_BUILTINS_FOR_SCALAR(FUNC, SCALAR, OPERATOR) \ + { \ + /* out OPERATOR in.FUNC() */ \ + Tensor in(tensorRange); \ + Tensor out(tensorRange); \ + in = in.random() + static_cast(0.01); \ + out = out.random() + static_cast(0.01); \ + Tensor reference(out); \ + SCALAR *gpu_data = static_cast( \ + sycl_device.allocate(in.size() * sizeof(SCALAR))); \ + SCALAR *gpu_data_out = static_cast( \ + sycl_device.allocate(out.size() * sizeof(SCALAR))); \ + TensorMap> gpu(gpu_data, tensorRange); \ + TensorMap> gpu_out(gpu_data_out, tensorRange); \ + sycl_device.memcpyHostToDevice(gpu_data, in.data(), \ + (in.size()) * sizeof(SCALAR)); \ + sycl_device.memcpyHostToDevice(gpu_data_out, out.data(), \ + (out.size()) * sizeof(SCALAR)); \ + gpu_out.device(sycl_device) OPERATOR gpu.FUNC(); \ + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, \ + (out.size()) * sizeof(SCALAR)); \ + for (int i = 0; i < out.size(); ++i) { \ + SCALAR ver = reference(i); \ + ver OPERATOR std::FUNC(in(i)); \ + VERIFY_IS_APPROX(out(i), ver); \ + } \ + sycl_device.deallocate(gpu_data); \ + sycl_device.deallocate(gpu_data_out); \ + } \ + { \ + /* out OPERATOR out.FUNC() */ \ + Tensor out(tensorRange); \ + out = out.random() + static_cast(0.01); \ + Tensor reference(out); \ + SCALAR *gpu_data_out = static_cast( \ + 
sycl_device.allocate(out.size() * sizeof(SCALAR))); \ + TensorMap> gpu_out(gpu_data_out, tensorRange); \ + sycl_device.memcpyHostToDevice(gpu_data_out, out.data(), \ + (out.size()) * sizeof(SCALAR)); \ + gpu_out.device(sycl_device) OPERATOR gpu_out.FUNC(); \ + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, \ + (out.size()) * sizeof(SCALAR)); \ + for (int i = 0; i < out.size(); ++i) { \ + SCALAR ver = reference(i); \ + ver OPERATOR std::FUNC(reference(i)); \ + VERIFY_IS_APPROX(out(i), ver); \ + } \ + sycl_device.deallocate(gpu_data_out); \ + } + +#define TEST_UNARY_BUILTINS_OPERATOR(SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(abs, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(sqrt, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(rsqrt, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(square, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(cube, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(inverse, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(tanh, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(exp, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(log, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(abs, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(ceil, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(floor, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(round, SCALAR, OPERATOR) \ + TEST_UNARY_BUILTINS_FOR_SCALAR(log1p, SCALAR, OPERATOR) + +#define TEST_IS_THAT_RETURNS_BOOL(SCALAR, FUNC) \ + { \ + /* out = in.FUNC() */ \ + Tensor in(tensorRange); \ + Tensor out(tensorRange); \ + in = in.random() + static_cast(0.01); \ + SCALAR *gpu_data = static_cast( \ + sycl_device.allocate(in.size() * sizeof(SCALAR))); \ + bool *gpu_data_out = \ + static_cast(sycl_device.allocate(out.size() * sizeof(bool))); \ + TensorMap> gpu(gpu_data, tensorRange); \ + TensorMap> gpu_out(gpu_data_out, tensorRange); \ + sycl_device.memcpyHostToDevice(gpu_data, in.data(), \ + (in.size()) * sizeof(SCALAR)); \ + gpu_out.device(sycl_device) = gpu.FUNC(); \ + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, \ + (out.size()) * sizeof(bool)); \ + for (int i = 0; i < out.size(); ++i) { \ + VERIFY_IS_EQUAL(out(i), std::FUNC(in(i))); \ + } \ + sycl_device.deallocate(gpu_data); \ + sycl_device.deallocate(gpu_data_out); \ + } + +#define TEST_UNARY_BUILTINS(SCALAR) \ + TEST_UNARY_BUILTINS_OPERATOR(SCALAR, += ) \ + TEST_UNARY_BUILTINS_OPERATOR(SCALAR, = ) \ + TEST_IS_THAT_RETURNS_BOOL(SCALAR, isnan) \ + TEST_IS_THAT_RETURNS_BOOL(SCALAR, isfinite) \ + TEST_IS_THAT_RETURNS_BOOL(SCALAR, isinf) + +static void test_builtin_unary_sycl(const Eigen::SyclDevice &sycl_device) { + int sizeDim1 = 10; + int sizeDim2 = 10; + int sizeDim3 = 10; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; + + TEST_UNARY_BUILTINS(float) + /// your GPU must support double. Otherwise, disable the double test. 
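Each TEST_UNARY_BUILTINS_FOR_SCALAR(FUNC, SCALAR, OPERATOR) expansion above fills tensors with random data shifted away from zero, applies the coefficient-wise builtin on the device, copies the result back, and compares element by element against the host's std::FUNC (which is why rsqrt, square, cube and inverse are injected into namespace std at the top of the file). Stripped of the SYCL plumbing, the property each expansion checks is simply the following host-only sketch, shown for the sqrt/float/+= instantiation:

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

int main() {
  std::vector<float> in{0.5f, 1.0f, 2.0f, 4.0f};
  std::vector<float> out(in.size(), 3.0f);
  const std::vector<float> reference = out;

  // Device side of TEST_UNARY_BUILTINS_FOR_SCALAR(sqrt, float, +=),
  // modelled on the host: out OPERATOR in.FUNC().
  for (std::size_t i = 0; i < in.size(); ++i) out[i] += std::sqrt(in[i]);

  // Host verification loop from the macro.
  for (std::size_t i = 0; i < in.size(); ++i) {
    float ver = reference[i];
    ver += std::sqrt(in[i]);
    assert(std::abs(out[i] - ver) < 1e-6f);
  }
}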
+ TEST_UNARY_BUILTINS(double) +} + +void test_cxx11_tensor_builtins_sycl() { + cl::sycl::gpu_selector s; + QueueInterface queueInterface(s); + Eigen::SyclDevice sycl_device(&queueInterface); + CALL_SUBTEST(test_builtin_unary_sycl(sycl_device)); +} diff --git a/unsupported/test/cxx11_tensor_device_sycl.cpp b/unsupported/test/cxx11_tensor_device_sycl.cpp index 584fa8026..9e13d2f1b 100644 --- a/unsupported/test/cxx11_tensor_device_sycl.cpp +++ b/unsupported/test/cxx11_tensor_device_sycl.cpp @@ -19,26 +19,59 @@ #include "main.h" #include -#include +#include +#include -void test_device_sycl(const Eigen::SyclDevice &sycl_device) { - std::cout <<"Helo from ComputeCpp: the requested device exists and the device name is : " - << sycl_device.m_queue.get_device(). template get_info() < +void test_device_memory(const Eigen::SyclDevice &sycl_device) { + std::cout << "Running on : " + << sycl_device.sycl_queue().get_device(). template get_info() + < tensorRange = {{sizeDim1}}; - Tensor in(tensorRange); - Tensor in1(tensorRange); - memset(in1.data(), 1,in1.size()*sizeof(int)); - int * gpu_in_data = static_cast(sycl_device.allocate(in.size()*sizeof(int))); - sycl_device.memset(gpu_in_data, 1,in.size()*sizeof(int) ); - sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(int) ); - for (int i=0; i in(tensorRange); + Tensor in1(tensorRange); + memset(in1.data(), 1, in1.size() * sizeof(DataType)); + DataType* gpu_in_data = static_cast(sycl_device.allocate(in.size()*sizeof(DataType))); + sycl_device.memset(gpu_in_data, 1, in.size()*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(DataType)); + for (int i=0; i +void test_device_exceptions(const Eigen::SyclDevice &sycl_device) { + VERIFY(sycl_device.ok()); + int sizeDim1 = 100; + array tensorDims = {{sizeDim1}}; + DataType* gpu_data = static_cast(sycl_device.allocate(sizeDim1*sizeof(DataType))); + sycl_device.memset(gpu_data, 1, sizeDim1*sizeof(DataType)); + + TensorMap> in(gpu_data, tensorDims); + TensorMap> out(gpu_data, tensorDims); + out.device(sycl_device) = in / in.constant(0); + + sycl_device.synchronize(); + VERIFY(!sycl_device.ok()); + sycl_device.deallocate(gpu_data); +} + +template void sycl_device_test_per_device(const cl::sycl::device& d){ + std::cout << "Running on " << d.template get_info() << std::endl; + QueueInterface queueInterface(d); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_device_memory(sycl_device); + test_device_memory(sycl_device); + /// this test throw an exception. enable it if you want to see the exception + //test_device_exceptions(sycl_device); + /// this test throw an exception. 
enable it if you want to see the exception + //test_device_exceptions(sycl_device); +} + +void test_cxx11_tensor_device_sycl() { + for (const auto& device : cl::sycl::device::get_devices()) { + CALL_SUBTEST(sycl_device_test_per_device(device)); + } } diff --git a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp index 5690da723..70b182558 100644 --- a/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp +++ b/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp @@ -21,33 +21,33 @@ #include using Eigen::Tensor; - +template void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) { int sizeDim1 = 100; - int sizeDim2 = 200; - int sizeDim3 = 200; + int sizeDim2 = 20; + int sizeDim3 = 20; Eigen::array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; - Eigen::Tensor in1(tensorRange); - Eigen::Tensor in2(tensorRange); - Eigen::Tensor out(tensorRange); + Eigen::Tensor in1(tensorRange); + Eigen::Tensor in2(tensorRange); + Eigen::Tensor out(tensorRange); - float * gpu_in1_data = static_cast(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float))); - float * gpu_in2_data = static_cast(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float))); - float * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float))); + DataType * gpu_in1_data = static_cast(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_in2_data = static_cast(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); in1 = in1.random() + in1.constant(10.0f); in2 = in2.random() + in2.constant(10.0f); // creating TensorMap from tensor - Eigen::TensorMap> gpu_in1(gpu_in1_data, tensorRange); - Eigen::TensorMap> gpu_in2(gpu_in2_data, tensorRange); - Eigen::TensorMap> gpu_out(gpu_out_data, tensorRange); - sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float)); - sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float)); + Eigen::TensorMap> gpu_in1(gpu_in1_data, tensorRange); + Eigen::TensorMap> gpu_in2(gpu_in2_data, tensorRange); + Eigen::TensorMap> gpu_out(gpu_out_data, tensorRange); + sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(DataType)); /// c=(a+b)*b gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2; - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -63,8 +63,19 @@ void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) { } -void test_cxx11_tensor_forced_eval_sycl() { - cl::sycl::gpu_selector s; - Eigen::SyclDevice sycl_device(s); - CALL_SUBTEST(test_forced_eval_sycl(sycl_device)); +template void tensorForced_evalperDevice(Dev_selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_forced_eval_sycl(sycl_device); + test_forced_eval_sycl(sycl_device); +} +void test_cxx11_tensor_forced_eval_sycl() { + + printf("Test on GPU: OpenCL\n"); + 
CALL_SUBTEST(tensorForced_evalperDevice((cl::sycl::gpu_selector()))); + printf("repeating the test on CPU: OpenCL\n"); + CALL_SUBTEST(tensorForced_evalperDevice((cl::sycl::cpu_selector()))); + printf("repeating the test on CPU: HOST\n"); + CALL_SUBTEST(tensorForced_evalperDevice((cl::sycl::host_selector()))); + printf("Test Passed******************\n" ); } diff --git a/unsupported/test/cxx11_tensor_morphing_sycl.cpp b/unsupported/test/cxx11_tensor_morphing_sycl.cpp index 8a03b826e..a16e1caf5 100644 --- a/unsupported/test/cxx11_tensor_morphing_sycl.cpp +++ b/unsupported/test/cxx11_tensor_morphing_sycl.cpp @@ -28,7 +28,7 @@ using Eigen::SyclDevice; using Eigen::Tensor; using Eigen::TensorMap; - +template static void test_simple_slice(const Eigen::SyclDevice &sycl_device) { int sizeDim1 = 2; @@ -37,31 +37,31 @@ static void test_simple_slice(const Eigen::SyclDevice &sycl_device) int sizeDim4 = 7; int sizeDim5 = 11; array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; - Tensor tensor(tensorRange); + Tensor tensor(tensorRange); tensor.setRandom(); array slice1_range ={{1, 1, 1, 1, 1}}; - Tensor slice1(slice1_range); + Tensor slice1(slice1_range); - float* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(float))); - float* gpu_data2 = static_cast(sycl_device.allocate(slice1.size()*sizeof(float))); - TensorMap> gpu1(gpu_data1, tensorRange); - TensorMap> gpu2(gpu_data2, slice1_range); + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(slice1.size()*sizeof(DataType))); + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, slice1_range); Eigen::DSizes indices(1,2,3,4,5); Eigen::DSizes sizes(1,1,1,1,1); - sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(float)); + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType)); gpu2.device(sycl_device)=gpu1.slice(indices, sizes); - sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(DataType)); VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5)); array slice2_range ={{1,1,2,2,3}}; - Tensor slice2(slice2_range); - float* gpu_data3 = static_cast(sycl_device.allocate(slice2.size()*sizeof(float))); - TensorMap> gpu3(gpu_data3, slice2_range); + Tensor slice2(slice2_range); + DataType* gpu_data3 = static_cast(sycl_device.allocate(slice2.size()*sizeof(DataType))); + TensorMap> gpu3(gpu_data3, slice2_range); Eigen::DSizes indices2(1,1,3,4,5); Eigen::DSizes sizes2(1,1,2,2,3); gpu3.device(sycl_device)=gpu1.slice(indices2, sizes2); - sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(DataType)); for (int i = 0; i < 2; ++i) { for (int j = 0; j < 2; ++j) { for (int k = 0; k < 3; ++k) { @@ -74,11 +74,22 @@ static void test_simple_slice(const Eigen::SyclDevice &sycl_device) sycl_device.deallocate(gpu_data3); } +template void sycl_slicing_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_slice(sycl_device); + test_simple_slice(sycl_device); +} void test_cxx11_tensor_morphing_sycl() { /// Currently it only works on cpu. Adding GPU causes an LLVM ERROR in constructing the OpenCL Kernel at runtime. 
- cl::sycl::cpu_selector s; - Eigen::SyclDevice sycl_device(s); - CALL_SUBTEST(test_simple_slice(sycl_device)); +// printf("Test on GPU: OpenCL\n"); +// CALL_SUBTEST(sycl_slicing_test_per_device((cl::sycl::gpu_selector()))); + printf("repeating the test on CPU: OpenCL\n"); + CALL_SUBTEST(sycl_slicing_test_per_device((cl::sycl::cpu_selector()))); + printf("repeating the test on CPU: HOST\n"); + CALL_SUBTEST(sycl_slicing_test_per_device((cl::sycl::host_selector()))); + printf("Test Passed******************\n" ); + } diff --git a/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/unsupported/test/cxx11_tensor_reduction_sycl.cpp index a9ef82907..9e20f9cd0 100644 --- a/unsupported/test/cxx11_tensor_reduction_sycl.cpp +++ b/unsupported/test/cxx11_tensor_reduction_sycl.cpp @@ -21,37 +21,37 @@ #include - +template static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) { const int num_rows = 452; const int num_cols = 765; array tensorRange = {{num_rows, num_cols}}; - Tensor in(tensorRange); - Tensor full_redux; - Tensor full_redux_gpu; + Tensor in(tensorRange); + Tensor full_redux; + Tensor full_redux_gpu; in.setRandom(); full_redux = in.sum(); - float* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); - float* gpu_out_data =(float*)sycl_device.allocate(sizeof(float)); + DataType* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType))); + DataType* gpu_out_data =(DataType*)sycl_device.allocate(sizeof(DataType)); - TensorMap > in_gpu(gpu_in_data, tensorRange); - TensorMap > out_gpu(gpu_out_data); + TensorMap > in_gpu(gpu_in_data, tensorRange); + TensorMap > out_gpu(gpu_out_data); - sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType)); out_gpu.device(sycl_device) = in_gpu.sum(); - sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float)); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(DataType)); // Check that the CPU and GPU reductions return the same result. 
VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); sycl_device.deallocate(gpu_in_data); sycl_device.deallocate(gpu_out_data); } - +template static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) { int dim_x = 145; @@ -63,23 +63,23 @@ static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) red_axis[0] = 0; array reduced_tensorRange = {{dim_y, dim_z}}; - Tensor in(tensorRange); - Tensor redux(reduced_tensorRange); - Tensor redux_gpu(reduced_tensorRange); + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); in.setRandom(); redux= in.sum(red_axis); - float* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); - float* gpu_out_data = static_cast(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float))); + DataType* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(DataType))); - TensorMap > in_gpu(gpu_in_data, tensorRange); - TensorMap > out_gpu(gpu_out_data, reduced_tensorRange); + TensorMap > in_gpu(gpu_in_data, tensorRange); + TensorMap > out_gpu(gpu_out_data, reduced_tensorRange); - sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType)); out_gpu.device(sycl_device) = in_gpu.sum(red_axis); - sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float)); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(DataType)); // Check that the CPU and GPU reductions return the same result. 
for(int j=0; j static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) { int dim_x = 567; @@ -101,23 +102,23 @@ static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) red_axis[0] = 2; array reduced_tensorRange = {{dim_x, dim_y}}; - Tensor in(tensorRange); - Tensor redux(reduced_tensorRange); - Tensor redux_gpu(reduced_tensorRange); + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); in.setRandom(); redux= in.sum(red_axis); - float* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float))); - float* gpu_out_data = static_cast(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float))); + DataType* gpu_in_data = static_cast(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(DataType))); - TensorMap > in_gpu(gpu_in_data, tensorRange); - TensorMap > out_gpu(gpu_out_data, reduced_tensorRange); + TensorMap > in_gpu(gpu_in_data, tensorRange); + TensorMap > out_gpu(gpu_out_data, reduced_tensorRange); - sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float)); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType)); out_gpu.device(sycl_device) = in_gpu.sum(red_axis); - sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float)); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(DataType)); // Check that the CPU and GPU reductions return the same result. for(int j=0; j void sycl_reduction_test_per_device(const cl::sycl::device& d){ + std::cout << "Running on " << d.template get_info() << std::endl; + QueueInterface queueInterface(d); + auto sycl_device = Eigen::SyclDevice(&queueInterface); -void test_cxx11_tensor_reduction_sycl() { - cl::sycl::gpu_selector s; - Eigen::SyclDevice sycl_device(s); - CALL_SUBTEST((test_full_reductions_sycl(sycl_device))); - CALL_SUBTEST((test_first_dim_reductions_sycl(sycl_device))); - CALL_SUBTEST((test_last_dim_reductions_sycl(sycl_device))); - + test_full_reductions_sycl(sycl_device); + test_first_dim_reductions_sycl(sycl_device); + test_last_dim_reductions_sycl(sycl_device); + test_full_reductions_sycl(sycl_device); + test_first_dim_reductions_sycl(sycl_device); + test_last_dim_reductions_sycl(sycl_device); +} +void test_cxx11_tensor_reduction_sycl() { + for (const auto& device : cl::sycl::device::get_devices()) { + CALL_SUBTEST(sycl_reduction_test_per_device(device)); + } } diff --git a/unsupported/test/cxx11_tensor_sycl.cpp b/unsupported/test/cxx11_tensor_sycl.cpp index 05fbf9e62..bf115d652 100644 --- a/unsupported/test/cxx11_tensor_sycl.cpp +++ b/unsupported/test/cxx11_tensor_sycl.cpp @@ -26,35 +26,32 @@ using Eigen::array; using Eigen::SyclDevice; using Eigen::Tensor; using Eigen::TensorMap; - +template void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) { int sizeDim1 = 100; - int sizeDim2 = 100; - int sizeDim3 = 100; + int sizeDim2 = 10; + int sizeDim3 = 20; array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; - Tensor in1(tensorRange); - Tensor out1(tensorRange); - Tensor out2(tensorRange); - Tensor out3(tensorRange); + Tensor in1(tensorRange); + Tensor out1(tensorRange); + Tensor out2(tensorRange); + Tensor out3(tensorRange); in1 = in1.random(); - float* gpu_data1 = 
static_cast(sycl_device.allocate(in1.size()*sizeof(float))); - float* gpu_data2 = static_cast(sycl_device.allocate(out1.size()*sizeof(float))); - //float* gpu_data = static_cast(sycl_device.allocate(out2.size()*sizeof(float))); + DataType* gpu_data1 = static_cast(sycl_device.allocate(in1.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(out1.size()*sizeof(DataType))); - TensorMap> gpu1(gpu_data1, tensorRange); - TensorMap> gpu2(gpu_data2, tensorRange); - //TensorMap> gpu_out2(gpu_out2_data, tensorRange); - - sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(float)); - sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(float)); + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(DataType)); gpu1.device(sycl_device) = gpu1 * 3.14f; gpu2.device(sycl_device) = gpu2 * 2.7f; - sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(float)); - sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(float)); - sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(float)); - // sycl_device.Synchronize(); + sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(DataType)); for (int i = 0; i < in1.size(); ++i) { VERIFY_IS_APPROX(out1(i), in1(i) * 3.14f); @@ -65,34 +62,34 @@ void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) { sycl_device.deallocate(gpu_data1); sycl_device.deallocate(gpu_data2); } - +template void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { int sizeDim1 = 100; - int sizeDim2 = 100; - int sizeDim3 = 100; + int sizeDim2 = 10; + int sizeDim3 = 20; array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; - Tensor in1(tensorRange); - Tensor in2(tensorRange); - Tensor in3(tensorRange); - Tensor out(tensorRange); + Tensor in1(tensorRange); + Tensor in2(tensorRange); + Tensor in3(tensorRange); + Tensor out(tensorRange); in2 = in2.random(); in3 = in3.random(); - float * gpu_in1_data = static_cast(sycl_device.allocate(in1.size()*sizeof(float))); - float * gpu_in2_data = static_cast(sycl_device.allocate(in2.size()*sizeof(float))); - float * gpu_in3_data = static_cast(sycl_device.allocate(in3.size()*sizeof(float))); - float * gpu_out_data = static_cast(sycl_device.allocate(out.size()*sizeof(float))); + DataType * gpu_in1_data = static_cast(sycl_device.allocate(in1.size()*sizeof(DataType))); + DataType * gpu_in2_data = static_cast(sycl_device.allocate(in2.size()*sizeof(DataType))); + DataType * gpu_in3_data = static_cast(sycl_device.allocate(in3.size()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.size()*sizeof(DataType))); - TensorMap> gpu_in1(gpu_in1_data, tensorRange); - TensorMap> gpu_in2(gpu_in2_data, tensorRange); - TensorMap> gpu_in3(gpu_in3_data, tensorRange); - TensorMap> gpu_out(gpu_out_data, tensorRange); + TensorMap> gpu_in1(gpu_in1_data, tensorRange); + TensorMap> gpu_in2(gpu_in2_data, tensorRange); + TensorMap> gpu_in3(gpu_in3_data, tensorRange); + TensorMap> gpu_out(gpu_out_data, tensorRange); /// a=1.2f gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f); - 
sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -104,7 +101,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { /// a=b*1.2f gpu_out.device(sycl_device) = gpu_in1 * 1.2f; - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -116,9 +113,9 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { printf("a=b*1.2f Test Passed\n"); /// c=a*b - sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(float)); + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(DataType)); gpu_out.device(sycl_device) = gpu_in1 * gpu_in2; - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -132,7 +129,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { /// c=a+b gpu_out.device(sycl_device) = gpu_in1 + gpu_in2; - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -146,7 +143,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { /// c=a*a gpu_out.device(sycl_device) = gpu_in1 * gpu_in1; - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -160,7 +157,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { //a*3.14f + b*2.7f gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f); - sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -173,9 +170,9 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { printf("a*3.14f + b*2.7f Test Passed\n"); ///d= (a>0.5? 
b:c) - sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(float)); + sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(DataType)); gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3); - sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); for (int i = 0; i < sizeDim1; ++i) { for (int j = 0; j < sizeDim2; ++j) { for (int k = 0; k < sizeDim3; ++k) { @@ -191,10 +188,20 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) { sycl_device.deallocate(gpu_in3_data); sycl_device.deallocate(gpu_out_data); } - -void test_cxx11_tensor_sycl() { - cl::sycl::gpu_selector s; - Eigen::SyclDevice sycl_device(s); - CALL_SUBTEST(test_sycl_mem_transfers(sycl_device)); - CALL_SUBTEST(test_sycl_computations(sycl_device)); +template void sycl_computing_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_sycl_mem_transfers(sycl_device); + test_sycl_computations(sycl_device); + test_sycl_mem_transfers(sycl_device); + test_sycl_computations(sycl_device); +} +void test_cxx11_tensor_sycl() { + printf("Test on GPU: OpenCL\n"); + CALL_SUBTEST(sycl_computing_test_per_device((cl::sycl::gpu_selector()))); + printf("repeating the test on CPU: OpenCL\n"); + CALL_SUBTEST(sycl_computing_test_per_device((cl::sycl::cpu_selector()))); + printf("repeating the test on CPU: HOST\n"); + CALL_SUBTEST(sycl_computing_test_per_device((cl::sycl::host_selector()))); + printf("Test Passed******************\n" ); }
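
Note on the pattern used throughout these tests: each test body is now templated on the scalar type, the Eigen device is built from a QueueInterface instead of handing a device selector directly to Eigen::SyclDevice, and several files iterate over cl::sycl::device::get_devices() so the same subtests run on every available SYCL device. The sketch below only illustrates that harness; run_my_test is a hypothetical stand-in for test_device_memory, test_simple_slice, and the other subtests, and the header and namespace spelling (Eigen::QueueInterface, <unsupported/Eigen/CXX11/Tensor>) is assumed rather than copied from the diff.

  #define EIGEN_USE_SYCL
  #include <iostream>
  #include <CL/sycl.hpp>
  #include <unsupported/Eigen/CXX11/Tensor>

  // Hypothetical per-type test body: allocate through the Eigen device, copy the
  // inputs in, evaluate an expression with .device(sycl_device), copy the result
  // back, verify it, and deallocate -- the same shape as the tests in this patch.
  template <typename DataType>
  void run_my_test(const Eigen::SyclDevice& sycl_device) { /* ... */ }

  void run_on_all_sycl_devices() {
    for (const auto& device : cl::sycl::device::get_devices()) {
      std::cout << "Running on "
                << device.get_info<cl::sycl::info::device::name>() << std::endl;
      Eigen::QueueInterface queueInterface(device);   // owns the underlying SYCL queue
      Eigen::SyclDevice sycl_device(&queueInterface); // Eigen-facing device wrapper
      run_my_test<float>(sycl_device);                // each subtest is instantiated
      run_my_test<double>(sycl_device);               // once per scalar type
    }
  }

The QueueInterface also carries the asynchronous error state that test_device_exceptions relies on: that test submits an expression dividing by a zero constant, calls sycl_device.synchronize(), and then verifies that sycl_device.ok() has become false.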