Merged in benoitsteiner/opencl (pull request PR-253)

OpenCL improvements
Benoit Steiner 2016-11-19 04:44:43 +00:00
commit 1bdf1b9ce0
18 changed files with 780 additions and 355 deletions

View File

@ -43,10 +43,12 @@
#else
#define EIGEN_DEVICE_FUNC
#endif
#else
#define EIGEN_DEVICE_FUNC
#endif
#if defined(EIGEN_USE_SYCL)
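// SYCL device kernels cannot yet use Eigen's explicit vectorization (packet)
// paths, so vectorization is disabled whenever SYCL is in use.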
#define EIGEN_DONT_VECTORIZE
#endif
// When compiling CUDA device code with NVCC, pull in math functions from the
@ -283,6 +285,15 @@
#include <intrin.h>
#endif
#if defined(__SYCL_DEVICE_ONLY__)
#undef min
#undef max
#undef isnan
#undef isinf
#undef isfinite
#include <SYCL/sycl.hpp>
#endif
/** \brief Namespace containing all symbols from the %Eigen library. */
namespace Eigen {

View File

@ -413,7 +413,7 @@ inline NewType cast(const OldType& x)
static inline Scalar run(const Scalar& x)
{
EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)
using std::round;
EIGEN_USING_STD_MATH(round);
return round(x);
}
};
@ -640,7 +640,7 @@ template<typename Scalar>
struct random_default_impl<Scalar, false, true>
{
static inline Scalar run(const Scalar& x, const Scalar& y)
{
{
typedef typename conditional<NumTraits<Scalar>::IsSigned,std::ptrdiff_t,std::size_t>::type ScalarX;
if(y<x)
return x;
@ -954,6 +954,11 @@ inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x)
return EIGEN_MATHFUNC_IMPL(log1p, Scalar)::run(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float log1p(float x) { return cl::sycl::log1p(x); }
EIGEN_ALWAYS_INLINE double log1p(double x) { return cl::sycl::log1p(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
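// The same pattern recurs for the other math functions below: on the SYCL device
// compilation pass (__SYCL_DEVICE_ONLY__), these plain float/double overloads
// forward to the cl::sycl builtins, so an unqualified call such as
//   float y = Eigen::numext::log1p(x); // resolves to cl::sycl::log1p on device
// picks the device builtin rather than the std:: implementation.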
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float log1p(const float &x) { return ::log1pf(x); }
@ -969,10 +974,24 @@ inline typename internal::pow_impl<ScalarX,ScalarY>::result_type pow(const Scala
return internal::pow_impl<ScalarX,ScalarY>::run(x, y);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float pow(float x, float y) { return cl::sycl::pow(x, y); }
EIGEN_ALWAYS_INLINE double pow(double x, double y) { return cl::sycl::pow(x, y); }
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename T> EIGEN_DEVICE_FUNC bool (isnan) (const T &x) { return internal::isnan_impl(x); }
template<typename T> EIGEN_DEVICE_FUNC bool (isinf) (const T &x) { return internal::isinf_impl(x); }
template<typename T> EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); }
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float isnan(float x) { return cl::sycl::isnan(x); }
EIGEN_ALWAYS_INLINE double isnan(double x) { return cl::sycl::isnan(x); }
EIGEN_ALWAYS_INLINE float isinf(float x) { return cl::sycl::isinf(x); }
EIGEN_ALWAYS_INLINE double isinf(double x) { return cl::sycl::isinf(x); }
EIGEN_ALWAYS_INLINE float isfinite(float x) { return cl::sycl::isfinite(x); }
EIGEN_ALWAYS_INLINE double isfinite(double x) { return cl::sycl::isfinite(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename Scalar>
EIGEN_DEVICE_FUNC
inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x)
@ -980,6 +999,11 @@ inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x)
return EIGEN_MATHFUNC_IMPL(round, Scalar)::run(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float round(float x) { return cl::sycl::round(x); }
EIGEN_ALWAYS_INLINE double round(double x) { return cl::sycl::round(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename T>
EIGEN_DEVICE_FUNC
T (floor)(const T& x)
@ -988,6 +1012,11 @@ T (floor)(const T& x)
return floor(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float floor(float x) { return cl::sycl::floor(x); }
EIGEN_ALWAYS_INLINE double floor(double x) { return cl::sycl::floor(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float floor(const float &x) { return ::floorf(x); }
@ -1004,6 +1033,11 @@ T (ceil)(const T& x)
return ceil(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float ceil(float x) { return cl::sycl::ceil(x); }
EIGEN_ALWAYS_INLINE double ceil(double x) { return cl::sycl::ceil(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float ceil(const float &x) { return ::ceilf(x); }
@ -1044,6 +1078,11 @@ T sqrt(const T &x)
return sqrt(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float sqrt(float x) { return cl::sycl::sqrt(x); }
EIGEN_ALWAYS_INLINE double sqrt(double x) { return cl::sycl::sqrt(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
template<typename T>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T log(const T &x) {
@ -1051,6 +1090,12 @@ T log(const T &x) {
return log(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float log(float x) { return cl::sycl::log(x); }
EIGEN_ALWAYS_INLINE double log(double x) { return cl::sycl::log(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float log(const float &x) { return ::logf(x); }
@ -1066,6 +1111,11 @@ typename NumTraits<T>::Real abs(const T &x) {
return abs(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float abs(float x) { return cl::sycl::fabs(x); }
EIGEN_ALWAYS_INLINE double abs(double x) { return cl::sycl::fabs(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float abs(const float &x) { return ::fabsf(x); }
@ -1091,6 +1141,11 @@ T exp(const T &x) {
return exp(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float exp(float x) { return cl::sycl::exp(x); }
EIGEN_ALWAYS_INLINE double exp(double x) { return cl::sycl::exp(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float exp(const float &x) { return ::expf(x); }
@ -1106,6 +1161,11 @@ T cos(const T &x) {
return cos(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float cos(float x) { return cl::sycl::cos(x); }
EIGEN_ALWAYS_INLINE double cos(double x) { return cl::sycl::cos(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float cos(const float &x) { return ::cosf(x); }
@ -1121,6 +1181,11 @@ T sin(const T &x) {
return sin(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float sin(float x) { return cl::sycl::sin(x); }
EIGEN_ALWAYS_INLINE double sin(double x) { return cl::sycl::sin(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float sin(const float &x) { return ::sinf(x); }
@ -1136,6 +1201,11 @@ T tan(const T &x) {
return tan(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float tan(float x) { return cl::sycl::tan(x); }
EIGEN_ALWAYS_INLINE double tan(double x) { return cl::sycl::tan(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tan(const float &x) { return ::tanf(x); }
@ -1151,6 +1221,11 @@ T acos(const T &x) {
return acos(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float acos(float x) { return cl::sycl::acos(x); }
EIGEN_ALWAYS_INLINE double acos(double x) { return cl::sycl::acos(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float acos(const float &x) { return ::acosf(x); }
@ -1166,6 +1241,11 @@ T asin(const T &x) {
return asin(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float asin(float x) { return cl::sycl::asin(x); }
EIGEN_ALWAYS_INLINE double asin(double x) { return cl::sycl::asin(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float asin(const float &x) { return ::asinf(x); }
@ -1181,6 +1261,11 @@ T atan(const T &x) {
return atan(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float atan(float x) { return cl::sycl::atan(x); }
EIGEN_ALWAYS_INLINE double atan(double x) { return cl::sycl::atan(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float atan(const float &x) { return ::atanf(x); }
@ -1197,6 +1282,11 @@ T cosh(const T &x) {
return cosh(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float cosh(float x) { return cl::sycl::cosh(x); }
EIGEN_ALWAYS_INLINE double cosh(double x) { return cl::sycl::cosh(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float cosh(const float &x) { return ::coshf(x); }
@ -1212,6 +1302,11 @@ T sinh(const T &x) {
return sinh(x);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float sinh(float x) { return cl::sycl::sinh(x); }
EIGEN_ALWAYS_INLINE double sinh(double x) { return cl::sycl::sinh(x); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float sinh(const float &x) { return ::sinhf(x); }
@ -1227,7 +1322,10 @@ T tanh(const T &x) {
return tanh(x);
}
#if (!defined(__CUDACC__)) && EIGEN_FAST_MATH
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float tanh(float x) { return cl::sycl::tanh(x); }
EIGEN_ALWAYS_INLINE double tanh(double x) { return cl::sycl::tanh(x); }
#elif (!defined(__CUDACC__)) && EIGEN_FAST_MATH
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float tanh(float x) { return internal::generic_fast_tanh_float(x); }
#endif
@ -1247,6 +1345,11 @@ T fmod(const T& a, const T& b) {
return fmod(a, b);
}
#if defined(__SYCL_DEVICE_ONLY__)
EIGEN_ALWAYS_INLINE float fmod(float x, float y) { return cl::sycl::fmod(x, y); }
EIGEN_ALWAYS_INLINE double fmod(double x, double y) { return cl::sycl::fmod(x, y); }
#endif // defined(__SYCL_DEVICE_ONLY__)
#ifdef __CUDACC__
template <>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
@ -1389,13 +1492,13 @@ template<> struct random_impl<bool>
template<> struct scalar_fuzzy_impl<bool>
{
typedef bool RealScalar;
template<typename OtherScalar> EIGEN_DEVICE_FUNC
static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&)
{
return !x;
}
EIGEN_DEVICE_FUNC
static inline bool isApprox(bool x, bool y, bool)
{
@ -1407,10 +1510,10 @@ template<> struct scalar_fuzzy_impl<bool>
{
return (!x) || y;
}
};
} // end namespace internal
} // end namespace Eigen

View File

@ -678,7 +678,13 @@ struct functor_traits<scalar_ceil_op<Scalar> >
template<typename Scalar> struct scalar_isnan_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_isnan_op)
typedef bool result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isnan)(a); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {
#if defined(__SYCL_DEVICE_ONLY__)
return numext::isnan(a);
#else
return (numext::isnan)(a);
#endif
}
};
template<typename Scalar>
struct functor_traits<scalar_isnan_op<Scalar> >
@ -696,7 +702,13 @@ struct functor_traits<scalar_isnan_op<Scalar> >
template<typename Scalar> struct scalar_isinf_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_isinf_op)
typedef bool result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isinf)(a); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {
#if defined(__SYCL_DEVICE_ONLY__)
return numext::isinf(a);
#else
return (numext::isinf)(a);
#endif
}
};
template<typename Scalar>
struct functor_traits<scalar_isinf_op<Scalar> >
@ -714,7 +726,13 @@ struct functor_traits<scalar_isinf_op<Scalar> >
template<typename Scalar> struct scalar_isfinite_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_isfinite_op)
typedef bool result_type;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return (numext::isfinite)(a); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {
#if defined(__SYCL_DEVICE_ONLY__)
return numext::isfinite(a);
#else
return (numext::isfinite)(a);
#endif
}
};
template<typename Scalar>
struct functor_traits<scalar_isfinite_op<Scalar> >

View File

@ -13,7 +13,7 @@
#include "../../../Eigen/Core"
#ifdef EIGEN_USE_SYCL
#if defined(EIGEN_USE_SYCL)
#undef min
#undef max
#undef isnan

View File

@ -16,27 +16,33 @@
#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H
namespace Eigen {
struct SyclDevice {
/// class members:
/// sycl queue
mutable cl::sycl::queue m_queue;
#define ConvertToActualTypeSycl(T, buf_acc) reinterpret_cast<typename cl::sycl::global_ptr<T>::pointer_t>((&(*buf_acc.get_pointer())))
struct QueueInterface {
/// class members:
bool exception_caught_ = false;
/// std::map is the container used to make sure that we create only one buffer
/// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
/// If a non-read-only pointer needs to be accessed on the host, we must deallocate it manually.
mutable std::map<const void *, std::shared_ptr<void>> buffer_map;
mutable std::map<const uint8_t *, cl::sycl::buffer<uint8_t, 1>> buffer_map;
/// sycl queue
mutable cl::sycl::queue m_queue;
/// creating device by using selector
template<typename dev_Selector> explicit SyclDevice(dev_Selector s):
/// The SyclStreamDevice is not owned; it is the caller's responsibility to destroy it.
template<typename dev_Selector> explicit QueueInterface(dev_Selector s):
#ifdef EIGEN_EXCEPTIONS
m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
std::rethrow_exception(e);
} catch (cl::sycl::exception e) {
std::cout << e.what() << std::endl;
if (e) {
exception_caught_ = true;
std::rethrow_exception(e);
}
} catch (cl::sycl::exception e) {
std::cerr << e.what() << std::endl;
}
}
}))
#else
@ -44,63 +50,119 @@ struct SyclDevice {
#endif
{}
// destructor
~SyclDevice() { deallocate_all(); }
/// creating the queue from an existing cl::sycl::device
/// The SyclStreamDevice is not owned; it is the caller's responsibility to destroy it.
explicit QueueInterface(cl::sycl::device d):
#ifdef EIGEN_EXCEPTIONS
m_queue(cl::sycl::queue(d, [&](cl::sycl::exception_list l) {
for (const auto& e : l) {
try {
if (e) {
exception_caught_ = true;
std::rethrow_exception(e);
}
} catch (cl::sycl::exception e) {
std::cerr << e.what() << std::endl;
}
}
}))
#else
m_queue(cl::sycl::queue(d))
#endif
{}
/// Allocates a device pointer. This pointer is actually a host pointer used as the key to access the sycl device buffer.
/// The reason is that we cannot use a device buffer as the m_data pointer in Eigen leaf-node expressions, so we create a key
/// pointer to be used in Eigen expression construction. When we convert the Eigen construction into the sycl construction we
/// use this pointer as the key in our buffer_map, and we make sure that exactly one buffer is dedicated to this pointer.
/// The device pointer is deleted by calling the deallocate function.
EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const {
auto buf = cl::sycl::buffer<uint8_t,1>(cl::sycl::range<1>(num_bytes));
auto ptr =buf.get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>().get_pointer();
buf.set_final_data(nullptr);
buffer_map.insert(std::pair<const uint8_t *, cl::sycl::buffer<uint8_t, 1>>(ptr,buf));
return static_cast<void*>(ptr);
}
/// This is used to deallocate the device pointer. p is used as a key inside
/// the map to find the device buffer and delete it.
template <typename T> EIGEN_STRONG_INLINE void deallocate(T *p) const {
auto it = buffer_map.find(p);
EIGEN_STRONG_INLINE void deallocate(const void *p) const {
auto it = buffer_map.find(static_cast<const uint8_t*>(p));
if (it != buffer_map.end()) {
buffer_map.erase(it);
internal::aligned_free(p);
}
}
/// This is called by the SyclDevice destructor to release all allocated memory if the user didn't already do so.
/// We also free the host pointer that we have dedicated as a key to accessing the device buffer.
EIGEN_STRONG_INLINE void deallocate_all() const {
std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
while (it!=buffer_map.end()) {
auto p=it->first;
buffer_map.erase(it);
internal::aligned_free(const_cast<void*>(p));
it=buffer_map.begin();
EIGEN_STRONG_INLINE std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator find_buffer(const void* ptr) const {
auto it1 = buffer_map.find(static_cast<const uint8_t*>(ptr));
if (it1 != buffer_map.end()){
return it1;
}
buffer_map.clear();
else{
for(std::map<const uint8_t *, cl::sycl::buffer<uint8_t,1>>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
auto size = it->second.get_size();
if((it->first < (static_cast<const uint8_t*>(ptr))) && ((static_cast<const uint8_t*>(ptr)) < (it->first + size)) ) return it;
}
}
//eigen_assert("No sycl buffer found. Make sure that you have allocated memory for your buffer by calling allocate function in SyclDevice");
std::cerr << "No sycl buffer found. Make sure that you have allocated memory for your buffer by calling allocate function in SyclDevice"<< std::endl;
abort();
//return buffer_map.end();
}
// This function checks if the runtime recorded an error for the
// underlying stream device.
EIGEN_STRONG_INLINE bool ok() const {
return !exception_caught_;
}
// destructor
~QueueInterface() { buffer_map.clear(); }
};
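// Illustrative usage of the new split (mirroring the updated tests later in this
// commit): the caller owns the QueueInterface, and the SyclDevice merely borrows it.
//   cl::sycl::gpu_selector s;
//   Eigen::QueueInterface queueInterface(s);
//   Eigen::SyclDevice sycl_device(&queueInterface);
//   float *p = static_cast<float *>(sycl_device.allocate(100 * sizeof(float)));
//   // ... run tensor expressions on sycl_device ...
//   sycl_device.deallocate(p);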
template <typename T> class MemCopyFunctor {
public:
typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer> read_accessor;
typedef cl::sycl::accessor<uint8_t, 1, cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer> write_accessor;
MemCopyFunctor(read_accessor src_acc, write_accessor dst_acc, size_t rng, size_t i, size_t offset): m_src_acc(src_acc), m_dst_acc(dst_acc), m_rng(rng), m_i(i), m_offset(offset) {}
void operator()(cl::sycl::nd_item<1> itemID) {
auto src_ptr = ConvertToActualTypeSycl(T, m_src_acc);
auto dst_ptr = ConvertToActualTypeSycl(T, m_dst_acc);
auto globalid = itemID.get_global_linear_id();
if (globalid < m_rng) {
dst_ptr[globalid + m_i] = src_ptr[globalid + m_offset];
}
}
private:
read_accessor m_src_acc;
write_accessor m_dst_acc;
size_t m_rng;
size_t m_i;
size_t m_offset;
};
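// MemCopyFunctor is the kernel body shared by the memcpy-style routines of
// SyclDevice below: every work-item with a global id below m_rng copies one
// element of T, with m_offset and m_i giving the element offsets of the source
// and destination inside their untyped byte buffers.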
struct SyclDevice {
// class member.
QueueInterface* m_queue_stream;
/// The QueueInterface is not owned; it is the caller's responsibility to destroy it.
explicit SyclDevice(QueueInterface* queue_stream) : m_queue_stream(queue_stream){}
/// Creates a sycl accessor for a buffer. This function looks the buffer up in the
/// buffer_map via find_buffer (the pointer may point anywhere inside a region
/// returned by allocate); the matching device buffer provides the accessor.
template <cl::sycl::access::mode AcMd, typename T> EIGEN_STRONG_INLINE cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
return (get_sycl_buffer<T>(num_bytes, ptr)->template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
}
/// Inserting a new sycl buffer. For every allocated device pointer only one buffer would be created. The buffer type is a device- only buffer.
/// The key pointer used to access the device buffer(the device pointer(ptr) ) must be initialised by the allocate function.
template<typename T> EIGEN_STRONG_INLINE std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(size_t num_bytes, const T *ptr) const {
using Type = cl::sycl::buffer<T, 1>;
std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret;
if(ptr!=nullptr){
ret= buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
[](void *dataMem) { delete static_cast<Type*>(dataMem); })));
(static_cast<Type*>(ret.first->second.get()))->set_final_data(nullptr);
} else {
eigen_assert("The device memory is not allocated. Please call allocate on the device!!");
}
return ret;
template <cl::sycl::access::mode AcMd> EIGEN_STRONG_INLINE cl::sycl::accessor<uint8_t, 1, AcMd, cl::sycl::access::target::global_buffer>
get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const void* ptr) const {
return (get_sycl_buffer(num_bytes, ptr).template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
}
/// Accessing the created sycl device buffer for the device pointer
template <typename T> EIGEN_STRONG_INLINE cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(num_bytes, ptr).first->second.get());
EIGEN_STRONG_INLINE cl::sycl::buffer<uint8_t, 1>& get_sycl_buffer(size_t , const void * ptr) const {
return m_queue_stream->find_buffer(ptr)->second;
}
/// This is used to compute the total number of work-items (GRange) and the work-group size (tileSize) for sycl kernels
EIGEN_STRONG_INLINE void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
tileSize =m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
tileSize =sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
rng = n;
if (rng==0) rng=1;
GRange=rng;
@ -110,58 +172,35 @@ struct SyclDevice {
if (xMode != 0) GRange += (tileSize - xMode);
}
}
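// Worked example (assuming a device max_work_group_size of 256, so tileSize == 128):
//   n = 1000  ->  rng = GRange = 1000,  xMode = 1000 % 128 = 104,
//   GRange += 128 - 104  ->  1024,
// i.e. 8 work-groups of 128 work-items each; the kernels guard with
// `if (globalid < rng)` so the 24 surplus work-items do nothing.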
/// Allocating device pointer. This pointer is actually an 8 bytes host pointer used as key to access the sycl device buffer.
/// The reason is that we cannot use device buffer as a pointer as a m_data in Eigen leafNode expressions. So we create a key
/// pointer to be used in Eigen expression construction. When we convert the Eigen construction into the sycl construction we
/// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer.
/// The device pointer would be deleted by calling deallocate function.
EIGEN_STRONG_INLINE void *allocate(size_t) const {
return internal::aligned_malloc(8);
/// allocate device memory
EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const {
return m_queue_stream->allocate(num_bytes);
}
/// deallocate device memory
EIGEN_STRONG_INLINE void deallocate(const void *p) const {
m_queue_stream->deallocate(p);
}
// some runtime conditions that can be applied here
EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; }
template <typename T> EIGEN_STRONG_INLINE std::map<const void *, std::shared_ptr<void>>::iterator find_nearest(const T* ptr) const {
auto it1 = buffer_map.find(ptr);
if (it1 != buffer_map.end()){
return it1;
}
else{
for(std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin(); it!=buffer_map.end(); ++it){
auto size = ((cl::sycl::buffer<T, 1>*)it->second.get())->get_size();
if((static_cast<const T*>(it->first) < ptr) && (ptr < (static_cast<const T*>(it->first)) + size)) return it;
}
}
return buffer_map.end();
}
/// the memcpy function
template<typename T> EIGEN_STRONG_INLINE void memcpy(void *dst, const T *src, size_t n) const {
auto it1 = find_nearest(src);
auto it2 = find_nearest(static_cast<T*>(dst));
if ((it1 != buffer_map.end()) && (it2!=buffer_map.end())) {
auto offset= (src - (static_cast<const T*>(it1->first)));
auto i= ((static_cast<T*>(dst)) - const_cast<T*>((static_cast<const T*>(it2->first))));
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
m_queue.submit([&](cl::sycl::handler &cgh) {
auto src_acc =((cl::sycl::buffer<T, 1>*)it1->second.get())-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
auto dst_acc =((cl::sycl::buffer<T, 1>*)it2->second.get())-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
typedef decltype(src_acc) DevToDev;
cgh.parallel_for<DevToDev>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
auto globalid=itemID.get_global_linear_id();
if (globalid< rng) {
dst_acc[globalid+i ]=src_acc[globalid+offset];
}
});
});
m_queue.throw_asynchronous();
} else{
eigen_assert("no source or destination device memory found.");
}
//::memcpy(dst, src, n);
auto it1 = m_queue_stream->find_buffer((void*)src);
auto it2 = m_queue_stream->find_buffer(dst);
auto offset= (static_cast<const uint8_t*>(static_cast<const void*>(src))) - it1->first;
auto i= (static_cast<const uint8_t*>(dst)) - it2->first;
offset/=sizeof(T);
i/=sizeof(T);
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
sycl_queue().submit([&](cl::sycl::handler &cgh) {
auto src_acc =it1->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
auto dst_acc =it2->second.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
cgh.parallel_for(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<T>(src_acc, dst_acc, rng, 0, offset));
});
sycl_queue().throw_asynchronous();
}
/// The memcpyHostToDevice is used to copy data from a host pointer into a device-only buffer. Using the device
@ -170,8 +209,7 @@ struct SyclDevice {
/// buffer to host. Then we use memcpy to copy the data into the host accessor. The first time that
/// this buffer is accessed on the device, the data will be copied there.
template<typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
auto host_acc= get_sycl_buffer(n, dst)-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
auto host_acc= get_sycl_buffer(n, dst). template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
::memcpy(host_acc.get_pointer(), src, n);
}
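// Illustrative call (as in the updated tests): staging host data into a buffer
// previously returned by allocate():
//   sycl_device.memcpyHostToDevice(gpu_in_data, input.data(), total_bytes);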
/// The memcpyDeviceToHost is used to copy the data from device to host. Here, in order to avoid copying the data twice, we create a sycl
@ -180,57 +218,53 @@ struct SyclDevice {
/// buffer with map_allocator on the gpu in parallel. At the end of the function call the destination buffer is destroyed and the data
/// becomes available through the dst pointer via the fast copy technique (map_allocator). This way we make sure that the data is copied back
/// to the cpu only once per function call.
template<typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
auto it = find_nearest(src);
auto offset = src- (static_cast<const T*>(it->first));
if (it != buffer_map.end()) {
template<typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const T *src, size_t n) const {
auto it = m_queue_stream->find_buffer(src);
auto offset =static_cast<const uint8_t*>(static_cast<const void*>(src))- it->first;
offset/=sizeof(T);
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
// Assuming that dst points to the start of the destination buffer
auto dest_buf = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>(dst, cl::sycl::range<1>(rng));
typedef decltype(dest_buf) SYCLDTOH;
m_queue.submit([&](cl::sycl::handler &cgh) {
auto src_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
auto dest_buf = cl::sycl::buffer<uint8_t, 1, cl::sycl::map_allocator<uint8_t> >(static_cast<uint8_t*>(dst), cl::sycl::range<1>(rng*sizeof(T)));
sycl_queue().submit([&](cl::sycl::handler &cgh) {
auto src_acc= it->second.template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
cgh.parallel_for<SYCLDTOH>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), MemCopyFunctor<T>(src_acc, dst_acc, rng, 0, offset));
});
sycl_queue().throw_asynchronous();
}
/// returning the sycl queue
EIGEN_STRONG_INLINE cl::sycl::queue& sycl_queue() const { return m_queue_stream->m_queue;}
/// Implementation of the memset function for sycl.
template<typename T> EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const {
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
sycl_queue().submit([&](cl::sycl::handler &cgh) {
auto buf_acc =get_sycl_buffer(n, static_cast<uint8_t*>(static_cast<void*>(buff))). template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
cgh.parallel_for<SyclDevice>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
auto globalid=itemID.get_global_linear_id();
if (globalid< dst_acc.get_size()) {
dst_acc[globalid] = src_acc[globalid + offset];
if (globalid< buf_acc.get_size()) {
for(size_t i=0; i<sizeof(T); i++)
buf_acc[globalid*sizeof(T) + i] = c;
}
});
});
m_queue.throw_asynchronous();
} else{
eigen_assert("no device memory found. The memory might be destroyed before creation");
}
}
/// Here is the implementation of memset function on sycl.
template<typename T> EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const {
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
m_queue.submit([&](cl::sycl::handler &cgh) {
auto buf_acc =get_sycl_buffer(n, buff)-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
cgh.parallel_for<SyclDevice>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
auto globalid=itemID.get_global_linear_id();
auto buf_ptr= reinterpret_cast<typename cl::sycl::global_ptr<unsigned char>::pointer_t>((&(*buf_acc.get_pointer())));
if (globalid< buf_acc.get_size()) {
for(size_t i=0; i<sizeof(T); i++)
buf_ptr[globalid*sizeof(T) + i] = c;
}
});
});
m_queue.throw_asynchronous();
sycl_queue().throw_asynchronous();
}
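// Note that, as with std::memset, c is interpreted as a byte value: the kernel
// writes it into each of the sizeof(T) bytes of every element, so
// memset(ptr, 0, n * sizeof(float)) zero-fills n floats.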
/// No sycl-specific version is needed; it should behave the same as the CPU version.
EIGEN_STRONG_INLINE int majorDeviceVersion() const {
return 1;
EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }
/// Synchronises the queue: waits until all submitted work has finished, rethrowing any asynchronous errors.
EIGEN_STRONG_INLINE void synchronize() const {
sycl_queue().wait_and_throw();
}
// This function checks if the runtime recorded an error for the
// underlying stream device.
EIGEN_STRONG_INLINE bool ok() const {
return m_queue_stream->ok();
}
/// There is no need to synchronise the stream in sycl as it is automatically handled by sycl runtime scheduler.
EIGEN_STRONG_INLINE void synchronize() const {}
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H

View File

@ -27,7 +27,7 @@ namespace internal {
template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
template<typename BufferTOut, typename BufferTIn>
static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
do {
auto f = [length, local, bufOut, &bufI](cl::sycl::handler& h) mutable {
cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
@ -37,7 +37,7 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
auto aI =
bufI.template get_access<cl::sycl::access::mode::read_write>(h);
auto aOut =
bufOut->template get_access<cl::sycl::access::mode::discard_write>(h);
bufOut.template get_access<cl::sycl::access::mode::discard_write>(h);
cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write,
cl::sycl::access::target::local>
scratch(cl::sycl::range<1>(local), h);
@ -61,7 +61,7 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
/* Apply the reduction operation between the current local
* id and the one on the other half of the vector. */
if (globalid < length) {
int min = (length < local) ? length : local;
auto min = (length < local) ? length : local;
for (size_t offset = min / 2; offset > 0; offset /= 2) {
if (localid < offset) {
scratch[localid] += scratch[localid + offset];
@ -72,14 +72,15 @@ static void run(BufferTOut* bufOut, BufferTIn& bufI, const Eigen::SyclDevice& de
if (localid == 0) {
aI[id.get_group(0)] = scratch[localid];
if((length<=local) && globalid ==0){
aOut[globalid]=scratch[localid];
auto aOutPtr = ConvertToActualTypeSycl(CoeffReturnType, aOut);
aOutPtr[0]=scratch[0];
}
}
}
});
};
dev.m_queue.submit(f);
dev.m_queue.throw_asynchronous();
dev.sycl_queue().submit(f);
dev.sycl_queue().throw_asynchronous();
/* At this point, you could queue::wait_and_throw() to ensure that
* errors are caught quickly. However, this would likely impact
@ -116,7 +117,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
if(rng ==0) {
red_factor=1;
};
size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
size_t tileSize =dev.sycl_queue().get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
size_t GRange=std::max((size_t )1, rng);
// convert the global range to a power of 2 for the reduction
@ -134,7 +135,9 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
/// If the shared memory is smaller than GRange, we set the shared_mem size to the TotalSize; in this case one kernel is created for the recursion that reduces everything to a single element.
if (GRange < outTileSize) outTileSize=GRange;
// getting the final out buffer; at the moment the created buffer is true because there is no need for an assign
auto out_buffer =dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
// auto out_buffer =dev.template get_sycl_buffer<typename Eigen::internal::remove_all<CoeffReturnType>::type>(self.dimensions().TotalSize(), output);
auto out_buffer =dev.get_sycl_buffer(self.dimensions().TotalSize(), output);
/// creating the shared memory for calculating the reduction.
/// This is used to collect all the reduced values of shared memory, as we don't have a global barrier on the GPU. Once it is saved we can
/// recursively apply the reduction to it in order to reduce the whole.
@ -142,7 +145,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
Dims dims= self.xprDims();
Op functor = reducer;
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
auto tmp_global_accessor = temp_global_buffer. template get_access<cl::sycl::access::mode::read_write, cl::sycl::access::target::global_buffer>(cgh);
@ -161,16 +164,16 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
auto globalid=itemID.get_global_linear_id();
if(globalid<rng)
tmp_global_accessor.get_pointer()[globalid]=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*globalid, red_factor, const_cast<Op&>(functor));
tmp_global_accessor.get_pointer()[globalid]=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, static_cast<typename DevExpr::Index>(red_factor*globalid), red_factor, const_cast<Op&>(functor));
else
tmp_global_accessor.get_pointer()[globalid]=static_cast<CoeffReturnType>(0);
if(remaining!=0 && globalid==0 )
// this will add the rest of the input buffer when the input size is not divisible by red_factor.
tmp_global_accessor.get_pointer()[globalid]+=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, red_factor*(rng), remaining, const_cast<Op&>(functor));
tmp_global_accessor.get_pointer()[0]+=InnerMostDimReducer<decltype(device_self_evaluator), Op, false>::reduce(device_self_evaluator, static_cast<typename DevExpr::Index>(red_factor*(rng)), static_cast<typename DevExpr::Index>(remaining), const_cast<Op&>(functor));
});
});
dev.m_queue.throw_asynchronous();
dev.sycl_queue().throw_asynchronous();
/// This is used to recursively reduce the tmp values down to a single element;
syclGenericBufferReducer<CoeffReturnType,HostExpr>::run(out_buffer, temp_global_buffer,dev, GRange, outTileSize);
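// Outline of the two stages: the kernel above reduces red_factor consecutive
// inputs per work-item into temp_global_buffer; syclGenericBufferReducer then
// shrinks that buffer on the device in a tree reduction until a single element
// remains and is written to out_buffer.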
@ -198,7 +201,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
Dims dims= self.xprDims();
Op functor = reducer;
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write>(num_coeffs_to_preserve,cgh, output);
@ -212,19 +215,20 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
const auto device_self_expr= TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
/// This is the evaluator for device_self_expr. It is exactly the same as the self that was passed to the run function, except that
/// the device_evaluator is detectable and recognisable on the device.
typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeiceSelf;
typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeviceSelf;
auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
auto output_accessor_ptr =ConvertToActualTypeSycl(typename DeviceSelf::CoeffReturnType, output_accessor);
/// const_cast added as a naive workaround for the qualifier-drop error
auto globalid=itemID.get_global_linear_id();
if (globalid< range) {
typename DeiceSelf::CoeffReturnType accum = functor.initialize();
GenericDimReducer<DeiceSelf::NumReducedDims-1, DeiceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid),const_cast<Op&>(functor), &accum);
typename DeviceSelf::CoeffReturnType accum = functor.initialize();
GenericDimReducer<DeviceSelf::NumReducedDims-1, DeviceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(static_cast<typename DevExpr::Index>(globalid)),const_cast<Op&>(functor), &accum);
functor.finalize(accum);
output_accessor.get_pointer()[globalid]= accum;
output_accessor_ptr[globalid]= accum;
}
});
});
dev.m_queue.throw_asynchronous();
dev.sycl_queue().throw_asynchronous();
return false;
}
};

View File

@ -30,7 +30,8 @@ namespace internal {
template <typename PtrType, size_t N, typename... Params>
struct EvalToLHSConstructor {
PtrType expr;
EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t) : expr(ConvertToActualTypeSycl(typename Eigen::internal::remove_all<PtrType>::type, utility::tuple::get<N>(t))) {}
//EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
};
/// \struct ExprConstructor is used to reconstruct the expression on the device and
@ -53,9 +54,11 @@ CVQual PlaceHolder<CVQual TensorMap<T, Options3_, MakePointer_>, N>, Params...>{
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
: expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())){}\
};
//: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}
TENSORMAP(const)
TENSORMAP()
@ -163,7 +166,7 @@ struct ExprConstructor<CVQual TensorAssignOp<OrigLHSExpr, OrigRHSExpr>, CVQual
ASSIGN()
#undef ASSIGN
/// specialisation of the \ref ExprConstructor struct when the node type is
/// TensorEvalToOp
/// TensorEvalToOp /// 0 here is the output number in the buffer
#define EVALTO(CVQual)\
template <typename OrigExpr, typename Expr, typename... Params>\
struct ExprConstructor<CVQual TensorEvalToOp<OrigExpr, MakeGlobalPointer>, CVQual TensorEvalToOp<Expr>, Params...> {\
@ -189,12 +192,13 @@ template <typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
struct ExprConstructor<CVQual TensorForcedEvalOp<OrigExpr, MakeGlobalPointer>,\
CVQual PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
typedef CVQual TensorMap<Tensor<typename TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::Scalar,\
TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, 0, typename TensorForcedEvalOp<DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, Eigen::internal::traits<TensorForcedEvalOp<DevExpr, MakeGlobalPointer>>::Layout, typename TensorForcedEvalOp<DevExpr>::Index>, Eigen::internal::traits<TensorForcedEvalOp<DevExpr, MakeGlobalPointer>>::Layout, MakeGlobalPointer> Type;\
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
: expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())) {}\
};
//: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}
FORCEDEVAL(const)
FORCEDEVAL()
@ -214,12 +218,13 @@ struct ExprConstructor<CVQual TensorReductionOp<OP, Dim, OrigExpr, MakeGlobalPoi
CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dim, DevExpr>, N>, Params...> {\
static const size_t NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
typedef CVQual TensorMap<Tensor<typename TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::Scalar,\
NumIndices, 0, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
NumIndices, Eigen::internal::traits<TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>>::Layout, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, Eigen::internal::traits<TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>>::Layout, MakeGlobalPointer> Type;\
Type expr;\
template <typename FuncDetector>\
ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
:expr(Type(ConvertToActualTypeSycl(typename Type::Scalar, utility::tuple::get<N>(t)), fd.dimensions())) {}\
};
//: expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}
SYCLREDUCTIONEXPR(const)
SYCLREDUCTIONEXPR()

View File

@ -57,9 +57,8 @@ struct AccessorConstructor{
return utility::tuple::append(ExtractAccessor<Arg1>::getTuple(cgh, eval1),utility::tuple::append(ExtractAccessor<Arg2>::getTuple(cgh, eval2), ExtractAccessor<Arg3>::getTuple(cgh, eval3)));
}
template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
-> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM,
typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
-> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM>(eval.dimensions().TotalSize(), cgh,eval.data()))){
return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM>(eval.dimensions().TotalSize(), cgh,eval.data()));
}
};

View File

@ -148,7 +148,7 @@ template<typename InDim>
template<typename Dim> struct DimConstr<Dim, 0> {
template<typename InDim>
static inline Dim getDim(InDim dims ) {return Dim(dims.TotalSize());}
static inline Dim getDim(InDim dims ) {return Dim(static_cast<Dim>(dims.TotalSize()));}
};
template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>

View File

@ -37,11 +37,11 @@ void run(Expr &expr, Dev &dev) {
typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
auto functors = internal::extractFunctors(evaluator);
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
size_t range, GRange, tileSize;
dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0], tileSize, range, GRange);
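// The accessors now view untyped uint8_t buffers, so the byte extent reported by
// get_range() is divided by sizeof(Scalar) to recover the element count.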
dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0]/sizeof(typename Expr::Scalar), tileSize, range, GRange);
// run the kernel
cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
@ -49,11 +49,11 @@ void run(Expr &expr, Dev &dev) {
auto device_expr =internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
auto device_evaluator = Eigen::TensorEvaluator<decltype(device_expr.expr), Eigen::DefaultDevice>(device_expr.expr, Eigen::DefaultDevice());
if (itemID.get_global_linear_id() < range) {
device_evaluator.evalScalar(static_cast<int>(itemID.get_global_linear_id()));
device_evaluator.evalScalar(static_cast<typename DevExpr::Index>(itemID.get_global_linear_id()));
}
});
});
dev.m_queue.throw_asynchronous();
dev.sycl_queue().throw_asynchronous();
}
evaluator.cleanup();

View File

@ -147,6 +147,7 @@ if(EIGEN_TEST_CXX11)
ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_morphing_sycl "-std=c++11")
ei_add_test_sycl(cxx11_tensor_builtins_sycl "-std=c++11")
endif(EIGEN_TEST_SYCL)
# It should be safe to always run these tests as there is some fallback code for
# older compiler that don't support cxx11.

View File

@ -14,7 +14,7 @@
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_broadcast_sycl
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL
#include "main.h"
@ -25,38 +25,47 @@ using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
template <typename DataType, int DataLayout, typename IndexType>
static void test_broadcast_sycl_fixed(const Eigen::SyclDevice &sycl_device){
// BROADCAST test:
array<int, 4> in_range = {{2, 3, 5, 7}};
array<int, 4> broadcasts = {{2, 3, 1, 4}};
array<int, 4> out_range; // = in_range * broadcasts
IndexType inDim1=2;
IndexType inDim2=3;
IndexType inDim3=5;
IndexType inDim4=7;
IndexType bDim1=2;
IndexType bDim2=3;
IndexType bDim3=1;
IndexType bDim4=4;
array<IndexType, 4> in_range = {{inDim1, inDim2, inDim3, inDim4}};
array<IndexType, 4> broadcasts = {{bDim1, bDim2, bDim3, bDim4}};
array<IndexType, 4> out_range; // = in_range * broadcasts
for (size_t i = 0; i < out_range.size(); ++i)
out_range[i] = in_range[i] * broadcasts[i];
Tensor<float, 4> input(in_range);
Tensor<float, 4> out(out_range);
Tensor<DataType, 4, DataLayout, IndexType> input(in_range);
Tensor<DataType, 4, DataLayout, IndexType> out(out_range);
for (size_t i = 0; i < in_range.size(); ++i)
VERIFY_IS_EQUAL(out.dimension(i), out_range[i]);
for (int i = 0; i < input.size(); ++i)
input(i) = static_cast<float>(i);
for (IndexType i = 0; i < input.size(); ++i)
input(i) = static_cast<DataType>(i);
float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
DataType * gpu_in_data = static_cast<DataType*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType)));
DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
TensorMap<TensorFixedSize<float, Sizes<2, 3, 5, 7>>> gpu_in(gpu_in_data, in_range);
TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
TensorMap<TensorFixedSize<DataType, Sizes<2, 3, 5, 7>, DataLayout, IndexType>> gpu_in(gpu_in_data, in_range);
TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_out(gpu_out_data, out_range);
sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType));
gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 9; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 28; ++l) {
for (IndexType i = 0; i < inDim1*bDim1; ++i) {
for (IndexType j = 0; j < inDim2*bDim2; ++j) {
for (IndexType k = 0; k < inDim3*bDim3; ++k) {
for (IndexType l = 0; l < inDim4*bDim4; ++l) {
VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l));
}
}
@ -67,40 +76,48 @@ static void test_broadcast_sycl_fixed(const Eigen::SyclDevice &sycl_device){
sycl_device.deallocate(gpu_out_data);
}
template <typename DataType, int DataLayout, typename IndexType>
static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){
// BROADCAST test:
array<int, 4> in_range = {{2, 3, 5, 7}};
array<int, 4> broadcasts = {{2, 3, 1, 4}};
array<int, 4> out_range; // = in_range * broadcasts
IndexType inDim1=2;
IndexType inDim2=3;
IndexType inDim3=5;
IndexType inDim4=7;
IndexType bDim1=2;
IndexType bDim2=3;
IndexType bDim3=1;
IndexType bDim4=4;
array<IndexType, 4> in_range = {{inDim1, inDim2, inDim3, inDim4}};
array<IndexType, 4> broadcasts = {{bDim1, bDim2, bDim3, bDim4}};
array<IndexType, 4> out_range; // = in_range * broadcasts
for (size_t i = 0; i < out_range.size(); ++i)
out_range[i] = in_range[i] * broadcasts[i];
Tensor<float, 4> input(in_range);
Tensor<float, 4> out(out_range);
Tensor<DataType, 4, DataLayout, IndexType> input(in_range);
Tensor<DataType, 4, DataLayout, IndexType> out(out_range);
for (size_t i = 0; i < in_range.size(); ++i)
VERIFY_IS_EQUAL(out.dimension(i), out_range[i]);
for (int i = 0; i < input.size(); ++i)
input(i) = static_cast<float>(i);
for (IndexType i = 0; i < input.size(); ++i)
input(i) = static_cast<DataType>(i);
float * gpu_in_data = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
DataType * gpu_in_data = static_cast<DataType*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType)));
DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range);
TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(float));
TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_in(gpu_in_data, in_range);
TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_out(gpu_out_data, out_range);
sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType));
gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 9; ++j) {
for (int k = 0; k < 5; ++k) {
for (int l = 0; l < 28; ++l) {
VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l));
for (IndexType i = 0; i < inDim1*bDim1; ++i) {
for (IndexType j = 0; j < inDim2*bDim2; ++j) {
for (IndexType k = 0; k < inDim3*bDim3; ++k) {
for (IndexType l = 0; l < inDim4*bDim4; ++l) {
VERIFY_IS_APPROX(input(i%inDim1,j%inDim2,k%inDim3,l%inDim4), out(i,j,k,l));
}
}
}
@ -110,10 +127,24 @@ static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){
sycl_device.deallocate(gpu_out_data);
}
template<typename DataType> void sycl_broadcast_test_per_device(const cl::sycl::device& d){
std::cout << "Running on " << d.template get_info<cl::sycl::info::device::name>() << std::endl;
QueueInterface queueInterface(d);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
test_broadcast_sycl_fixed<DataType, RowMajor, int>(sycl_device);
test_broadcast_sycl<DataType, RowMajor, int>(sycl_device);
test_broadcast_sycl_fixed<DataType, ColMajor, int>(sycl_device);
test_broadcast_sycl<DataType, ColMajor, int>(sycl_device);
test_broadcast_sycl_fixed<DataType, RowMajor, int64_t>(sycl_device);
test_broadcast_sycl<DataType, RowMajor, int64_t>(sycl_device);
test_broadcast_sycl_fixed<DataType, ColMajor, int64_t>(sycl_device);
test_broadcast_sycl<DataType, ColMajor, int64_t>(sycl_device);
}
void test_cxx11_tensor_broadcast_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_broadcast_sycl_fixed(sycl_device));
CALL_SUBTEST(test_broadcast_sycl(sycl_device));
for (const auto& device : cl::sycl::device::get_devices()) {
CALL_SUBTEST(sycl_broadcast_test_per_device<float>(device));
}
}

View File

@ -0,0 +1,148 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016
// Mehdi Goli Codeplay Software Ltd.
// Ralph Potter Codeplay Software Ltd.
// Luke Iwanski Codeplay Software Ltd.
// Contact: <eigen@codeplay.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_builtins_sycl
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_SYCL
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::array;
using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
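// These helpers are injected into namespace std so that the std::FUNC reference
// in the test macros below also resolves for Eigen's non-standard builtins
// (rsqrt, square, cube, inverse).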
namespace std {
template <typename T> T rsqrt(T x) { return 1 / std::sqrt(x); }
template <typename T> T square(T x) { return x * x; }
template <typename T> T cube(T x) { return x * x * x; }
template <typename T> T inverse(T x) { return 1 / x; }
}
#define TEST_UNARY_BUILTINS_FOR_SCALAR(FUNC, SCALAR, OPERATOR) \
{ \
/* out OPERATOR in.FUNC() */ \
Tensor<SCALAR, 3> in(tensorRange); \
Tensor<SCALAR, 3> out(tensorRange); \
in = in.random() + static_cast<SCALAR>(0.01); \
out = out.random() + static_cast<SCALAR>(0.01); \
Tensor<SCALAR, 3> reference(out); \
SCALAR *gpu_data = static_cast<SCALAR *>( \
sycl_device.allocate(in.size() * sizeof(SCALAR))); \
SCALAR *gpu_data_out = static_cast<SCALAR *>( \
sycl_device.allocate(out.size() * sizeof(SCALAR))); \
TensorMap<Tensor<SCALAR, 3>> gpu(gpu_data, tensorRange); \
TensorMap<Tensor<SCALAR, 3>> gpu_out(gpu_data_out, tensorRange); \
sycl_device.memcpyHostToDevice(gpu_data, in.data(), \
(in.size()) * sizeof(SCALAR)); \
sycl_device.memcpyHostToDevice(gpu_data_out, out.data(), \
(out.size()) * sizeof(SCALAR)); \
gpu_out.device(sycl_device) OPERATOR gpu.FUNC(); \
sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, \
(out.size()) * sizeof(SCALAR)); \
for (int i = 0; i < out.size(); ++i) { \
SCALAR ver = reference(i); \
ver OPERATOR std::FUNC(in(i)); \
VERIFY_IS_APPROX(out(i), ver); \
} \
sycl_device.deallocate(gpu_data); \
sycl_device.deallocate(gpu_data_out); \
} \
{ \
/* out OPERATOR out.FUNC() */ \
Tensor<SCALAR, 3> out(tensorRange); \
out = out.random() + static_cast<SCALAR>(0.01); \
Tensor<SCALAR, 3> reference(out); \
SCALAR *gpu_data_out = static_cast<SCALAR *>( \
sycl_device.allocate(out.size() * sizeof(SCALAR))); \
TensorMap<Tensor<SCALAR, 3>> gpu_out(gpu_data_out, tensorRange); \
sycl_device.memcpyHostToDevice(gpu_data_out, out.data(), \
(out.size()) * sizeof(SCALAR)); \
gpu_out.device(sycl_device) OPERATOR gpu_out.FUNC(); \
sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, \
(out.size()) * sizeof(SCALAR)); \
for (int i = 0; i < out.size(); ++i) { \
SCALAR ver = reference(i); \
ver OPERATOR std::FUNC(reference(i)); \
VERIFY_IS_APPROX(out(i), ver); \
} \
sycl_device.deallocate(gpu_data_out); \
}
#define TEST_UNARY_BUILTINS_OPERATOR(SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(abs, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(sqrt, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(rsqrt, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(square, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(cube, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(inverse, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(tanh, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(exp, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(log, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(ceil, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(floor, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(round, SCALAR, OPERATOR) \
TEST_UNARY_BUILTINS_FOR_SCALAR(log1p, SCALAR, OPERATOR)
#define TEST_IS_THAT_RETURNS_BOOL(SCALAR, FUNC) \
{ \
/* out = in.FUNC() */ \
Tensor<SCALAR, 3> in(tensorRange); \
Tensor<bool, 3> out(tensorRange); \
in = in.random() + static_cast<SCALAR>(0.01); \
SCALAR *gpu_data = static_cast<SCALAR *>( \
sycl_device.allocate(in.size() * sizeof(SCALAR))); \
bool *gpu_data_out = \
static_cast<bool *>(sycl_device.allocate(out.size() * sizeof(bool))); \
TensorMap<Tensor<SCALAR, 3>> gpu(gpu_data, tensorRange); \
TensorMap<Tensor<bool, 3>> gpu_out(gpu_data_out, tensorRange); \
sycl_device.memcpyHostToDevice(gpu_data, in.data(), \
(in.size()) * sizeof(SCALAR)); \
gpu_out.device(sycl_device) = gpu.FUNC(); \
sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, \
(out.size()) * sizeof(bool)); \
for (int i = 0; i < out.size(); ++i) { \
VERIFY_IS_EQUAL(out(i), std::FUNC(in(i))); \
} \
sycl_device.deallocate(gpu_data); \
sycl_device.deallocate(gpu_data_out); \
}
#define TEST_UNARY_BUILTINS(SCALAR) \
TEST_UNARY_BUILTINS_OPERATOR(SCALAR, += ) \
TEST_UNARY_BUILTINS_OPERATOR(SCALAR, = ) \
TEST_IS_THAT_RETURNS_BOOL(SCALAR, isnan) \
TEST_IS_THAT_RETURNS_BOOL(SCALAR, isfinite) \
TEST_IS_THAT_RETURNS_BOOL(SCALAR, isinf)
static void test_builtin_unary_sycl(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 10;
int sizeDim2 = 10;
int sizeDim3 = 10;
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
TEST_UNARY_BUILTINS(float)
/// Your GPU must support double; otherwise, disable the double test.
TEST_UNARY_BUILTINS(double)
}
void test_cxx11_tensor_builtins_sycl() {
cl::sycl::gpu_selector s;
QueueInterface queueInterface(s);
Eigen::SyclDevice sycl_device(&queueInterface);
CALL_SUBTEST(test_builtin_unary_sycl(sycl_device));
}
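For readers tracing the macros, here is roughly what a single instantiation such as TEST_UNARY_BUILTINS_FOR_SCALAR(sqrt, float, =) expands to (first block only; a sketch, not part of the commit):

// Expansion sketch: run gpu.sqrt() on the device, then compare each element
// against std::sqrt computed on the host.
{
  Tensor<float, 3> in(tensorRange);
  Tensor<float, 3> out(tensorRange);
  in = in.random() + static_cast<float>(0.01);
  out = out.random() + static_cast<float>(0.01);
  Tensor<float, 3> reference(out);
  float *gpu_data = static_cast<float *>(
      sycl_device.allocate(in.size() * sizeof(float)));
  float *gpu_data_out = static_cast<float *>(
      sycl_device.allocate(out.size() * sizeof(float)));
  TensorMap<Tensor<float, 3>> gpu(gpu_data, tensorRange);
  TensorMap<Tensor<float, 3>> gpu_out(gpu_data_out, tensorRange);
  sycl_device.memcpyHostToDevice(gpu_data, in.data(), in.size() * sizeof(float));
  sycl_device.memcpyHostToDevice(gpu_data_out, out.data(), out.size() * sizeof(float));
  gpu_out.device(sycl_device) = gpu.sqrt();   // FUNC = sqrt, OPERATOR = '='
  sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, out.size() * sizeof(float));
  for (int i = 0; i < out.size(); ++i) {
    float ver = reference(i);
    ver = std::sqrt(in(i));                   // host reference value
    VERIFY_IS_APPROX(out(i), ver);
  }
  sycl_device.deallocate(gpu_data);
  sycl_device.deallocate(gpu_data_out);
}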

View File

@ -19,26 +19,59 @@
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
#include<stdint.h>
#include <stdint.h>
#include <iostream>
void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
std::cout <<"Helo from ComputeCpp: the requested device exists and the device name is : "
<< sycl_device.m_queue.get_device(). template get_info<cl::sycl::info::device::name>() <<std::endl;;
template <typename DataType, int DataLayout>
void test_device_memory(const Eigen::SyclDevice &sycl_device) {
std::cout << "Running on : "
<< sycl_device.sycl_queue().get_device(). template get_info<cl::sycl::info::device::name>()
<<std::endl;
int sizeDim1 = 100;
array<int, 1> tensorRange = {{sizeDim1}};
Tensor<int, 1> in(tensorRange);
Tensor<int, 1> in1(tensorRange);
memset(in1.data(), 1,in1.size()*sizeof(int));
int * gpu_in_data = static_cast<int*>(sycl_device.allocate(in.size()*sizeof(int)));
sycl_device.memset(gpu_in_data, 1,in.size()*sizeof(int) );
sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(int) );
for (int i=0; i<in.size(); i++)
VERIFY_IS_APPROX(in(i), in1(i));
Tensor<DataType, 1, DataLayout> in(tensorRange);
Tensor<DataType, 1, DataLayout> in1(tensorRange);
memset(in1.data(), 1, in1.size() * sizeof(DataType));
DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.size()*sizeof(DataType)));
sycl_device.memset(gpu_in_data, 1, in.size()*sizeof(DataType));
sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(DataType));
for (int i=0; i<in.size(); i++) {
VERIFY_IS_EQUAL(in(i), in1(i));
}
sycl_device.deallocate(gpu_in_data);
}
void test_cxx11_tensor_device_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_device_sycl(sycl_device));
template <typename DataType, int DataLayout>
void test_device_exceptions(const Eigen::SyclDevice &sycl_device) {
VERIFY(sycl_device.ok());
int sizeDim1 = 100;
array<int, 1> tensorDims = {{sizeDim1}};
DataType* gpu_data = static_cast<DataType*>(sycl_device.allocate(sizeDim1*sizeof(DataType)));
sycl_device.memset(gpu_data, 1, sizeDim1*sizeof(DataType));
TensorMap<Tensor<DataType, 1, DataLayout>> in(gpu_data, tensorDims);
TensorMap<Tensor<DataType, 1, DataLayout>> out(gpu_data, tensorDims);
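// Deliberately divide by zero on the device: the asynchronous SYCL error
// should surface through the queue and flip sycl_device.ok() to false once
// synchronize() returns.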
out.device(sycl_device) = in / in.constant(0);
sycl_device.synchronize();
VERIFY(!sycl_device.ok());
sycl_device.deallocate(gpu_data);
}
template<typename DataType> void sycl_device_test_per_device(const cl::sycl::device& d){
std::cout << "Running on " << d.template get_info<cl::sycl::info::device::name>() << std::endl;
QueueInterface queueInterface(d);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
test_device_memory<DataType, RowMajor>(sycl_device);
test_device_memory<DataType, ColMajor>(sycl_device);
/// This test throws an exception; enable it if you want to see the exception.
//test_device_exceptions<DataType, RowMajor>(sycl_device);
/// This test throws an exception; enable it if you want to see the exception.
//test_device_exceptions<DataType, ColMajor>(sycl_device);
}
void test_cxx11_tensor_device_sycl() {
for (const auto& device : cl::sycl::device::get_devices()) {
CALL_SUBTEST(sycl_device_test_per_device<float>(device));
}
}
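Note that both the host memset and sycl_device.memset in the memory test operate bytewise: setting every byte of an int to 1 yields 0x01010101, not 1, and that is the value VERIFY_IS_EQUAL compares on both sides. A self-contained illustration:

#include <cassert>
#include <cstring>

int main() {
  int x = 0;
  std::memset(&x, 1, sizeof(int));  // fills every byte with 0x01
  assert(x == 0x01010101);          // 16843009 on the usual 4-byte int;
                                    // endianness-independent, since all
                                    // bytes are identical
  return 0;
}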

View File

@ -21,33 +21,33 @@
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
template <typename DataType, int DataLayout>
void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
int sizeDim2 = 200;
int sizeDim3 = 200;
int sizeDim2 = 20;
int sizeDim3 = 20;
Eigen::array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
Eigen::Tensor<float, 3> in1(tensorRange);
Eigen::Tensor<float, 3> in2(tensorRange);
Eigen::Tensor<float, 3> out(tensorRange);
Eigen::Tensor<DataType, 3, DataLayout> in1(tensorRange);
Eigen::Tensor<DataType, 3, DataLayout> in2(tensorRange);
Eigen::Tensor<DataType, 3, DataLayout> out(tensorRange);
float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
DataType * gpu_in1_data = static_cast<DataType*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType)));
DataType * gpu_in2_data = static_cast<DataType*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(DataType)));
DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType)));
in1 = in1.random() + in1.constant(10.0f);
in2 = in2.random() + in2.constant(10.0f);
// creating TensorMap from tensor
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
Eigen::TensorMap<Eigen::Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(float));
Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gpu_in1(gpu_in1_data, tensorRange);
Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gpu_in2(gpu_in2_data, tensorRange);
Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout>> gpu_out(gpu_out_data, tensorRange);
sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in1.dimensions().TotalSize())*sizeof(DataType));
/// c=(a+b)*b
gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -63,8 +63,19 @@ void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) {
}
void test_cxx11_tensor_forced_eval_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_forced_eval_sycl(sycl_device));
template <typename DataType, typename Dev_selector> void tensorForced_evalperDevice(Dev_selector s){
QueueInterface queueInterface(s);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
test_forced_eval_sycl<DataType, RowMajor>(sycl_device);
test_forced_eval_sycl<DataType, ColMajor>(sycl_device);
}
void test_cxx11_tensor_forced_eval_sycl() {
printf("Test on GPU: OpenCL\n");
CALL_SUBTEST(tensorForced_evalperDevice<float>((cl::sycl::gpu_selector())));
printf("repeating the test on CPU: OpenCL\n");
CALL_SUBTEST(tensorForced_evalperDevice<float>((cl::sycl::cpu_selector())));
printf("repeating the test on CPU: HOST\n");
CALL_SUBTEST(tensorForced_evalperDevice<float>((cl::sycl::host_selector())));
printf("Test Passed******************\n" );
}
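The key line above is gpu_out.device(sycl_device) = (gpu_in1 + gpu_in2).eval() * gpu_in2: the .eval() forces the sum into a temporary instead of letting the whole expression fuse into a single kernel. A small host-side sketch of the same semantics:

// .eval() cuts the expression tree, materializing (a + b) before the
// multiply. The numeric result is unchanged; only the evaluation strategy
// differs.
Eigen::Tensor<float, 1> a(3), b(3);
a.setConstant(1.0f);
b.setConstant(2.0f);
Eigen::Tensor<float, 1> c = (a + b).eval() * b;  // c(i) == (1 + 2) * 2 == 6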

View File

@ -28,7 +28,7 @@ using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
template <typename DataType, int DataLayout>
static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
{
int sizeDim1 = 2;
@ -37,31 +37,31 @@ static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
int sizeDim4 = 7;
int sizeDim5 = 11;
array<int, 5> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}};
Tensor<float, 5> tensor(tensorRange);
Tensor<DataType, 5,DataLayout> tensor(tensorRange);
tensor.setRandom();
array<int, 5> slice1_range ={{1, 1, 1, 1, 1}};
Tensor<float, 5> slice1(slice1_range);
Tensor<DataType, 5,DataLayout> slice1(slice1_range);
float* gpu_data1 = static_cast<float*>(sycl_device.allocate(tensor.size()*sizeof(float)));
float* gpu_data2 = static_cast<float*>(sycl_device.allocate(slice1.size()*sizeof(float)));
TensorMap<Tensor<float, 5>> gpu1(gpu_data1, tensorRange);
TensorMap<Tensor<float, 5>> gpu2(gpu_data2, slice1_range);
DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor.size()*sizeof(DataType)));
DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(slice1.size()*sizeof(DataType)));
TensorMap<Tensor<DataType, 5,DataLayout>> gpu1(gpu_data1, tensorRange);
TensorMap<Tensor<DataType, 5,DataLayout>> gpu2(gpu_data2, slice1_range);
Eigen::DSizes<ptrdiff_t, 5> indices(1,2,3,4,5);
Eigen::DSizes<ptrdiff_t, 5> sizes(1,1,1,1,1);
sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType));
gpu2.device(sycl_device)=gpu1.slice(indices, sizes);
sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(DataType));
VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
array<int, 5> slice2_range ={{1,1,2,2,3}};
Tensor<float, 5> slice2(slice2_range);
float* gpu_data3 = static_cast<float*>(sycl_device.allocate(slice2.size()*sizeof(float)));
TensorMap<Tensor<float, 5>> gpu3(gpu_data3, slice2_range);
Tensor<DataType, 5,DataLayout> slice2(slice2_range);
DataType* gpu_data3 = static_cast<DataType*>(sycl_device.allocate(slice2.size()*sizeof(DataType)));
TensorMap<Tensor<DataType, 5,DataLayout>> gpu3(gpu_data3, slice2_range);
Eigen::DSizes<ptrdiff_t, 5> indices2(1,1,3,4,5);
Eigen::DSizes<ptrdiff_t, 5> sizes2(1,1,2,2,3);
gpu3.device(sycl_device)=gpu1.slice(indices2, sizes2);
sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(DataType));
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
for (int k = 0; k < 3; ++k) {
@ -74,11 +74,22 @@ static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
sycl_device.deallocate(gpu_data3);
}
template<typename DataType, typename dev_Selector> void sycl_slicing_test_per_device(dev_Selector s){
QueueInterface queueInterface(s);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
test_simple_slice<DataType, RowMajor>(sycl_device);
test_simple_slice<DataType, ColMajor>(sycl_device);
}
void test_cxx11_tensor_morphing_sycl()
{
/// Currently it only works on CPU. Adding GPU causes an LLVM ERROR when constructing the OpenCL kernel at runtime.
cl::sycl::cpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_simple_slice(sycl_device));
// printf("Test on GPU: OpenCL\n");
// CALL_SUBTEST(sycl_slicing_test_per_device<float>((cl::sycl::gpu_selector())));
printf("repeating the test on CPU: OpenCL\n");
CALL_SUBTEST(sycl_slicing_test_per_device<float>((cl::sycl::cpu_selector())));
printf("repeating the test on CPU: HOST\n");
CALL_SUBTEST(sycl_slicing_test_per_device<float>((cl::sycl::host_selector())));
printf("Test Passed******************\n" );
}
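The verification loop for the second slice is elided at the hunk boundary; a sketch consistent with indices2 = (1,1,3,4,5) and sizes2 = (1,1,2,2,3) above:

// Element (0,0,i,j,k) of slice2 maps to (1,1,3+i,4+j,5+k) of the source.
for (int i = 0; i < 2; ++i) {
  for (int j = 0; j < 2; ++j) {
    for (int k = 0; k < 3; ++k) {
      VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
    }
  }
}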

View File

@ -21,37 +21,37 @@
#include <unsupported/Eigen/CXX11/Tensor>
template <typename DataType, int DataLayout>
static void test_full_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
const int num_rows = 452;
const int num_cols = 765;
array<int, 2> tensorRange = {{num_rows, num_cols}};
Tensor<float, 2> in(tensorRange);
Tensor<float, 0> full_redux;
Tensor<float, 0> full_redux_gpu;
Tensor<DataType, 2, DataLayout> in(tensorRange);
Tensor<DataType, 0, DataLayout> full_redux;
Tensor<DataType, 0, DataLayout> full_redux_gpu;
in.setRandom();
full_redux = in.sum();
float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
float* gpu_out_data =(float*)sycl_device.allocate(sizeof(float));
DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType)));
DataType* gpu_out_data =(DataType*)sycl_device.allocate(sizeof(DataType));
TensorMap<Tensor<float, 2> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<float, 0> > out_gpu(gpu_out_data);
TensorMap<Tensor<DataType, 2, DataLayout> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<DataType, 0, DataLayout> > out_gpu(gpu_out_data);
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType));
out_gpu.device(sycl_device) = in_gpu.sum();
sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(float));
sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, sizeof(DataType));
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux_gpu(), full_redux());
sycl_device.deallocate(gpu_in_data);
sycl_device.deallocate(gpu_out_data);
}
template <typename DataType, int DataLayout>
static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device) {
int dim_x = 145;
@ -63,23 +63,23 @@ static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device)
red_axis[0] = 0;
array<int, 2> reduced_tensorRange = {{dim_y, dim_z}};
Tensor<float, 3> in(tensorRange);
Tensor<float, 2> redux(reduced_tensorRange);
Tensor<float, 2> redux_gpu(reduced_tensorRange);
Tensor<DataType, 3, DataLayout> in(tensorRange);
Tensor<DataType, 2, DataLayout> redux(reduced_tensorRange);
Tensor<DataType, 2, DataLayout> redux_gpu(reduced_tensorRange);
in.setRandom();
redux= in.sum(red_axis);
float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType)));
DataType* gpu_out_data = static_cast<DataType*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(DataType)));
TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
TensorMap<Tensor<DataType, 3, DataLayout> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<DataType, 2, DataLayout> > out_gpu(gpu_out_data, reduced_tensorRange);
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType));
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(DataType));
// Check that the CPU and GPU reductions return the same result.
for(int j=0; j<reduced_tensorRange[0]; j++ )
@ -90,6 +90,7 @@ static void test_first_dim_reductions_sycl(const Eigen::SyclDevice& sycl_device)
sycl_device.deallocate(gpu_out_data);
}
template <typename DataType, int DataLayout>
static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device) {
int dim_x = 567;
@ -101,23 +102,23 @@ static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device)
red_axis[0] = 2;
array<int, 2> reduced_tensorRange = {{dim_x, dim_y}};
Tensor<float, 3> in(tensorRange);
Tensor<float, 2> redux(reduced_tensorRange);
Tensor<float, 2> redux_gpu(reduced_tensorRange);
Tensor<DataType, 3, DataLayout> in(tensorRange);
Tensor<DataType, 2, DataLayout> redux(reduced_tensorRange);
Tensor<DataType, 2, DataLayout> redux_gpu(reduced_tensorRange);
in.setRandom();
redux= in.sum(red_axis);
float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(float)));
float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(float)));
DataType* gpu_in_data = static_cast<DataType*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(DataType)));
DataType* gpu_out_data = static_cast<DataType*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*sizeof(DataType)));
TensorMap<Tensor<float, 3> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<float, 2> > out_gpu(gpu_out_data, reduced_tensorRange);
TensorMap<Tensor<DataType, 3, DataLayout> > in_gpu(gpu_in_data, tensorRange);
TensorMap<Tensor<DataType, 2, DataLayout> > out_gpu(gpu_out_data, reduced_tensorRange);
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(DataType));
out_gpu.device(sycl_device) = in_gpu.sum(red_axis);
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(float));
sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*sizeof(DataType));
// Check that the CPU and GPU reductions return the same result.
for(int j=0; j<reduced_tensorRange[0]; j++ )
for(int k=0; k<reduced_tensorRange[1]; k++ )
@ -127,12 +128,20 @@ static void test_last_dim_reductions_sycl(const Eigen::SyclDevice &sycl_device)
sycl_device.deallocate(gpu_out_data);
}
template<typename DataType> void sycl_reduction_test_per_device(const cl::sycl::device& d){
std::cout << "Running on " << d.template get_info<cl::sycl::info::device::name>() << std::endl;
QueueInterface queueInterface(d);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
void test_cxx11_tensor_reduction_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST((test_full_reductions_sycl(sycl_device)));
CALL_SUBTEST((test_first_dim_reductions_sycl(sycl_device)));
CALL_SUBTEST((test_last_dim_reductions_sycl(sycl_device)));
test_full_reductions_sycl<DataType, RowMajor>(sycl_device);
test_first_dim_reductions_sycl<DataType, RowMajor>(sycl_device);
test_last_dim_reductions_sycl<DataType, RowMajor>(sycl_device);
test_full_reductions_sycl<DataType, ColMajor>(sycl_device);
test_first_dim_reductions_sycl<DataType, ColMajor>(sycl_device);
test_last_dim_reductions_sycl<DataType, ColMajor>(sycl_device);
}
void test_cxx11_tensor_reduction_sycl() {
for (const auto& device : cl::sycl::device::get_devices()) {
CALL_SUBTEST(sycl_reduction_test_per_device<float>(device));
}
}
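The per-element checks for the partial reductions are elided at the hunk boundaries; a sketch assuming the names declared above:

// redux was computed on the host with in.sum(red_axis), redux_gpu on the
// device; the two must agree elementwise over the reduced range.
for (int j = 0; j < reduced_tensorRange[0]; j++) {
  for (int k = 0; k < reduced_tensorRange[1]; k++) {
    VERIFY_IS_APPROX(redux_gpu(j, k), redux(j, k));
  }
}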

View File

@ -26,35 +26,32 @@ using Eigen::array;
using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
template <typename DataType, int DataLayout>
void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
int sizeDim2 = 100;
int sizeDim3 = 100;
int sizeDim2 = 10;
int sizeDim3 = 20;
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
Tensor<float, 3> in1(tensorRange);
Tensor<float, 3> out1(tensorRange);
Tensor<float, 3> out2(tensorRange);
Tensor<float, 3> out3(tensorRange);
Tensor<DataType, 3, DataLayout> in1(tensorRange);
Tensor<DataType, 3, DataLayout> out1(tensorRange);
Tensor<DataType, 3, DataLayout> out2(tensorRange);
Tensor<DataType, 3, DataLayout> out3(tensorRange);
in1 = in1.random();
float* gpu_data1 = static_cast<float*>(sycl_device.allocate(in1.size()*sizeof(float)));
float* gpu_data2 = static_cast<float*>(sycl_device.allocate(out1.size()*sizeof(float)));
//float* gpu_data = static_cast<float*>(sycl_device.allocate(out2.size()*sizeof(float)));
DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(in1.size()*sizeof(DataType)));
DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(out1.size()*sizeof(DataType)));
TensorMap<Tensor<float, 3>> gpu1(gpu_data1, tensorRange);
TensorMap<Tensor<float, 3>> gpu2(gpu_data2, tensorRange);
//TensorMap<Tensor<float, 3>> gpu_out2(gpu_out2_data, tensorRange);
sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(float));
TensorMap<Tensor<DataType, 3, DataLayout>> gpu1(gpu_data1, tensorRange);
TensorMap<Tensor<DataType, 3, DataLayout>> gpu2(gpu_data2, tensorRange);
sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(DataType));
sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(DataType));
gpu1.device(sycl_device) = gpu1 * 3.14f;
gpu2.device(sycl_device) = gpu2 * 2.7f;
sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(float));
// sycl_device.Synchronize();
sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(DataType));
sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(DataType));
sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(DataType));
for (int i = 0; i < in1.size(); ++i) {
VERIFY_IS_APPROX(out1(i), in1(i) * 3.14f);
@ -65,34 +62,34 @@ void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) {
sycl_device.deallocate(gpu_data1);
sycl_device.deallocate(gpu_data2);
}
template <typename DataType, int DataLayout>
void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
int sizeDim2 = 100;
int sizeDim3 = 100;
int sizeDim2 = 10;
int sizeDim3 = 20;
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
Tensor<float, 3> in1(tensorRange);
Tensor<float, 3> in2(tensorRange);
Tensor<float, 3> in3(tensorRange);
Tensor<float, 3> out(tensorRange);
Tensor<DataType, 3,DataLayout> in1(tensorRange);
Tensor<DataType, 3,DataLayout> in2(tensorRange);
Tensor<DataType, 3,DataLayout> in3(tensorRange);
Tensor<DataType, 3,DataLayout> out(tensorRange);
in2 = in2.random();
in3 = in3.random();
float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.size()*sizeof(float)));
float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.size()*sizeof(float)));
float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.size()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.size()*sizeof(float)));
DataType * gpu_in1_data = static_cast<DataType*>(sycl_device.allocate(in1.size()*sizeof(DataType)));
DataType * gpu_in2_data = static_cast<DataType*>(sycl_device.allocate(in2.size()*sizeof(DataType)));
DataType * gpu_in3_data = static_cast<DataType*>(sycl_device.allocate(in3.size()*sizeof(DataType)));
DataType * gpu_out_data = static_cast<DataType*>(sycl_device.allocate(out.size()*sizeof(DataType)));
TensorMap<Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
TensorMap<Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
TensorMap<Tensor<float, 3>> gpu_in3(gpu_in3_data, tensorRange);
TensorMap<Tensor<float, 3>> gpu_out(gpu_out_data, tensorRange);
TensorMap<Tensor<DataType, 3, DataLayout>> gpu_in1(gpu_in1_data, tensorRange);
TensorMap<Tensor<DataType, 3, DataLayout>> gpu_in2(gpu_in2_data, tensorRange);
TensorMap<Tensor<DataType, 3, DataLayout>> gpu_in3(gpu_in3_data, tensorRange);
TensorMap<Tensor<DataType, 3, DataLayout>> gpu_out(gpu_out_data, tensorRange);
/// a=1.2f
gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f);
sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -104,7 +101,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
/// a=b*1.2f
gpu_out.device(sycl_device) = gpu_in1 * 1.2f;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -116,9 +113,9 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
printf("a=b*1.2f Test Passed\n");
/// c=a*b
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(DataType));
gpu_out.device(sycl_device) = gpu_in1 * gpu_in2;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -132,7 +129,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
/// c=a+b
gpu_out.device(sycl_device) = gpu_in1 + gpu_in2;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -146,7 +143,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
/// c=a*a
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -160,7 +157,7 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
//a*3.14f + b*2.7f
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f);
sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -173,9 +170,9 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
printf("a*3.14f + b*2.7f Test Passed\n");
///d= (a>0.5? b:c)
sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(DataType));
gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3);
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@ -191,10 +188,20 @@ void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
sycl_device.deallocate(gpu_in3_data);
sycl_device.deallocate(gpu_out_data);
}
void test_cxx11_tensor_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_sycl_mem_transfers(sycl_device));
CALL_SUBTEST(test_sycl_computations(sycl_device));
template<typename DataType, typename dev_Selector> void sycl_computing_test_per_device(dev_Selector s){
QueueInterface queueInterface(s);
auto sycl_device = Eigen::SyclDevice(&queueInterface);
test_sycl_mem_transfers<DataType, RowMajor>(sycl_device);
test_sycl_computations<DataType, RowMajor>(sycl_device);
test_sycl_mem_transfers<DataType, ColMajor>(sycl_device);
test_sycl_computations<DataType, ColMajor>(sycl_device);
}
void test_cxx11_tensor_sycl() {
printf("Test on GPU: OpenCL\n");
CALL_SUBTEST(sycl_computing_test_per_device<float>((cl::sycl::gpu_selector())));
printf("repeating the test on CPU: OpenCL\n");
CALL_SUBTEST(sycl_computing_test_per_device<float>((cl::sycl::cpu_selector())));
printf("repeating the test on CPU: HOST\n");
CALL_SUBTEST(sycl_computing_test_per_device<float>((cl::sycl::host_selector())));
printf("Test Passed******************\n" );
}
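For the select test, d = (a > 0.5 ? b : c) is evaluated elementwise on the device; the host reference it is checked against amounts to the following sketch:

// Host-side reference for (gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3):
// each element takes in2 where in1 exceeds the threshold, else in3.
for (int i = 0; i < in1.size(); ++i) {
  float expected = (in1(i) > 0.5f) ? in2(i) : in3(i);
  VERIFY_IS_APPROX(out(i), expected);
}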