From 0ebe3808ca8b2c96d9d77024ba8d4d0bdfb7e23c Mon Sep 17 00:00:00 2001
From: Mehdi Goli
Date: Fri, 4 Nov 2016 18:18:19 +0000
Subject: [PATCH] Removed the sycl include from Eigen/Core and moved it to
 Unsupported/Eigen/CXX11/Tensor; added TensorReduction for sycl (full
 reduction and partial reduction); added TensorReduction test case for sycl
 (full reduction and partial reduction); fixed the tile size on
 TensorSyclRun.h based on the device max work group size;
---
 Eigen/Core                                         |  10 -
 unsupported/Eigen/CXX11/Tensor                     |  15 +-
 .../Eigen/CXX11/src/Tensor/TensorDeviceSycl.h      |  49 ++--
 .../Eigen/CXX11/src/Tensor/TensorEvalTo.h          |   4 +-
 .../src/Tensor/TensorForwardDeclarations.h         |   2 +-
 .../Eigen/CXX11/src/Tensor/TensorReduction.h       |  81 ++++--
 .../CXX11/src/Tensor/TensorReductionSycl.h         | 242 ++++++++++++++++++
 .../Eigen/CXX11/src/Tensor/TensorSycl.h            |  17 +-
 .../TensorSyclConvertToDeviceExpression.h          |  12 +
 .../src/Tensor/TensorSyclExprConstructor.h         |  40 ++-
 .../src/Tensor/TensorSyclExtractAccessor.h         |  73 +++---
 .../src/Tensor/TensorSyclExtractFunctors.h         |  23 ++
 .../CXX11/src/Tensor/TensorSyclLeafCount.h         |  41 +--
 .../CXX11/src/Tensor/TensorSyclPlaceHolder.h       |  99 -------
 .../src/Tensor/TensorSyclPlaceHolderExpr.h         |  31 ++-
 .../Eigen/CXX11/src/Tensor/TensorSyclRun.h         |  17 +-
 unsupported/test/CMakeLists.txt                    |   9 +-
 .../test/cxx11_tensor_reduction_sycl.cpp           | 147 +++++++++++
 18 files changed, 663 insertions(+), 249 deletions(-)
 create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
 delete mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorSyclPlaceHolder.h
 create mode 100644 unsupported/test/cxx11_tensor_reduction_sycl.cpp

diff --git a/Eigen/Core b/Eigen/Core
index 2d2616254..8ce3d4d06 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -14,16 +14,6 @@
 // first thing Eigen does: stop the compiler from committing suicide
 #include "src/Core/util/DisableStupidWarnings.h"
 
-/// This will no longer be needed after the next release of the computecppCE
-#ifdef EIGEN_USE_SYCL
-#undef min
-#undef max
-#undef isnan
-#undef isinf
-#undef isfinite
-#include <SYCL/sycl.hpp>
-#endif
-
 // Handle NVCC/CUDA/SYCL
 #if defined(__CUDACC__) || defined(__SYCL_DEVICE_ONLY__)
   // Do not try asserts on CUDA and SYCL!
diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
index 388976d2e..1cf19d6c1 100644
--- a/unsupported/Eigen/CXX11/Tensor
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -13,6 +13,15 @@
 
 #include "../../../Eigen/Core"
 
+#ifdef EIGEN_USE_SYCL
+#undef min
+#undef max
+#undef isnan
+#undef isinf
+#undef isfinite
+#include <SYCL/sycl.hpp>
+#endif
+
 #include <cstddef>
 #include "../SpecialFunctions"
 
@@ -69,10 +78,6 @@ typedef unsigned __int64 uint64_t;
 #endif
 #endif
 
-#ifdef EIGEN_USE_SYCL
-#include <SYCL/sycl.hpp>
-#endif
-
 #include "src/Tensor/TensorMacros.h"
 #include "src/Tensor/TensorForwardDeclarations.h"
 #include "src/Tensor/TensorMeta.h"
@@ -81,7 +86,6 @@ typedef unsigned __int64 uint64_t;
 #include "src/Tensor/TensorDeviceDefault.h"
 #include "src/Tensor/TensorDeviceThreadPool.h"
 #include "src/Tensor/TensorDeviceCuda.h"
-#include "src/Tensor/TensorSycl.h"
 #include "src/Tensor/TensorDeviceSycl.h"
 #include "src/Tensor/TensorIndexList.h"
 #include "src/Tensor/TensorDimensionList.h"
@@ -128,6 +132,7 @@ typedef unsigned __int64 uint64_t;
 #include "src/Tensor/TensorAssign.h"
 #include "src/Tensor/TensorScan.h"
 
+#include "src/Tensor/TensorSycl.h"
 #include "src/Tensor/TensorExecutor.h"
 #include "src/Tensor/TensorDevice.h"
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
index bfd36f5aa..4231a11ff 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h
@@ -1,12 +1,11 @@
 // This file is part of Eigen, a lightweight C++ template library
 // for linear algebra.
 //
-// Copyright (C) 2016 Benoit Steiner
 // Mehdi Goli    Codeplay Software Ltd.
 // Ralph Potter  Codeplay Software Ltd.
 // Luke Iwanski  Codeplay Software Ltd.
-// Cummins Chris PhD student at The University of Edinburgh.
 // Contact: <eigen@codeplay.com>
+// Copyright (C) 2016 Benoit Steiner
 //
 // This Source Code Form is subject to the terms of the Mozilla
@@ -25,12 +24,8 @@ namespace Eigen {
 template <typename T, bool MapAllocator>
 struct BufferT {
   using Type = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>;
-  static inline void add_sycl_buffer(
-      const T *ptr, size_t num_bytes,
-      std::map<const void *, std::shared_ptr<void>> &buffer_map) {
-    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(
-        ptr, std::shared_ptr<void>(std::make_shared<Type>(
-                 Type(const_cast<T *>(ptr), cl::sycl::range<1>(num_bytes))))));
+  static inline void add_sycl_buffer(const T *ptr, size_t num_bytes, std::map<const void *, std::shared_ptr<void>> &buffer_map) {
+    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(const_cast<T *>(ptr), cl::sycl::range<1>(num_bytes))))));
   }
 };
 
@@ -39,12 +34,8 @@ struct BufferT {
 template <typename T>
 struct BufferT<T, false> {
   using Type = cl::sycl::buffer<T, 1>;
-  static inline void add_sycl_buffer(
-      const T *ptr, size_t num_bytes,
-      std::map<const void *, std::shared_ptr<void>> &buffer_map) {
-    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(
-        ptr, std::shared_ptr<void>(
-                 std::make_shared<Type>(Type(cl::sycl::range<1>(num_bytes))))));
+  static inline void add_sycl_buffer(const T *ptr, size_t num_bytes, std::map<const void *, std::shared_ptr<void>> &buffer_map) {
+    buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(std::make_shared<Type>(Type(cl::sycl::range<1>(num_bytes))))));
   }
 };
 
@@ -78,15 +69,20 @@ struct SyclDevice {
   /// for that particular pointer.
   template <cl::sycl::access::mode AcMd, bool MapAllocator, typename T>
   inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
-  get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh,
-                    const T *ptr) const {
+  get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
+    return (get_sycl_buffer<MapAllocator>(num_bytes, ptr).template get_access<AcMd>(cgh));
+  }
+
+  template <bool MapAllocator, typename T>
+  inline typename BufferT<T, MapAllocator>::Type
+  get_sycl_buffer(size_t num_bytes, const T * ptr) const {
+    if (MapAllocator && !ptr) {
+      eigen_assert("pointer with map_Allocator cannot be null. Please initialise the input pointer");
+    }
     auto it = buffer_map.find(ptr);
     if (it == buffer_map.end()) {
       BufferT<T, MapAllocator>::add_sycl_buffer(ptr, num_bytes, buffer_map);
     }
-    return (
-        ((typename BufferT<T, MapAllocator>::Type *)(buffer_map.at(ptr).get()))
-            ->template get_access<AcMd>(cgh));
+    return (*((typename BufferT<T, MapAllocator>::Type *)((buffer_map.at(ptr).get()))));
   }
 
   /// allocating memory on the cpu
@@ -100,22 +96,21 @@ struct SyclDevice {
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void *buffer) const {
     internal::aligned_free(buffer);
   }
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src,
-                                                    size_t n) const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
     ::memcpy(dst, src, n);
   }
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(
-      void *dst, const void *src, size_t n) const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void *dst, const void *src, size_t n) const {
     memcpy(dst, src, n);
   }
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(
-      void *dst, const void *src, size_t n) const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const void *src, size_t n) const {
    memcpy(dst, src, n);
   }
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c,
-                                                    size_t n) const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
     ::memset(buffer, c, n);
   }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+    return 1;
+  }
 };
 
 }  // end namespace Eigen
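The SyclDevice above lazily wraps each host pointer in a SYCL buffer and memoises it in a type-erased map, so repeated kernel submissions reuse one buffer per pointer. A minimal stand-alone sketch of that caching pattern, with a hypothetical DeviceBuffer type standing in for cl::sycl::buffer so the snippet compiles without a SYCL runtime:

    #include <cstddef>
    #include <map>
    #include <memory>

    struct DeviceBuffer {                  // stand-in for cl::sycl::buffer
      explicit DeviceBuffer(std::size_t n) : size(n) {}
      std::size_t size;
    };

    class BufferCache {
      // Type-erased storage, mirroring std::map<const void*, std::shared_ptr<void>>.
      mutable std::map<const void *, std::shared_ptr<void>> buffer_map;

     public:
      // First call for a pointer creates the buffer; later calls reuse it.
      DeviceBuffer &get(const void *ptr, std::size_t num_bytes) const {
        auto it = buffer_map.find(ptr);
        if (it == buffer_map.end())
          it = buffer_map.emplace(ptr, std::make_shared<DeviceBuffer>(num_bytes)).first;
        return *static_cast<DeviceBuffer *>(it->second.get());
      }
    };

The shared_ptr<void> erasure is what lets buffers of different element types live in one map, exactly as BufferT::add_sycl_buffer does in the patch.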
Please initialise the input pointer"); } auto it = buffer_map.find(ptr); if (it == buffer_map.end()) { BufferT::add_sycl_buffer(ptr, num_bytes, buffer_map); } - return ( - ((typename BufferT::Type *)(buffer_map.at(ptr).get())) - ->template get_access(cgh)); + return (*((typename BufferT::Type*)((buffer_map.at(ptr).get())))); } /// allocating memory on the cpu @@ -100,22 +96,21 @@ struct SyclDevice { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void *buffer) const { internal::aligned_free(buffer); } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, - size_t n) const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const { ::memcpy(dst, src, n); } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice( - void *dst, const void *src, size_t n) const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void *dst, const void *src, size_t n) const { memcpy(dst, src, n); } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost( - void *dst, const void *src, size_t n) const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void *dst, const void *src, size_t n) const { memcpy(dst, src, n); } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, - size_t n) const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const { ::memset(buffer, c, n); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { + return 1; + } }; } // end namespace Eigen diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h index 68d14a7e5..06987132b 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h @@ -47,13 +47,13 @@ struct traits > template class MakePointer_> struct eval, Eigen::Dense> { - typedef const TensorEvalToOp& type; + typedef const TensorEvalToOp& type; }; template class MakePointer_> struct nested, 1, typename eval >::type> { - typedef TensorEvalToOp type; + typedef TensorEvalToOp type; }; } // end namespace internal diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h index 6497b1830..52b803d7f 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h @@ -33,7 +33,7 @@ template class TensorCwiseUnaryOp; template class TensorCwiseBinaryOp; template class TensorCwiseTernaryOp; template class TensorSelectOp; -template class TensorReductionOp; +template class MakePointer_ = MakePointer > class TensorReductionOp; template class TensorIndexTupleOp; template class TensorTupleReducerOp; template class TensorConcatenationOp; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h index d34ff98b0..367bccf63 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h @@ -2,6 +2,7 @@ // for linear algebra. // // Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index d34ff98b0..367bccf63 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -2,6 +2,7 @@
 // for linear algebra.
 //
 // Copyright (C) 2014 Benoit Steiner
+// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd
 //
 // This Source Code Form is subject to the terms of the Mozilla
 // Public License v. 2.0. If a copy of the MPL was not distributed
@@ -20,8 +21,8 @@ namespace Eigen {
   */
 namespace internal {
-template<typename Op, typename Dims, typename XprType>
-struct traits<TensorReductionOp<Op, Dims, XprType> >
+  template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ >
+  struct traits<TensorReductionOp<Op, Dims, XprType, MakePointer_> >
  : traits<XprType>
 {
   typedef traits<XprType> XprTraits;
@@ -31,18 +32,24 @@ struct traits<TensorReductionOp<Op, Dims, XprType, MakePointer_> >
   typedef typename XprType::Nested Nested;
   static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
   static const int Layout = XprTraits::Layout;
+
+  template <class T> struct MakePointer {
+    // Intermediate typedef to workaround MSVC issue.
+    typedef MakePointer_<T> MakePointerT;
+    typedef typename MakePointerT::Type Type;
+  };
 };
 
-template<typename Op, typename Dims, typename XprType>
-struct eval<TensorReductionOp<Op, Dims, XprType>, Eigen::Dense>
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct eval<TensorReductionOp<Op, Dims, XprType, MakePointer_>, Eigen::Dense>
 {
-  typedef const TensorReductionOp<Op, Dims, XprType>& type;
+  typedef const TensorReductionOp<Op, Dims, XprType, MakePointer_>& type;
 };
 
-template<typename Op, typename Dims, typename XprType>
-struct nested<TensorReductionOp<Op, Dims, XprType>, 1, typename eval<TensorReductionOp<Op, Dims, XprType> >::type>
+template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+struct nested<TensorReductionOp<Op, Dims, XprType, MakePointer_>, 1, typename eval<TensorReductionOp<Op, Dims, XprType, MakePointer_> >::type>
 {
-  typedef TensorReductionOp<Op, Dims, XprType> type;
+  typedef TensorReductionOp<Op, Dims, XprType, MakePointer_> type;
 };
 
@@ -339,8 +346,8 @@ __global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*) {
 }  // end namespace internal
 
-template <typename Op, typename Dims, typename XprType>
-class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>, ReadOnlyAccessors> {
+template <typename Op, typename Dims, typename XprType, template <class> class MakePointer_>
+class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType, MakePointer_>, ReadOnlyAccessors> {
   public:
     typedef typename Eigen::internal::traits<TensorReductionOp>::Scalar Scalar;
     typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
@@ -371,18 +378,19 @@ class TensorReductionOp : public TensorBase
 
 // Eval as rvalue
-template<typename Op, typename Dims, typename ArgType, typename Device>
-struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
+template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
 {
-  typedef TensorReductionOp<Op, Dims, ArgType> XprType;
+  typedef TensorReductionOp<Op, Dims, ArgType, MakePointer_> XprType;
   typedef typename XprType::Index Index;
+  typedef ArgType ChildType;
   typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
   static const int NumInputDims = internal::array_size<InputDimensions>::value;
   static const int NumReducedDims = internal::array_size<Dims>::value;
   static const int NumOutputDims = NumInputDims - NumReducedDims;
   typedef typename internal::conditional<NumOutputDims==0, Sizes<>, DSizes<Index, NumOutputDims> >::type Dimensions;
   typedef typename XprType::Scalar Scalar;
-  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device> Self;
+  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Self;
   static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
   typedef typename internal::remove_const<Scalar>::type CoeffReturnType;
   typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
@@ -401,7 +409,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
   static const bool RunningFullReduction = (NumOutputDims==0);
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
-      : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device)
+      : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device), m_xpr_dims(op.dims())
   {
     EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
     EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),
@@ -471,25 +479,35 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(typename MakePointer_<CoeffReturnType>::Type data) {
     m_impl.evalSubExprsIfNeeded(NULL);
 
     // Use the FullReducer if possible.
-    if (RunningFullReduction &&
+    if ((RunningFullReduction && RunningOnSycl) ||(RunningFullReduction &&
         internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
         ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
-         !RunningOnGPU)) {
+         !RunningOnGPU))) {
       bool need_assign = false;
       if (!data) {
         m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
         data = m_result;
         need_assign = true;
       }
-
       Op reducer(m_reducer);
       internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
       return need_assign;
     }
+    else if(RunningOnSycl){
+      const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+      const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+      if (!data) {
+        data = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType) * num_coeffs_to_preserve));
+        m_result = data;
+      }
+      Op reducer(m_reducer);
+      internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
+      return (m_result != NULL);
+    }
 
     // Attempt to use an optimized reduction.
     else if (RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) {
@@ -572,7 +590,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
   {
-    if ((RunningFullReduction || RunningOnGPU) && m_result) {
+    if ((RunningOnSycl || RunningFullReduction || RunningOnGPU) && m_result) {
       return *(m_result + index);
     }
     Op reducer(m_reducer);
@@ -644,7 +662,20 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
     }
   }
 
-  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
+  /// required by sycl in order to extract the output accessor
+#ifndef EIGEN_USE_SYCL
+  EIGEN_DEVICE_FUNC typename MakePointer_<Scalar>::Type data() const { return NULL; }
+#else
+  EIGEN_DEVICE_FUNC typename MakePointer_<CoeffReturnType>::Type data() const {
+    return m_result; }
+#endif
+  /// required by sycl in order to extract the accessor
+  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
+  /// added for sycl in order to construct the buffer from the sycl device
+  const Device& device() const{return m_device;}
+  /// added for sycl in order to re-construct the reduction eval on the device for the sub-kernel
+  const Dims& xprDims() const {return m_xpr_dims;}
+
 
   private:
   template <int, typename, typename> friend struct internal::GenericDimReducer;
@@ -737,12 +768,18 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>
   // For full reductions
 #if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
   static const bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
+  static const bool RunningOnSycl=false;
+#elif defined(EIGEN_USE_SYCL)
+static const bool RunningOnSycl = internal::is_same<typename internal::remove_all<Device>::type, Eigen::SyclDevice>::value;
+static const bool RunningOnGPU = false;
 #else
   static const bool RunningOnGPU = false;
+  static const bool RunningOnSycl=false;
 #endif
 
-  CoeffReturnType* m_result;
+  typename MakePointer_<CoeffReturnType>::Type m_result;
 
   const Device& m_device;
+  const Dims& m_xpr_dims;
 };
 
 } // end namespace Eigen
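For orientation, this is roughly how the new code path is driven from user code, modelled on the added cxx11_tensor_reduction_sycl.cpp test. The selector-based SyclDevice constructor mirrors the test style but may differ slightly in this revision, buffer names are illustrative, and error handling is omitted:

    // Assumes EIGEN_USE_SYCL and a working SYCL runtime.
    #include <unsupported/Eigen/CXX11/Tensor>

    void sum_over_axis1() {
      cl::sycl::gpu_selector s;
      Eigen::SyclDevice sycl_device(s);

      Eigen::array<Eigen::DenseIndex, 3> in_range{{2, 3, 4}};
      Eigen::Tensor<float, 3> in(in_range);
      Eigen::Tensor<float, 2> out(2, 4);        // axis 1 is reduced away
      in.setRandom();

      std::size_t in_bytes  = in.dimensions().TotalSize()  * sizeof(float);
      std::size_t out_bytes = out.dimensions().TotalSize() * sizeof(float);
      float* gpu_in  = static_cast<float*>(sycl_device.allocate(in_bytes));
      float* gpu_out = static_cast<float*>(sycl_device.allocate(out_bytes));
      Eigen::TensorMap<Eigen::Tensor<float, 3>> in_gpu(gpu_in, in_range);
      Eigen::TensorMap<Eigen::Tensor<float, 2>> out_gpu(gpu_out, 2, 4);

      sycl_device.memcpyHostToDevice(gpu_in, in.data(), in_bytes);
      Eigen::array<Eigen::DenseIndex, 1> red_axis{{1}};
      out_gpu.device(sycl_device) = in_gpu.sum(red_axis);  // partial reduction -> InnerReducer
      sycl_device.memcpyDeviceToHost(out.data(), gpu_out, out_bytes);
    }

A full reduction (in_gpu.sum() into a rank-0 tensor) instead takes the FullReducer branch added above.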
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
new file mode 100644
index 000000000..1c89132db
--- /dev/null
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h
@@ -0,0 +1,242 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli    Codeplay Software Ltd.
+// Ralph Potter  Codeplay Software Ltd.
+// Luke Iwanski  Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * TensorReductionSycl.h
+ *
+ * \brief:
+ *  This is the specialisation of the reduction operation (full and
+ *  partial) for the SYCL backend.
+ *
+*****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+
+namespace Eigen {
+namespace internal {
+
+template<typename CoeffReturnType, typename KernelName> struct syclGenericBufferReducer{
+template<typename BufferTOut, typename BufferTIn>
+static void run(BufferTOut& bufOut, BufferTIn& bufI, const Eigen::SyclDevice& dev, size_t length, size_t local){
+  do {
+    auto f = [length, local, &bufOut, &bufI](cl::sycl::handler& h) mutable {
+      cl::sycl::nd_range<1> r{cl::sycl::range<1>{std::max(length, local)},
+                              cl::sycl::range<1>{std::min(length, local)}};
+      /* Two accessors are used: one to the buffer that is being reduced,
+       * and a second to local memory, used to store intermediate data. */
+      auto aI = bufI.template get_access<cl::sycl::access::mode::read_write>(h);
+      auto aOut = bufOut.template get_access<cl::sycl::access::mode::discard_write>(h);
+      cl::sycl::accessor<CoeffReturnType, 1, cl::sycl::access::mode::read_write, cl::sycl::access::target::local>
+          scratch(cl::sycl::range<1>(local), h);
+
+      /* The parallel_for invocation chosen is the variant with an nd_item
+       * parameter, since the code requires barriers for correctness. */
+      h.parallel_for<KernelName>(
+          r, [aOut, aI, scratch, local, length](cl::sycl::nd_item<1> id) {
+            size_t globalid = id.get_global(0);
+            size_t localid = id.get_local(0);
+            /* All threads collectively read from global memory into local.
+             * The barrier ensures all threads' IO is resolved before
+             * execution continues (strictly speaking, all threads within
+             * a single work-group - there is no co-ordination between
+             * work-groups, only work-items). */
+            if (globalid < length) {
+              scratch[localid] = aI[globalid];
+            }
+            id.barrier(cl::sycl::access::fence_space::local_space);
+
+            /* Apply the reduction operation between the current local
+             * id and the one on the other half of the vector. */
+            if (globalid < length) {
+              int min = (length < local) ? length : local;
+              for (size_t offset = min / 2; offset > 0; offset /= 2) {
+                if (localid < offset) {
+                  scratch[localid] += scratch[localid + offset];
+                }
+                id.barrier(cl::sycl::access::fence_space::local_space);
+              }
+              /* The final result will be stored in local id 0. */
+              if (localid == 0) {
+                aI[id.get_group(0)] = scratch[localid];
+                if((length<=local) && globalid ==0){
+                  aOut[globalid]=scratch[localid];
+                }
+              }
+            }
+          });
+    };
+    dev.m_queue.submit(f);
+    dev.m_queue.throw_asynchronous();
+
+    /* At this point, you could queue::wait_and_throw() to ensure that
+     * errors are caught quickly. However, this would likely impact
+     * performance negatively. */
+    length = length / local;
+
+  } while (length > 1);
+
+}
+
+};
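syclGenericBufferReducer re-launches that kernel, shrinking `length` by a factor of `local` per pass until one value remains. A serial C++ model of the same pass structure (sum reduction assumed, as in the kernel; the in-kernel halving loop is collapsed into a plain per-group loop, which yields the same partial sums):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    float tree_reduce(std::vector<float> data, std::size_t local) {
      std::size_t length = data.size();
      do {
        // One partial result per work-group of `local` items.
        std::size_t groups = (length + local - 1) / local;
        for (std::size_t g = 0; g < groups; ++g) {
          float partial = 0.f;
          std::size_t end = std::min(length, (g + 1) * local);
          for (std::size_t i = g * local; i < end; ++i) partial += data[i];
          data[g] = partial;            // mirrors aI[id.get_group(0)] = scratch[0];
        }
        length = groups;                // next pass reduces the partials
      } while (length > 1);
      return data[0];
    }

Because work-groups cannot synchronise with each other, each pass must be a separate kernel submission; that is the "no global barrier" constraint the comments below refer to.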
+template +struct FullReducer { + + typedef typename Self::CoeffReturnType CoeffReturnType; + static const bool HasOptimizedImplementation = false; + + static void run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output) { + typedef const typename Self::ChildType HostExpr; /// this is the child of reduction + typedef typename TensorSycl::internal::createPlaceHolderExpression::Type PlaceHolderExpr; + auto functors = TensorSycl::internal::extractFunctors(self.impl()); + int red_factor =256; /// initial reduction. If the size is less than red_factor we only creates one thread. + size_t inputSize =self.impl().dimensions().TotalSize(); + size_t rng = inputSize/red_factor; // the total number of thread initially is half the size of the input + size_t remaining = inputSize% red_factor; + if(rng ==0) { + red_factor=1; + }; + size_t tileSize =dev.m_queue.get_device(). template get_info()/2; + size_t GRange=std::max((size_t )1, rng); + + // convert global range to power of 2 for redecution + GRange--; + GRange |= GRange >> 1; + GRange |= GRange >> 2; + GRange |= GRange >> 4; + GRange |= GRange >> 8; + GRange |= GRange >> 16; +#if __x86_64__ || __ppc64__ || _WIN64 + GRange |= GRange >> 32; +#endif + GRange++; + size_t outTileSize = tileSize; + /// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one. + if (GRange < outTileSize) outTileSize=GRange; + // getting final out buffer at the moment the created buffer is true because there is no need for assign + auto out_buffer =dev.template get_sycl_buffer::type>(self.dimensions().TotalSize(), output); + /// creating the shared memory for calculating reduction. + /// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can + /// recursively apply reduction on it in order to reduce the whole. + auto temp_global_buffer =cl::sycl::buffer(cl::sycl::range<1>(GRange)); + typedef typename Eigen::internal::remove_all::type Dims; + Dims dims= self.xprDims(); + Op functor = reducer; + dev.m_queue.submit([&](cl::sycl::handler &cgh) { + // create a tuple of accessors from Evaluator + auto tuple_of_accessors = TensorSycl::internal::createTupleOfAccessors(cgh, self.impl()); + auto tmp_global_accessor = temp_global_buffer. template get_access(cgh); + + cgh.parallel_for( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(outTileSize)), [=](cl::sycl::nd_item<1> itemID) { + typedef typename TensorSycl::internal::ConvertToDeviceExpression::Type DevExpr; + auto device_expr = TensorSycl::internal::createDeviceExpression(functors, tuple_of_accessors); + /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour + /// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the + /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here. + const auto device_self_expr= TensorReductionOp(device_expr.expr, dims, functor); + /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is + /// the device_evaluator is detectable and recognisable on the device. 
+template <typename Self, typename Op>
+struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
+
+  typedef typename Self::CoeffReturnType CoeffReturnType;
+  static const bool HasOptimizedImplementation = false;
+
+  static bool run(const Self& self, Op& reducer, const Eigen::SyclDevice& dev, CoeffReturnType* output, typename Self::Index , typename Self::Index num_coeffs_to_preserve) {
+    typedef const typename Self::ChildType HostExpr; /// this is the child of reduction
+    typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
+    auto functors = TensorSycl::internal::extractFunctors(self.impl());
+
+    size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
+
+    size_t GRange=num_coeffs_to_preserve;
+    if (tileSize>GRange) tileSize=GRange;
+    else if(GRange>tileSize){
+      size_t xMode = GRange % tileSize;
+      if (xMode != 0) GRange += (tileSize - xMode);
+    }
+    // getting final out buffer; at the moment the created buffer is true because there is no need for assign
+    /// creating the shared memory for calculating reduction.
+    /// This one is used to collect all the reduced values of shared memory as we don't have a global barrier on GPU. Once it is saved we can
+    /// recursively apply reduction on it in order to reduce the whole.
+    typedef typename Eigen::internal::remove_all<typename Self::Dims>::type Dims;
+    Dims dims= self.xprDims();
+    Op functor = reducer;
+
+    dev.m_queue.submit([&](cl::sycl::handler &cgh) {
+      // create a tuple of accessors from Evaluator
+      auto tuple_of_accessors =  TensorSycl::internal::createTupleOfAccessors(cgh, self.impl());
+      auto output_accessor = dev.template get_sycl_accessor<cl::sycl::access::mode::discard_write, true>(num_coeffs_to_preserve, cgh, output);
+
+      cgh.parallel_for<Self>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+        typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
+        auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
+        /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviours:
+        /// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+        /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator, the former is created here.
+        const auto device_self_expr= TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
+        /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to the run function. The difference is
+        /// the device_evaluator is detectable and recognisable on the device.
+        typedef Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice> DeviceSelf;
+        auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
+        /// const cast added as a naive solution to solve the qualifier drop error
+        auto globalid=itemID.get_global_linear_id();
+        if (globalid< static_cast<size_t>(num_coeffs_to_preserve)) {
+          typename DeviceSelf::CoeffReturnType accum = functor.initialize();
+          GenericDimReducer<DeviceSelf::NumReducedDims-1, DeviceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid), const_cast<Op&>(functor), &accum);
+          functor.finalize(accum);
+          output_accessor.get_pointer()[globalid]= accum;
+        }
+      });
+    });
+    dev.m_queue.throw_asynchronous();
+    return false;
+  }
+};
+
+} // end namespace internal
+} // namespace Eigen
+
+#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
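The InnerReducer maps one work-item to each preserved output coefficient; each item serially folds away the reduced values through GenericDimReducer. A host-side model of that mapping, summing axis 0 of a column-major rows-by-cols matrix, with comments marking where the Op callbacks fire:

    #include <cstddef>
    #include <vector>

    std::vector<float> reduce_axis0(const std::vector<float>& in,
                                    std::size_t rows, std::size_t cols) {
      std::vector<float> out(cols);
      // On the device this outer loop is the parallel_for over globalid.
      for (std::size_t globalid = 0; globalid < cols; ++globalid) {
        float accum = 0.f;                       // functor.initialize()
        for (std::size_t r = 0; r < rows; ++r)
          accum += in[globalid * rows + r];      // functor.reduce(...)
        out[globalid] = accum;                   // functor.finalize(accum)
      }
      return out;
    }

Unlike the full reduction, no inter-group cooperation is needed here, so a single kernel launch suffices; GRange is merely padded up to a multiple of tileSize so every work-group is full.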
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
index da15f7942..bb8800d45 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
@@ -22,6 +22,13 @@ struct MakeGlobalPointer {
   typedef typename cl::sycl::global_ptr<T>::pointer_t Type;
 };
 
+// local pointer to set different attribute state for a class
+template <typename T>
+struct MakeLocalPointer {
+  typedef typename cl::sycl::local_ptr<T>::pointer_t Type;
+};
+
+
 namespace Eigen {
 namespace TensorSycl {
 namespace internal {
@@ -43,9 +50,7 @@ template <bool IsConst, typename T> struct GetType{
 // tuple construction
 #include "TensorSyclTuple.h"
 
-// This file contains the PlaceHolder that replaces the actual data
-#include "TensorSyclPlaceHolder.h"
-
+// counting number of leaf at compile time
 #include "TensorSyclLeafCount.h"
 
 // The index PlaceHolder takes the actual expression and replaces the actual
@@ -57,9 +62,6 @@ template <bool IsConst, typename T> struct GetType{
 // creation of an accessor tuple from a tuple of SYCL buffers
 #include "TensorSyclExtractAccessor.h"
 
-// actual data extraction using accessors
-//#include "GetDeviceData.h"
-
 // this is used to change the address space type in tensor map for GPU
 #include "TensorSyclConvertToDeviceExpression.h"
 
@@ -70,6 +72,9 @@ template <bool IsConst, typename T> struct GetType{
 // this is used to construct the expression on the device
 #include "TensorSyclExprConstructor.h"
 
+/// this is used for extracting tensor reduction
+#include "TensorReductionSycl.h"
+
 // kernel execution using fusion
 #include "TensorSyclRun.h"
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
index a94c30426..8729c86ee 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
@@ -102,6 +102,18 @@ KERNELBROKERCONVERT(, false, TensorForcedEvalOp)
 KERNELBROKERCONVERT(const, true, TensorEvalToOp)
 KERNELBROKERCONVERT(, false, TensorEvalToOp)
 #undef KERNELBROKERCONVERT
+
+/// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorReductionOp
+#define KERNELBROKERCONVERTREDUCTION(CVQual)\
+template <typename OP, typename Dim, typename subExpr, template <class> class MakePointer_>\
+struct ConvertToDeviceExpression<CVQual TensorReductionOp<OP, Dim, subExpr, MakePointer_> > {\
+  typedef CVQual TensorReductionOp<OP, Dim, typename ConvertToDeviceExpression<subExpr>::Type, MakeGlobalPointer> Type;\
+};
+
+KERNELBROKERCONVERTREDUCTION(const)
+KERNELBROKERCONVERTREDUCTION()
+#undef KERNELBROKERCONVERTREDUCTION
+
 }  // namespace internal
 }  // namespace TensorSycl
 }  // namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
index 833d5e271..7ed3a3a56 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExprConstructor.h
@@ -33,8 +33,7 @@ struct EvalToLHSConstructor {
   EvalToLHSConstructor(const utility::tuple::Tuple<Params...> &t): expr((&(*(utility::tuple::get<N>(t).get_pointer())))) {}
 };
 
-/// \struct ExprConstructor is used to reconstruct the expression on the device
-/// and
+/// \struct ExprConstructor is used to reconstruct the expression on the device and
 /// recreate the expression with MakeGlobalPointer containing the device address
 /// space for the TensorMap pointers used in eval function.
 /// It receives the original expression type, the functor of the node, the tuple
@@ -49,7 +48,7 @@ struct ExprConstructor;
 template <typename Scalar_, int Options_, int Options2_, int Options3_, int NumIndices_, typename IndexType_,
           template <class> class MakePointer_, size_t N, typename... Params>\
 struct ExprConstructor< CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer>,\
-CVQual Eigen::internal::PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
+CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
   typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\
   Type expr;\
   template <typename FuncDetector>\
@@ -187,7 +186,7 @@ EVALTO()
 #define FORCEDEVAL(CVQual)\
 template <typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
 struct ExprConstructor<CVQual TensorForcedEvalOp<OrigExpr, MakeGlobalPointer>,\
-CVQual Eigen::internal::PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
+CVQual PlaceHolder<CVQual TensorForcedEvalOp<DevExpr>, N>, Params...> {\
   typedef CVQual TensorMap<Tensor<typename TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::Scalar,\
   TensorForcedEvalOp<DevExpr, MakeGlobalPointer>::NumDimensions, 0, typename TensorForcedEvalOp<DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
   Type expr;\
@@ -200,14 +199,41 @@ FORCEDEVAL(const)
 FORCEDEVAL()
 #undef FORCEDEVAL
 
+template <bool Conds, size_t X, size_t Y> struct ValueCondition {
+  static const size_t Res =X;
+};
+template <size_t X, size_t Y> struct ValueCondition<false, X, Y> {
+  static const size_t Res =Y;
+};
+
+/// specialisation of the \ref ExprConstructor struct when the node type is TensorReductionOp
+#define SYCLREDUCTIONEXPR(CVQual)\
+template <typename OP, typename Dim, typename OrigExpr, typename DevExpr, size_t N, typename... Params>\
+struct ExprConstructor<CVQual TensorReductionOp<OP, Dim, OrigExpr, MakeGlobalPointer>,\
+CVQual PlaceHolder<CVQual TensorReductionOp<OP, Dim, DevExpr>, N>, Params...> {\
+  static const size_t NumIndices= ValueCondition< TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions==0, 1, TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::NumDimensions >::Res;\
+  typedef CVQual TensorMap<Tensor<typename TensorReductionOp<OP, Dim, DevExpr, MakeGlobalPointer>::Scalar,\
+  NumIndices, 0, typename TensorReductionOp<OP, Dim, DevExpr>::Index>, 0, MakeGlobalPointer> Type;\
+  Type expr;\
+  template <typename FuncDetector>\
+  ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
+  : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
+};
+
+SYCLREDUCTIONEXPR(const)
+SYCLREDUCTIONEXPR()
+#undef SYCLREDUCTIONEXPR
+
 /// template deduction for \ref ExprConstructor struct
 template <typename OrigExpr, typename IndexExpr, typename FuncD, typename... Params>
 auto createDeviceExpression(FuncD &funcD, const utility::tuple::Tuple<Params...> &t)
     -> decltype(ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t)) {
   return ExprConstructor<OrigExpr, IndexExpr, Params...>(funcD, t);
 }
-}
-}
-} // namespace Eigen
+
+} /// namespace TensorSycl
+} /// namespace internal
+} /// namespace Eigen
+
 #endif  // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXPR_CONSTRUCTOR_HPP
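ValueCondition above is a small compile-time size_t select. It exists because a rank-0 (full) reduction result still has to be viewed through a rank-1 TensorMap on the device, so NumDimensions==0 must map to 1. Equivalent stand-alone form:

    #include <cstddef>

    template <bool Conds, std::size_t X, std::size_t Y> struct ValueCondition {
      static const std::size_t Res = X;
    };
    template <std::size_t X, std::size_t Y> struct ValueCondition<false, X, Y> {
      static const std::size_t Res = Y;
    };

    static_assert(ValueCondition<true, 1, 7>::Res == 1, "picks X when true");
    static_assert(ValueCondition<false, 1, 7>::Res == 7, "picks Y when false");

In C++11 this predates std::conditional being usable for non-type values without wrapping them in integral_constant, which is presumably why a bespoke trait is used.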
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
index ceec528ea..3af5f8cfc 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractAccessor.h
@@ -56,10 +56,10 @@ struct AccessorConstructor{
   -> decltype(utility::tuple::append(ExtractAccessor<TensorEvaluator<Arg1, Dev> >::getTuple(cgh, eval1), utility::tuple::append(ExtractAccessor<TensorEvaluator<Arg2, Dev> >::getTuple(cgh, eval2), ExtractAccessor<TensorEvaluator<Arg3, Dev> >::getTuple(cgh, eval3)))) {
     return utility::tuple::append(ExtractAccessor<TensorEvaluator<Arg1, Dev> >::getTuple(cgh, eval1), utility::tuple::append(ExtractAccessor<TensorEvaluator<Arg2, Dev> >::getTuple(cgh, eval2), ExtractAccessor<TensorEvaluator<Arg3, Dev> >::getTuple(cgh, eval3)));
   }
-  template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
-  -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM, true, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()))){
-    return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, true, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh,eval.data()));
+  template< cl::sycl::access::mode AcM, typename Arg> static inline auto getAccessor(cl::sycl::handler& cgh, Arg eval)
+  -> decltype(utility::tuple::make_tuple( eval.device().template get_sycl_accessor<AcM, true, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh, eval.data()))){
+    return utility::tuple::make_tuple(eval.device().template get_sycl_accessor<AcM, true, typename Eigen::internal::remove_all<typename Arg::CoeffReturnType>::type>(eval.dimensions().TotalSize(), cgh, eval.data()));
   }
 };
 
@@ -73,14 +73,12 @@ struct ExtractAccessor<TensorEvaluator<const TensorMap<PlainObjectType, Options_, MakePointer_>, Dev> >
   }
 };
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorCwiseNullaryOp,  TensorCwiseUnaryOp and  TensorBroadcastingOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseNullaryOp, TensorCwiseUnaryOp and TensorBroadcastingOp
 template <template<class, class> class UnaryCategory, typename OP, typename RHSExpr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<UnaryCategory<OP, RHSExpr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const UnaryCategory<OP, RHSExpr>, Dev> > {};
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorCwiseBinaryOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorCwiseBinaryOp
 template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> > {
   static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> eval)
@@ -88,9 +86,7 @@ struct ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> > {
   }
 };
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorCwiseBinaryOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseBinaryOp
 template <template<class, class, class> class BinaryCategory, typename OP, typename LHSExpr, typename RHSExpr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const BinaryCategory<OP, LHSExpr, RHSExpr>, Dev> >{};
@@ -105,8 +101,7 @@ struct ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> > {
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorCwiseTernaryOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorCwiseTernaryOp
 template <template<class, class, class, class> class TernaryCategory, typename OP, typename Arg1Expr, typename Arg2Expr, typename Arg3Expr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const TernaryCategory<OP, Arg1Expr, Arg2Expr, Arg3Expr>, Dev> >{};
@@ -127,8 +122,7 @@ template <typename IfExpr, typename ThenExpr, typename ElseExpr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const TensorSelectOp<IfExpr, ThenExpr, ElseExpr>, Dev> >{};
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorAssignOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorAssignOp
 template <typename LHSExpr, typename RHSExpr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> > {
   static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> eval)
@@ -137,65 +131,74 @@ struct ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> > {
   }
 };
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorAssignOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorAssignOp
 template <typename LHSExpr, typename RHSExpr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<TensorAssignOp<LHSExpr, RHSExpr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const TensorAssignOp<LHSExpr, RHSExpr>, Dev> >{};
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorMap
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorMap
 #define TENSORMAPEXPR(CVQual, ACCType)\
 template <typename PlainObjectType, int Options_, template <class> class MakePointer_, typename Dev>\
 struct ExtractAccessor<TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_, MakePointer_>, Dev> > {\
   static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<CVQual TensorMap<PlainObjectType, Options_, MakePointer_>, Dev> eval)\
-  -> decltype(AccessorConstructor::template getAccessor<ACCType>(cgh, eval)){\
-    return AccessorConstructor::template getAccessor<ACCType>(cgh, eval);\
+  -> decltype(AccessorConstructor::template getAccessor<ACCType>(cgh, eval)){\
+    return AccessorConstructor::template getAccessor<ACCType>(cgh, eval);\
   }\
 };
 TENSORMAPEXPR(const, cl::sycl::access::mode::read)
 TENSORMAPEXPR(, cl::sycl::access::mode::read_write)
 #undef TENSORMAPEXPR
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorForcedEvalOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorForcedEvalOp
 template <typename Expr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> > {
   static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> eval)
-  -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
-    return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
+  -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
+    return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
   }
 };
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorForcedEvalOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorForcedEvalOp
 template <typename Expr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<TensorForcedEvalOp<Expr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const TensorForcedEvalOp<Expr>, Dev> >{};
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// const TensorEvalToOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorEvalToOp
 template <typename Expr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> > {
   static inline auto getTuple(cl::sycl::handler& cgh,const TensorEvaluator<const TensorEvalToOp<Expr>, Dev> eval)
-  -> decltype(utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){
-    return utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()));
+  -> decltype(utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()))){
+    return utility::tuple::append(AccessorConstructor::template getAccessor<cl::sycl::access::mode::write>(cgh, eval), AccessorConstructor::getTuple(cgh, eval.impl()));
   }
 };
 
-/// specialisation of the \ref ExtractAccessor struct when the node type is
-/// TensorEvalToOp
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorEvalToOp
 template <typename Expr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<TensorEvalToOp<Expr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const TensorEvalToOp<Expr>, Dev> >{};
 
+/// specialisation of the \ref ExtractAccessor struct when the node type is const TensorReductionOp
+template <typename OP, typename Dim, typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> > {
+  static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> eval)
+  -> decltype(AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval)){
+    return AccessorConstructor::template getAccessor<cl::sycl::access::mode::read>(cgh, eval);
+  }
+};
+
+/// specialisation of the \ref ExtractAccessor struct when the node type is TensorReductionOp
+template <typename OP, typename Dim, typename Expr, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorReductionOp<OP, Dim, Expr>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> >{};
+
 /// template deduction for \ref ExtractAccessor
 template <typename Evaluator>
 auto createTupleOfAccessors(cl::sycl::handler& cgh, const Evaluator& expr)
 -> decltype(ExtractAccessor<Evaluator>::getTuple(cgh, expr)) {
   return ExtractAccessor<Evaluator>::getTuple(cgh, expr);
 }
-}
-}
-}
+
+} /// namespace TensorSycl
+} /// namespace internal
+} /// namespace Eigen
 #endif  // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_EXTRACT_ACCESSOR_HPP
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
index 801b4f5d7..427125343 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclExtractFunctors.h
@@ -141,7 +141,30 @@ template <typename RHSExpr, typename Dev>
 struct FunctorExtractor<TensorEvaluator<TensorEvalToOp<RHSExpr>, Dev> >
 : FunctorExtractor<TensorEvaluator<const TensorEvalToOp<RHSExpr>, Dev> > {};
 
+template<typename Dim, size_t NumOutputDim> struct DimConstr {
+template<typename InDim>
+  static inline Dim getDim(InDim dims ) {return dims;}
+};
+
+template<typename Dim> struct DimConstr<Dim, 0> {
+  template<typename InDim>
+  static inline Dim getDim(InDim dims ) {return Dim(dims.TotalSize());}
+};
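DimConstr dispatches on the number of output dimensions so that a full reduction hands the kernel a flattened 1-D size while partial reductions keep their real output shape. A stand-alone model of the same dispatch, where FixedDims and FlatDims are hypothetical stand-ins for Eigen's DSizes:

    #include <cstddef>

    struct FixedDims {                 // stand-in for a rank-2 DSizes
      std::size_t d0, d1;
      std::size_t TotalSize() const { return d0 * d1; }
    };
    struct FlatDims {                  // rank-1 replacement used for full reductions
      std::size_t n;
      explicit FlatDims(std::size_t s) : n(s) {}
    };

    template <typename Dim, std::size_t NumOutputDim> struct DimConstr {
      template <typename InDim> static Dim getDim(InDim dims) { return dims; }
    };
    template <typename Dim> struct DimConstr<Dim, 0> {   // full reduction: flatten
      template <typename InDim> static Dim getDim(InDim dims) {
        return Dim(dims.TotalSize());
      }
    };

    // DimConstr<FixedDims, 2>::getDim(FixedDims{3, 4}) keeps {3, 4};
    // DimConstr<FlatDims, 0>::getDim(FixedDims{3, 4})  yields FlatDims(12).

This is what allows the rank-0 result of a full reduction to be addressed as a one-element buffer inside the kernel.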
+
+template <typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{
+  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device> Evaluator;
+  typedef typename Eigen::internal::conditional<Evaluator::NumOutputDims==0, DSizes<typename Evaluator::Index, 1>, typename Evaluator::Dimensions >::type Dimensions;
+  const Dimensions m_dimensions;
+  const Dimensions& dimensions() const { return m_dimensions; }
+  FunctorExtractor(const TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>& expr)
+  : m_dimensions(DimConstr<Dimensions, Evaluator::NumOutputDims>::getDim(expr.dimensions())) {}
+};
+
+
+template <typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
+struct FunctorExtractor<TensorEvaluator<TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>
+: FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{};
 /// template deduction function for FunctorExtractor
 template <typename Evaluator>
 auto inline extractFunctors(const Evaluator& evaluator)-> FunctorExtractor<Evaluator> {
   return FunctorExtractor<Evaluator>();
 }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
index 8d520d2da..25d1fac9b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorSyclLeafCount.h
@@ -43,8 +43,7 @@ struct CategoryCount{
   static const size_t Count = LeafCount<Arg>::Count + CategoryCount<Args...>::Count;
 };
 
-/// specialisation of the \ref LeafCount struct when the node type is const
-/// TensorMap
+/// specialisation of the \ref LeafCount struct when the node type is const TensorMap
 template <typename PlainObjectType, int Options_, template <class> class MakePointer_>
 struct LeafCount<const TensorMap<PlainObjectType, Options_, MakePointer_> > {
   static const size_t Count =1;
@@ -61,18 +60,15 @@ struct LeafCount<TensorMap<PlainObjectType, Options_, MakePointer_> >: CategoryCount
 template