Adding TensorFixedSize; adding SYCL device memcpy; adding initial stage of slicing.
This commit is contained in:
parent a5c3f15682
commit f8ca893976
@@ -123,9 +123,45 @@ struct SyclDevice {
   // some runtime conditions that can be applied here
   EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; }
 
+  template <typename T> EIGEN_STRONG_INLINE std::map<const void *, std::shared_ptr<void>>::iterator find_nearest(const T* ptr) const {
+    auto it1 = buffer_map.find(ptr);
+    if (it1 != buffer_map.end()) {
+      return it1;
+    } else {
+      for (std::map<const void *, std::shared_ptr<void>>::iterator it = buffer_map.begin(); it != buffer_map.end(); ++it) {
+        auto size = ((cl::sycl::buffer<T, 1>*)it->second.get())->get_size();
+        if ((static_cast<const T*>(it->first) < ptr) && (ptr < (static_cast<const T*>(it->first)) + size)) return it;
+      }
+    }
+    return buffer_map.end();
+  }
+
   /// the memcpy function
-  EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
-    ::memcpy(dst, src, n);
+  template<typename T> EIGEN_STRONG_INLINE void memcpy(void *dst, const T *src, size_t n) const {
+    auto it1 = find_nearest(src);
+    auto it2 = find_nearest(static_cast<T*>(dst));
+    if ((it1 != buffer_map.end()) && (it2 != buffer_map.end())) {
+      auto offset = src - static_cast<const T*>(it1->first);
+      auto i = static_cast<T*>(dst) - const_cast<T*>(static_cast<const T*>(it2->first));
+      size_t rng, GRange, tileSize;
+      parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+      m_queue.submit([&](cl::sycl::handler &cgh) {
+        auto src_acc = ((cl::sycl::buffer<T, 1>*)it1->second.get())->template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+        auto dst_acc = ((cl::sycl::buffer<T, 1>*)it2->second.get())->template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+        typedef decltype(src_acc) DevToDev;
+        cgh.parallel_for<DevToDev>(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+          auto globalid = itemID.get_global_linear_id();
+          if (globalid < rng) {
+            dst_acc[globalid + i] = src_acc[globalid + offset];
+          }
+        });
+      });
+      m_queue.throw_asynchronous();
+    } else {
+      eigen_assert(false && "no source or destination device memory found.");
+    }
+    //::memcpy(dst, src, n);
   }
 
   /// The memcpyHostToDevice is used to copy data from a host pointer to a device-only pointer. Using the device
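A note on the lookup: buffer_map.find(ptr) only succeeds when ptr is the base address of an allocation, so the fallback scans every registered buffer for a range [base, base + size) that contains ptr. A minimal host-only sketch of the same interior-pointer lookup over a plain std::map (the names here are illustrative, not Eigen API):

    #include <cstddef>
    #include <map>

    // Given a map from allocation base addresses to sizes in bytes, find the
    // allocation containing `ptr`, even when `ptr` points into its interior.
    std::map<const char*, std::size_t>::const_iterator
    find_owning_allocation(const std::map<const char*, std::size_t>& allocs,
                           const char* ptr) {
      auto it = allocs.find(ptr);            // fast path: ptr is a base address
      if (it != allocs.end()) return it;
      for (it = allocs.begin(); it != allocs.end(); ++it) {
        // slow path: does [base, base + size) contain ptr?
        if (it->first < ptr && ptr < it->first + it->second) return it;
      }
      return allocs.end();
    }

Since std::map keeps keys ordered by address, an upper_bound probe could replace the linear scan; the sketch keeps the scan to mirror the code above.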
@@ -136,7 +172,7 @@ struct SyclDevice {
   template<typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
 
     auto host_acc = get_sycl_buffer(n, dst)->template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
-    memcpy(host_acc.get_pointer(), src, n);
+    ::memcpy(host_acc.get_pointer(), src, n);
   }
   /// The memcpyDeviceToHost is used to copy the data from device to host. Here, in order to avoid double copying the data, we create a sycl
   /// buffer with map_allocator for the destination pointer with a discard_write accessor on it. The lifespan of the buffer is bound to the
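A hedged usage sketch of the host-to-device path (the function name and sizes are illustrative; allocate, memcpyHostToDevice, and deallocate are the SyclDevice calls exercised by the tests further below):

    #define EIGEN_USE_SYCL
    #include <vector>
    #include <unsupported/Eigen/CXX11/Tensor>

    // Illustrative only: copy 100 floats from host memory into a device
    // allocation obtained from SyclDevice::allocate().
    void copy_to_device_example(const Eigen::SyclDevice& sycl_device) {
      std::vector<float> host_src(100, 1.0f);
      float* device_dst = static_cast<float*>(sycl_device.allocate(100 * sizeof(float)));
      sycl_device.memcpyHostToDevice(device_dst, host_src.data(), 100 * sizeof(float));
      sycl_device.deallocate(device_dst);
    }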
@@ -145,11 +181,12 @@ struct SyclDevice {
   /// would be available on the dst pointer using fast copy technique (map_allocator). In this case we can make sure that we copy the data back
   /// to the cpu only once per function call.
   template<typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
-    auto it = buffer_map.find(src);
+    auto it = find_nearest(src);
+    auto offset = src - (static_cast<const T*>(it->first));
     if (it != buffer_map.end()) {
       size_t rng, GRange, tileSize;
       parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+      // Assuming that the dst is the start of the destination pointer
       auto dest_buf = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>(dst, cl::sycl::range<1>(rng));
       typedef decltype(dest_buf) SYCLDTOH;
       m_queue.submit([&](cl::sycl::handler &cgh) {
@@ -158,7 +195,7 @@ struct SyclDevice {
         cgh.parallel_for<SYCLDTOH>(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
           auto globalid = itemID.get_global_linear_id();
           if (globalid < dst_acc.get_size()) {
-            dst_acc[globalid] = src_acc[globalid];
+            dst_acc[globalid] = src_acc[globalid + offset];
           }
         });
       });
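Because memcpyDeviceToHost now resolves src through find_nearest and derives an element offset, the source may point into the middle of a device allocation. A sketch under that assumption (gpu_data is assumed to be a prior SyclDevice::allocate result covering at least 15 floats):

    // Illustrative only: read back 10 floats starting 5 elements into the
    // allocation; find_nearest maps gpu_data + 5 to its owning buffer and the
    // computed offset selects the right elements.
    void copy_from_device_example(const Eigen::SyclDevice& sycl_device, float* gpu_data) {
      std::vector<float> host_dst(10);
      sycl_device.memcpyDeviceToHost(host_dst.data(), gpu_data + 5, 10 * sizeof(float));
    }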
@@ -299,6 +299,16 @@ template <typename Index> struct MemcpyTriggerForSlicing<Index, GpuDevice> {
   EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > 4*1024*1024; }
 };
 #endif
+
+// It is very expensive to start the memcpy kernel on GPU: we therefore only
+// use it for large copies.
+#ifdef EIGEN_USE_SYCL
+template <typename Index> struct MemcpyTriggerForSlicing<Index, const Eigen::SyclDevice> {
+  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const SyclDevice&) { }
+  EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > 4*1024*1024; }
+};
+#endif
+
 }
 
 // Eval as rvalue
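Like the CUDA trigger it mirrors, the SYCL specialisation selects the memcpy path only when the contiguous run the evaluator passes in exceeds 4*1024*1024 (the same 4M threshold as the CUDA path), since launching a device kernel for a small copy costs more than it saves. A hedged sketch of the check (the struct lives in an unnamed namespace inside TensorMorphing.h, so this is pseudo-usage from that context, and contiguous_size is hypothetical):

    MemcpyTriggerForSlicing<ptrdiff_t, const Eigen::SyclDevice> trigger(sycl_device);
    const ptrdiff_t contiguous_size = 8 * 1024 * 1024;  // length of one contiguous run
    bool use_device_memcpy = trigger(contiguous_size);  // true: 8M > 4M threshold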
@@ -493,7 +503,14 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
     }
     return NULL;
   }
+  /// used by sycl
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const {
+    return m_impl;
+  }
+  /// used by sycl
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const StartIndices& startIndices() const {
+    return m_offsets;
+  }
 protected:
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
   {
@@ -48,9 +48,9 @@ struct DeviceConvertor{
 /// specialisation of the \ref ConvertToDeviceExpression struct when the node
 /// type is TensorMap
 #define TENSORMAPCONVERT(CVQual)\
-template <typename Scalar_, int Options_, int Options2_, int NumIndices_, typename IndexType_, template <class> class MakePointer_>\
-struct ConvertToDeviceExpression<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_> > {\
-  typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\
+template <typename T, int Options2_, template <class> class MakePointer_>\
+struct ConvertToDeviceExpression<CVQual TensorMap<T, Options2_, MakePointer_> > {\
+  typedef CVQual TensorMap<T, Options2_, MakeGlobalPointer> Type;\
 };
 
 TENSORMAPCONVERT(const)
@@ -114,6 +114,16 @@ KERNELBROKERCONVERTREDUCTION(const)
 KERNELBROKERCONVERTREDUCTION()
 #undef KERNELBROKERCONVERTREDUCTION
 
+#define KERNELBROKERCONVERTSLICEOP(CVQual)\
+template<typename StartIndices, typename Sizes, typename XprType>\
+struct ConvertToDeviceExpression<CVQual TensorSlicingOp<StartIndices, Sizes, XprType> >{\
+  typedef CVQual TensorSlicingOp<StartIndices, Sizes, typename ConvertToDeviceExpression<XprType>::Type> Type;\
+};
+
+KERNELBROKERCONVERTSLICEOP(const)
+KERNELBROKERCONVERTSLICEOP()
+#undef KERNELBROKERCONVERTSLICEOP
+
 } // namespace internal
 } // namespace TensorSycl
 } // namespace Eigen
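For readability, KERNELBROKERCONVERTSLICEOP(const) expands to the specialisation below; the KERNELBROKERCONVERTSLICEOP() instantiation produces the same struct without the const qualifier:

    template<typename StartIndices, typename Sizes, typename XprType>
    struct ConvertToDeviceExpression<const TensorSlicingOp<StartIndices, Sizes, XprType> >{
      typedef const TensorSlicingOp<StartIndices, Sizes, typename ConvertToDeviceExpression<XprType>::Type> Type;
    };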
@@ -45,17 +45,18 @@ struct ExprConstructor;
 /// specialisation of the \ref ExprConstructor struct when the node type is
 /// TensorMap
 #define TENSORMAP(CVQual)\
-template <typename Scalar_, int Options_, int Options2_, int Options3_, int NumIndices_, typename IndexType_,\
+template <typename T, int Options2_, int Options3_,\
          template <class> class MakePointer_, size_t N, typename... Params>\
-struct ExprConstructor< CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer>,\
-CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options3_, MakePointer_>, N>, Params...>{\
-  typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\
+struct ExprConstructor< CVQual TensorMap<T, Options2_, MakeGlobalPointer>,\
+CVQual PlaceHolder<CVQual TensorMap<T, Options3_, MakePointer_>, N>, Params...>{\
+  typedef CVQual TensorMap<T, Options2_, MakeGlobalPointer> Type;\
   Type expr;\
   template <typename FuncDetector>\
   ExprConstructor(FuncDetector &fd, const utility::tuple::Tuple<Params...> &t)\
   : expr(Type((&(*(utility::tuple::get<N>(t).get_pointer()))), fd.dimensions())) {}\
 };
 
 
 TENSORMAP(const)
 TENSORMAP()
 #undef TENSORMAP
@@ -224,6 +225,25 @@ SYCLREDUCTIONEXPR(const)
 SYCLREDUCTIONEXPR()
 #undef SYCLREDUCTIONEXPR
 
+
+#define SYCLSLICEOPEXPR(CVQual)\
+template<typename StartIndices, typename Sizes, typename OrigXprType, typename XprType, typename... Params>\
+struct ExprConstructor<CVQual TensorSlicingOp<StartIndices, Sizes, OrigXprType>, CVQual TensorSlicingOp<StartIndices, Sizes, XprType>, Params... >{\
+  typedef ExprConstructor<OrigXprType, XprType, Params...> my_xpr_type;\
+  typedef CVQual TensorSlicingOp<StartIndices, Sizes, typename my_xpr_type::Type> Type;\
+  my_xpr_type xprExpr;\
+  Type expr;\
+  template <typename FuncDetector>\
+  ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)\
+  : xprExpr(funcD.xprExpr, t), expr(xprExpr.expr, funcD.startIndices(), funcD.dimensions()) {}\
+};
+
+SYCLSLICEOPEXPR(const)
+SYCLSLICEOPEXPR()
+#undef SYCLSLICEOPEXPR
+
 /// template deduction for \ref ExprConstructor struct
 template <typename OrigExpr, typename IndexExpr, typename FuncD, typename... Params>
 auto createDeviceExpression(FuncD &funcD, const utility::tuple::Tuple<Params...> &t)
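This constructor is recursive: xprExpr first rebuilds the child expression from the functor detector and the accessor tuple, and expr then reconstructs the TensorSlicingOp on top of it from the stored start indices and dimensions. Expanding SYCLSLICEOPEXPR(const) gives:

    template<typename StartIndices, typename Sizes, typename OrigXprType, typename XprType, typename... Params>
    struct ExprConstructor<const TensorSlicingOp<StartIndices, Sizes, OrigXprType>, const TensorSlicingOp<StartIndices, Sizes, XprType>, Params... >{
      typedef ExprConstructor<OrigXprType, XprType, Params...> my_xpr_type;
      typedef const TensorSlicingOp<StartIndices, Sizes, typename my_xpr_type::Type> Type;
      my_xpr_type xprExpr;
      Type expr;
      template <typename FuncDetector>
      ExprConstructor(FuncDetector &funcD, const utility::tuple::Tuple<Params...> &t)
      : xprExpr(funcD.xprExpr, t), expr(xprExpr.expr, funcD.startIndices(), funcD.dimensions()) {}
    };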
@@ -191,6 +191,20 @@ template <typename OP, typename Dim, typename Expr, typename Dev>
 struct ExtractAccessor<TensorEvaluator<TensorReductionOp<OP, Dim, Expr>, Dev> >
 : ExtractAccessor<TensorEvaluator<const TensorReductionOp<OP, Dim, Expr>, Dev> >{};
 
+/// specialisation of the \ref ExtractAccessor struct when the node type is
+/// const TensorSlicingOp. This is a special case where there is no OP
+template <typename StartIndices, typename Sizes, typename XprType, typename Dev>
+struct ExtractAccessor<TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, XprType>, Dev> > {
+  static inline auto getTuple(cl::sycl::handler& cgh, const TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, XprType>, Dev> eval)
+  -> decltype(AccessorConstructor::getTuple(cgh, eval.impl())) {
+    return AccessorConstructor::getTuple(cgh, eval.impl());
+  }
+};
+
+template <typename StartIndices, typename Sizes, typename XprType, typename Dev>
+struct ExtractAccessor<TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, XprType>, Dev> >
+: ExtractAccessor<TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, XprType>, Dev> >{};
+
 /// template deduction for \ref ExtractAccessor
 template <typename Evaluator>
 auto createTupleOfAccessors(cl::sycl::handler& cgh, const Evaluator& expr)
@@ -165,6 +165,23 @@ struct FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>
 template<typename Op, typename Dims, typename ArgType, template <class> class MakePointer_, typename Device>
 struct FunctorExtractor<TensorEvaluator<TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>
 : FunctorExtractor<TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>, Device>>{};
 
+/// specialisation of the \ref FunctorExtractor struct when the node type is
+/// const TensorSlicingOp. This is a specialisation without OP, so it has to be separated.
+template <typename StartIndices, typename Sizes, typename XprType, typename Dev>
+struct FunctorExtractor<TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, XprType>, Dev> > {
+  FunctorExtractor<TensorEvaluator<XprType, Dev> > xprExpr;
+  const StartIndices m_offsets;
+  const Sizes m_dimensions;
+  FunctorExtractor(const TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, XprType>, Dev>& expr)
+  : xprExpr(expr.impl()), m_offsets(expr.startIndices()), m_dimensions(expr.dimensions()) {}
+  EIGEN_STRONG_INLINE const StartIndices& startIndices() const { return m_offsets; }
+  EIGEN_STRONG_INLINE const Sizes& dimensions() const { return m_dimensions; }
+};
+
+template <typename StartIndices, typename Sizes, typename XprType, typename Dev>
+struct FunctorExtractor<TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, XprType>, Dev> >
+: FunctorExtractor<TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, XprType>, Dev> > {};
+
 /// template deduction function for FunctorExtractor
 template <typename Evaluator>
 auto inline extractFunctors(const Evaluator& evaluator)-> FunctorExtractor<Evaluator> {
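On the host, this extractor is reached through extractFunctors; a hedged sketch, assuming eval is the TensorEvaluator of some TensorSlicingOp expression on a SyclDevice:

    // Illustrative only: the extracted functor is a device-copyable record of
    // the slice: the child functor plus the start indices and sizes captured above.
    auto funcs = extractFunctors(eval);
    // funcs.xprExpr holds the child functor; funcs.startIndices() and
    // funcs.dimensions() feed the device-side ExprConstructor.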
@@ -103,6 +103,15 @@ struct LeafCount<const TensorReductionOp<OP, Dim, Expr> > {
 template <typename OP, typename Dim, typename Expr>
 struct LeafCount<TensorReductionOp<OP, Dim, Expr> >: LeafCount<const TensorReductionOp<OP, Dim, Expr> >{};
 
+/// specialisation of the \ref LeafCount struct when the node type is const TensorSlicingOp
+template <typename StartIndices, typename Sizes, typename XprType>
+struct LeafCount<const TensorSlicingOp<StartIndices, Sizes, XprType> >: CategoryCount<XprType>{};
+
+/// specialisation of the \ref LeafCount struct when the node type is TensorSlicingOp
+template <typename StartIndices, typename Sizes, typename XprType>
+struct LeafCount<TensorSlicingOp<StartIndices, Sizes, XprType> >
+: LeafCount<const TensorSlicingOp<StartIndices, Sizes, XprType> >{};
+
 /// specialisation of the \ref LeafCount struct when the node type is TensorEvalToOp
 template <typename Expr>
 struct LeafCount<TensorEvalToOp<Expr> >: LeafCount<const TensorEvalToOp<Expr> >{};
@@ -122,9 +122,9 @@ ASSIGNEXPR()
 /// specialisation of the \ref PlaceHolderExpression when the node is
 /// TensorMap
 #define TENSORMAPEXPR(CVQual)\
-template <typename Scalar_, int Options_, int Options2_, int NumIndices_, typename IndexType_, template <class> class MakePointer_, size_t N>\
-struct PlaceHolderExpression< CVQual TensorMap< Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> {\
-  typedef CVQual PlaceHolder<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_>, N> Type;\
+template <typename T, int Options2_, template <class> class MakePointer_, size_t N>\
+struct PlaceHolderExpression< CVQual TensorMap< T, Options2_, MakePointer_>, N> {\
+  typedef CVQual PlaceHolder<CVQual TensorMap<T, Options2_, MakePointer_>, N> Type;\
 };
 
 TENSORMAPEXPR(const)
@@ -167,6 +167,20 @@ SYCLREDUCTION(const)
 SYCLREDUCTION()
 #undef SYCLREDUCTION
 
+/// specialisation of the \ref PlaceHolderExpression when the node is
+/// TensorSlicingOp
+#define SLICEOPEXPR(CVQual)\
+template <typename StartIndices, typename Sizes, typename XprType, size_t N>\
+struct PlaceHolderExpression<CVQual TensorSlicingOp<StartIndices, Sizes, XprType>, N> {\
+  typedef CVQual TensorSlicingOp<StartIndices, Sizes, typename CalculateIndex<N, XprType>::ArgType> Type;\
+};
+
+SLICEOPEXPR(const)
+SLICEOPEXPR()
+#undef SLICEOPEXPR
+
 /// template deduction for \ref PlaceHolderExpression struct
 template <typename Expr>
 struct createPlaceHolderExpression {
@@ -146,6 +146,7 @@ if(EIGEN_TEST_CXX11)
   ei_add_test_sycl(cxx11_tensor_broadcast_sycl "-std=c++11")
   ei_add_test_sycl(cxx11_tensor_device_sycl "-std=c++11")
   ei_add_test_sycl(cxx11_tensor_reduction_sycl "-std=c++11")
+  ei_add_test_sycl(cxx11_tensor_morphing_sycl "-std=c++11")
 endif(EIGEN_TEST_SYCL)
 # It should be safe to always run these tests as there is some fallback code for
 # older compilers that don't support cxx11.
@@ -47,7 +47,8 @@ static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){
   float * gpu_in_data  = static_cast<float*>(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(float)));
   float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
 
-  TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range);
+  TensorMap<TensorFixedSize<float, Sizes<2, 3, 5, 7>>> gpu_in(gpu_in_data, in_range);
+  //TensorMap<Tensor<float, 4>> gpu_in(gpu_in_data, in_range);
   TensorMap<Tensor<float, 4>> gpu_out(gpu_out_data, out_range);
   sycl_device.memcpyHostToDevice(gpu_in_data, input.data(), (input.dimensions().TotalSize())*sizeof(float));
   gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts);
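The input map now goes through TensorFixedSize, whose dimensions are encoded in the type rather than stored at runtime. A minimal host-side illustration (no SYCL involved):

    // A fixed 2x3x5x7 float tensor: the shape is part of the type.
    Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3, 5, 7>> fixed;
    fixed.setZero();  // 2*3*5*7 = 210 coefficients, extent known at compile time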
unsupported/test/cxx11_tensor_morphing_sycl.cpp (new file, 84 lines)
@@ -0,0 +1,84 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli    Codeplay Software Ltd.
+// Ralph Potter  Codeplay Software Ltd.
+// Luke Iwanski  Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+// Benoit Steiner <benoit.steiner.goog@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+#define EIGEN_TEST_FUNC cxx11_tensor_morphing_sycl
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_SYCL
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+
+static void test_simple_slice(const Eigen::SyclDevice &sycl_device)
+{
+  int sizeDim1 = 2;
+  int sizeDim2 = 3;
+  int sizeDim3 = 5;
+  int sizeDim4 = 7;
+  int sizeDim5 = 11;
+  array<int, 5> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}};
+  Tensor<float, 5> tensor(tensorRange);
+  tensor.setRandom();
+  array<int, 5> slice1_range = {{1, 1, 1, 1, 1}};
+  Tensor<float, 5> slice1(slice1_range);
+
+  float* gpu_data1 = static_cast<float*>(sycl_device.allocate(tensor.size()*sizeof(float)));
+  float* gpu_data2 = static_cast<float*>(sycl_device.allocate(slice1.size()*sizeof(float)));
+  TensorMap<Tensor<float, 5>> gpu1(gpu_data1, tensorRange);
+  TensorMap<Tensor<float, 5>> gpu2(gpu_data2, slice1_range);
+  Eigen::DSizes<ptrdiff_t, 5> indices(1,2,3,4,5);
+  Eigen::DSizes<ptrdiff_t, 5> sizes(1,1,1,1,1);
+  sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(), (tensor.size())*sizeof(float));
+  gpu2.device(sycl_device) = gpu1.slice(indices, sizes);
+  sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2, (slice1.size())*sizeof(float));
+  VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5));
+
+  array<int, 5> slice2_range = {{1,1,2,2,3}};
+  Tensor<float, 5> slice2(slice2_range);
+  float* gpu_data3 = static_cast<float*>(sycl_device.allocate(slice2.size()*sizeof(float)));
+  TensorMap<Tensor<float, 5>> gpu3(gpu_data3, slice2_range);
+  Eigen::DSizes<ptrdiff_t, 5> indices2(1,1,3,4,5);
+  Eigen::DSizes<ptrdiff_t, 5> sizes2(1,1,2,2,3);
+  gpu3.device(sycl_device) = gpu1.slice(indices2, sizes2);
+  sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3, (slice2.size())*sizeof(float));
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 2; ++j) {
+      for (int k = 0; k < 3; ++k) {
+        VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k));
+      }
+    }
+  }
+  sycl_device.deallocate(gpu_data1);
+  sycl_device.deallocate(gpu_data2);
+  sycl_device.deallocate(gpu_data3);
+}
+
+void test_cxx11_tensor_morphing_sycl()
+{
+  /// Currently this only works on the CPU. Adding a GPU device causes an LLVM ERROR when constructing the OpenCL kernel at runtime.
+  cl::sycl::cpu_selector s;
+  Eigen::SyclDevice sycl_device(s);
+  CALL_SUBTEST(test_simple_slice(sycl_device));
+}