Adding memset; optimising memcpyDeviceToHost by removing double copying
parent 75c080b176
commit 2e704d4257
@@ -72,9 +72,14 @@ struct SyclDevice {
   template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator, bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
     using Type = cl::sycl::buffer<T, 1>;
-    std::pair<std::map<const void *, std::shared_ptr<void>>::iterator, bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
-                        [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
-    (static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
+    std::pair<std::map<const void *, std::shared_ptr<void>>::iterator, bool> ret;
+    if (ptr != nullptr) {
+      ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
+                          [](void *dataMem) { delete static_cast<Type*>(dataMem); })));
+      (static_cast<Type*>(ret.first->second.get()))->set_final_data(nullptr);
+    } else {
+      eigen_assert("The device memory is not allocated; please call allocate on the device first!");
+    }
     return ret;
   }

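For context, add_sycl_buffer keeps a registry of type-erased SYCL buffers: a std::shared_ptr<void> whose custom deleter restores the concrete buffer type, keyed by the user's pointer. std::map::insert hands back the existing entry when the pointer is already registered, which is why memset and memcpyHostToDevice below can call add_sycl_buffer unconditionally. A minimal sketch of the pattern, with a stand-in Buffer type in place of the real cl::sycl::buffer:

#include <cstdio>
#include <map>
#include <memory>

// Stand-in for cl::sycl::buffer<T, 1>; only its destructor matters here.
template <typename T> struct Buffer { size_t bytes; };

std::map<const void *, std::shared_ptr<void>> buffer_map;

template <typename T>
std::pair<std::map<const void *, std::shared_ptr<void>>::iterator, bool>
add_buffer(const T *ptr, size_t num_bytes) {
  using Type = Buffer<T>;
  // insert() is a no-op when ptr is already a key; .second reports which case.
  return buffer_map.insert({ptr, std::shared_ptr<void>(
      new Type{num_bytes},
      [](void *mem) { delete static_cast<Type *>(mem); })});  // deleter restores the type
}

int main() {
  int x = 0;
  auto first  = add_buffer(&x, sizeof(int));
  auto second = add_buffer(&x, sizeof(int));  // same key: the first buffer is reused
  std::printf("%d %d\n", int(first.second), int(second.second));  // prints "1 0"
}

When the key already exists, the freshly constructed buffer is released immediately by the temporary shared_ptr, so only the first registration survives.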
@@ -83,36 +88,77 @@ struct SyclDevice {
   }

   /// allocating memory on the cpu
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
+  void *allocate(size_t) const {
     return internal::aligned_malloc(8);
   }

   // some runtime conditions that can be applied here
   bool isDeviceSuitable() const { return true; }

-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
+  void memcpy(void *dst, const void *src, size_t n) const {
     ::memcpy(dst, src, n);
   }

-  template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
+  template<typename T> void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
     auto host_acc = (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))->template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
     memcpy(host_acc.get_pointer(), src, n);
   }
-  /// with the current implementation of sycl, the data is copied twice from device to host. This will be fixed soon.
-  template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {

+  inline void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
+    tileSize = m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>() / 2;
+    rng = n;
+    if (rng == 0) rng = 1;
+    GRange = rng;
+    if (tileSize > GRange) tileSize = GRange;
+    else if (GRange > tileSize) {
+      size_t xMode = GRange % tileSize;
+      if (xMode != 0) GRange += (tileSize - xMode);
+    }
+  }
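The new parallel_for_setup helper centralises the tiling logic that was previously duplicated across the executor and the reducers (see the hunks further down): it picks a tile (work-group) size from the device's max_work_group_size and rounds the global range up to a whole number of work-groups; the padded work items are masked off inside each kernel by the globalid < size checks. A standalone sketch of the arithmetic, with max_wg standing in for the device query:

#include <cstdio>

// Mirrors SyclDevice::parallel_for_setup: pick a tile (work-group) size and
// round the global range up to a multiple of it.
void parallel_for_setup(size_t n, size_t max_wg,
                        size_t &tileSize, size_t &rng, size_t &GRange) {
  tileSize = max_wg / 2;
  rng = (n == 0) ? 1 : n;                    // never launch an empty range
  GRange = rng;
  if (tileSize > GRange) {
    tileSize = GRange;                       // small input: one short group
  } else if (GRange % tileSize != 0) {
    GRange += tileSize - GRange % tileSize;  // pad to whole work-groups
  }
}

int main() {
  size_t tile, rng, grange;
  parallel_for_setup(1000, 512, tile, rng, grange);
  // tile=256 rng=1000 GRange=1024: the 24 padded work items are the ones
  // masked off by the `globalid < size` checks in the kernels.
  std::printf("tile=%zu rng=%zu GRange=%zu\n", tile, rng, grange);
}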
+  template<typename T> void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
     auto it = buffer_map.find(src);
     if (it != buffer_map.end()) {
-      auto host_acc = (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))->template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::host_buffer>();
-      memcpy(dst, host_acc.get_pointer(), n);
+      size_t rng, GRange, tileSize;
+      parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+
+      auto dest_buf = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>(dst, cl::sycl::range<1>(rng));
+      typedef decltype(dest_buf) SYCLDTOH;
+      m_queue.submit([&](cl::sycl::handler &cgh) {
+        auto src_acc = (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))->template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
+        auto dst_acc = dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+        cgh.parallel_for<SYCLDTOH>(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+          auto globalid = itemID.get_global_linear_id();
+          if (globalid < dst_acc.get_size()) {
+            dst_acc[globalid] = src_acc[globalid];
+          }
+        });
+      });
+      m_queue.throw_asynchronous();
     } else {
       eigen_assert("No device memory found; the memory might have been destroyed before use.");
     }
   }

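This is the double-copy fix from the commit message: previously the data travelled device -> runtime host copy (via a host accessor) -> dst, whereas now dest_buf is built directly over the host pointer dst with cl::sycl::map_allocator, so the kernel's output lands in dst when the buffer is destroyed. A minimal single-copy sketch in isolation, assuming a ComputeCpp-era cl::sycl implementation (buffer sizes here are in elements):

#include <CL/sycl.hpp>
#include <cstdio>

int main() {
  constexpr size_t N = 64;
  int host_src[N], host_dst[N];
  for (size_t i = 0; i < N; i++) host_src[i] = int(i);

  cl::sycl::queue q;
  {
    cl::sycl::buffer<int, 1> src_buf(host_src, cl::sycl::range<1>(N));
    // map_allocator backs the buffer with host_dst itself: on destruction the
    // data is mapped back into host_dst with no extra host-side copy.
    cl::sycl::buffer<int, 1, cl::sycl::map_allocator<int>> dst_buf(host_dst, cl::sycl::range<1>(N));
    q.submit([&](cl::sycl::handler &cgh) {
      auto src = src_buf.get_access<cl::sycl::access::mode::read>(cgh);
      auto dst = dst_buf.get_access<cl::sycl::access::mode::discard_write>(cgh);
      cgh.parallel_for<class copy_kernel>(cl::sycl::range<1>(N), [=](cl::sycl::id<1> i) {
        dst[i] = src[i];
      });
    });
  }  // buffers go out of scope here; dst_buf writes through to host_dst
  std::printf("host_dst[5]=%d\n", host_dst[5]);  // 5
}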
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
-    ::memset(buffer, c, n);
+  template<typename T> void memset(T *buff, int c, size_t n) const {
+    size_t rng, GRange, tileSize;
+    parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
+    m_queue.submit([&](cl::sycl::handler &cgh) {
+      auto buf_acc = (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(buff, n).first->second.get()))->template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+      cgh.parallel_for<SyclDevice>(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
+        auto globalid = itemID.get_global_linear_id();
+        auto buf_ptr = reinterpret_cast<typename cl::sycl::global_ptr<unsigned char>::pointer_t>((&(*buf_acc.get_pointer())));
+        if (globalid < buf_acc.get_size()) {
+          for (size_t i = 0; i < sizeof(T); i++)
+            buf_ptr[globalid*sizeof(T) + i] = c;
+        }
+      });
+    });
+    m_queue.throw_asynchronous();
   }
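Note that, like libc memset, the kernel writes the low byte of c into every byte of the buffer rather than assigning c per element: memset with c = 1 over an int buffer yields 0x01010101 per element, which is why the test further down compares against a host-side memset instead of a fill of 1s. A plain C++ illustration:

#include <cstdio>
#include <cstring>

int main() {
  int v;
  std::memset(&v, 1, sizeof(int));  // each of the 4 bytes becomes 0x01
  std::printf("%d\n", v);           // 16843009 == 0x01010101, not 1
}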
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
+  int majorDeviceVersion() const {
     return 1;
   }
 };

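A side note on the template arguments to parallel_for above and below (SYCLDTOH, SyclDevice, PlaceHolderExpr): SYCL 1.2 requires each kernel lambda to be named by a unique C++ type so the host and device compilers can pair them up; any type, even a bare forward declaration, serves as the tag. A minimal sketch:

#include <CL/sycl.hpp>

class fill_kernel;  // a forward declaration is enough: the type is only a name

int main() {
  cl::sycl::queue q;
  cl::sycl::buffer<int, 1> buf(cl::sycl::range<1>(16));
  q.submit([&](cl::sycl::handler &cgh) {
    auto acc = buf.get_access<cl::sycl::access::mode::discard_write>(cgh);
    cgh.parallel_for<fill_kernel>(cl::sycl::range<1>(16), [=](cl::sycl::id<1> i) {
      acc[i] = 42;
    });
  });
  q.wait();
}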
@@ -188,15 +188,8 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
   typedef const typename Self::ChildType HostExpr; /// this is the child of the reduction
   typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
   auto functors = TensorSycl::internal::extractFunctors(self.impl());
-
-  size_t tileSize = dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>()/2;
-
-  size_t GRange = num_coeffs_to_preserve;
-  if (tileSize > GRange) tileSize = GRange;
-  else if (GRange > tileSize) {
-    size_t xMode = GRange % tileSize;
-    if (xMode != 0) GRange += (tileSize - xMode);
-  }
+  size_t range, GRange, tileSize;
+  dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange);
   // getting final out buffer at the moment the created buffer is true because there is no need for assign
   /// creating the shared memory for calculating the reduction.
   /// This one is used to collect all the reduced values of shared memory, as we don't have a global barrier on the GPU. Once it is saved we can
@@ -223,7 +216,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
   auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
   /// const cast added as a naive solution to solve the qualifier drop error
   auto globalid = itemID.get_global_linear_id();
-  if (globalid < static_cast<size_t>(num_coeffs_to_preserve)) {
+  if (globalid < range) {
     typename DeiceSelf::CoeffReturnType accum = functor.initialize();
     GenericDimReducer<DeiceSelf::NumReducedDims-1, DeiceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid), const_cast<Op&>(functor), &accum);
     functor.finalize(accum);

@@ -37,18 +37,12 @@ void run(Expr &expr, Dev &dev) {
   typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
   auto functors = internal::extractFunctors(evaluator);

-  size_t tileSize = dev.m_queue.get_device().template get_info<cl::sycl::info::device::max_work_group_size>()/2;
   dev.m_queue.submit([&](cl::sycl::handler &cgh) {

     // create a tuple of accessors from Evaluator
     auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
-    const auto range = utility::tuple::get<0>(tuple_of_accessors).get_range()[0];
-    size_t GRange = range;
-    if (tileSize > GRange) tileSize = GRange;
-    else if (GRange > tileSize) {
-      size_t xMode = GRange % tileSize;
-      if (xMode != 0) GRange += (tileSize - xMode);
-    }
+    size_t range, GRange, tileSize;
+    dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0], tileSize, range, GRange);

     // run the kernel
     cgh.parallel_for<PlaceHolderExpr>(cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
       typedef typename internal::ConvertToDeviceExpression<Expr>::Type DevExpr;

@@ -19,10 +19,23 @@

 #include "main.h"
 #include <unsupported/Eigen/CXX11/Tensor>
+#include <stdint.h>

 void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
   std::cout << "Hello from ComputeCpp: the requested device exists and the device name is: "
             << sycl_device.m_queue.get_device().template get_info<cl::sycl::info::device::name>() << std::endl;
+  int sizeDim1 = 100;
+
+  array<int, 1> tensorRange = {{sizeDim1}};
+  Tensor<int, 1> in(tensorRange);
+  Tensor<int, 1> in1(tensorRange);
+  memset(in1.data(), 1, in1.dimensions().TotalSize()*sizeof(int));
+  int *gpu_in_data = static_cast<int*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(int)));
+  sycl_device.memset(gpu_in_data, 1, in.dimensions().TotalSize()*sizeof(int));
+  sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.dimensions().TotalSize()*sizeof(int));
+  for (int i = 0; i < in.dimensions().TotalSize(); i++)
+    VERIFY_IS_APPROX(in(i), in1(i));
+  sycl_device.deallocate(gpu_in_data);
 }
 void test_cxx11_tensor_device_sycl() {
   cl::sycl::gpu_selector s;