Merged in benoitsteiner/opencl (pull request PR-247)

Benoit Steiner 2016-11-11 00:01:28 +00:00
commit f4722aa479
5 changed files with 137 additions and 50 deletions

View File

@@ -44,14 +44,14 @@ struct SyclDevice {
// destructor
~SyclDevice() { deallocate_all(); }
template <typename T> void deallocate(T *p) const {
template <typename T> EIGEN_STRONG_INLINE void deallocate(T *p) const {
auto it = buffer_map.find(p);
if (it != buffer_map.end()) {
buffer_map.erase(it);
internal::aligned_free(p);
}
}
void deallocate_all() const {
EIGEN_STRONG_INLINE void deallocate_all() const {
std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
while (it!=buffer_map.end()) {
auto p=it->first;
@@ -72,9 +72,14 @@ struct SyclDevice {
template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
using Type = cl::sycl::buffer<T, 1>;
std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret = buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
[](void *dataMem) { delete static_cast<Type*>(dataMem); })));
(static_cast<Type*>(buffer_map.at(ptr).get()))->set_final_data(nullptr);
std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret;
if(ptr!=nullptr){
ret= buffer_map.insert(std::pair<const void *, std::shared_ptr<void>>(ptr, std::shared_ptr<void>(new Type(cl::sycl::range<1>(num_bytes)),
[](void *dataMem) { delete static_cast<Type*>(dataMem); })));
(static_cast<Type*>(ret.first->second.get()))->set_final_data(nullptr);
} else {
eigen_assert("The device memory is not allocated. Please call allocate on the device!!");
}
return ret;
}
@@ -83,36 +88,77 @@ struct SyclDevice {
}
/// allocating memory on the cpu
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void *allocate(size_t) const {
EIGEN_STRONG_INLINE void *allocate(size_t) const {
return internal::aligned_malloc(8);
}
// some runtime conditions that can be applied here
bool isDeviceSuitable() const { return true; }
EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
::memcpy(dst, src, n);
}
template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
template<typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
memcpy(host_acc.get_pointer(), src, n);
}
/// with the current implementation of SYCL, the data is copied twice from device to host. This will be fixed soon.
template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
EIGEN_STRONG_INLINE void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
tileSize =m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
rng = n;
if (rng==0) rng=1;
GRange=rng;
if (tileSize>GRange) tileSize=GRange;
else if(GRange>tileSize){
size_t xMode = GRange % tileSize;
if (xMode != 0) GRange += (tileSize - xMode);
}
}
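/// Single-pass device-to-host copy: dst is wrapped in a cl::sycl::map_allocator buffer and a kernel copies the registered source buffer into it, avoiding the second host-side copy of the old accessor-based version.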
template<typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
auto it = buffer_map.find(src);
if (it != buffer_map.end()) {
auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::host_buffer>();
memcpy(dst,host_acc.get_pointer(), n);
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
auto dest_buf = cl::sycl::buffer<T, 1, cl::sycl::map_allocator<T>>(dst, cl::sycl::range<1>(rng));
typedef decltype(dest_buf) SYCLDTOH;
m_queue.submit([&](cl::sycl::handler &cgh) {
auto src_acc= (static_cast<cl::sycl::buffer<T, 1>*>(it->second.get()))-> template get_access<cl::sycl::access::mode::read, cl::sycl::access::target::global_buffer>(cgh);
auto dst_acc =dest_buf.template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
cgh.parallel_for<SYCLDTOH>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
auto globalid=itemID.get_global_linear_id();
if (globalid< dst_acc.get_size()) {
dst_acc[globalid] = src_acc[globalid];
}
});
});
m_queue.throw_asynchronous();
} else{
eigen_assert("no device memory found. The memory might be destroyed before creation");
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void *buffer, int c, size_t n) const {
::memset(buffer, c, n);
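/// Device-side counterpart of std::memset: a SYCL kernel writes the byte value c into each of the n bytes of the buffer registered for buff.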
template<typename T> EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const {
size_t rng, GRange, tileSize;
parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
m_queue.submit([&](cl::sycl::handler &cgh) {
auto buf_acc =(static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(buff, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
cgh.parallel_for<SyclDevice>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
auto globalid=itemID.get_global_linear_id();
auto buf_ptr= reinterpret_cast<typename cl::sycl::global_ptr<unsigned char>::pointer_t>((&(*buf_acc.get_pointer())));
if (globalid< buf_acc.get_size()) {
for(size_t i=0; i<sizeof(T); i++)
buf_ptr[globalid*sizeof(T) + i] = c;
}
});
});
m_queue.throw_asynchronous();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
EIGEN_STRONG_INLINE int majorDeviceVersion() const {
return 1;
}
};
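
A minimal standalone sketch of the rounding parallel_for_setup performs (the work-group limit of 256 below is a made-up stand-in for the cl::sycl::info::device::max_work_group_size value the real code queries):

#include <cstddef>
#include <iostream>

// Mirrors the logic of SyclDevice::parallel_for_setup with a hypothetical device limit.
void parallel_for_setup_sketch(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) {
  tileSize = 256 / 2;                        // half the (assumed) max work-group size
  rng = (n == 0) ? 1 : n;                    // unpadded element count
  GRange = rng;
  if (tileSize > GRange) tileSize = GRange;  // small problem: shrink the tile instead
  else if (GRange % tileSize != 0)           // otherwise round GRange up to the next
    GRange += tileSize - GRange % tileSize;  // multiple of tileSize
}

int main() {
  size_t tileSize, rng, GRange;
  parallel_for_setup_sketch(1000, tileSize, rng, GRange);
  std::cout << tileSize << " " << rng << " " << GRange << "\n"; // 128 1000 1024
}

The padded work-items between rng and GRange do no work, which is why the kernels in this commit carry a bounds check inside the kernel body.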

View File

@@ -188,15 +188,8 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
typedef const typename Self::ChildType HostExpr; /// this is the child of reduction
typedef typename TensorSycl::internal::createPlaceHolderExpression<HostExpr>::Type PlaceHolderExpr;
auto functors = TensorSycl::internal::extractFunctors(self.impl());
size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
size_t GRange=num_coeffs_to_preserve;
if (tileSize>GRange) tileSize=GRange;
else if(GRange>tileSize){
size_t xMode = GRange % tileSize;
if (xMode != 0) GRange += (tileSize - xMode);
}
size_t range, GRange, tileSize;
dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange);
// getting final out buffer at the moment the created buffer is true because there is no need for assign
/// creating the shared memory for calculating reduction.
/// This one is used to collect all the reduced values from shared memory, as we don't have a global barrier on the GPU. Once it is saved we can
@@ -223,7 +216,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {
auto device_self_evaluator = Eigen::TensorEvaluator<decltype(device_self_expr), Eigen::DefaultDevice>(device_self_expr, Eigen::DefaultDevice());
/// const cast added as a naive solution to solve the qualifier drop error
auto globalid=itemID.get_global_linear_id();
if (globalid< static_cast<size_t>(num_coeffs_to_preserve)) {
if (globalid< range) {
typename DeiceSelf::CoeffReturnType accum = functor.initialize();
GenericDimReducer<DeiceSelf::NumReducedDims-1, DeiceSelf, Op>::reduce(device_self_evaluator, device_self_evaluator.firstInput(globalid),const_cast<Op&>(functor), &accum);
functor.finalize(accum);

View File

@@ -37,18 +37,12 @@ void run(Expr &expr, Dev &dev) {
typedef typename internal::createPlaceHolderExpression<Expr>::Type PlaceHolderExpr;
auto functors = internal::extractFunctors(evaluator);
size_t tileSize =dev.m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
dev.m_queue.submit([&](cl::sycl::handler &cgh) {
// create a tuple of accessors from Evaluator
auto tuple_of_accessors = internal::createTupleOfAccessors<decltype(evaluator)>(cgh, evaluator);
const auto range = utility::tuple::get<0>(tuple_of_accessors).get_range()[0];
size_t GRange=range;
if (tileSize>GRange) tileSize=GRange;
else if(GRange>tileSize){
size_t xMode = GRange % tileSize;
if (xMode != 0) GRange += (tileSize - xMode);
}
size_t range, GRange, tileSize;
dev.parallel_for_setup(utility::tuple::get<0>(tuple_of_accessors).get_range()[0], tileSize, range, GRange);
// run the kernel
cgh.parallel_for<PlaceHolderExpr>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
typedef typename internal::ConvertToDeviceExpression<Expr>::Type DevExpr;

View File

@@ -19,10 +19,23 @@
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
#include<stdint.h>
void test_device_sycl(const Eigen::SyclDevice &sycl_device) {
std::cout <<"Helo from ComputeCpp: the requested device exists and the device name is : "
<< sycl_device.m_queue.get_device(). template get_info<cl::sycl::info::device::name>() <<std::endl;;
int sizeDim1 = 100;
array<int, 1> tensorRange = {{sizeDim1}};
Tensor<int, 1> in(tensorRange);
Tensor<int, 1> in1(tensorRange);
memset(in1.data(), 1,in1.size()*sizeof(int));
int * gpu_in_data = static_cast<int*>(sycl_device.allocate(in.size()*sizeof(int)));
sycl_device.memset(gpu_in_data, 1,in.size()*sizeof(int) );
sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(int) );
for (int i=0; i<in.size(); i++)
VERIFY_IS_APPROX(in(i), in1(i));
sycl_device.deallocate(gpu_in_data);
}
void test_cxx11_tensor_device_sycl() {
cl::sycl::gpu_selector s;
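
The device-side memset above can be compared against the host std::memset because both have byte-wise semantics: filling an int buffer with the byte 1 leaves 0x01010101 == 16843009 in every element, on host and device alike. A quick host-only check:

#include <cstring>
#include <iostream>

int main() {
  int x;
  std::memset(&x, 1, sizeof(int));  // every byte of x becomes 0x01
  std::cout << x << "\n";           // prints 16843009 == 0x01010101
}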

View File

@@ -27,7 +27,46 @@ using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
int sizeDim2 = 100;
int sizeDim3 = 100;
array<int, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
Tensor<float, 3> in1(tensorRange);
Tensor<float, 3> out1(tensorRange);
Tensor<float, 3> out2(tensorRange);
Tensor<float, 3> out3(tensorRange);
in1 = in1.random();
float* gpu_data1 = static_cast<float*>(sycl_device.allocate(in1.size()*sizeof(float)));
float* gpu_data2 = static_cast<float*>(sycl_device.allocate(out1.size()*sizeof(float)));
//float* gpu_data = static_cast<float*>(sycl_device.allocate(out2.size()*sizeof(float)));
TensorMap<Tensor<float, 3>> gpu1(gpu_data1, tensorRange);
TensorMap<Tensor<float, 3>> gpu2(gpu_data2, tensorRange);
//TensorMap<Tensor<float, 3>> gpu_out2(gpu_out2_data, tensorRange);
sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(float));
gpu1.device(sycl_device) = gpu1 * 3.14f;
gpu2.device(sycl_device) = gpu2 * 2.7f;
sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(float));
sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(float));
// sycl_device.Synchronize();
for (int i = 0; i < in1.size(); ++i) {
VERIFY_IS_APPROX(out1(i), in1(i) * 3.14f);
VERIFY_IS_APPROX(out2(i), in1(i) * 3.14f);
VERIFY_IS_APPROX(out3(i), in1(i) * 2.7f);
}
sycl_device.deallocate(gpu_data1);
sycl_device.deallocate(gpu_data2);
}
void test_sycl_computations(const Eigen::SyclDevice &sycl_device) {
int sizeDim1 = 100;
int sizeDim2 = 100;
@@ -41,10 +80,10 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
in2 = in2.random();
in3 = in3.random();
float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(float)));
float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(float)));
float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.dimensions().TotalSize()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(float)));
float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.size()*sizeof(float)));
float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.size()*sizeof(float)));
float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.size()*sizeof(float)));
float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.size()*sizeof(float)));
TensorMap<Tensor<float, 3>> gpu_in1(gpu_in1_data, tensorRange);
TensorMap<Tensor<float, 3>> gpu_in2(gpu_in2_data, tensorRange);
@@ -53,7 +92,7 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
/// a=1.2f
gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f);
sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -65,7 +104,7 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
/// a=b*1.2f
gpu_out.device(sycl_device) = gpu_in1 * 1.2f;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -77,9 +116,9 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
printf("a=b*1.2f Test Passed\n");
/// c=a*b
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(float));
gpu_out.device(sycl_device) = gpu_in1 * gpu_in2;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -93,7 +132,7 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
/// c=a+b
gpu_out.device(sycl_device) = gpu_in1 + gpu_in2;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -107,7 +146,7 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
/// c=a*a
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1;
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -121,7 +160,7 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
//a*3.14f + b*2.7f
gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f);
sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -134,9 +173,9 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
printf("a*3.14f + b*2.7f Test Passed\n");
///d= (a>0.5? b:c)
sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(float));
gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3);
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(float));
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(float));
for (int i = 0; i < sizeDim1; ++i) {
for (int j = 0; j < sizeDim2; ++j) {
for (int k = 0; k < sizeDim3; ++k) {
@@ -152,8 +191,10 @@ void test_sycl_cpu(const Eigen::SyclDevice &sycl_device) {
sycl_device.deallocate(gpu_in3_data);
sycl_device.deallocate(gpu_out_data);
}
void test_cxx11_tensor_sycl() {
cl::sycl::gpu_selector s;
Eigen::SyclDevice sycl_device(s);
CALL_SUBTEST(test_sycl_cpu(sycl_device));
CALL_SUBTEST(test_sycl_mem_transfers(sycl_device));
CALL_SUBTEST(test_sycl_computations(sycl_device));
}
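
Taken together, the tests follow one pattern: allocate device memory, copy in, evaluate a tensor expression as a SYCL kernel, copy out, deallocate. A minimal sketch mirroring the test code above (the 64-element size and the function name are arbitrary; it assumes the Eigen SYCL headers and a working SYCL runtime):

#include <unsupported/Eigen/CXX11/Tensor>

void scale_on_device(const Eigen::SyclDevice &sycl_device) {
  Eigen::array<int, 1> range = {{64}};
  Eigen::Tensor<float, 1> in(range), out(range);
  in = in.random();
  // register a device buffer for the raw pointer and copy the host data in
  float *gpu = static_cast<float*>(sycl_device.allocate(in.size() * sizeof(float)));
  Eigen::TensorMap<Eigen::Tensor<float, 1>> gpu_t(gpu, range);
  sycl_device.memcpyHostToDevice(gpu, in.data(), in.size() * sizeof(float));
  gpu_t.device(sycl_device) = gpu_t * 2.0f;  // evaluated as a SYCL kernel
  sycl_device.memcpyDeviceToHost(out.data(), gpu, out.size() * sizeof(float));
  sycl_device.deallocate(gpu);
}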