Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-08-12 11:49:02 +08:00)
Adding comment to TensorDeviceSycl.h and cleaning the code.
Commit a5c3f15682, parent 3be3963021.
@@ -17,16 +17,18 @@
 namespace Eigen {
 struct SyclDevice {
-  /// class members
+  /// class members:

   /// sycl queue
   mutable cl::sycl::queue m_queue;

   /// std::map is the container used to make sure that we create only one buffer
   /// per pointer. The lifespan of the buffer now depends on the lifespan of SyclDevice.
   /// If a non-read-only pointer is needed to be accessed on the host we should manually deallocate it.
   mutable std::map<const void *, std::shared_ptr<void>> buffer_map;

   /// creating device by using selector
-  template<typename dev_Selector> SyclDevice(dev_Selector s)
-  :
+  template<typename dev_Selector> explicit SyclDevice(dev_Selector s):
 #ifdef EIGEN_EXCEPTIONS
   m_queue(cl::sycl::queue(s, [=](cl::sycl::exception_list l) {
     for (const auto& e : l) {
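The new comments above describe the core idea of this backend: buffer_map caches at most one buffer per key pointer, and every cached buffer lives until the SyclDevice itself is destroyed. For reference, here is a minimal standard-C++ sketch of that one-buffer-per-pointer caching pattern; BufferRegistry and get_or_create are illustrative names, not part of Eigen.

#include <cstddef>
#include <cstdio>
#include <map>
#include <memory>

// Illustrative stand-in for SyclDevice's buffer_map: at most one payload per key
// pointer, created lazily on first use and owned until the registry is destroyed.
struct BufferRegistry {
  mutable std::map<const void*, std::shared_ptr<void>> buffer_map;

  // Return the cached entry for `key`, creating it on the first request
  // (SyclDevice does the same with a cl::sycl::buffer<T, 1> as the payload).
  std::shared_ptr<void> get_or_create(const void* key, std::size_t num_bytes) const {
    auto it = buffer_map.find(key);
    if (it != buffer_map.end()) return it->second;            // reuse the cached buffer
    std::shared_ptr<void> buf(::operator new(num_bytes),
                              [](void* p) { ::operator delete(p); });
    return buffer_map.emplace(key, buf).first->second;        // insert exactly once
  }
};

int main() {
  BufferRegistry registry;
  int key = 0;                                     // any pointer can act as the key
  auto a = registry.get_or_create(&key, 64);
  auto b = registry.get_or_create(&key, 64);       // second lookup hits the cache
  std::printf("same buffer: %d\n", a.get() == b.get());       // prints 1
}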
@@ -41,9 +43,12 @@ struct SyclDevice {
   m_queue(cl::sycl::queue(s))
 #endif
   {}

   // destructor
   ~SyclDevice() { deallocate_all(); }
+
+  /// This is used to deallocate the device pointer. p is used as a key inside
+  /// the map to find the device buffer and delete it.
   template <typename T> EIGEN_STRONG_INLINE void deallocate(T *p) const {
     auto it = buffer_map.find(p);
     if (it != buffer_map.end()) {
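The new deallocate comment says the pointer is only a lookup key. Below is a hedged usage sketch of the allocate/copy/deallocate pairing, assuming a SYCL implementation and Eigen's unsupported Tensor module are available and that defining EIGEN_USE_SYCL pulls in this header; dev, gpu_key and the sizes are illustrative.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>   // assumed to include TensorDeviceSycl.h

int main() {
  cl::sycl::default_selector selector;
  Eigen::SyclDevice dev(selector);          // owns the queue and the buffer_map

  const size_t n = 128;
  float host_src[128] = {};                 // host data to upload
  // allocate() returns a small host pointer that only serves as the buffer_map key.
  float* gpu_key = static_cast<float*>(dev.allocate(n * sizeof(float)));
  dev.memcpyHostToDevice(gpu_key, host_src, n * sizeof(float));  // creates the real buffer

  // Frees both the buffer and the key pointer; if this call were omitted,
  // ~SyclDevice() would reclaim everything through deallocate_all().
  dev.deallocate(gpu_key);
}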
@@ -51,6 +56,9 @@ struct SyclDevice {
       internal::aligned_free(p);
     }
   }
+
+  /// This is called by the SyclDevice destructor to release all allocated memory if the user didn't already do so.
+  /// We also free the host pointer that we have dedicated as a key to accessing the device buffer.
   EIGEN_STRONG_INLINE void deallocate_all() const {
     std::map<const void *, std::shared_ptr<void>>::iterator it=buffer_map.begin();
     while (it!=buffer_map.end()) {
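deallocate_all erases map entries while iterating over them. The following small standard-C++ illustration shows that erase-inside-the-loop idiom, with std::malloc/std::free standing in for internal::aligned_malloc/aligned_free and a plain shared_ptr standing in for the sycl buffer.

#include <cstdlib>
#include <map>
#include <memory>

// Erase every entry while iterating: std::map::erase(it) returns the next valid
// iterator, so the loop never dereferences an invalidated one.
void release_all(std::map<const void*, std::shared_ptr<void>>& buffer_map) {
  auto it = buffer_map.begin();
  while (it != buffer_map.end()) {
    std::free(const_cast<void*>(it->first));   // placeholder for internal::aligned_free(key)
    it = buffer_map.erase(it);                 // drops the shared_ptr payload, then advances
  }
  buffer_map.clear();                          // already empty; mirrors the original code
}

int main() {
  std::map<const void*, std::shared_ptr<void>> buffer_map;
  void* key = std::malloc(8);                            // stands in for allocate()
  buffer_map.emplace(key, std::make_shared<int>(42));    // stands in for the sycl buffer
  release_all(buffer_map);                               // frees the key, drops the payload
}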
@@ -62,15 +70,17 @@ struct SyclDevice {
     buffer_map.clear();
   }

-  /// creation of sycl accessor for a buffer. This function first tries to find
+  /// Creation of sycl accessor for a buffer. This function first tries to find
   /// the buffer in the buffer_map. If found it gets the accessor from it, if not,
-  ///the function then adds an entry by creating a sycl buffer for that particular pointer.
-  template <cl::sycl::access::mode AcMd, typename T> inline cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
+  /// the function then adds an entry by creating a sycl buffer for that particular pointer.
+  template <cl::sycl::access::mode AcMd, typename T> EIGEN_STRONG_INLINE cl::sycl::accessor<T, 1, AcMd, cl::sycl::access::target::global_buffer>
   get_sycl_accessor(size_t num_bytes, cl::sycl::handler &cgh, const T * ptr) const {
     return (get_sycl_buffer<T>(num_bytes, ptr)->template get_access<AcMd, cl::sycl::access::target::global_buffer>(cgh));
   }

-  template<typename T> inline std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(const T *ptr, size_t num_bytes) const {
+  /// Inserting a new sycl buffer. For every allocated device pointer only one buffer would be created. The buffer type is a device- only buffer.
+  /// The key pointer used to access the device buffer(the device pointer(ptr) ) must be initialised by the allocate function.
+  template<typename T> EIGEN_STRONG_INLINE std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> add_sycl_buffer(size_t num_bytes, const T *ptr) const {
     using Type = cl::sycl::buffer<T, 1>;
     std::pair<std::map<const void *, std::shared_ptr<void>>::iterator,bool> ret;
     if(ptr!=nullptr){
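get_sycl_accessor is what kernel code calls to turn a key pointer into a device accessor bound to a command group. Here is a hedged sketch of that call inside queue::submit; the FillOnes kernel name, the kernel body and the direct use of the public m_queue member are illustrative choices, not an API this commit defines.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

class FillOnes;   // illustrative kernel name

// Assumes `dev` is an Eigen::SyclDevice and `gpu_key` was returned by dev.allocate().
void fill_with_ones(const Eigen::SyclDevice& dev, float* gpu_key, size_t n) {
  dev.m_queue.submit([&](cl::sycl::handler& cgh) {
    // The device looks the buffer up in buffer_map (creating it on first use) and
    // returns a global_buffer accessor bound to this command group.
    auto acc = dev.get_sycl_accessor<cl::sycl::access::mode::write>(
        n * sizeof(float), cgh, gpu_key);
    cgh.parallel_for<FillOnes>(cl::sycl::range<1>(n), [=](cl::sycl::id<1> i) {
      acc[i] = 1.0f;
    });
  });
  dev.m_queue.wait_and_throw();   // finish before the host reads the result back
}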
@@ -83,32 +93,17 @@ struct SyclDevice {
     return ret;
   }

-  template <typename T> inline cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
-    return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(ptr, num_bytes).first->second.get());
-  }
-
-  /// allocating memory on the cpu
-  EIGEN_STRONG_INLINE void *allocate(size_t) const {
-    return internal::aligned_malloc(8);
-  }
-
-  // some runtime conditions that can be applied here
-  EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; }
-
-  EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
-    ::memcpy(dst, src, n);
-  }
-
-  template<typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
-    auto host_acc= (static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(dst, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
-    memcpy(host_acc.get_pointer(), src, n);
+  /// Accessing the created sycl device buffer for the device pointer
+  template <typename T> EIGEN_STRONG_INLINE cl::sycl::buffer<T, 1>* get_sycl_buffer(size_t num_bytes,const T * ptr) const {
+    return static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(num_bytes, ptr).first->second.get());
   }

+  /// This is used to prepare the number of threads and also the number of threads per block for sycl kernels
   EIGEN_STRONG_INLINE void parallel_for_setup(size_t n, size_t &tileSize, size_t &rng, size_t &GRange) const {
     tileSize =m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>()/2;
     rng = n;
     if (rng==0) rng=1;
     GRange=rng;
     if (tileSize>GRange) tileSize=GRange;
     else if(GRange>tileSize){
       size_t xMode = GRange % tileSize;
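parallel_for_setup derives a work-group size from max_work_group_size and hands back rng (the real element count) and GRange (the launch size). Below is a hedged sketch of how a caller would use the three outputs; it assumes, based on the remainder-handling lines elided from this hunk, that GRange ends up rounded to a multiple of tileSize, and ExampleKernel plus the body are illustrative.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>

class ExampleKernel;   // illustrative kernel name

// Assumes `dev` is an Eigen::SyclDevice.
void launch_over(const Eigen::SyclDevice& dev, size_t n) {
  size_t tileSize, rng, GRange;
  dev.parallel_for_setup(n, tileSize, rng, GRange);   // rng = max(n, 1); GRange is assumed
                                                      // to be padded to a tileSize multiple
  dev.m_queue.submit([&](cl::sycl::handler& cgh) {
    cgh.parallel_for<ExampleKernel>(
        cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)),
        [=](cl::sycl::nd_item<1> item) {
          size_t gid = item.get_global_linear_id();
          if (gid >= rng) return;   // padding work-items introduced by the rounding do nothing
          // real per-element work would go here
        });
  });
}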
@@ -116,6 +111,39 @@ struct SyclDevice {
     }
   }

+  /// Allocating device pointer. This pointer is actually an 8 bytes host pointer used as key to access the sycl device buffer.
+  /// The reason is that we cannot use device buffer as a pointer as a m_data in Eigen leafNode expressions. So we create a key
+  /// pointer to be used in Eigen expression construction. When we convert the Eigen construction into the sycl construction we
+  /// use this pointer as a key in our buffer_map and we make sure that we dedicate only one buffer only for this pointer.
+  /// The device pointer would be deleted by calling deallocate function.
+  EIGEN_STRONG_INLINE void *allocate(size_t) const {
+    return internal::aligned_malloc(8);
+  }
+
+  // some runtime conditions that can be applied here
+  EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; }
+
+  /// the memcpy function
+  EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const {
+    ::memcpy(dst, src, n);
+  }
+
+  /// The memcpyHostToDevice is used to copy the device only pointer to a host pointer. Using the device
+  /// pointer created as a key we find the sycl buffer and get the host accessor with discard_write mode
+  /// on it. Using a discard_write accessor guarantees that we do not bring back the current value of the
+  /// buffer to host. Then we use the memcpy to copy the data to the host accessor. The first time that
+  /// this buffer is accessed, the data will be copied to the device.
+  template<typename T> EIGEN_STRONG_INLINE void memcpyHostToDevice(T *dst, const T *src, size_t n) const {
+    auto host_acc= get_sycl_buffer(n, dst)-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::host_buffer>();
+    memcpy(host_acc.get_pointer(), src, n);
+  }
+  /// The memcpyDeviceToHost is used to copy the data from host to device. Here, in order to avoid double copying the data. We create a sycl
+  /// buffer with map_allocator for the destination pointer with a discard_write accessor on it. The lifespan of the buffer is bound to the
+  /// lifespan of the memcpyDeviceToHost function. We create a kernel to copy the data, from the device- only source buffer to the destination
+  /// buffer with map_allocator on the gpu in parallel. At the end of the function call the destination buffer would be destroyed and the data
+  /// would be available on the dst pointer using fast copy technique (map_allocator). In this case we can make sure that we copy the data back
+  /// to the cpu only once per function call.
   template<typename T> EIGEN_STRONG_INLINE void memcpyDeviceToHost(T *dst, const T *src, size_t n) const {
     auto it = buffer_map.find(src);
     if (it != buffer_map.end()) {
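The comments added in this hunk describe the full transfer story: allocate hands out a small key pointer, memcpyHostToDevice creates and fills the device-only buffer, and memcpyDeviceToHost copies back once through a map_allocator buffer. A hedged end-to-end round trip, under the same assumptions as the earlier usage sketch, might look like this.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  cl::sycl::default_selector selector;
  Eigen::SyclDevice dev(selector);

  const size_t n = 64;
  const size_t bytes = n * sizeof(float);
  float src[64], dst[64];
  for (size_t i = 0; i < n; ++i) { src[i] = float(i); dst[i] = 0.0f; }

  // allocate() returns the 8-byte host key; the device-only buffer is created the
  // first time the key is used, here by memcpyHostToDevice.
  float* gpu = static_cast<float*>(dev.allocate(bytes));
  dev.memcpyHostToDevice(gpu, src, bytes);   // host -> device-only buffer
  dev.memcpyDeviceToHost(dst, gpu, bytes);   // device buffer -> dst, copied back once

  for (size_t i = 0; i < n; ++i) assert(dst[i] == src[i]);
  dev.deallocate(gpu);                       // or let ~SyclDevice() reclaim everything
}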
@@ -141,12 +169,12 @@ struct SyclDevice {
     }
   }

+  /// Here is the implementation of memset function on sycl.
   template<typename T> EIGEN_STRONG_INLINE void memset(T *buff, int c, size_t n) const {
-
     size_t rng, GRange, tileSize;
     parallel_for_setup(n/sizeof(T), tileSize, rng, GRange);
     m_queue.submit([&](cl::sycl::handler &cgh) {
-      auto buf_acc =(static_cast<cl::sycl::buffer<T, 1>*>(add_sycl_buffer(buff, n).first->second.get()))-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
+      auto buf_acc =get_sycl_buffer(n, buff)-> template get_access<cl::sycl::access::mode::discard_write, cl::sycl::access::target::global_buffer>(cgh);
       cgh.parallel_for<SyclDevice>( cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), [=](cl::sycl::nd_item<1> itemID) {
         auto globalid=itemID.get_global_linear_id();
         auto buf_ptr= reinterpret_cast<typename cl::sycl::global_ptr<unsigned char>::pointer_t>((&(*buf_acc.get_pointer())));
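memset reuses parallel_for_setup and the accessor machinery to fill the buffer byte-wise on the device. A hedged usage sketch that clears a buffer and reads it back to confirm the fill; names and sizes are illustrative, and the setup from the earlier round-trip sketch is assumed.

#define EIGEN_USE_SYCL
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

// Assumes `dev` is an Eigen::SyclDevice, `gpu` is an allocate()'d key whose buffer
// holds `bytes` bytes of float data, and `host` points to at least `bytes` bytes.
void zero_and_check(const Eigen::SyclDevice& dev, float* gpu, float* host, size_t bytes) {
  dev.memset(gpu, 0, bytes);                 // enqueues the byte-wise fill kernel shown above
  dev.memcpyDeviceToHost(host, gpu, bytes);  // read back to confirm the fill
  for (size_t i = 0; i < bytes / sizeof(float); ++i)
    assert(host[i] == 0.0f);
}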
@@ -158,9 +186,12 @@ struct SyclDevice {
     });
     m_queue.throw_asynchronous();
   }
+  /// No need for sycl it should act the same as CPU version
   EIGEN_STRONG_INLINE int majorDeviceVersion() const {
     return 1;
   }
+  /// There is no need to synchronise the stream in sycl as it is automatically handled by sycl runtime scheduler.
+  EIGEN_STRONG_INLINE void synchronize() const {}
 };

 } // end namespace Eigen