Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-08-12 03:39:01 +08:00)
Merged in ibab/eigen/fix-tensor-scan-gpu (pull request PR-205)
Add missing CUDA kernel to tensor scan op
Commit b047ca765f
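
With the GpuDevice specialization of ScanLauncher in place, cumulative operations on tensors can run on the GPU. A minimal, hypothetical usage sketch (not part of the patch; it assumes a CUDA build with EIGEN_USE_GPU, device buffers already allocated, and a GpuDevice constructed by the caller, e.g. from an Eigen::CudaStreamDevice):

#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>

// Inclusive cumulative sum along axis 0, evaluated on the GPU.
// 'in' and 'out' are assumed to map device memory of identical shape.
void cumsum_on_gpu(const Eigen::GpuDevice& gpu_device,
                   const Eigen::TensorMap<Eigen::Tensor<float, 2> >& in,
                   Eigen::TensorMap<Eigen::Tensor<float, 2> >& out) {
  // cumsum(axis, exclusive): the scan evaluator forwards the work to
  // ScanLauncher<Self, Op, GpuDevice>, which launches ScanKernel.
  out.device(gpu_device) = in.cumsum(/*axis=*/0, /*exclusive=*/false);
}
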
@@ -9,9 +9,11 @@

 #ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
 #define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H

 namespace Eigen {

 namespace internal {

 template <typename Op, typename XprType>
 struct traits<TensorScanOp<Op, XprType> >
     : public traits<XprType> {

@@ -42,9 +44,7 @@ struct nested<TensorScanOp<Op, XprType>, 1,
   * \ingroup CXX11_Tensor_Module
   *
   * \brief Tensor scan class.
   *
   */

 template <typename Op, typename XprType>
 class TensorScanOp
     : public TensorBase<TensorScanOp<Op, XprType>, ReadOnlyAccessors> {

@@ -76,6 +76,9 @@ protected:
   const bool m_exclusive;
 };

+template <typename Self, typename Reducer, typename Device>
+struct ScanLauncher;
+
 // Eval as rvalue
 template <typename Op, typename ArgType, typename Device>
 struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {

@@ -87,6 +90,7 @@ struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
   typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
   typedef typename XprType::CoeffReturnType CoeffReturnType;
   typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  typedef TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> Self;

   enum {
     IsAligned = false,

@@ -128,17 +132,42 @@ struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
     return m_impl.dimensions();
   }

-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const {
+    return m_stride;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const {
+    return m_size;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const {
+    return m_accumulator;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const {
+    return m_exclusive;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& inner() const {
+    return m_impl;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const {
+    return m_device;
+  }
+
+  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
     m_impl.evalSubExprsIfNeeded(NULL);
+    ScanLauncher<Self, Op, Device> launcher;
     if (data) {
-      accumulateTo(data);
+      launcher(*this, data);
       return false;
-    } else {
-      const Index total_size = internal::array_prod(dimensions());
-      m_output = static_cast<CoeffReturnType*>(m_device.allocate(total_size * sizeof(Scalar)));
-      accumulateTo(m_output);
-      return true;
     }
+
+    const Index total_size = internal::array_prod(dimensions());
+    m_output = static_cast<CoeffReturnType*>(m_device.allocate(total_size * sizeof(Scalar)));
+    launcher(*this, m_output);
+    return true;
   }

   template<int LoadMode>
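
Note on the evaluator changes above: the new stride(), size(), accumulator(), exclusive(), inner() and device() accessors expose exactly the state that the out-of-class ScanLauncher (and the CUDA kernel in the next hunk) needs in order to drive the scan from outside the evaluator. evalSubExprsIfNeeded() now only picks the output buffer: if the caller supplies one, the launcher writes into it and the function returns false (nothing left to evaluate); otherwise it allocates m_output on the device, runs the launcher into it, and returns true so that later coeff() calls read from that buffer.
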

@@ -176,34 +205,83 @@ protected:
   const Index m_size;
   Index m_stride;
   CoeffReturnType* m_output;
-
-  // TODO(ibab) Parallelize this single-threaded implementation if desired
-  EIGEN_DEVICE_FUNC void accumulateTo(Scalar* data) {
-    // We fix the index along the scan axis to 0 and perform a
-    // scan per remaining entry. The iteration is split into two nested
-    // loops to avoid an integer division by keeping track of each idx1 and idx2.
-    for (Index idx1 = 0; idx1 < dimensions().TotalSize() / m_size; idx1 += m_stride) {
-      for (Index idx2 = 0; idx2 < m_stride; idx2++) {
-        // Calculate the starting offset for the scan
-        Index offset = idx1 * m_size + idx2;
-
-        // Compute the scan along the axis, starting at the calculated offset
-        CoeffReturnType accum = m_accumulator.initialize();
-        for (Index idx3 = 0; idx3 < m_size; idx3++) {
-          Index curr = offset + idx3 * m_stride;
-          if (m_exclusive) {
-            data[curr] = m_accumulator.finalize(accum);
-            m_accumulator.reduce(m_impl.coeff(curr), &accum);
-          } else {
-            m_accumulator.reduce(m_impl.coeff(curr), &accum);
-            data[curr] = m_accumulator.finalize(accum);
-          }
-        }
-      }
-    }
-  }
 };
+
+// CPU implementation of scan
+// TODO(ibab) This single-threaded implementation should be parallelized,
+// at least by running multiple scans at the same time.
+template <typename Self, typename Reducer, typename Device>
+struct ScanLauncher {
+  void operator()(Self& self, typename Self::CoeffReturnType *data) {
+    Index total_size = internal::array_prod(self.dimensions());
+
+    // We fix the index along the scan axis to 0 and perform a
+    // scan per remaining entry. The iteration is split into two nested
+    // loops to avoid an integer division by keeping track of each idx1 and idx2.
+    for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) {
+      for (Index idx2 = 0; idx2 < self.stride(); idx2++) {
+        // Calculate the starting offset for the scan
+        Index offset = idx1 + idx2;
+
+        // Compute the scan along the axis, starting at the calculated offset
+        typename Self::CoeffReturnType accum = self.accumulator().initialize();
+        for (Index idx3 = 0; idx3 < self.size(); idx3++) {
+          Index curr = offset + idx3 * self.stride();
+
+          if (self.exclusive()) {
+            data[curr] = self.accumulator().finalize(accum);
+            self.accumulator().reduce(self.inner().coeff(curr), &accum);
+          } else {
+            self.accumulator().reduce(self.inner().coeff(curr), &accum);
+            data[curr] = self.accumulator().finalize(accum);
+          }
+        }
+      }
+    }
+  }
+};
+
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+
+// GPU implementation of scan
+// TODO(ibab) This placeholder implementation performs multiple scans in
+// parallel, but it would be better to use a parallel scan algorithm and
+// optimize memory access.
+template <typename Self, typename Reducer>
+__global__ void ScanKernel(Self self, Index total_size, typename Self::CoeffReturnType* data) {
+  // Compute offset as in the CPU version
+  Index val = threadIdx.x + blockIdx.x * blockDim.x;
+  Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride();
+
+  if (offset + (self.size() - 1) * self.stride() < total_size) {
+    // Compute the scan along the axis, starting at the calculated offset
+    typename Self::CoeffReturnType accum = self.accumulator().initialize();
+    for (Index idx = 0; idx < self.size(); idx++) {
+      Index curr = offset + idx * self.stride();
+      if (self.exclusive()) {
+        data[curr] = self.accumulator().finalize(accum);
+        self.accumulator().reduce(self.inner().coeff(curr), &accum);
+      } else {
+        self.accumulator().reduce(self.inner().coeff(curr), &accum);
+        data[curr] = self.accumulator().finalize(accum);
+      }
+    }
+  }
+  __syncthreads();
+
+}
+
+template <typename Self, typename Reducer>
+struct ScanLauncher<Self, Reducer, GpuDevice> {
+  void operator()(const Self& self, typename Self::CoeffReturnType* data) {
+    Index total_size = internal::array_prod(self.dimensions());
+    Index num_blocks = (total_size / self.size() + 63) / 64;
+    Index block_size = 64;
+    LAUNCH_CUDA_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0, self.device(), self, total_size, data);
+  }
+};
+#endif  // EIGEN_USE_GPU && __CUDACC__

 } // end namespace Eigen

 #endif // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
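
Both launchers in the hunk above use the same indexing: consecutive elements along the scan axis are stride() apart, each scan line holds size() of them, and scan line number val starts at offset = (val / stride()) * stride() * size() + val % stride(). For example, with size() == 5 and stride() == 4, val == 6 gives offset = 1 * 20 + 2 = 22 and the scan visits elements 22, 26, 30, 34 and 38. A standalone host-side sketch of that mapping (illustrative values only, not part of the patch):

#include <cstdio>

int main() {
  const long size = 5, stride = 4, total_size = 40;  // hypothetical tensor: 40 elements, scan axis of length 5
  const long num_scan_lines = total_size / size;     // the CUDA launcher starts one thread per scan line
  for (long val = 0; val < num_scan_lines; ++val) {
    long offset = (val / stride) * stride * size + val % stride;
    if (offset + (size - 1) * stride < total_size) {  // same bounds check as ScanKernel
      std::printf("scan line %ld starts at %ld:", val, offset);
      for (long idx = 0; idx < size; ++idx) {
        std::printf(" %ld", offset + idx * stride);   // elements visited, stride apart
      }
      std::printf("\n");
    }
  }
  return 0;
}

The GPU launcher covers these scan lines with blocks of 64 threads, (total_size / size() + 63) / 64 blocks in total; the bounds check lets surplus threads in the last block return without touching memory.
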

@@ -220,7 +220,7 @@ if(CUDA_FOUND AND EIGEN_TEST_CUDA)
   ei_add_test(cxx11_tensor_reduction_cuda)
   ei_add_test(cxx11_tensor_argmax_cuda)
   ei_add_test(cxx11_tensor_cast_float16_cuda)
-#  ei_add_test(cxx11_tensor_scan_cuda)
+  ei_add_test(cxx11_tensor_scan_cuda)

   # The random number generation code requires arch 3.5 or greater.
   if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 34)

@@ -14,87 +14,73 @@

 using Eigen::Tensor;

-template <int DataLayout, typename Type=float>
+template <int DataLayout, typename Type=float, bool Exclusive = false>
 static void test_1d_scan()
 {
   int size = 50;
   Tensor<Type, 1, DataLayout> tensor(size);
   tensor.setRandom();
-  Tensor<Type, 1, DataLayout> result = tensor.cumsum(0);
+  Tensor<Type, 1, DataLayout> result = tensor.cumsum(0, Exclusive);

   VERIFY_IS_EQUAL(tensor.dimension(0), result.dimension(0));

   float accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(i);
-    VERIFY_IS_EQUAL(result(i), accum);
+    if (Exclusive) {
+      VERIFY_IS_EQUAL(result(i), accum);
+      accum += tensor(i);
+    } else {
+      accum += tensor(i);
+      VERIFY_IS_EQUAL(result(i), accum);
+    }
   }

   accum = 1;
-  result = tensor.cumprod(0);
+  result = tensor.cumprod(0, Exclusive);
   for (int i = 0; i < size; i++) {
-    accum *= tensor(i);
-    VERIFY_IS_EQUAL(result(i), accum);
+    if (Exclusive) {
+      VERIFY_IS_EQUAL(result(i), accum);
+      accum *= tensor(i);
+    } else {
+      accum *= tensor(i);
+      VERIFY_IS_EQUAL(result(i), accum);
+    }
   }
 }

-template <int DataLayout, typename Type=float>
-static void test_1d_inclusive_scan()
-{
-  int size = 50;
-  Tensor<Type, 1, DataLayout> tensor(size);
-  tensor.setRandom();
-  Tensor<Type, 1, DataLayout> result = tensor.cumsum(0, true);
-
-  VERIFY_IS_EQUAL(tensor.dimension(0), result.dimension(0));
-
-  float accum = 0;
-  for (int i = 0; i < size; i++) {
-    VERIFY_IS_EQUAL(result(i), accum);
-    accum += tensor(i);
-  }
-
-  accum = 1;
-  result = tensor.cumprod(0, true);
-  for (int i = 0; i < size; i++) {
-    VERIFY_IS_EQUAL(result(i), accum);
-    accum *= tensor(i);
-  }
-}
-
 template <int DataLayout, typename Type=float>
 static void test_4d_scan()
 {
   int size = 5;
   Tensor<Type, 4, DataLayout> tensor(size, size, size, size);
   tensor.setRandom();

   Tensor<Type, 4, DataLayout> result(size, size, size, size);

   result = tensor.cumsum(0);
   float accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(i, 0, 0, 0);
-    VERIFY_IS_EQUAL(result(i, 0, 0, 0), accum);
+    accum += tensor(i, 1, 2, 3);
+    VERIFY_IS_EQUAL(result(i, 1, 2, 3), accum);
   }
   result = tensor.cumsum(1);
   accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(0, i, 0, 0);
-    VERIFY_IS_EQUAL(result(0, i, 0, 0), accum);
+    accum += tensor(1, i, 2, 3);
+    VERIFY_IS_EQUAL(result(1, i, 2, 3), accum);
   }
   result = tensor.cumsum(2);
   accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(0, 0, i, 0);
-    VERIFY_IS_EQUAL(result(0, 0, i, 0), accum);
+    accum += tensor(1, 2, i, 3);
+    VERIFY_IS_EQUAL(result(1, 2, i, 3), accum);
   }
   result = tensor.cumsum(3);
   accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(0, 0, 0, i);
-    VERIFY_IS_EQUAL(result(0, 0, 0, i), accum);
+    accum += tensor(1, 2, 3, i);
+    VERIFY_IS_EQUAL(result(1, 2, 3, i), accum);
   }
 }

 template <int DataLayout>

@@ -113,8 +99,10 @@ static void test_tensor_maps() {
 }

 void test_cxx11_tensor_scan() {
-  CALL_SUBTEST(test_1d_scan<ColMajor>());
-  CALL_SUBTEST(test_1d_scan<RowMajor>());
+  CALL_SUBTEST((test_1d_scan<ColMajor, float, true>()));
+  CALL_SUBTEST((test_1d_scan<ColMajor, float, false>()));
+  CALL_SUBTEST((test_1d_scan<RowMajor, float, true>()));
+  CALL_SUBTEST((test_1d_scan<RowMajor, float, false>()));
   CALL_SUBTEST(test_4d_scan<ColMajor>());
   CALL_SUBTEST(test_4d_scan<RowMajor>());
   CALL_SUBTEST(test_tensor_maps<ColMajor>());
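
For reference on what the reworked test checks: an inclusive cumsum of [1, 2, 3] is [1, 3, 6], while the exclusive variant is [0, 1, 3] (each output is the sum of the elements before the current one, starting from the reducer's identity); cumprod behaves analogously with 1 as the identity. The doubled parentheses in CALL_SUBTEST((test_1d_scan<ColMajor, float, true>())) are needed because the commas in the template argument list would otherwise be read as macro argument separators.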