Add missing CUDA kernel to tensor scan op
The TensorScanOp implementation was missing a CUDA kernel launch. This adds a simple placeholder implementation.
Commit 85699850d9 (parent 328c5d876a)
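For context, here is a minimal usage sketch (not part of the patch) of the scan operations this file implements, using the same cumsum/cumprod API that the tests below exercise. The header path and the concrete values are illustrative assumptions.

// Illustrative sketch only: inclusive vs. exclusive scans via the Tensor API.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 1> t(5);
  t.setValues({1.0f, 2.0f, 3.0f, 4.0f, 5.0f});

  // Inclusive sum scan along axis 0: 1 3 6 10 15
  Eigen::Tensor<float, 1> inclusive_sum = t.cumsum(0);
  // Exclusive sum scan (second argument true): 0 1 3 6 10
  Eigen::Tensor<float, 1> exclusive_sum = t.cumsum(0, true);
  // Inclusive product scan: 1 2 6 24 120
  Eigen::Tensor<float, 1> inclusive_prod = t.cumprod(0);

  std::cout << inclusive_sum << "\n" << exclusive_sum << "\n" << inclusive_prod << "\n";
  return 0;
}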
TensorScan.h

@@ -9,9 +9,11 @@
 #ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
 #define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
 
 namespace Eigen {
 
 namespace internal {
 
 template <typename Op, typename XprType>
 struct traits<TensorScanOp<Op, XprType> >
     : public traits<XprType> {
@@ -42,9 +44,7 @@ struct nested<TensorScanOp<Op, XprType>, 1,
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor scan class.
- *
  */
 
 template <typename Op, typename XprType>
 class TensorScanOp
     : public TensorBase<TensorScanOp<Op, XprType>, ReadOnlyAccessors> {
@@ -76,6 +76,9 @@ protected:
   const bool m_exclusive;
 };
 
+template <typename Self, typename Reducer, typename Device>
+struct ScanLauncher;
+
 // Eval as rvalue
 template <typename Op, typename ArgType, typename Device>
 struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
@@ -87,6 +90,7 @@ struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
   typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
   typedef typename XprType::CoeffReturnType CoeffReturnType;
   typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
+  typedef TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> Self;
 
   enum {
     IsAligned = false,
@@ -128,18 +132,43 @@ struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {
     return m_impl.dimensions();
   }
 
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const {
+    return m_stride;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const {
+    return m_size;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const {
+    return m_accumulator;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const {
+    return m_exclusive;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& inner() const {
+    return m_impl;
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const {
+    return m_device;
+  }
+
+  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
     m_impl.evalSubExprsIfNeeded(NULL);
+    ScanLauncher<Self, Op, Device> launcher;
     if (data) {
-      accumulateTo(data);
+      launcher(*this, data);
       return false;
-    } else {
+    }
+
     const Index total_size = internal::array_prod(dimensions());
     m_output = static_cast<CoeffReturnType*>(m_device.allocate(total_size * sizeof(Scalar)));
-    accumulateTo(m_output);
+    launcher(*this, m_output);
     return true;
-    }
   }
 
   template<int LoadMode>
   EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
@@ -176,27 +205,35 @@ protected:
   const Index m_size;
   Index m_stride;
   CoeffReturnType* m_output;
+};
+
+// CPU implementation of scan
+// TODO(ibab) This single-threaded implementation should be parallelized,
+// at least by running multiple scans at the same time.
+template <typename Self, typename Reducer, typename Device>
+struct ScanLauncher {
+  void operator()(Self& self, typename Self::CoeffReturnType *data) {
+    Index total_size = internal::array_prod(self.dimensions());
 
-  // TODO(ibab) Parallelize this single-threaded implementation if desired
-  EIGEN_DEVICE_FUNC void accumulateTo(Scalar* data) {
     // We fix the index along the scan axis to 0 and perform a
     // scan per remaining entry. The iteration is split into two nested
     // loops to avoid an integer division by keeping track of each idx1 and idx2.
-    for (Index idx1 = 0; idx1 < dimensions().TotalSize() / m_size; idx1 += m_stride) {
-      for (Index idx2 = 0; idx2 < m_stride; idx2++) {
+    for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) {
+      for (Index idx2 = 0; idx2 < self.stride(); idx2++) {
         // Calculate the starting offset for the scan
-        Index offset = idx1 * m_size + idx2;
+        Index offset = idx1 + idx2;
 
         // Compute the scan along the axis, starting at the calculated offset
-        CoeffReturnType accum = m_accumulator.initialize();
-        for (Index idx3 = 0; idx3 < m_size; idx3++) {
-          Index curr = offset + idx3 * m_stride;
-          if (m_exclusive) {
-            data[curr] = m_accumulator.finalize(accum);
-            m_accumulator.reduce(m_impl.coeff(curr), &accum);
+        typename Self::CoeffReturnType accum = self.accumulator().initialize();
+        for (Index idx3 = 0; idx3 < self.size(); idx3++) {
+          Index curr = offset + idx3 * self.stride();
+          if (self.exclusive()) {
+            data[curr] = self.accumulator().finalize(accum);
+            self.accumulator().reduce(self.inner().coeff(curr), &accum);
           } else {
-            m_accumulator.reduce(m_impl.coeff(curr), &accum);
-            data[curr] = m_accumulator.finalize(accum);
+            self.accumulator().reduce(self.inner().coeff(curr), &accum);
+            data[curr] = self.accumulator().finalize(accum);
           }
         }
       }
@@ -204,6 +241,47 @@ protected:
     }
   }
 };
 
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+
+// GPU implementation of scan
+// TODO(ibab) This placeholder implementation performs multiple scans in
+// parallel, but it would be better to use a parallel scan algorithm and
+// optimize memory access.
+template <typename Self, typename Reducer>
+__global__ void ScanKernel(Self self, Index total_size, typename Self::CoeffReturnType* data) {
+  // Compute offset as in the CPU version
+  Index val = threadIdx.x + blockIdx.x * blockDim.x;
+  Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride();
+
+  if (offset + (self.size() - 1) * self.stride() < total_size) {
+    // Compute the scan along the axis, starting at the calculated offset
+    typename Self::CoeffReturnType accum = self.accumulator().initialize();
+    for (Index idx = 0; idx < self.size(); idx++) {
+      Index curr = offset + idx * self.stride();
+      if (self.exclusive()) {
+        data[curr] = self.accumulator().finalize(accum);
+        self.accumulator().reduce(self.inner().coeff(curr), &accum);
+      } else {
+        self.accumulator().reduce(self.inner().coeff(curr), &accum);
+        data[curr] = self.accumulator().finalize(accum);
+      }
+    }
+  }
+  __syncthreads();
+}
+
+template <typename Self, typename Reducer>
+struct ScanLauncher<Self, Reducer, GpuDevice> {
+  void operator()(const Self& self, typename Self::CoeffReturnType* data) {
+    Index total_size = internal::array_prod(self.dimensions());
+    Index num_blocks = (total_size / self.size() + 63) / 64;
+    Index block_size = 64;
+    LAUNCH_CUDA_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0, self.device(), self, total_size, data);
+  }
+};
+#endif  // EIGEN_USE_GPU && __CUDACC__
+
 } // end namespace Eigen
 
 #endif // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
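The GPU launcher above assigns one thread per scan line: total_size / size() lines, launched in blocks of 64 threads, with each thread running the sequential scan of length size() for its line. The sketch below (not part of the patch) shows how that code path would be reached from user code; it assumes nvcc with EIGEN_USE_GPU and mirrors the CudaStreamDevice / GpuDevice / TensorMap pattern used by Eigen's other *_cuda tests. The function name run_gpu_cumsum is hypothetical.

// Hypothetical host-side sketch of exercising the new GpuDevice scan launcher.
#define EIGEN_USE_GPU
#include <cuda_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>

void run_gpu_cumsum(const float* host_in, float* host_out, int size) {
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, size * sizeof(float));
  cudaMalloc(&d_out, size * sizeof(float));
  cudaMemcpy(d_in, host_in, size * sizeof(float), cudaMemcpyHostToDevice);

  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > in(d_in, size);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > out(d_out, size);

  // Evaluating on a GpuDevice dispatches to ScanLauncher<..., GpuDevice>,
  // which launches ScanKernel with 64-thread blocks, one thread per scan line.
  out.device(gpu_device) = in.cumsum(0);

  cudaDeviceSynchronize();
  cudaMemcpy(host_out, d_out, size * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_in);
  cudaFree(d_out);
}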
CMakeLists.txt

@@ -220,7 +220,7 @@ if(CUDA_FOUND AND EIGEN_TEST_CUDA)
   ei_add_test(cxx11_tensor_reduction_cuda)
   ei_add_test(cxx11_tensor_argmax_cuda)
   ei_add_test(cxx11_tensor_cast_float16_cuda)
-  # ei_add_test(cxx11_tensor_scan_cuda)
+  ei_add_test(cxx11_tensor_scan_cuda)
 
   # The random number generation code requires arch 3.5 or greater.
   if (${EIGEN_CUDA_COMPUTE_ARCH} GREATER 34)
cxx11_tensor_scan.cpp

@@ -14,51 +14,37 @@
 
 using Eigen::Tensor;
 
-template <int DataLayout, typename Type=float>
+template <int DataLayout, typename Type=float, bool Exclusive = false>
 static void test_1d_scan()
 {
   int size = 50;
   Tensor<Type, 1, DataLayout> tensor(size);
   tensor.setRandom();
-  Tensor<Type, 1, DataLayout> result = tensor.cumsum(0);
+  Tensor<Type, 1, DataLayout> result = tensor.cumsum(0, Exclusive);
 
   VERIFY_IS_EQUAL(tensor.dimension(0), result.dimension(0));
 
   float accum = 0;
   for (int i = 0; i < size; i++) {
+    if (Exclusive) {
+      VERIFY_IS_EQUAL(result(i), accum);
+      accum += tensor(i);
+    } else {
       accum += tensor(i);
       VERIFY_IS_EQUAL(result(i), accum);
+    }
   }
 
-  accum = 1;
-  result = tensor.cumprod(0);
-  for (int i = 0; i < size; i++) {
-    accum *= tensor(i);
-    VERIFY_IS_EQUAL(result(i), accum);
-  }
-}
-
-template <int DataLayout, typename Type=float>
-static void test_1d_inclusive_scan()
-{
-  int size = 50;
-  Tensor<Type, 1, DataLayout> tensor(size);
-  tensor.setRandom();
-  Tensor<Type, 1, DataLayout> result = tensor.cumsum(0, true);
-
-  VERIFY_IS_EQUAL(tensor.dimension(0), result.dimension(0));
-
-  float accum = 0;
-  for (int i = 0; i < size; i++) {
-    VERIFY_IS_EQUAL(result(i), accum);
-    accum += tensor(i);
-  }
 
   accum = 1;
-  result = tensor.cumprod(0, true);
+  result = tensor.cumprod(0, Exclusive);
   for (int i = 0; i < size; i++) {
+    if (Exclusive) {
       VERIFY_IS_EQUAL(result(i), accum);
       accum *= tensor(i);
+    } else {
+      accum *= tensor(i);
+      VERIFY_IS_EQUAL(result(i), accum);
+    }
   }
 }
 
@@ -74,26 +60,26 @@ static void test_4d_scan()
   result = tensor.cumsum(0);
   float accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(i, 0, 0, 0);
-    VERIFY_IS_EQUAL(result(i, 0, 0, 0), accum);
+    accum += tensor(i, 1, 2, 3);
+    VERIFY_IS_EQUAL(result(i, 1, 2, 3), accum);
   }
   result = tensor.cumsum(1);
   accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(0, i, 0, 0);
-    VERIFY_IS_EQUAL(result(0, i, 0, 0), accum);
+    accum += tensor(1, i, 2, 3);
+    VERIFY_IS_EQUAL(result(1, i, 2, 3), accum);
   }
   result = tensor.cumsum(2);
   accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(0, 0, i, 0);
-    VERIFY_IS_EQUAL(result(0, 0, i, 0), accum);
+    accum += tensor(1, 2, i, 3);
+    VERIFY_IS_EQUAL(result(1, 2, i, 3), accum);
  }
   result = tensor.cumsum(3);
   accum = 0;
   for (int i = 0; i < size; i++) {
-    accum += tensor(0, 0, 0, i);
-    VERIFY_IS_EQUAL(result(0, 0, 0, i), accum);
+    accum += tensor(1, 2, 3, i);
+    VERIFY_IS_EQUAL(result(1, 2, 3, i), accum);
   }
 }
 
@@ -113,8 +99,10 @@ static void test_tensor_maps() {
 }
 
 void test_cxx11_tensor_scan() {
-  CALL_SUBTEST(test_1d_scan<ColMajor>());
-  CALL_SUBTEST(test_1d_scan<RowMajor>());
+  CALL_SUBTEST((test_1d_scan<ColMajor, float, true>()));
+  CALL_SUBTEST((test_1d_scan<ColMajor, float, false>()));
+  CALL_SUBTEST((test_1d_scan<RowMajor, float, true>()));
+  CALL_SUBTEST((test_1d_scan<RowMajor, float, false>()));
   CALL_SUBTEST(test_4d_scan<ColMajor>());
   CALL_SUBTEST(test_4d_scan<RowMajor>());
   CALL_SUBTEST(test_tensor_maps<ColMajor>());