From b5d2078c4a9cdb81416586cca5658e38b059148d Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Tue, 22 Dec 2015 15:06:17 -0800
Subject: [PATCH] Optimized outer reduction on GPUs.

---
 .../Eigen/CXX11/src/Tensor/TensorReduction.h  | 39 +++++++++++++-
 .../CXX11/src/Tensor/TensorReductionCuda.h    | 54 +++++++++++++++++++
 2 files changed, 92 insertions(+), 1 deletion(-)

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index aaa877185..c30980a49 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -337,9 +337,23 @@ struct FullReducer {
 #endif
 
 
+// Default outer reducer
+template <typename Self, typename Op, typename Device>
+struct OuterReducer {
+  static const bool HasOptimizedImplementation = false;
+
+  static EIGEN_DEVICE_FUNC void run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
+    assert(false && "Not implemented");
+  }
+};
+
+
 #if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
 template <int B, int N, typename S, typename R, typename I>
 __global__ void FullReductionKernel(R, const S, I, typename S::CoeffReturnType*);
+
+template <int NPT, typename S, typename R, typename I>
+__global__ void OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
 #endif
 
 } // end namespace internal
@@ -439,7 +453,7 @@ struct TensorEvaluator<TensorReductionOp<Op, Dims, ArgType>, Device>
         }
       }
     }
-    
+
     // Precompute input strides.
     if (NumInputDims > 0) {
       array<Index, NumInputDims> input_strides;
@@ -498,6 +512,28 @@ struct TensorEvaluator<TensorReductionOp<Op, Dims, ArgType>, Device>
       internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
       return need_assign;
     }
+
+    // Attempt to use an optimized reduction.
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+    else if (RunningOnGPU && data && (m_device.majorDeviceVersion() >= 3)) {
+      bool preserving_inner_dims = true;
+      for (int i = 0; i < NumReducedDims; ++i) {
+        if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
+          preserving_inner_dims &= m_reducedDims[NumInputDims - 1 - i];
+        } else {
+          preserving_inner_dims &= m_reducedDims[i];
+        }
+      }
+      if (internal::OuterReducer<Self, Op, GpuDevice>::HasOptimizedImplementation &&
+          preserving_inner_dims) {
+        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
+        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
+        Op reducer(m_reducer);
+        internal::OuterReducer<Self, Op, GpuDevice>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
+        return false;
+      }
+    }
+#endif
     return true;
   }
 
@@ -579,6 +615,7 @@ struct TensorEvaluator<TensorReductionOp<Op, Dims, ArgType>, Device>
 #endif
 #if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
   template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*);
+  template <int NPT, typename S, typename R, typename I> friend void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
 #endif
 
   // Returns the Index in the input tensor of the first value that needs to be
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
index af1b9432c..f0e9d528e 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h
@@ -131,6 +131,60 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
   }
 };
 
+
+template <int NumPerThread, typename Self,
+          typename Reducer, typename Index>
+__global__ void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs,
+                                     typename Self::CoeffReturnType* output) {
+  const Index num_threads = blockDim.x * gridDim.x;
+  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
+  // Initialize the output values
+  for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) {
+    output[i] = reducer.initialize();
+  }
+
+  // Do the reduction.
+  const Index max_iter = DIVUP(num_coeffs_to_reduce, NumPerThread) * num_preserved_coeffs;
+  for (Index i = thread_id; i < max_iter; i += num_threads) {
+    const Index input_col = i % num_preserved_coeffs;
+    const Index input_row = (i / num_preserved_coeffs) * NumPerThread;
+    typename Self::CoeffReturnType reduced_val = reducer.initialize();
+    const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce);
+    for (Index j = input_row; j < max_row; j++) {
+      typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col);
+      reducer.reduce(val, &reduced_val);
+    }
+    atomicReduce(&(output[input_col]), reduced_val, reducer);
+  }
+}
+
+
+template <typename Self, typename Op>
+struct OuterReducer<Self, Op, GpuDevice> {
+  // Unfortunately nvidia doesn't support well exotic types such as complex,
+  // so reduce the scope of the optimized version of the code to the simple case
+  // of floats.
+  static const bool HasOptimizedImplementation = !Op::IsStateful &&
+                                                 internal::is_same<typename Self::CoeffReturnType, float>::value;
+
+  template <typename Device, typename OutputType>
+  static void run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) {
+    assert(false && "Should only be called to reduce floats on a gpu device");
+  }
+
+  static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) {
+    typedef typename Self::Index Index;
+
+    const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals;
+    const int block_size = 256;
+    const int num_per_thread = 16;
+    const int num_blocks = std::ceil(static_cast<float>(num_coeffs) / (block_size * num_per_thread));
+
+    LAUNCH_CUDA_KERNEL((OuterReductionKernel<num_per_thread, Self, Op, Index>),
+                       num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output);
+  }
+};
+
 #endif
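
Editor's note (not part of the commit): a minimal host-side sketch of the kind of reduction the new code path targets. The function name sum_outer_dim, the 128x1024 shape, and the caller-supplied Eigen::GpuDevice are illustrative assumptions; the Tensor calls (TensorMap, sum(), device(), GpuDevice::allocate/memcpy*) are the public API sitting in front of the evaluator changed above. Reducing the last dimension of a column-major float tensor preserves the inner dimension, so evalSubExprsIfNeeded can dispatch to internal::OuterReducer<Self, Op, GpuDevice> when compiled with nvcc for a compute-capability >= 3.0 device.

// Illustrative sketch only -- assumes a GpuDevice constructed by the caller
// (e.g. from an Eigen::CudaStreamDevice) and compilation with nvcc.
#include <unsupported/Eigen/CXX11/Tensor>

void sum_outer_dim(const Eigen::GpuDevice& gpu_device) {
  // 128 coefficients are preserved, 1024 are reduced away.
  Eigen::Tensor<float, 2> in(128, 1024);
  in.setRandom();
  Eigen::Tensor<float, 1> out(128);

  // Device buffers for the input and the reduced output.
  float* d_in = static_cast<float*>(gpu_device.allocate(in.size() * sizeof(float)));
  float* d_out = static_cast<float*>(gpu_device.allocate(out.size() * sizeof(float)));
  gpu_device.memcpyHostToDevice(d_in, in.data(), in.size() * sizeof(float));

  Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_in(d_in, 128, 1024);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > gpu_out(d_out, 128);

  // Dimension 1 is the outer dimension of a ColMajor tensor, so reducing it
  // keeps the inner dimension intact and lets the evaluator pick the
  // OuterReducer<Self, Op, GpuDevice> path added by this patch
  // (float coefficients, non-stateful sum reducer).
  Eigen::array<int, 1> reduce_dim;
  reduce_dim[0] = 1;
  gpu_out.device(gpu_device) = gpu_in.sum(reduce_dim);

  gpu_device.memcpyDeviceToHost(out.data(), d_out, out.size() * sizeof(float));
  gpu_device.deallocate(d_in);
  gpu_device.deallocate(d_out);
}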
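
Editor's note (not part of the commit): the launch arithmetic in OuterReducer<Self, Op, GpuDevice>::run, worked through for the assumed 128x1024 case above. block_size = 256 and num_per_thread = 16 come from the patch; the concrete tensor sizes are the example's assumption.

//   num_coeffs_to_reduce = 1024 (outer dimension), num_preserved_vals = 128 (inner dimension)
//   num_coeffs  = 1024 * 128                 = 131072
//   num_blocks  = ceil(131072 / (256 * 16))  = 32
//   num_threads = 32 * 256                   = 8192
//
// Inside the kernel:
//   max_iter = DIVUP(1024, 16) * 128 = 64 * 128 = 8192
// so each of the 8192 threads reduces one strip of (at most) 16 consecutive
// rows of a single output column in a register, then folds that partial
// result into output[input_col] with a single atomicReduce call.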