Commit b557662e58 by Gael Guennebaud, 2016-01-09 08:37:01 +01:00
5 changed files with 17 additions and 29 deletions

File 1 of 5:

@@ -132,13 +132,13 @@ template <typename T> class array<T, 0> {
     return *static_cast<const T*>(NULL);
   }
-  static EIGEN_ALWAYS_INLINE std::size_t size() { return 0; }
+  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::size_t size() { return 0; }

   EIGEN_DEVICE_FUNC
   EIGEN_STRONG_INLINE array() { }

 #ifdef EIGEN_HAS_VARIADIC_TEMPLATES
-  array(std::initializer_list<T> l) {
+  EIGEN_DEVICE_FUNC array(std::initializer_list<T> l) {
     eigen_assert(l.size() == 0);
   }
 #endif
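Both hunks in this file add EIGEN_DEVICE_FUNC so that array<T, 0>::size() and the initializer-list constructor become callable from CUDA device code as well as from the host. A minimal sketch of what the annotation buys (the macro name and definition below are simplified stand-ins; Eigen's real definition lives in Eigen/src/Core/util/Macros.h and carries extra guards):

    #include <cstddef>

    // Simplified stand-in for EIGEN_DEVICE_FUNC: under nvcc it marks a
    // function as callable from both host and device; otherwise it is empty.
    #if defined(__CUDACC__)
      #define MY_DEVICE_FUNC __host__ __device__
    #else
      #define MY_DEVICE_FUNC
    #endif

    // Hypothetical stand-in for Eigen's array<T, 0>: with the annotation,
    // size() can now be queried from inside a kernel as well as on the host.
    template <typename T> struct array0 {
      MY_DEVICE_FUNC static std::size_t size() { return 0; }
    };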

File 2 of 5:

@@ -238,14 +238,10 @@ struct GpuDevice {
 };

-#ifndef __CUDA_ARCH__
 #define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
   (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
   assert(cudaGetLastError() == cudaSuccess);
-#else
-#define LAUNCH_CUDA_KERNEL(...) \
-  eigen_assert(false && "Cannot launch a kernel from another kernel");
-#endif

 // FIXME: Should be device and kernel specific.
 #ifdef __CUDACC__
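With the #ifndef __CUDA_ARCH__ / #else branch gone, LAUNCH_CUDA_KERNEL has a single host-side definition. An illustration of a call site, using the kernel and variable names that appear in the TensorExecutor hunks below:

    // LAUNCH_CUDA_KERNEL((EigenMetaKernel_NonVectorizable<Evaluator, Index>),
    //                    num_blocks, block_size, 0, device, evaluator, size);
    // now expands unconditionally to:
    //
    //   (EigenMetaKernel_NonVectorizable<Evaluator, Index>)
    //       <<< (num_blocks), (block_size), (0), (device).stream() >>> (evaluator, size);
    //   assert(cudaGetLastError() == cudaSuccess);
    //
    // i.e. the macro may only appear in host code compiled by nvcc; the former
    // device-side fallback that asserted "Cannot launch a kernel from another
    // kernel" is deleted rather than compiled in.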

File 3 of 5:

@@ -156,14 +156,14 @@ template <typename Expression>
 class TensorExecutor<Expression, GpuDevice, false> {
  public:
   typedef typename Expression::Index Index;
-  EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device);
+  static void run(const Expression& expr, const GpuDevice& device);
 };

 template <typename Expression>
 class TensorExecutor<Expression, GpuDevice, true> {
  public:
   typedef typename Expression::Index Index;
-  EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device);
+  static void run(const Expression& expr, const GpuDevice& device);
 };

 #if defined(__CUDACC__)
@@ -213,9 +213,8 @@ EigenMetaKernel_Vectorizable(Evaluator memcopied_eval, Index size) {
 /*static*/
 template <typename Expression>
-EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device)
+inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device)
 {
-#ifndef __CUDA_ARCH__
   TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
   const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
   if (needs_assign)
@@ -228,17 +227,13 @@ EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, false>::run(
     LAUNCH_CUDA_KERNEL((EigenMetaKernel_NonVectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size);
   }
   evaluator.cleanup();
-#else
-  eigen_assert(false && "Cannot launch a kernel from another kernel");
-#endif
 }

 /*static*/
 template<typename Expression>
-EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device)
+inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device)
 {
-#ifndef __CUDA_ARCH__
   TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
   const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
   if (needs_assign)
@@ -251,9 +246,6 @@ EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, true>::run(c
     LAUNCH_CUDA_KERNEL((EigenMetaKernel_Vectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size);
   }
   evaluator.cleanup();
-#else
-  eigen_assert(false && "Cannot launch a kernel from another kernel");
-#endif
 }

 #endif // __CUDACC__
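The run() definitions lose both the EIGEN_DEVICE_FUNC qualifier and the #ifndef __CUDA_ARCH__ guard with its device-side assert: kernel launches now happen exclusively from host code. A hedged sketch of the host-side path that reaches these run() overloads (it assumes d_in1, d_in2 and d_out are device buffers of n floats obtained with cudaMalloc, and that the CudaStreamDevice/GpuDevice/TensorMap pattern from the Tensor module's CUDA tests is available; compile as a .cu file with nvcc):

    #define EIGEN_USE_GPU
    #include <unsupported/Eigen/CXX11/Tensor>

    void add_on_gpu(float* d_in1, float* d_in2, float* d_out, int n) {
      Eigen::CudaStreamDevice stream;   // wraps a cudaStream_t
      Eigen::GpuDevice device(&stream);
      Eigen::TensorMap<Eigen::Tensor<float, 1> > in1(d_in1, n), in2(d_in2, n), out(d_out, n);
      // The device(...) assignment dispatches through
      // TensorExecutor<..., GpuDevice, ...>::run(), which computes the launch
      // configuration and invokes LAUNCH_CUDA_KERNEL on the host.
      out.device(device) = in1 + in2;
    }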

File 4 of 5:

@@ -342,7 +342,7 @@ template <typename Self, typename Op, typename Device>
 struct InnerReducer {
   static const bool HasOptimizedImplementation = false;

-  static EIGEN_DEVICE_FUNC void run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
+  static void run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
     assert(false && "Not implemented");
   }
 };
@@ -352,7 +352,7 @@ template <typename Self, typename Op, typename Device>
 struct OuterReducer {
   static const bool HasOptimizedImplementation = false;

-  static EIGEN_DEVICE_FUNC void run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
+  static void run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) {
     assert(false && "Not implemented");
   }
 };
@@ -506,7 +506,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
   typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
   typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;

-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
     m_impl.evalSubExprsIfNeeded(NULL);

     // Use the FullReducer if possible.
@@ -527,7 +527,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     }

     // Attempt to use an optimized reduction.
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
+#if 0
     else if (RunningOnGPU && data && (m_device.majorDeviceVersion() >= 3)) {
       bool reducing_inner_dims = true;
       for (int i = 0; i < NumReducedDims; ++i) {
@@ -537,12 +537,12 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
           reducing_inner_dims &= m_reducedDims[NumInputDims - 1 - i];
         }
       }
-      if (internal::InnerReducer<Self, Op, GpuDevice>::HasOptimizedImplementation &&
+      if (internal::InnerReducer<Self, Op, Device>::HasOptimizedImplementation &&
          (reducing_inner_dims || ReducingInnerMostDims)) {
        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
        Op reducer(m_reducer);
-       internal::InnerReducer<Self, Op, GpuDevice>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
+       internal::InnerReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
        return false;
      }
@@ -554,12 +554,12 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
           preserving_inner_dims &= m_reducedDims[i];
         }
       }
-      if (internal::OuterReducer<Self, Op, GpuDevice>::HasOptimizedImplementation &&
+      if (internal::OuterReducer<Self, Op, Device>::HasOptimizedImplementation &&
          preserving_inner_dims) {
        const Index num_values_to_reduce = internal::array_prod(m_reducedDims);
        const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions);
        Op reducer(m_reducer);
-       internal::OuterReducer<Self, Op, GpuDevice>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
+       internal::OuterReducer<Self, Op, Device>::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve);
        return false;
      }
   }
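Besides the `#if 0` that temporarily disables this branch, the dispatch now names InnerReducer/OuterReducer<Self, Op, Device> with the evaluator's own Device parameter instead of hard-coding GpuDevice, so the probe compiles for any device. A minimal sketch of the opt-in pattern behind HasOptimizedImplementation (all names below are simplified stand-ins, and SomeDevice is hypothetical):

    #include <cassert>

    // Generic fallback: advertises no optimized path; run() must never be reached.
    template <typename Self, typename Op, typename Device>
    struct InnerReducerSketch {
      static const bool HasOptimizedImplementation = false;
      static void run(const Self&, Op&, const Device&) { assert(false && "Not implemented"); }
    };

    // A device that does provide a fast inner reduction opts in by specializing:
    struct SomeDevice {};  // hypothetical device type
    template <typename Self, typename Op>
    struct InnerReducerSketch<Self, Op, SomeDevice> {
      static const bool HasOptimizedImplementation = true;
      static void run(const Self&, Op&, const SomeDevice&) { /* optimized kernel */ }
    };

    // Call sites then branch on the trait, exactly as evalSubExprsIfNeeded does:
    //   if (InnerReducerSketch<Self, Op, Device>::HasOptimizedImplementation) { ... }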

File 5 of 5:

@@ -115,11 +115,11 @@ struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
       internal::is_same<typename Self::CoeffReturnType, float>::value;

   template <typename OutputType>
-  EIGEN_DEVICE_FUNC static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
+  static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
     assert(false && "Should only be called on floats");
   }

-  EIGEN_DEVICE_FUNC static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) {
+  static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) {
     typedef typename Self::Index Index;
     const Index num_coeffs = array_prod(self.m_impl.dimensions());
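Here too, EIGEN_DEVICE_FUNC is dropped from both run() overloads, making them host-side launchers. The pair also encodes the "floats only" restriction through overload resolution: the templated OutputType catch-all fires the assert, while the non-template float* overload is preferred whenever the output type is exactly float. A minimal sketch of that trick (FullReducerSketch is a hypothetical stand-in):

    #include <cassert>
    #include <cstdio>

    struct FullReducerSketch {
      template <typename OutputType>
      static void run(OutputType*) {      // catch-all for unsupported types
        assert(false && "Should only be called on floats");
      }
      static void run(float* output) {    // exact match, wins for float outputs
        // ... launch the real reduction kernel here ...
        std::printf("reducing into %p\n", static_cast<void*>(output));
      }
    };

    // FullReducerSketch::run(my_float_ptr);   // picks the float* overload
    // FullReducerSketch::run(my_double_ptr);  // picks the template and asserts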