Silenced some compilation warnings triggered by nvcc

Benoit Steiner 2015-12-17 13:39:01 -08:00
parent 40e6250fc3
commit 4aac55f684
3 changed files with 20 additions and 9 deletions
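When nvcc compiles a translation unit it makes separate host and device passes, and it warns whenever host-only code, such as a <<<...>>> kernel launch, is reachable from a function it must also consider for the device. The changes below silence those warnings by marking the GPU TensorExecutor entry points (and the reduction evaluator's evalSubExprsIfNeeded) EIGEN_DEVICE_FUNC, and by fencing the host-only launch paths behind #ifndef __CUDA_ARCH__ so the device pass sees only an eigen_assert stub.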


@@ -238,11 +238,14 @@ struct GpuDevice {
 };

+#ifndef __CUDA_ARCH__
 #define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \
   (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \
   assert(cudaGetLastError() == cudaSuccess);
+#else
+#define LAUNCH_CUDA_KERNEL(...) \
+  eigen_assert(false && "Cannot launch a kernel from another kernel");
+#endif

 // FIXME: Should be device and kernel specific.
 #ifdef __CUDACC__
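For context, the dual definition of LAUNCH_CUDA_KERNEL can be reproduced in a minimal standalone sketch; this is not Eigen code, and the names (MY_LAUNCH_KERNEL, fill_kernel) are invented for illustration. The host pass gets a real launch plus an error check; the device pass, where __CUDA_ARCH__ is defined and a launch is impossible, gets a trap instead.

#include <cassert>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill_kernel(float* out, int n, float value) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

// Host pass: expand to a real launch followed by an error check.
// Device pass (__CUDA_ARCH__ defined): a launch is not possible here,
// so expand to a trap instead, mirroring the Eigen change above.
#ifndef __CUDA_ARCH__
#define MY_LAUNCH_KERNEL(kernel, gridsize, blocksize, sharedmem, stream, ...) \
  (kernel) <<< (gridsize), (blocksize), (sharedmem), (stream) >>> (__VA_ARGS__); \
  assert(cudaGetLastError() == cudaSuccess);
#else
#define MY_LAUNCH_KERNEL(...) \
  assert(false && "Cannot launch a kernel from another kernel");
#endif

int main() {
  const int n = 256;
  float* d_out = nullptr;
  cudaMalloc(&d_out, n * sizeof(float));
  MY_LAUNCH_KERNEL(fill_kernel, 1, n, 0, 0, d_out, n, 1.0f);
  cudaDeviceSynchronize();
  cudaFree(d_out);
  std::printf("launch ok\n");
  return 0;
}

Note that __CUDA_ARCH__ is set per compilation pass: the host compiler pass, where it is undefined, is the one that actually generates code for a host-only function like main, so main always gets the launching definition.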


@@ -156,14 +156,14 @@ template <typename Expression>
 class TensorExecutor<Expression, GpuDevice, false> {
  public:
   typedef typename Expression::Index Index;
-  static void run(const Expression& expr, const GpuDevice& device);
+  EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device);
 };

 template <typename Expression>
 class TensorExecutor<Expression, GpuDevice, true> {
  public:
   typedef typename Expression::Index Index;
-  static void run(const Expression& expr, const GpuDevice& device);
+  EIGEN_DEVICE_FUNC static void run(const Expression& expr, const GpuDevice& device);
 };

 #if defined(__CUDACC__)
@@ -213,8 +213,9 @@ EigenMetaKernel_Vectorizable(Evaluator memcopied_eval, Index size) {
 /*static*/
 template <typename Expression>
-inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device)
+EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression& expr, const GpuDevice& device)
 {
+#ifndef __CUDA_ARCH__
   TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
   const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
   if (needs_assign)
@@ -227,13 +228,17 @@ inline void TensorExecutor<Expression, GpuDevice, false>::run(const Expression&
     LAUNCH_CUDA_KERNEL((EigenMetaKernel_NonVectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size);
   }
   evaluator.cleanup();
+#else
+  eigen_assert(false && "Cannot launch a kernel from another kernel");
+#endif
 }

 /*static*/
 template<typename Expression>
-inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device)
+EIGEN_DEVICE_FUNC inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& expr, const GpuDevice& device)
 {
+#ifndef __CUDA_ARCH__
   TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
   const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
   if (needs_assign)
@@ -246,6 +251,9 @@ inline void TensorExecutor<Expression, GpuDevice, true>::run(const Expression& e
     LAUNCH_CUDA_KERNEL((EigenMetaKernel_Vectorizable<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size);
   }
   evaluator.cleanup();
+#else
+  eigen_assert(false && "Cannot launch a kernel from another kernel");
+#endif
 }

 #endif // __CUDACC__
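The executor changes apply the same split at function granularity: run() becomes callable from both sides via EIGEN_DEVICE_FUNC, while its body only launches kernels in the host pass. A hedged sketch of that pattern, with DEVICE_FUNC standing in for Eigen's EIGEN_DEVICE_FUNC and every other name invented for the example:

#include <cassert>
#include <cuda_runtime.h>

// Stand-in for EIGEN_DEVICE_FUNC: __host__ __device__ under nvcc,
// nothing when the file is built by a plain host compiler.
#ifdef __CUDACC__
#define DEVICE_FUNC __host__ __device__
#else
#define DEVICE_FUNC
#endif

__global__ void square_kernel(float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= data[i];
}

// Annotating the function lets __host__ __device__ callers reference it
// without a cross-space warning; the __CUDA_ARCH__ guard keeps the
// host-only <<<...>>> launch out of the device compilation pass.
DEVICE_FUNC inline void run_square(float* data, int n) {
#ifndef __CUDA_ARCH__
  const int block_size = 128;
  const int num_blocks = (n + block_size - 1) / block_size;
  square_kernel<<<num_blocks, block_size>>>(data, n);
  assert(cudaGetLastError() == cudaSuccess);
#else
  // Device pass: reaching this at runtime is a bug, as in Eigen's stub.
  assert(false && "Cannot launch a kernel from another kernel");
#endif
}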


@@ -454,7 +454,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
         input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
       }
     }

     int outputIndex = 0;
     int reduceIndex = 0;
     for (int i = 0; i < NumInputDims; ++i) {
@@ -473,13 +473,13 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
       m_preservedStrides[0] = internal::array_prod(input_dims);
     }
   }

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

   typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
   typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;

-  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
     m_impl.evalSubExprsIfNeeded(NULL);

     // Use the FullReducer if possible.
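This last hunk is the complementary half of the executor change: once run() is __host__ __device__, the evaluator members it calls, evalSubExprsIfNeeded here, must carry the annotation too, or nvcc warns about a host-only function being called from host-device code. A minimal illustration of that rule, assuming nothing beyond standard CUDA attributes (all names invented):

#ifdef __CUDACC__
#define DEVICE_FUNC __host__ __device__
#else
#define DEVICE_FUNC
#endif

struct Evaluator {
  // If this callee were host-only, the DEVICE_FUNC caller below would
  // trigger nvcc's cross-space warning; annotating it as well keeps the
  // whole call chain consistent across both compilation passes.
  DEVICE_FUNC bool evalSubExprsIfNeeded(float* dest) {
    m_data = dest;
    return dest == nullptr;  // placeholder logic for the sketch
  }
  DEVICE_FUNC bool prepare() { return evalSubExprsIfNeeded(nullptr); }
  float* m_data = nullptr;
};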