diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
index 488d32cb4..d084880de 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h
@@ -85,7 +85,7 @@ struct TensorEvaluator
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
     eigen_assert(m_data);
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       return m_data[m_dims.IndexOfColMajor(coords)];
     } else {
       return m_data[m_dims.IndexOfRowMajor(coords)];
@@ -158,7 +158,7 @@ struct TensorEvaluator<const Derived, Device>
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
     eigen_assert(m_data);
-    const Index index = (Layout == ColMajor) ? m_dims.IndexOfColMajor(coords)
+    const Index index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_dims.IndexOfColMajor(coords)
                         : m_dims.IndexOfRowMajor(coords);
 #ifdef __CUDA_ARCH__
     return __ldg(m_data+index);
@@ -366,8 +366,8 @@ struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>
       m_thenImpl(op.thenExpression(), device),
       m_elseImpl(op.elseExpression(), device)
   {
-    EIGEN_STATIC_ASSERT((TensorEvaluator<IfArgType, Device>::Layout == TensorEvaluator<ThenArgType, Device>::Layout), YOU_MADE_A_PROGRAMMING_MISTAKE);
-    EIGEN_STATIC_ASSERT((TensorEvaluator<IfArgType, Device>::Layout == TensorEvaluator<ElseArgType, Device>::Layout), YOU_MADE_A_PROGRAMMING_MISTAKE);
+    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ThenArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
+    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ElseArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
     eigen_assert(dimensions_match(m_condImpl.dimensions(), m_thenImpl.dimensions()));
     eigen_assert(dimensions_match(m_thenImpl.dimensions(), m_elseImpl.dimensions()));
   }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
index 87a4b0758..1191b2411 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h
@@ -308,7 +308,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
 
     const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
     const Sizes& output_dims = op.sizes();
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
@@ -348,7 +348,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
     m_impl.evalSubExprsIfNeeded(NULL);
     if (internal::is_arithmetic<Scalar>::value && data && m_impl.data()) {
       Index contiguous_values = 1;
-      if (Layout == ColMajor) {
+      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
         for (int i = 0; i < NumDims; ++i) {
           contiguous_values *= dimensions()[i];
           if (dimensions()[i] != m_impl.dimensions()[i]) {
@@ -394,7 +394,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
 
     Index inputIndices[] = {0, 0};
     Index indices[] = {index, index + packetSize - 1};
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       for (int i = NumDims - 1; i > 0; --i) {
         const Index idx0 = indices[0] / m_fastOutputStrides[i];
         const Index idx1 = indices[1] / m_fastOutputStrides[i];
@@ -446,7 +446,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
     Scalar* result = m_impl.data();
     if (result) {
       Index offset = 0;
-      if (Layout == ColMajor) {
+      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
         for (int i = 0; i < NumDims; ++i) {
           if (m_dimensions[i] != m_impl.dimensions()[i]) {
             offset += m_offsets[i] * m_inputStrides[i];
@@ -482,7 +482,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
   {
     Index inputIndex = 0;
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
@@ -547,7 +547,7 @@ struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
     const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
     Index inputIndices[] = {0, 0};
     Index indices[] = {index, index + packetSize - 1};
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
index 439cf3230..82969b4c0 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
@@ -106,7 +106,7 @@ struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device
   {
     // Compute strides
     m_dimensions = m_impl.dimensions();
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       m_strides[0] = 1;
       for (int i = 1; i < NumDims; ++i) {
         m_strides[i] = m_strides[i-1] * m_dimensions[i-1];
@@ -138,7 +138,7 @@ struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device
   {
     eigen_assert(index < dimensions().TotalSize());
     Index inputIndex = 0;
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
       Index idx = index / m_strides[i];
       index -= idx * m_strides[i];
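
Every hunk above applies the same transformation: both sides of a Layout comparison are wrapped in static_cast<int>(...). The sketch below is a minimal standalone illustration, not Eigen code, of why that helps: the evaluator's Layout constant and ColMajor/RowMajor come from two different enumeration types, and comparing them directly can trigger -Wenum-compare-style warnings on GCC/Clang, while casting both operands to int compares the underlying values without the warning. The enum and function names here are invented for the example.

// enum_compare_sketch.cpp -- standalone illustration, not Eigen code.
#include <iostream>

// Stand-ins for the two different enumeration types involved in the comparison:
// an evaluator's Layout constant versus Eigen's ColMajor/RowMajor storage order.
enum EvaluatorLayout { LayoutColMajor = 0, LayoutRowMajor = 1 };
enum StorageOrder    { ColMajor = 0, RowMajor = 1 };

template <EvaluatorLayout Layout>
bool is_col_major() {
  // return Layout == ColMajor;  // direct mixed-enum comparison: GCC/Clang may warn (-Wenum-compare)
  return static_cast<int>(Layout) == static_cast<int>(ColMajor);  // same result, no warning
}

int main() {
  std::cout << std::boolalpha
            << is_col_major<LayoutColMajor>() << ' '   // prints: true
            << is_col_major<LayoutRowMajor>() << '\n'; // prints: false
  return 0;
}

Compiled with -Wall, the commented-out direct comparison is the line that would warn; the casted form behaves identically, which is presumably why the patch leaves the surrounding logic untouched.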