diff --git a/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h b/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h
index f102872ae..423ca4be4 100644
--- a/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h
+++ b/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h
@@ -66,6 +66,12 @@ template<std::size_t I, class T, std::size_t N> constexpr inline T const& array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
 
 #undef STD_GET_ARR_HACK
 
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<const std::array<T,N> > {
+  static const size_t value = N;
+};
+
+
 /* Suppose you have a template of the form
  * template<typename T> struct X;
  * And you want to specialize it in such a way:
diff --git a/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h b/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h
index 636063f9e..1d3164d6a 100644
--- a/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h
+++ b/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h
@@ -182,23 +182,32 @@ array<t, n> repeat(t v) {
 }
 
 template<typename t, std::size_t n>
-t array_prod(const array<t, n>& a) {
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, n>& a) {
   t prod = 1;
   for (size_t i = 0; i < n; ++i) { prod *= a[i]; }
   return prod;
 }
 template<typename t>
-t array_prod(const array<t, 0>& /*a*/) {
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, 0>& /*a*/) {
   return 0;
 }
 
-template<std::size_t I, class T, std::size_t N> inline T& array_get(array<T,N>& a) {
+template<std::size_t I, class T, std::size_t N>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(array<T,N>& a) {
   return a[I];
 }
-template<std::size_t I, class T, std::size_t N> inline const T& array_get(const array<T,N>& a) {
+template<std::size_t I, class T, std::size_t N> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+const T& array_get(const array<T,N>& a) {
   return a[I];
 }
+
+template <typename T> struct array_size;
+template<class T, std::size_t N> struct array_size<const array<T,N> > {
+  static const size_t value = N;
+};
+
+
 struct sum_op {
   template<typename A, typename B> static inline bool run(A a, B b) { return a + b; }
 };
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index d371eb76d..5149de1bb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -107,7 +107,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType> >
 {
   typedef TensorContractionOp<Indices, LeftArgType, RightArgType> XprType;
 
-  static const int NumDims = max_n_1<TensorEvaluator<LeftArgType>::Dimensions::count + TensorEvaluator<RightArgType>::Dimensions::count - 2 * Indices::size>::size;
+  static const int NumDims = max_n_1<TensorEvaluator<LeftArgType>::Dimensions::count + TensorEvaluator<RightArgType>::Dimensions::count - 2 * internal::array_size<Indices>::value>::size;
   typedef typename XprType::Index Index;
   typedef DSizes<Index, NumDims> Dimensions;
 
@@ -128,7 +128,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType> >
     const typename TensorEvaluator<LeftArgType>::Dimensions& left_dims = m_leftImpl.dimensions();
     for (int i = 0; i < TensorEvaluator<LeftArgType>::Dimensions::count; ++i) {
       bool skip = false;
-      for (int j = 0; j < Indices::size; ++j) {
+      for (int j = 0; j < internal::array_size<Indices>::value; ++j) {
         if (op.indices()[j].first == i) {
           skip = true;
           m_leftOffsets[2*skipped] = stride;
@@ -151,7 +151,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType> >
     const typename TensorEvaluator<RightArgType>::Dimensions& right_dims = m_rightImpl.dimensions();
     for (int i = 0; i < TensorEvaluator<RightArgType>::Dimensions::count; ++i) {
       bool skip = false;
-      for (int j = 0; j < Indices::size; ++j) {
+      for (int j = 0; j < internal::array_size<Indices>::value; ++j) {
         if (op.indices()[j].second == i) {
           skip = true;
           m_rightOffsets[2*skipped] = stride;
@@ -168,7 +168,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType> >
       }
     }
 
-    if (TensorEvaluator<LeftArgType>::Dimensions::count + TensorEvaluator<RightArgType>::Dimensions::count == 2 * Indices::size) {
+    if (TensorEvaluator<LeftArgType>::Dimensions::count + TensorEvaluator<RightArgType>::Dimensions::count == 2 * internal::array_size<Indices>::value) {
       m_dimensions[0] = 1;
     }
   }
@@ -209,7 +209,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType> >
     for (int j = 0; j < m_stitchsize[StitchIndex]; ++j) {
       const Index left = firstLeft+j*m_leftOffsets[2*StitchIndex];
       const Index right = firstRight+j*m_rightOffsets[2*StitchIndex];
-      if (StitchIndex < Indices::size-1) {
+      if (StitchIndex < internal::array_size<Indices>::value-1) {
         partialStitch(left, right, StitchIndex+1, accum);
       } else {
         accum += m_leftImpl.coeff(left) * m_rightImpl.coeff(right);
@@ -218,9 +218,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType> >
   }
 
  private:
-  array<Index, 2*Indices::size> m_leftOffsets;
-  array<Index, 2*Indices::size> m_rightOffsets;
-  array<Index, 2*Indices::size> m_stitchsize;
+  array<Index, 2*internal::array_size<Indices>::value> m_leftOffsets;
+  array<Index, 2*internal::array_size<Indices>::value> m_rightOffsets;
+  array<Index, 2*internal::array_size<Indices>::value> m_stitchsize;
   Index m_shiftright;
   Dimensions m_dimensions;
   TensorEvaluator<LeftArgType> m_leftImpl;
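
For reference, here is a minimal standalone sketch (not Eigen code; the names merely mirror the patch) of the array_size trait pattern introduced above. It assumes, as the const-qualified specialization suggests, that the queried Indices type arrives const-qualified from the way the contraction expression is built.

// Standalone sketch of the trait pattern; assumes a const-qualified Indices type.
#include <array>
#include <cstddef>

// Primary template left undefined: instantiating it with an unsupported
// type is a compile-time error rather than a silent wrong answer.
template <typename T> struct array_size;

// Specializations recover the compile-time length N from the array type.
template <class T, std::size_t N> struct array_size<std::array<T, N> > {
  static const std::size_t value = N;
};
// The const-qualified specialization is the one hit when the queried type
// is something like `const std::array<...>`.
template <class T, std::size_t N> struct array_size<const std::array<T, N> > {
  static const std::size_t value = N;
};

// Usage in the spirit of the patched TensorContraction.h: std::array has no
// static `size` member, so `Indices::size` is not usable as a constant there,
// while `array_size<Indices>::value` is.
typedef const std::array<int, 3> Indices;
static_assert(array_size<Indices>::value == 3, "length recovered from the type");

int main() { return 0; }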