diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index d6af6fedd..f3f65f908 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -568,10 +568,6 @@ struct TensorEvaluator {
     const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
     if (needs_assign) {
-      const Index PacketSize = Vectorizable ? unpacket_traits<typename Evaluator::PacketReturnType>::size : 1;
       const Index size = array_prod(evaluator.dimensions());
-#if !defined(EIGEN_USE_SIMPLE_THREAD_POOL) && defined(EIGEN_USE_COST_MODEL)
+#if !defined(EIGEN_USE_SIMPLE_THREAD_POOL)
       device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
                          EvalRange<Evaluator, Index, Vectorizable>::alignBlockSize,
                          [&evaluator](Index first, Index last) {
@@ -160,15 +159,14 @@ class TensorExecutor {
                          });
 #else
       size_t num_threads = device.numThreads();
-#ifdef EIGEN_USE_COST_MODEL
       if (num_threads > 1) {
         num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
             size, evaluator.costPerCoeff(Vectorizable), num_threads);
       }
-#endif
       if (num_threads == 1) {
         EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, 0, size);
       } else {
+        const Index PacketSize = Vectorizable ? unpacket_traits<typename Evaluator::PacketReturnType>::size : 1;
         Index blocksz = std::ceil(static_cast<double>(size)/num_threads) + PacketSize - 1;
         const Index blocksize = numext::maxi(PacketSize, (blocksz - (blocksz % PacketSize)));
         const Index numblocks = size / blocksize;
@@ -185,7 +183,7 @@ class TensorExecutor {
         }
         barrier.Wait();
       }
-#endif  // defined(EIGEN_USE_NONBLOCKING_THREAD_POOL) && defined(EIGEN_USE_COST_MODEL)
+#endif  // !defined(EIGEN_USE_SIMPLE_THREAD_POOL)
     }
     evaluator.cleanup();
   }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 10c1a7e6c..6d3903c3f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -248,16 +248,12 @@ struct FullReducer {
       *output = reducer.finalize(reducer.initialize());
       return;
     }
-#ifdef EIGEN_USE_COST_MODEL
     const TensorOpCost cost =
         self.m_impl.costPerCoeff(Vectorizable) +
         TensorOpCost(0, 0, internal::functor_traits<Op>::Cost, Vectorizable,
                      PacketSize);
     const int num_threads = TensorCostModel<ThreadPoolDevice>::numThreads(
         num_coeffs, cost, device.numThreads());
-#else
-    const int num_threads = device.numThreads();
-#endif
     if (num_threads == 1) {
       *output =
           InnerMostDimReducer<Self, Op, Vectorizable>::reduce(self, 0, num_coeffs, reducer);
@@ -472,22 +468,14 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 
-  static bool size_large_enough(Index total_size) {
-#ifndef EIGEN_USE_COST_MODEL
-    return total_size > 1024 * 1024;
-#else
-    return true || total_size;
-#endif
-  }
-
   EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool evalSubExprsIfNeeded(CoeffReturnType* data) {
     m_impl.evalSubExprsIfNeeded(NULL);
 
     // Use the FullReducer if possible.
-    if (RunningFullReduction && internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
+    if (RunningFullReduction &&
+        internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
         ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
-         (!RunningOnGPU && size_large_enough(internal::array_prod(m_impl.dimensions()))))) {
-
+         !RunningOnGPU)) {
       bool need_assign = false;
       if (!data) {
         m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
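
Note on the TensorExecutor hunks above: with the EIGEN_USE_COST_MODEL guards removed, the non-simple thread-pool path always consults the cost model before deciding how many threads to use, and then carves the work into packet-aligned blocks. The standalone C++ sketch below mirrors that thread-count and block-size arithmetic; numThreadsForCost() and its kMinTaskCost threshold are illustrative stand-ins for TensorCostModel<ThreadPoolDevice>::numThreads(), not Eigen code.

  // Standalone sketch (not Eigen code) of the blocking arithmetic in the
  // patched executor path above. The cost heuristic is an assumption made
  // for illustration; Eigen's TensorCostModel is more detailed.
  #include <algorithm>
  #include <cmath>
  #include <cstdio>

  using Index = long;

  // Hypothetical stand-in for TensorCostModel<ThreadPoolDevice>::numThreads():
  // cap the thread count so each thread gets a minimum amount of work.
  static int numThreadsForCost(Index size, double cost_per_coeff, int max_threads) {
    const double kMinTaskCost = 40000.0;  // assumed threshold, not Eigen's constant
    const int wanted = static_cast<int>(size * cost_per_coeff / kMinTaskCost);
    return std::max(1, std::min(max_threads, wanted));
  }

  int main() {
    const Index size = 1 << 20;         // number of coefficients to evaluate
    const double cost_per_coeff = 4.0;  // pretend result of costPerCoeff(Vectorizable)
    const Index PacketSize = 8;         // e.g. AVX floats when Vectorizable
    int num_threads = 16;               // device.numThreads()

    if (num_threads > 1) {
      num_threads = numThreadsForCost(size, cost_per_coeff, num_threads);
    }

    // Same block-size computation as the executor: pad the per-thread share by
    // PacketSize - 1, then round down to a packet multiple, never below one packet.
    const Index blocksz =
        static_cast<Index>(std::ceil(static_cast<double>(size) / num_threads)) +
        PacketSize - 1;
    const Index blocksize = std::max(PacketSize, blocksz - (blocksz % PacketSize));
    const Index numblocks = size / blocksize;

    std::printf("threads=%d blocksize=%ld blocks=%ld tail=%ld\n",
                num_threads, blocksize, numblocks, size - numblocks * blocksize);
    return 0;
  }

Keeping every block a multiple of PacketSize means all blocks except the final remainder stay fully vectorizable, which is also why PacketSize now only needs to be computed on the multi-threaded branch in the patched code.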
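
The FullReducer change in TensorReduction.h follows the same pattern: the reducer's own per-coefficient cost is added to the input expression's cost, and the combined estimate decides how many threads the reduction is worth. Below is a minimal sketch of that composition, where OpCost, numThreads() and kCostPerThread are assumed simplifications of TensorOpCost and TensorCostModel<Device>::numThreads(), not the real API.

  #include <algorithm>
  #include <cstdio>

  struct OpCost {  // minimal stand-in for TensorOpCost
    double bytes_loaded;
    double bytes_stored;
    double compute_cycles;
    OpCost operator+(const OpCost& o) const {
      return {bytes_loaded + o.bytes_loaded, bytes_stored + o.bytes_stored,
              compute_cycles + o.compute_cycles};
    }
    double total() const { return bytes_loaded + bytes_stored + compute_cycles; }
  };

  // Assumed equivalent of TensorCostModel<Device>::numThreads(): one thread per
  // fixed chunk of estimated work, clamped to the device's thread count.
  static int numThreads(double num_coeffs, const OpCost& cost, int max_threads) {
    const double kCostPerThread = 100000.0;  // illustrative threshold only
    const int wanted = static_cast<int>(num_coeffs * cost.total() / kCostPerThread);
    return std::max(1, std::min(max_threads, wanted));
  }

  int main() {
    const double num_coeffs = 1e6;
    const OpCost input_cost{8, 0, 1};    // pretend m_impl.costPerCoeff(Vectorizable)
    const OpCost reducer_cost{0, 0, 2};  // analogue of functor_traits<Op>::Cost
    const OpCost cost = input_cost + reducer_cost;

    const int threads = numThreads(num_coeffs, cost, /*device threads=*/12);
    std::printf("reduction would use %d thread(s)\n", threads);
    return 0;
  }

When the estimate comes back as one thread, the reduction simply runs the existing single-threaded path, which is why the coarse size_large_enough() guard deleted in the second TensorReduction.h hunk is no longer needed.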