diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 1c44541bd..057e90e50 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -317,6 +317,7 @@ class TensorExecutor
 class TensorExecutor {
@@ -326,7 +327,6 @@ class TensorExecutor {
 };
-#if defined(EIGEN_GPUCC)
 template <typename Evaluator, typename StorageIndex, bool Vectorizable>
 struct EigenMetaKernelEval {
   static __device__ EIGEN_ALWAYS_INLINE
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
index 49603d6c1..bd1910dcc 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
@@ -56,6 +56,7 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
       thread_data_[i].thread.reset(
           env_.CreateThread([this, i]() { WorkerLoop(i); }));
     }
+    global_steal_partition_ = EncodePartition(0, num_threads_);
 #ifndef EIGEN_THREAD_LOCAL
     // Wait for workers to initialize per_thread_map_. Otherwise we might race
     // with them in Schedule or CurrentThreadId.
@@ -237,6 +238,7 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
   MaxSizeVector<ThreadData> thread_data_;
   MaxSizeVector<MaxSizeVector<unsigned>> all_coprimes_;
   MaxSizeVector<EventCount::Waiter> waiters_;
+  unsigned global_steal_partition_;
   std::atomic<unsigned> blocked_;
   std::atomic<bool> spinning_;
   std::atomic<bool> done_;
@@ -354,6 +356,9 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface {
   Task LocalSteal() {
     PerThread* pt = GetPerThread();
     unsigned partition = GetStealPartition(pt->thread_id);
+    // If thread steal partition is the same as global partition, there is no
+    // need to go through the steal loop twice.
+    if (global_steal_partition_ == partition) return Task();
     unsigned start, limit;
     DecodePartition(partition, &start, &limit);
     AssertBounds(start, limit);
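
For context: the new global_steal_partition_ member caches the partition spanning the whole pool, i.e. EncodePartition(0, num_threads_), so LocalSteal() can return immediately when the calling thread's own steal partition already covers every worker; as the added comment notes, the subsequent global steal pass would walk the same range anyway. Below is a minimal, self-contained sketch of that idea. The bit-packing assumes the start/limit packing used by the existing EncodePartition/DecodePartition helpers; the constants and the standalone functions here are illustrative only and not part of this patch.

#include <cassert>

// Illustrative sketch (not part of the patch): a steal partition packs a
// half-open range [start, limit) of worker indices into a single unsigned.
static const int kMaxPartitionBits = 16;

inline unsigned EncodePartition(unsigned start, unsigned limit) {
  return (start << kMaxPartitionBits) | limit;
}

inline void DecodePartition(unsigned val, unsigned* start, unsigned* limit) {
  *limit = val & ((1u << kMaxPartitionBits) - 1);
  *start = val >> kMaxPartitionBits;
}

int main() {
  const unsigned num_threads = 8;
  const unsigned global = EncodePartition(0, num_threads);

  // A thread whose steal partition equals the global one gains nothing from a
  // local steal pass: the global pass scans the same [0, num_threads) range,
  // so the early return added by this patch skips the redundant loop.
  unsigned thread_partition = EncodePartition(0, num_threads);
  assert(thread_partition == global);

  // A thread restricted to the sub-range [2, 6) still benefits from a local
  // pass over its own partition before falling back to the global one.
  unsigned start, limit;
  DecodePartition(EncodePartition(2, 6), &start, &limit);
  assert(start == 2 && limit == 6);
  return 0;
}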