diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
index e55459d0c..288d79f1f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h
@@ -932,7 +932,9 @@ struct TensorEvaluator
     } else {
       while (end - start > 1) {
         Index mid = (start + end) / 2;
-        device_.enqueueNoNotification([=]() { enqueue_packing_helper(mid, end, k, rhs); });
+        device_.enqueueNoNotification([this, mid, end, k, rhs]() {
+          enqueue_packing_helper(mid, end, k, rhs);
+        });
         end = mid;
       }
 
@@ -996,7 +1000,9 @@ struct TensorEvaluator
           (k > 0 || std::this_thread::get_id() == created_by_thread_id_);
 
       if (pack_async) {
-        device_.enqueueNoNotification([=]() { enqueue_packing_helper(start, end, k, rhs); });
+        device_.enqueueNoNotification([this, start, end, k, rhs]() {
+          enqueue_packing_helper(start, end, k, rhs);
+        });
       } else {
         enqueue_packing_helper(start, end, k, rhs);
       }
@@ -1277,7 +1283,9 @@ struct TensorEvaluator
     while (end_block_idx - start_block_idx > 1) {
       Index mid_block_idx = (start_block_idx + end_block_idx) / 2;
       evaluator->m_device.enqueueNoNotification(
-          [this, mid_block_idx, end_block_idx]() { evalAsync(mid_block_idx, end_block_idx); });
+          [this, mid_block_idx, end_block_idx]() {
+            evalAsync(mid_block_idx, end_block_idx);
+          });
       end_block_idx = mid_block_idx;
     }
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
index ca1fe6aef..c95c8f223 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -192,7 +192,7 @@ struct ThreadPoolDevice {
     // block_count leaves that do actual computations.
     Barrier barrier(static_cast<unsigned int>(block.count));
     std::function<void(Index, Index)> handleRange;
-    handleRange = [=, &handleRange, &barrier, &f](Index firstIdx, Index lastIdx) {
+    handleRange = [this, block, &handleRange, &barrier, &f](Index firstIdx, Index lastIdx) {
       while (lastIdx - firstIdx > block.size) {
         // Split into halves and schedule the second half on a different thread.
         const Index midIdx = firstIdx + numext::div_ceil((lastIdx - firstIdx) / 2, block.size) * block.size;
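
Note on the pattern, for review context: every hunk above replaces a default [=] capture with an explicit capture list in a lambda handed to the thread pool (enqueueNoNotification / the handleRange recursion), so that `this` and the values the task needs (mid, end, k, rhs, block, ...) are captured deliberately rather than implicitly; capturing `this` through [=] is deprecated as of C++20. The standalone sketch below illustrates the same idea outside Eigen; Worker, enqueue, submit_range, and process are hypothetical names used only for illustration, not Eigen APIs.

    #include <functional>
    #include <iostream>
    #include <thread>
    #include <vector>

    class Worker {
     public:
      // Queue a task for later execution on another thread.
      void enqueue(std::function<void()> task) { tasks_.push_back(std::move(task)); }

      // Run each queued task on its own thread and wait for completion.
      void run_all() {
        std::vector<std::thread> threads;
        for (auto& task : tasks_) threads.emplace_back(task);
        for (auto& t : threads) t.join();
      }

      void submit_range(int start, int end) {
        // [=] would capture `this` implicitly (deprecated since C++20) and hide
        // the fact that the deferred task depends on this object's lifetime.
        // The explicit list documents exactly what the task uses.
        enqueue([this, start, end]() { process(start, end); });
      }

     private:
      void process(int start, int end) {
        std::cout << "processing [" << start << ", " << end << ")\n";
      }

      std::vector<std::function<void()>> tasks_;
    };

    int main() {
      Worker w;
      w.submit_range(0, 4);
      w.submit_range(4, 8);
      w.run_all();
    }

With the explicit list, adding a new dependency to the task body forces a conscious decision about how it is captured, which matters for work that may run after the enclosing scope has exited; recent GCC and Clang in C++20 mode also typically warn (under -Wdeprecated) if the capture is switched back to [=] with an implicit this.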