From c696dbcaa6e17cdfa6c9ff37dadf89cf4b707504 Mon Sep 17 00:00:00 2001
From: Gael Guennebaud
Date: Fri, 21 Sep 2018 23:02:33 +0200
Subject: [PATCH] Fix shadowing of last and all

Local variables named `last` and `all` shadow the Eigen::last and
Eigen::all symbols of the indexing API (this code lives in namespace
Eigen). Rename them, along with the matching `first` locals, to
lastIdx/firstIdx, lastCoeff, and notifyAll.
---
 .../src/Tensor/TensorContractionMapper.h      |  6 +--
 .../CXX11/src/Tensor/TensorDeviceThreadPool.h | 12 ++---
 .../Eigen/CXX11/src/Tensor/TensorExecutor.h   | 48 +++++++++----------
 .../Eigen/CXX11/src/Tensor/TensorPadding.h    | 40 ++++++++--------
 .../CXX11/src/Tensor/TensorReductionGpu.h     |  4 +-
 .../Eigen/CXX11/src/ThreadPool/EventCount.h   |  8 ++--
 6 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
index dbb0f76bb..2d3b69128 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
@@ -255,7 +255,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper
     indexPair = this->computeIndexPair(i, j, packet_size - 1);
     const Index first = indexPair.first;
-    const Index last = indexPair.second;
+    const Index lastIdx = indexPair.second;
 
     // We can always do optimized packet reads from left hand side right now, because
     // the vertical matrix dimension on the left hand side is never contracting.
@@ -263,7 +263,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper
     ::value <= 1 || !inner_dim_reordered) &&
-        (last - first) == (packet_size - 1)) {
+        (lastIdx - first) == (packet_size - 1)) {
       return this->m_tensor.template packet(first);
     }
 
@@ -276,7 +276,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper
     m_tensor.coeff(internal_pair.first);
       data[k + 1] = this->m_tensor.coeff(internal_pair.second);
     }
-    data[packet_size - 1] = this->m_tensor.coeff(last);
+    data[packet_size - 1] = this->m_tensor.coeff(lastIdx);
 
     return pload(data);
   }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
index 6fc6688d3..1612c004b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
@@ -213,17 +213,17 @@ struct ThreadPoolDevice {
     // block_count leaves that do actual computations.
     Barrier barrier(static_cast<unsigned int>(block_count));
     std::function<void(Index, Index)> handleRange;
-    handleRange = [=, &handleRange, &barrier, &f](Index first, Index last) {
-      if (last - first <= block_size) {
+    handleRange = [=, &handleRange, &barrier, &f](Index firstIdx, Index lastIdx) {
+      if (lastIdx - firstIdx <= block_size) {
         // Single block or less, execute directly.
-        f(first, last);
+        f(firstIdx, lastIdx);
         barrier.Notify();
         return;
       }
       // Split into halves and submit to the pool.
-      Index mid = first + divup((last - first) / 2, block_size) * block_size;
-      pool_->Schedule([=, &handleRange]() { handleRange(mid, last); });
-      handleRange(first, mid);
+      Index mid = firstIdx + divup((lastIdx - firstIdx) / 2, block_size) * block_size;
+      pool_->Schedule([=, &handleRange]() { handleRange(mid, lastIdx); });
+      handleRange(firstIdx, mid);
     };
     handleRange(0, n);
     barrier.Wait();
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index bfe1f97b8..1c44541bd 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -165,11 +165,11 @@ class TensorExecutor
 struct EvalRange {
-  static void run(Evaluator* evaluator_in, const StorageIndex first,
-                  const StorageIndex last) {
+  static void run(Evaluator* evaluator_in, const StorageIndex firstIdx,
+                  const StorageIndex lastIdx) {
     Evaluator evaluator = *evaluator_in;
-    eigen_assert(last >= first);
-    for (StorageIndex i = first; i < last; ++i) {
+    eigen_assert(lastIdx >= firstIdx);
+    for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
       evaluator.evalScalar(i);
     }
   }
@@ -182,14 +182,14 @@ struct EvalRange {
   static const int PacketSize = unpacket_traits::size;
 
-  static void run(Evaluator* evaluator_in, const StorageIndex first,
-                  const StorageIndex last) {
+  static void run(Evaluator* evaluator_in, const StorageIndex firstIdx,
+                  const StorageIndex lastIdx) {
     Evaluator evaluator = *evaluator_in;
-    eigen_assert(last >= first);
-    StorageIndex i = first;
-    if (last - first >= PacketSize) {
-      eigen_assert(first % PacketSize == 0);
-      StorageIndex last_chunk_offset = last - 4 * PacketSize;
+    eigen_assert(lastIdx >= firstIdx);
+    StorageIndex i = firstIdx;
+    if (lastIdx - firstIdx >= PacketSize) {
+      eigen_assert(firstIdx % PacketSize == 0);
+      StorageIndex last_chunk_offset = lastIdx - 4 * PacketSize;
       // Give compiler a strong possibility to unroll the loop. But don't insist
       // on unrolling, because if the function is expensive compiler should not
       // unroll the loop at the expense of inlining.
@@ -198,12 +198,12 @@ struct EvalRange {
         evaluator.evalPacket(i + j * PacketSize);
       }
     }
-    last_chunk_offset = last - PacketSize;
+    last_chunk_offset = lastIdx - PacketSize;
     for (; i <= last_chunk_offset; i += PacketSize) {
       evaluator.evalPacket(i);
     }
   }
-  for (; i < last; ++i) {
+  for (; i < lastIdx; ++i) {
     evaluator.evalScalar(i);
   }
 }
@@ -234,8 +234,8 @@ class TensorExecutor {
     const StorageIndex size = array_prod(evaluator.dimensions());
     device.parallelFor(size, evaluator.costPerCoeff(Vectorizable),
                        EvalRange::alignBlockSize,
-                       [&evaluator](StorageIndex first, StorageIndex last) {
-                         EvalRange::run(&evaluator, first, last);
+                       [&evaluator](StorageIndex firstIdx, StorageIndex lastIdx) {
+                         EvalRange::run(&evaluator, firstIdx, lastIdx);
                        });
   }
   evaluator.cleanup();
@@ -292,8 +292,8 @@ class TensorExecutor
 = -1 && thread_idx < num_threads);
       Scalar* thread_buf = reinterpret_cast<Scalar*>(
           static_cast<char*>(buf) + aligned_blocksize * (thread_idx + 1));
-      for (StorageIndex i = first; i < last; ++i) {
+      for (StorageIndex i = firstIdx; i < lastIdx; ++i) {
        auto block = block_mapper.GetBlockForIndex(i, thread_buf);
        evaluator.evalBlock(&block);
      }
@@ -330,8 +330,8 @@ class TensorExecutor {
 template
 struct EigenMetaKernelEval {
   static __device__ EIGEN_ALWAYS_INLINE
-  void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
-    for (StorageIndex i = first; i < last; i += step_size) {
+  void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) {
+    for (StorageIndex i = firstIdx; i < lastIdx; i += step_size) {
       eval.evalScalar(i);
     }
   }
@@ -340,17 +340,17 @@ struct EigenMetaKernelEval {
 template
 struct EigenMetaKernelEval {
   static __device__ EIGEN_ALWAYS_INLINE
-  void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) {
+  void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) {
     const StorageIndex PacketSize = unpacket_traits::size;
-    const StorageIndex vectorized_size = (last / PacketSize) * PacketSize;
+    const StorageIndex vectorized_size = (lastIdx / PacketSize) * PacketSize;
     const StorageIndex vectorized_step_size = step_size * PacketSize;
 
     // Use the vector path
-    for (StorageIndex i = first * PacketSize; i < vectorized_size;
+    for (StorageIndex i = firstIdx * PacketSize; i < vectorized_size;
          i += vectorized_step_size) {
       eval.evalPacket(i);
     }
-    for (StorageIndex i = vectorized_size + first; i < last; i += step_size) {
+    for (StorageIndex i = vectorized_size + firstIdx; i < lastIdx; i += step_size) {
       eval.evalScalar(i);
     }
   }
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
index 59c1704ed..4837f2200 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h
@@ -273,21 +273,21 @@ struct TensorEvaluator, Device
     const Index initialIndex = index;
     Index inputIndex = 0;
     for (int i = NumDims - 1; i > 0; --i) {
-      const Index first = index;
-      const Index last = index + PacketSize - 1;
+      const Index firstIdx = index;
+      const Index lastIdx = index + PacketSize - 1;
       const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
       const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
       const Index lastPaddedRight = m_outputStrides[i+1];
 
-      if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
         // all the coefficient are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
       }
-      else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+      else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
         // all the coefficient are in the padding zone.
         return internal::pset1<PacketReturnType>(m_paddingValue);
       }
-      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
         // all the coefficient are between the 2 padding zones.
         const Index idx = index / m_outputStrides[i];
         inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
@@ -299,21 +299,21 @@ struct TensorEvaluator, Device
       }
     }
 
-    const Index last = index + PacketSize - 1;
-    const Index first = index;
+    const Index lastIdx = index + PacketSize - 1;
+    const Index firstIdx = index;
     const Index lastPaddedLeft = m_padding[0].first;
     const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
     const Index lastPaddedRight = m_outputStrides[1];
 
-    if (!isLeftPaddingCompileTimeZero(0) && last < lastPaddedLeft) {
+    if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) {
       // all the coefficient are in the padding zone.
       return internal::pset1<PacketReturnType>(m_paddingValue);
     }
-    else if (!isRightPaddingCompileTimeZero(0) && first >= firstPaddedRight && last < lastPaddedRight) {
+    else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
       // all the coefficient are in the padding zone.
       return internal::pset1<PacketReturnType>(m_paddingValue);
     }
-    else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+    else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
       // all the coefficient are between the 2 padding zones.
       inputIndex += (index - m_padding[0].first);
       return m_impl.template packet<Unaligned>(inputIndex);
@@ -331,21 +331,21 @@ struct TensorEvaluator, Device
     Index inputIndex = 0;
     for (int i = 0; i < NumDims - 1; ++i) {
-      const Index first = index;
-      const Index last = index + PacketSize - 1;
+      const Index firstIdx = index;
+      const Index lastIdx = index + PacketSize - 1;
       const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1];
       const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1];
       const Index lastPaddedRight = m_outputStrides[i];
 
-      if (!isLeftPaddingCompileTimeZero(i) && last < lastPaddedLeft) {
+      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficient are in the padding zone.
         return internal::pset1<PacketReturnType>(m_paddingValue);
       }
-      else if (!isRightPaddingCompileTimeZero(i) && first >= firstPaddedRight && last < lastPaddedRight) {
+      else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
         // all the coefficient are in the padding zone.
         return internal::pset1<PacketReturnType>(m_paddingValue);
       }
-      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+      else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
         // all the coefficient are between the 2 padding zones.
        const Index idx = index / m_outputStrides[i+1];
         inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
@@ -357,21 +357,21 @@ struct TensorEvaluator, Device
       }
     }
 
-    const Index last = index + PacketSize - 1;
-    const Index first = index;
+    const Index lastIdx = index + PacketSize - 1;
+    const Index firstIdx = index;
     const Index lastPaddedLeft = m_padding[NumDims-1].first;
     const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second);
     const Index lastPaddedRight = m_outputStrides[NumDims-1];
 
-    if (!isLeftPaddingCompileTimeZero(NumDims-1) && last < lastPaddedLeft) {
+    if (!isLeftPaddingCompileTimeZero(NumDims-1) && lastIdx < lastPaddedLeft) {
       // all the coefficient are in the padding zone.
       return internal::pset1<PacketReturnType>(m_paddingValue);
     }
-    else if (!isRightPaddingCompileTimeZero(NumDims-1) && first >= firstPaddedRight && last < lastPaddedRight) {
+    else if (!isRightPaddingCompileTimeZero(NumDims-1) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
       // all the coefficient are in the padding zone.
       return internal::pset1<PacketReturnType>(m_paddingValue);
     }
-    else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (first >= lastPaddedLeft && last < firstPaddedRight)) {
+    else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
       // all the coefficient are between the 2 padding zones.
       inputIndex += (index - m_padding[NumDims-1].first);
       return m_impl.template packet<Unaligned>(inputIndex);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
index 7504c1598..88940e6e6 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h
@@ -208,8 +208,8 @@ __global__ void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Sel
   eigen_assert(blockDim.x == 1);
   eigen_assert(gridDim.x == 1);
   if (num_coeffs % 2 != 0) {
-    half last = input.m_impl.coeff(num_coeffs-1);
-    *scratch = __halves2half2(last, reducer.initialize());
+    half lastCoeff = input.m_impl.coeff(num_coeffs-1);
+    *scratch = __halves2half2(lastCoeff, reducer.initialize());
   } else {
     *scratch = reducer.template initializePacket();
   }
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
index 22c952ae1..7a71f89fd 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
@@ -128,7 +128,7 @@ class EventCount {
   // Notify wakes one or all waiting threads.
   // Must be called after changing the associated wait predicate.
-  void Notify(bool all) {
+  void Notify(bool notifyAll) {
     std::atomic_thread_fence(std::memory_order_seq_cst);
     uint64_t state = state_.load(std::memory_order_acquire);
     for (;;) {
@@ -137,7 +137,7 @@ class EventCount {
         return;
       uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
       uint64_t newstate;
-      if (all) {
+      if (notifyAll) {
         // Reset prewait counter and empty wait list.
        newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask;
       } else if (waiters) {
@@ -157,10 +157,10 @@
       }
       if (state_.compare_exchange_weak(state, newstate, std::memory_order_acquire)) {
-        if (!all && waiters) return;  // unblocked pre-wait thread
+        if (!notifyAll && waiters) return;  // unblocked pre-wait thread
         if ((state & kStackMask) == kStackMask) return;
         Waiter* w = &waiters_[state & kStackMask];
-        if (!all) w->next.store(nullptr, std::memory_order_relaxed);
+        if (!notifyAll) w->next.store(nullptr, std::memory_order_relaxed);
         Unpark(w);
         return;
       }
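
For context, the collision being avoided: these files sit inside namespace Eigen, where the Eigen::last and Eigen::all indexing placeholders of the seq/indexed-view API are in scope. A minimal standalone sketch of the problem; the helper below is hypothetical and not code from this patch, and it assumes the Eigen 3.4-era placeholders are available:

#include <Eigen/Dense>

namespace Eigen {
namespace internal {
// Hypothetical helper: naming the local `last` here would shadow the
// Eigen::last placeholder visible in this namespace and warn under
// -Wshadow; the patch renames such locals instead.
inline Index lastIndexOf(const VectorXd& v) {
  const Index lastIdx = v.size() - 1;
  return lastIdx;
}
}  // namespace internal
}  // namespace Eigen

int main() {
  Eigen::VectorXd v(4);
  v << 1, 2, 3, 4;
  // The indexing placeholders remain usable in user code:
  double tail = v(Eigen::last);   // last coefficient: 4
  auto whole = v(Eigen::all);     // view of the whole vector
  return (Eigen::internal::lastIndexOf(v) == 3 && tail == 4.0 &&
          whole.size() == 4) ? 0 : 1;
}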
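The TensorDeviceThreadPool.h hunks only rename the lambda parameters; the recursive range splitting is unchanged. A self-contained sketch of that splitting scheme, with plain recursion standing in for pool_->Schedule and the Barrier omitted:

#include <cstdint>
#include <functional>
#include <iostream>

using Index = std::int64_t;

static Index divup(Index x, Index y) { return (x + y - 1) / y; }

static void handleRange(Index firstIdx, Index lastIdx, Index block_size,
                        const std::function<void(Index, Index)>& f) {
  if (lastIdx - firstIdx <= block_size) {
    // Single block or less, execute directly.
    f(firstIdx, lastIdx);
    return;
  }
  // Midpoint rounded up to a block boundary, as in the hunk above.
  const Index mid = firstIdx + divup((lastIdx - firstIdx) / 2, block_size) * block_size;
  handleRange(mid, lastIdx, block_size, f);   // Eigen schedules this half on the pool
  handleRange(firstIdx, mid, block_size, f);
}

int main() {
  // n = 10, block_size = 4 yields the leaves [8,10), [4,8), [0,4).
  handleRange(0, 10, 4, [](Index a, Index b) {
    std::cout << '[' << a << ',' << b << ")\n";
  });
  return 0;
}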
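Likewise, the vectorized EvalRange::run keeps its shape after the rename: a 4-packet unrolled loop, then single packets, then a scalar tail. A scalar stand-in sketch; PacketSize = 4 is an assumption, and evalPacket/evalScalar are modeled as plain stores:

#include <cassert>
#include <cstdint>
#include <vector>

using StorageIndex = std::int64_t;
constexpr StorageIndex kPacketSize = 4;

static void evalRange(std::vector<float>& out, StorageIndex firstIdx,
                      StorageIndex lastIdx) {
  assert(lastIdx >= firstIdx);
  StorageIndex i = firstIdx;
  if (lastIdx - firstIdx >= kPacketSize) {
    assert(firstIdx % kPacketSize == 0);
    StorageIndex last_chunk_offset = lastIdx - 4 * kPacketSize;
    // Four packets per trip: a strong hint for the compiler to unroll.
    for (; i <= last_chunk_offset; i += 4 * kPacketSize)
      for (StorageIndex j = 0; j < 4; ++j)
        for (StorageIndex k = 0; k < kPacketSize; ++k)  // "evalPacket"
          out[i + j * kPacketSize + k] = 1.0f;
    // Remaining whole packets.
    last_chunk_offset = lastIdx - kPacketSize;
    for (; i <= last_chunk_offset; i += kPacketSize)
      for (StorageIndex k = 0; k < kPacketSize; ++k)    // "evalPacket"
        out[i + k] = 1.0f;
  }
  for (; i < lastIdx; ++i) out[i] = 1.0f;               // scalar tail
}

int main() {
  std::vector<float> out(23, 0.0f);
  evalRange(out, 0, 23);
  for (float v : out) assert(v == 1.0f);  // every element visited once
  return 0;
}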
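Finally, the TensorPadding.h hunks all instantiate one per-dimension test: does the packet [firstIdx, firstIdx + PacketSize - 1] fall entirely in the left padding, entirely in the right padding, entirely in the data region, or does it straddle a boundary and force the scalar fallback? A standalone sketch of that classification for the innermost dimension; the names mirror the diff, but this is not Eigen API:

#include <cassert>
#include <cstdint>

using Index = std::int64_t;

enum class PacketRegion { LeftPad, RightPad, Data, Straddles };

static PacketRegion classify(Index firstIdx, Index packetSize, Index padLeft,
                             Index padRight, Index paddedSize) {
  const Index lastIdx = firstIdx + packetSize - 1;
  const Index lastPaddedLeft = padLeft;                // data starts here
  const Index firstPaddedRight = paddedSize - padRight;
  if (lastIdx < lastPaddedLeft)
    return PacketRegion::LeftPad;    // whole packet is pad value (pset1)
  if (firstIdx >= firstPaddedRight && lastIdx < paddedSize)
    return PacketRegion::RightPad;   // whole packet is pad value (pset1)
  if (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)
    return PacketRegion::Data;       // one vectorized load from the input
  return PacketRegion::Straddles;    // mixed: fall back to scalar reads
}

int main() {
  // Padded size 10 = 2 left pad + 5 data + 3 right pad; data is [2, 7).
  assert(classify(0, 2, 2, 3, 10) == PacketRegion::LeftPad);
  assert(classify(2, 4, 2, 3, 10) == PacketRegion::Data);
  assert(classify(7, 2, 2, 3, 10) == PacketRegion::RightPad);
  assert(classify(1, 4, 2, 3, 10) == PacketRegion::Straddles);
  return 0;
}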