Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-06-20 22:52:51 +08:00)
Improved performance of full reduction by 2 orders of magnitude on CPU and 3 orders of magnitude on GPU
parent f0ce85b757
commit db9dbbda32
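For context, the optimized code paths in this diff target "full" reductions, i.e. expressions that collapse every dimension of a tensor into a single scalar. Below is a minimal usage sketch (not part of the commit) of the kind of expression that dispatches to the new FullReducer specializations; the pool size and tensor shape are illustrative assumptions, and the thread-pool setup uses the Tensor module's public interface as it exists today.

    // Minimal sketch: a full reduction evaluated on a thread pool.
    // Pool size (4) and tensor shape (128^3 coefficients) are arbitrary example values.
    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 3> input(128, 128, 128);
      input.setRandom();

      // Every dimension is reduced, so the result is a rank-0 tensor.
      Eigen::Tensor<float, 0> total;

      Eigen::ThreadPool pool(4);
      Eigen::ThreadPoolDevice device(&pool, 4);
      total.device(device) = input.sum();  // large enough to take the FullReducer path

      return total() > 0.0f ? 0 : 1;
    }

On a GpuDevice the same expression dispatches to the CUDA FullReductionKernel added below, provided the scalar type is float and the device has compute capability 3.0 or higher.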
@@ -28,8 +28,25 @@ struct DefaultDevice {
    ::memset(buffer, c, n);
  }

  EIGEN_STRONG_INLINE size_t numThreads() const {
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const {
#ifndef __CUDA_ARCH__
    // Running on the host CPU
    return 1;
#else
    // Running on a CUDA device
    return 32;
#endif
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
#ifndef __CUDA_ARCH__
    // Running single threaded on the host CPU
    // Should return an enum that encodes the ISA supported by the CPU
    return 1;
#else
    // Running on a CUDA device
    return __CUDA_ARCH__ / 100;
#endif
  }
};

@@ -204,6 +221,11 @@ struct ThreadPoolDevice {
    return num_threads_;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const {
    // Should return an enum that encodes the ISA supported by the CPU
    return 1;
  }

  template <class Function, class... Args>
  EIGEN_STRONG_INLINE Notification* enqueue(Function&& f, Args&&... args) const {
    Notification* n = new Notification();
@@ -308,6 +330,8 @@ struct GpuDevice {
    return 32;
  }

  inline int majorDeviceVersion() const { return m_deviceProperties.major; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const {
    cudaStreamSynchronize(*stream_);
  }
@@ -44,6 +44,38 @@ struct nested<TensorReductionOp<Op, Dims, XprType>, 1, typename eval<TensorReduc
};


template <typename OutputDims> struct DimInitializer {
  template <typename InputDims, typename ReducedDims> EIGEN_DEVICE_FUNC
  static void run(const InputDims& input_dims,
                  const array<bool, internal::array_size<InputDims>::value>& reduced,
                  OutputDims* output_dims, ReducedDims* reduced_dims) {
    const int NumInputDims = internal::array_size<InputDims>::value;
    int outputIndex = 0;
    int reduceIndex = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (reduced[i]) {
        (*reduced_dims)[reduceIndex] = input_dims[i];
        ++reduceIndex;
      } else {
        (*output_dims)[outputIndex] = input_dims[i];
        ++outputIndex;
      }
    }
  }
};

template <> struct DimInitializer<Sizes<1> > {
  template <typename InputDims, typename Index, size_t Rank> EIGEN_DEVICE_FUNC
  static void run(const InputDims& input_dims, const array<bool, Rank>& reduced,
                  Sizes<1>* output_dims, array<Index, Rank>* reduced_dims) {
    const int NumInputDims = internal::array_size<InputDims>::value;
    for (int i = 0; i < NumInputDims; ++i) {
      (*reduced_dims)[i] = input_dims[i];
    }
  }
};


template <typename ReducedDims, int NumTensorDims, int Layout>
struct are_inner_most_dims {
  static const bool value = false;
@@ -144,7 +176,7 @@ template <int DimIndex, typename Self, typename Op>
struct InnerMostDimPreserver<DimIndex, Self, Op, true> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
    EIGEN_STATIC_ASSERT(DimIndex > 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
    for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
    for (typename Self::Index j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
      const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
      InnerMostDimPreserver<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
    }
@@ -154,13 +186,325 @@ struct InnerMostDimPreserver<DimIndex, Self, Op, true> {
template <typename Self, typename Op>
struct InnerMostDimPreserver<0, Self, Op, true> {
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) {
    for (int j = 0; j < self.m_reducedDims[0]; ++j) {
    for (typename Self::Index j = 0; j < self.m_reducedDims[0]; ++j) {
      const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
      reducer.reducePacket(self.m_impl.template packet<Unaligned>(input), accum);
    }
  }
};

// Default full reducer
template <typename Self, typename Op, typename Device, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
struct FullReducer {
  static const bool HasOptimizedImplementation = false;

  static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const Device&, typename Self::CoeffReturnType* output) {
    const typename Self::Index num_coeffs = array_prod(self.m_impl.dimensions());
    *output = InnerMostDimReducer<Self, Op>::reduce(self, 0, num_coeffs, reducer);
  }
};


#ifdef EIGEN_USE_THREADS
// Multithreaded full reducers
template <typename Eval, typename Op, bool Vectorizable = (Eval::InputPacketAccess & Op::PacketAccess)>
struct FullReducerShard {
  static void run(const Eval& eval, typename Eval::Index firstIndex, typename Eval::Index numValuesToReduce, Op& reducer, FullReducerShard* shard) {

    shard->saccum = reducer.initialize();
    for (typename Eval::Index j = 0; j < numValuesToReduce; ++j) {
      reducer.reduce(eval.m_impl.coeff(firstIndex + j), &shard->saccum);
    }
  }

  typename Eval::CoeffReturnType saccum;
};

template <typename Eval, typename Op>
struct FullReducerShard<Eval, Op, true> {
  static void run(const Eval& eval, typename Eval::Index firstIndex, typename Eval::Index numValuesToReduce, Op& reducer, FullReducerShard* shard) {

    const int packetSize = internal::unpacket_traits<typename Eval::PacketReturnType>::size;
    const typename Eval::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize;

    shard->paccum = reducer.template initializePacket<typename Eval::PacketReturnType>();
    for (typename Eval::Index j = 0; j < VectorizedSize; j += packetSize) {
      reducer.reducePacket(eval.m_impl.template packet<Unaligned>(firstIndex + j), &shard->paccum);
    }
    shard->saccum = reducer.initialize();
    for (typename Eval::Index j = VectorizedSize; j < numValuesToReduce; ++j) {
      reducer.reduce(eval.m_impl.coeff(firstIndex + j), &shard->saccum);
    }
  }

  typename Eval::PacketReturnType paccum;
  typename Eval::CoeffReturnType saccum;
};


template <typename Self, typename Op>
struct FullReducer<Self, Op, ThreadPoolDevice, false> {
  static const bool HasOptimizedImplementation = !Op::IsStateful;

  // launch one reducer per thread and accumulate the result.
  static void run(const Self& self, Op& reducer, const ThreadPoolDevice& device, typename Self::CoeffReturnType* output) {
    typedef typename Self::Index Index;
    const Index num_coeffs = array_prod(self.m_impl.dimensions());
    const Index blocksize = std::floor<Index>(static_cast<float>(num_coeffs)/device.numThreads());
    const Index numblocks = blocksize > 0 ? num_coeffs / blocksize : 0;
    eigen_assert(num_coeffs >= numblocks * blocksize);

    std::vector<Notification*> results;
    results.reserve(numblocks);
    std::vector<FullReducerShard<Self, Op, false> > shards;
    shards.resize(numblocks);
    for (Index i = 0; i < numblocks; ++i) {
      results.push_back(device.enqueue(&FullReducerShard<Self, Op, false>::run, self, i*blocksize, blocksize, reducer, &shards[i]));
    }

    FullReducerShard<Self, Op, false> finalShard;
    if (numblocks * blocksize < num_coeffs) {
      FullReducerShard<Self, Op, false>::run(self, numblocks * blocksize, num_coeffs - numblocks * blocksize, reducer, &finalShard);
    } else {
      finalShard.saccum = reducer.initialize();
    }

    for (Index i = 0; i < numblocks; ++i) {
      wait_until_ready(results[i]);
      delete results[i];
    }

    for (Index i = 0; i < numblocks; ++i) {
      reducer.reduce(shards[i].saccum, &finalShard.saccum);
    }
    *output = reducer.finalize(finalShard.saccum);
  }
};

template <typename Self, typename Op>
struct FullReducer<Self, Op, ThreadPoolDevice, true> {
  static const bool HasOptimizedImplementation = !Op::IsStateful;

  // launch one reducer per thread and accumulate the result.
  static void run(const Self& self, Op& reducer, const ThreadPoolDevice& device, typename Self::CoeffReturnType* output) {
    typedef typename Self::Index Index;
    const Index num_coeffs = array_prod(self.m_impl.dimensions());
    const Index blocksize = std::floor<Index>(static_cast<float>(num_coeffs)/device.numThreads());
    const Index numblocks = blocksize > 0 ? num_coeffs / blocksize : 0;
    eigen_assert(num_coeffs >= numblocks * blocksize);

    std::vector<Notification*> results;
    results.reserve(numblocks);
    std::vector<FullReducerShard<Self, Op, true> > shards;
    shards.resize(numblocks);
    for (Index i = 0; i < numblocks; ++i) {
      results.push_back(device.enqueue(&FullReducerShard<Self, Op, true>::run, self, i*blocksize, blocksize, reducer, &shards[i]));
    }

    FullReducerShard<Self, Op, true> finalShard;
    if (numblocks * blocksize < num_coeffs) {
      FullReducerShard<Self, Op, true>::run(self, numblocks * blocksize, num_coeffs - numblocks * blocksize, reducer, &finalShard);
    } else {
      finalShard.paccum = reducer.template initializePacket<typename Self::PacketReturnType>();
      finalShard.saccum = reducer.initialize();
    }

    for (Index i = 0; i < numblocks; ++i) {
      wait_until_ready(results[i]);
      delete results[i];
    }

    for (Index i = 0; i < numblocks; ++i) {
      reducer.reducePacket(shards[i].paccum, &finalShard.paccum);
      reducer.reduce(shards[i].saccum, &finalShard.saccum);
    }

    *output = reducer.finalizeBoth(finalShard.saccum, finalShard.paccum);
  }
};
#endif


#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
// Full reducers for GPU, don't vectorize for now

// Reducer function that enables multiple cuda thread to safely accumulate at the same
// output address. It basically reads the current value of the output variable, and
// attempts to update it with the new value. If in the meantime another cuda thread
// updated the content of the output address it will try again.
template <typename T, typename R>
__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) {
#if __CUDA_ARCH__ >= 300
  if (sizeof(T) == 4)
  {
    unsigned int oldval = *reinterpret_cast<unsigned int*>(output);
    unsigned int newval = oldval;
    reducer.reduce(accum, reinterpret_cast<T*>(&newval));
    if (newval == oldval) {
      return;
    }
    unsigned int readback;
    while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) {
      oldval = readback;
      newval = oldval;
      reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      if (newval == oldval) {
        return;
      }
    }
  }
  else if (sizeof(T) == 8) {
    unsigned long long oldval = *reinterpret_cast<unsigned long long*>(output);
    unsigned long long newval = oldval;
    reducer.reduce(accum, reinterpret_cast<T*>(&newval));
    if (newval == oldval) {
      return;
    }
    unsigned long long readback;
    while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) {
      oldval = readback;
      newval = oldval;
      reducer.reduce(accum, reinterpret_cast<T*>(&newval));
      if (newval == oldval) {
        return;
      }
    }
  }
  else {
    assert(0 && "Wordsize not supported");
  }
#else
  assert(0 && "Shouldn't be called on unsupported device");
#endif
}

template <typename T>
__device__ inline void atomicReduce(T* output, T accum, SumReducer<T>&) {
#if __CUDA_ARCH__ >= 300
  atomicAdd(output, accum);
#else
  assert(0 && "Shouldn't be called on unsupported device");
#endif
}

template <int BlockSize, int NumPerThread, typename Self,
          typename Reducer, typename Index>
__global__ void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs,
                                    typename Self::CoeffReturnType* output) {
  const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;

  if (first_index == 0) {
    *output = reducer.initialize();
  }

  typename Self::CoeffReturnType accum = reducer.initialize();
  for (Index i = 0; i < NumPerThread; ++i) {
    const Index index = first_index + i * BlockSize;
    if (index >= num_coeffs) {
      break;
    }
    typename Self::CoeffReturnType val = input.m_impl.coeff(index);
    reducer.reduce(val, &accum);
  }

  for (int offset = warpSize/2; offset > 0; offset /= 2) {
    reducer.reduce(__shfl_down(accum, offset), &accum);
  }

  if ((threadIdx.x & (warpSize - 1)) == 0) {
    atomicReduce(output, accum, reducer);
  }
}


template <typename Self, typename Op, bool Vectorizable>
struct FullReducer<Self, Op, GpuDevice, Vectorizable> {
  // Unfortunately nvidia doesn't support well exotic types such as complex,
  // so reduce the scope of the optimized version of the code to the simple case
  // of floats.
  static const bool HasOptimizedImplementation = !Op::IsStateful &&
      internal::is_same<typename Self::CoeffReturnType, float>::value;

  template <typename OutputType>
  static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) {
    assert(false && "Should only be called on floats");
  }

  static void run(const Self& self, Op& reducer, const GpuDevice& device, float* output) {
    typedef typename Self::Index Index;

    const Index num_coeffs = array_prod(self.m_impl.dimensions());
    const int block_size = 256;
    const int num_per_thread = 128;
    const int num_blocks = std::ceil(static_cast<float>(num_coeffs) / (block_size * num_per_thread));
    LAUNCH_CUDA_KERNEL((FullReductionKernel<block_size, num_per_thread>),
                       num_blocks, block_size, 0, device, reducer, self, num_coeffs, output);
  }
};

#endif


template <typename Self, typename Op,
          bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
class BlockReducer {
 public:
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename Self::CoeffReturnType CoeffReturnType;
  explicit BlockReducer(const Op& reducer) : op_(reducer) {
    accum_ = op_.initialize();
  }
  void Reduce(Index index, Index num_values_to_reduce, Scalar* data) {
    for (Index i = 0; i < num_values_to_reduce; ++i) {
      op_.reduce(data[index + i], &accum_);
    }
  }
  CoeffReturnType Finalize() {
    return op_.finalize(accum_);
  }

 private:
  CoeffReturnType accum_;
  Op op_;
};


template <typename Self, typename Op>
class BlockReducer<Self, Op, true> {
 public:
  typedef typename Self::Index Index;
  typedef typename Self::Scalar Scalar;
  typedef typename Self::CoeffReturnType CoeffReturnType;
  typedef typename Self::PacketReturnType PacketReturnType;
  explicit BlockReducer(const Op& reducer) : op_(reducer) {
    vaccum_ = op_.template initializePacket<PacketReturnType>();
    accum_ = op_.initialize();
  }
  void Reduce(Index index, Index num_values_to_reduce, Scalar* data) {
    const int packet_size = internal::unpacket_traits<PacketReturnType>::size;
    const typename Self::Index vectorized_size = (num_values_to_reduce /
                                                  packet_size) * packet_size;
    for (typename Self::Index i = 0; i < vectorized_size; i += packet_size) {
      op_.reducePacket(internal::ploadt<PacketReturnType, Unaligned>(
          &data[index + i]), &vaccum_);
    }

    for (typename Self::Index i = vectorized_size;
         i < num_values_to_reduce; ++i) {
      op_.reduce(data[index + i], &accum_);
    }
  }
  typename Self::CoeffReturnType Finalize() {
    return op_.finalizeBoth(accum_, vaccum_);
  }

 private:
  typename Self::PacketReturnType vaccum_;
  typename Self::CoeffReturnType accum_;
  Op op_;
};

} // end namespace internal


@@ -179,6 +523,7 @@ class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    TensorReductionOp(const XprType& expr, const Dims& dims) : m_expr(expr), m_dims(dims)
    { }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    TensorReductionOp(const XprType& expr, const Dims& dims, const Op& reducer) : m_expr(expr), m_dims(dims), m_reducer(reducer)
    { }

@@ -186,6 +531,7 @@ class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>
    const XprType& expression() const { return m_expr; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const Dims& dims() const { return m_dims; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const Op& reducer() const { return m_reducer; }

  protected:

@@ -201,10 +547,11 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
{
  typedef TensorReductionOp<Op, Dims, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
  static const int NumInputDims = internal::array_size<InputDimensions>::value;
  static const int NumReducedDims = internal::array_size<Dims>::value;
  static const int NumOutputDims = (NumInputDims==NumReducedDims) ? 1 : NumInputDims - NumReducedDims;
  typedef DSizes<Index, NumOutputDims> Dimensions;
  typedef typename internal::conditional<NumInputDims==NumReducedDims, Sizes<1>, DSizes<Index, NumOutputDims> >::type Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device> Self;
  static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
@@ -218,9 +565,10 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>

  static const bool ReducingInnerMostDims = internal::are_inner_most_dims<Dims, NumInputDims, Layout>::value;
  static const bool PreservingInnerMostDims = internal::preserve_inner_most_dims<Dims, NumInputDims, Layout>::value;
  static const bool RunningFullReduction = (NumInputDims==NumReducedDims);

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_reducer(op.reducer())
      : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device)
  {
    EIGEN_STATIC_ASSERT(NumInputDims >= NumReducedDims, YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)),
@@ -238,17 +586,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
    }

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    int outputIndex = 0;
    int reduceIndex = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (reduced[i]) {
        m_reducedDims[reduceIndex] = input_dims[i];
        ++reduceIndex;
      } else {
        m_dimensions[outputIndex] = input_dims[i];
        ++outputIndex;
      }
    }
    internal::DimInitializer<Dimensions>::run(input_dims, reduced, &m_dimensions, &m_reducedDims);

    // Precompute output strides.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
@@ -277,8 +615,8 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
      }
    }

    outputIndex = 0;
    reduceIndex = 0;
    int outputIndex = 0;
    int reduceIndex = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (reduced[i]) {
        m_reducedStrides[reduceIndex] = input_strides[i];
@@ -291,27 +629,50 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>

    // Special case for full reductions
    if (NumInputDims == NumReducedDims) {
      m_dimensions[0] = 1;
      eigen_assert(m_dimensions[0] == 1);
      m_preservedStrides[0] = internal::array_prod(input_dims);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
  typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
  typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
    m_impl.evalSubExprsIfNeeded(NULL);

    // Use the FullReducer if possible.
    if (RunningFullReduction && internal::FullReducer<Self, Op, Device>::HasOptimizedImplementation &&
        ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) ||
         (internal::array_prod(m_impl.dimensions()) > 1024 * 1024))) {

      bool need_assign = false;
      if (!data) {
        m_result = static_cast<CoeffReturnType*>(m_device.allocate(sizeof(CoeffReturnType)));
        data = m_result;
        need_assign = true;
      }

      Op reducer(m_reducer);
      internal::FullReducer<Self, Op, Device>::run(*this, reducer, m_device, data);
      return need_assign;
    }
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
    if (m_result) {
      m_device.deallocate(m_result);
    }
  }

  typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
  typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    if (RunningFullReduction && m_result) {
      return *m_result;
    }
    Op reducer(m_reducer);
    if (ReducingInnerMostDims) {
      const Index num_values_to_reduce =
@@ -372,6 +733,13 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
  template <int, typename, typename> friend struct internal::GenericDimReducer;
  template <typename, typename, bool> friend struct internal::InnerMostDimReducer;
  template <int, typename, typename, bool> friend struct internal::InnerMostDimPreserver;
  template <typename S, typename O, typename D, bool V> friend struct internal::FullReducer;
#ifdef EIGEN_USE_THREADS
  template <typename S, typename O, bool V> friend struct internal::FullReducerShard;
#endif
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
  template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*);
#endif

  // Returns the Index in the input tensor of the first value that needs to be
  // used to compute the reduction at output index "index".
@@ -392,7 +760,12 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[0];
      if (PreservingInnerMostDims) {
        eigen_assert(m_preservedStrides[0] == 1);
        startInput += index;
      } else {
        startInput += index * m_preservedStrides[0];
      }
    } else {
      for (int i = 0; i < NumOutputDims - 1; ++i) {
        // This is index_i in the output tensor.
@@ -400,7 +773,12 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[NumOutputDims - 1];
      if (PreservingInnerMostDims) {
        eigen_assert(m_preservedStrides[NumOutputDims - 1] == 1);
        startInput += index;
      } else {
        startInput += index * m_preservedStrides[NumOutputDims - 1];
      }
    }
    return startInput;
  }
@@ -425,6 +803,16 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>

  // Operation to apply for computing the reduction.
  Op m_reducer;

  // For full reductions
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
  static const bool RunningOnGPU = internal::is_same<Device, Eigen::GpuDevice>::value;
#else
  static const bool RunningOnGPU = false;
#endif
  CoeffReturnType* m_result;

  const Device& m_device;
};

} // end namespace Eigen
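A closing note on how the new code sizes its work, with a small worked example. This sketch is illustrative only and not part of the commit; the coefficient count and thread count are assumptions, while the per-block constants (256 threads, 128 coefficients per thread) come from the GPU path above.

    // Illustrative arithmetic only: how the ThreadPoolDevice and GpuDevice
    // reducers partition a full reduction. Input size and thread count are assumed.
    #include <cmath>
    #include <cstdio>

    int main() {
      const long num_coeffs = 10000000;  // assumed number of input coefficients
      const long num_threads = 8;        // assumed thread-pool size

      // ThreadPoolDevice path: one shard per thread, remainder folded into finalShard.
      const long blocksize = num_coeffs / num_threads;                    // 1250000 coefficients per shard
      const long numblocks = blocksize > 0 ? num_coeffs / blocksize : 0;  // 8 shards
      const long remainder = num_coeffs - numblocks * blocksize;          // 0 here

      // GpuDevice path: 256 threads per block, 128 coefficients per thread,
      // so each block covers 256 * 128 = 32768 coefficients.
      const int block_size = 256;
      const int num_per_thread = 128;
      const int num_blocks = static_cast<int>(
          std::ceil(static_cast<float>(num_coeffs) / (block_size * num_per_thread)));  // 306 blocks

      std::printf("%ld shards of %ld (+%ld leftover), %d CUDA blocks\n",
                  numblocks, blocksize, remainder, num_blocks);
      return 0;
    }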