Improved the performance of tensor reductions

Added the ability to generate random numbers following a normal distribution.
Created a test to validate the ability to generate random numbers.
Benoit Steiner 2015-01-14 10:19:33 -08:00
parent 3bd2b41b2e
commit 8f4b8d204b
3 changed files with 474 additions and 67 deletions
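
A minimal sketch of how the new functionality surfaces through the public Tensor API, assuming the existing setRandom and sum entry points; the tensor shapes and variable names are illustrative only and not part of the commit:

#include <Eigen/CXX11/Tensor>

int main() {
  // Fill a tensor with normally distributed values (new in this commit).
  Eigen::Tensor<float, 2> t(64, 32);
  t.setRandom<Eigen::internal::NormalRandomGenerator<float>>();

  // Sum over the inner-most (ColMajor) dimension; with the reworked reducers
  // this path can now use packet (SIMD) instructions.
  Eigen::array<Eigen::DenseIndex, 1> dims;
  dims[0] = 0;
  Eigen::Tensor<float, 1> sums(32);
  sums = t.sum(dims);
  return 0;
}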

unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h

@@ -16,50 +16,157 @@ namespace internal {
// Standard reduction functors
template <typename T> struct SumReducer
{
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
(*accum) += t;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = padd<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return static_cast<T>(0);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(0);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizePacket(const T saccum, const Packet& vaccum) const {
return saccum + predux(vaccum);
}
};
template <typename T> struct MeanReducer
{
static const bool PacketAccess = true;
MeanReducer() : count_(0) { }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) {
(*accum) += t;
count_++;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) {
(*accum) = padd<Packet>(*accum, p);
count_ += packet_traits<Packet>::size;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return static_cast<T>(0);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(0);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum / count_;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizePacket(const T saccum, const Packet& vaccum) const {
return (saccum + predux(vaccum)) / count_;
}
protected:
int count_;
};
template <typename T> struct MaxReducer
{
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t > *accum) { *accum = t; }
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = pmax<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return -(std::numeric_limits<T>::max)();
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(-(std::numeric_limits<T>::max)());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizePacket(const T saccum, const Packet& vaccum) const {
return (std::max)(saccum, predux_max(vaccum));
}
};
template <typename T> struct MinReducer
{
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
if (t < *accum) { *accum = t; }
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = pmin<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return (std::numeric_limits<T>::max)();
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>((std::numeric_limits<T>::max)());
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizePacket(const T saccum, const Packet& vaccum) const {
return (std::min)(saccum, predux_min(vaccum));
}
};
template <typename T> struct ProdReducer
{
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
(*accum) *= t;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const {
(*accum) = pmul<Packet>(*accum, p);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return static_cast<T>(1);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(1);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
return accum;
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizePacket(const T saccum, const Packet& vaccum) const {
return saccum * predux_mul(vaccum);
}
};
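
The reducers now keep no state of their own: the caller owns the accumulator and drives an initialize / reduce / finalize protocol (plus the packet variants used by the vectorized path). A minimal sketch of that protocol from the caller's side; ExampleSumReducer is a simplified stand-in written for illustration, not Eigen's SumReducer, and the packet methods are omitted:

#include <cstddef>
#include <iostream>

// Simplified stand-in that follows the scalar part of the reducer protocol.
template <typename T> struct ExampleSumReducer {
  T initialize() const { return static_cast<T>(0); }
  void reduce(const T t, T* accum) const { (*accum) += t; }
  T finalize(const T accum) const { return accum; }
};

// Generic driver: works with any reducer implementing the protocol above.
template <typename T, typename Reducer>
T reduceAll(const T* data, std::size_t n, const Reducer& reducer) {
  T accum = reducer.initialize();
  for (std::size_t i = 0; i < n; ++i) {
    reducer.reduce(data[i], &accum);
  }
  return reducer.finalize(accum);
}

int main() {
  const float values[4] = {1.f, 2.f, 3.f, 4.f};
  std::cout << reduceAll(values, 4, ExampleSumReducer<float>()) << "\n";  // prints 10
  return 0;
}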
#if !defined (EIGEN_USE_GPU) || !defined(__CUDACC__) || !defined(__CUDA_ARCH__)
// We're not compiling a cuda kernel
template <typename T> struct UniformRandomGenerator {
static const bool PacketAccess = true;
template<typename Index>
T operator()(Index, Index = 0) const {
return random<T>();
@@ -81,16 +188,19 @@ template <typename T> struct UniformRandomGenerator {
template <typename T> struct UniformRandomGenerator;
template <> struct UniformRandomGenerator<float> {
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC UniformRandomGenerator() {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(0, tid, 0, &m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
float operator()(Index, Index = 0) const {
return curand_uniform(&m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
float4 packetOp(Index, Index = 0) const {
return curand_uniform4(&m_state);
}
@@ -100,15 +210,18 @@ template <> struct UniformRandomGenerator<float> {
};
template <> struct UniformRandomGenerator<double> {
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC UniformRandomGenerator() {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(0, tid, 0, &m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
double operator()(Index, Index = 0) const {
return curand_uniform_double(&m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
double2 packetOp(Index, Index = 0) const {
return curand_uniform2_double(&m_state);
}
@@ -120,6 +233,84 @@ template <> struct UniformRandomGenerator<double> {
#endif
#if (!defined (EIGEN_USE_GPU) || !defined(__CUDACC__) || !defined(__CUDA_ARCH__)) && __cplusplus > 199711
// We're not compiling a cuda kernel
template <typename T> struct NormalRandomGenerator {
static const bool PacketAccess = true;
NormalRandomGenerator() : m_distribution(0, 1) {}
NormalRandomGenerator(const NormalRandomGenerator& other) : m_distribution(other.m_distribution) { }
template<typename Index>
T operator()(Index, Index = 0) const {
return m_distribution(m_generator);
}
template<typename Index>
typename internal::packet_traits<T>::type packetOp(Index, Index = 0) const {
const int packetSize = internal::packet_traits<T>::size;
EIGEN_ALIGN_DEFAULT T values[packetSize];
for (int i = 0; i < packetSize; ++i) {
values[i] = m_distribution(m_generator);
}
return internal::pload<typename internal::packet_traits<T>::type>(values);
}
mutable std::normal_distribution<T> m_distribution;
mutable std::default_random_engine m_generator;
};
#elif defined (EIGEN_USE_GPU) && defined(__CUDACC__) && defined(__CUDA_ARCH__)
// We're compiling a cuda kernel
template <typename T> struct NormalRandomGenerator;
template <> struct NormalRandomGenerator<float> {
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC NormalRandomGenerator() {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(0, tid, 0, &m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
float operator()(Index, Index = 0) const {
return curand_normal(&m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
float4 packetOp(Index, Index = 0) const {
return curand_normal4(&m_state);
}
private:
mutable curandStatePhilox4_32_10_t m_state;
};
template <> struct NormalRandomGenerator<double> {
static const bool PacketAccess = true;
EIGEN_DEVICE_FUNC NormalRandomGenerator() {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(0, tid, 0, &m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
double operator()(Index, Index = 0) const {
return curand_normal_double(&m_state);
}
template<typename Index> EIGEN_DEVICE_FUNC
double2 packetOp(Index, Index = 0) const {
return curand_normal2_double(&m_state);
}
private:
mutable curandStatePhilox4_32_10_t m_state;
};
#endif
} // end namespace internal
} // end namespace Eigen
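
On the CPU path the new generator can also be invoked directly; a small sketch assuming the Tensor module header and a C++11 compiler (the index argument is only meaningful for generators that depend on the element location):

#include <Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::internal::NormalRandomGenerator<float> gen;
  for (Eigen::DenseIndex i = 0; i < 4; ++i) {
    std::cout << gen(i) << "\n";  // each call draws from N(0, 1)
  }
  return 0;
}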

unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h

@@ -43,6 +43,75 @@ struct nested<TensorReductionOp<Op, Dims, XprType>, 1, typename eval<TensorReduc
typedef TensorReductionOp<Op, Dims, XprType> type;
};
template <typename ReducedDims, int NumTensorDims, int Layout>
struct are_inner_most_dims {
static const bool value = false;
};
#if __cplusplus > 199711L
template <typename ReducedDims, int NumTensorDims>
struct are_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
static const bool value = indices_statically_known_to_increase<ReducedDims>()() &&
index_statically_eq<ReducedDims>()(0, 0) &&
index_statically_eq<ReducedDims>()(array_size<ReducedDims>::value-1, array_size<ReducedDims>::value-1);
};
template <typename ReducedDims, int NumTensorDims>
struct are_inner_most_dims<ReducedDims, NumTensorDims, RowMajor>{
static const bool value = indices_statically_known_to_increase<ReducedDims>()() &&
index_statically_eq<ReducedDims>()(0, NumTensorDims - array_size<ReducedDims>::value) &&
index_statically_eq<ReducedDims>()(array_size<ReducedDims>::value - 1, NumTensorDims - 1);
};
#endif
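
The reduction can only take the fast inner-most path when the reduced dimensions are contiguous and sit at the storage-order end of the tensor: dimensions 0..k-1 for ColMajor, or the last k dimensions for RowMajor. The trait above verifies this at compile time on static index lists; the snippet below is only a runtime illustration of the same condition, not Eigen code:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative runtime version of the are_inner_most_dims condition:
// the reduced dimensions must be contiguous and anchored at 0 for ColMajor,
// or at num_dims - 1 for RowMajor.
bool areInnerMostDims(std::vector<int> reduced_dims, int num_dims, bool col_major) {
  std::sort(reduced_dims.begin(), reduced_dims.end());
  for (std::size_t i = 1; i < reduced_dims.size(); ++i) {
    if (reduced_dims[i] != reduced_dims[i - 1] + 1) return false;  // not contiguous
  }
  if (col_major) return reduced_dims.front() == 0;
  return reduced_dims.back() == num_dims - 1;
}

int main() {
  const std::vector<int> first_two = {0, 1};
  const std::vector<int> middle    = {1, 2};
  const std::vector<int> last_two  = {2, 3};
  assert(areInnerMostDims(first_two, 4, /*col_major=*/true));   // fast path
  assert(!areInnerMostDims(middle, 4, /*col_major=*/true));     // generic path
  assert(areInnerMostDims(last_two, 4, /*col_major=*/false));   // fast path in RowMajor
  return 0;
}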
template <int DimIndex, typename Self, typename Op>
struct GenericDimReducer {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
EIGEN_STATIC_ASSERT(DimIndex > 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) {
const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex];
GenericDimReducer<DimIndex-1, Self, Op>::reduce(self, input, reducer, accum);
}
}
};
template <typename Self, typename Op>
struct GenericDimReducer<0, Self, Op> {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) {
for (int j = 0; j < self.m_reducedDims[0]; ++j) {
const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0];
reducer.reduce(self.m_impl.coeff(input), accum);
}
}
};
template <typename Self, typename Op, bool Vectorizable = (Self::InputPacketAccess & Op::PacketAccess)>
struct InnerMostDimReducer {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
typename Self::CoeffReturnType accum = reducer.initialize();
for (typename Self::Index j = 0; j < numValuesToReduce; ++j) {
reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
}
return reducer.finalize(accum);
}
};
template <typename Self, typename Op>
struct InnerMostDimReducer<Self, Op, true> {
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) {
const int packetSize = internal::unpacket_traits<typename Self::PacketReturnType>::size;
const typename Self::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize;
typename Self::PacketReturnType p = reducer.template initializePacket<typename Self::PacketReturnType>();
for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) {
reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &p);
}
typename Self::CoeffReturnType accum = reducer.initialize();
for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; ++j) {
reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
}
return reducer.finalizePacket(accum, p);
}
};
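
The vectorized specialization above splits numValuesToReduce into full packets handled by reducePacket and a scalar tail handled by reduce, and finalizePacket folds the two accumulators together. A standalone sketch of that control flow for a sum, using a plain 4-wide array as a stand-in for an Eigen packet type:

#include <cstddef>
#include <iostream>

// Stand-in "packet": 4 lanes accumulated together.
struct Packet4f { float lane[4]; };

int main() {
  const std::size_t n = 10;
  const float data[n] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  const std::size_t packetSize = 4;
  const std::size_t vectorizedSize = (n / packetSize) * packetSize;  // 8

  // Packet accumulator (initializePacket equivalent).
  Packet4f vaccum = {{0.f, 0.f, 0.f, 0.f}};
  for (std::size_t j = 0; j < vectorizedSize; j += packetSize) {
    for (std::size_t k = 0; k < packetSize; ++k) {  // reducePacket equivalent
      vaccum.lane[k] += data[j + k];
    }
  }

  // Scalar accumulator for the tail (initialize / reduce equivalents).
  float saccum = 0.f;
  for (std::size_t j = vectorizedSize; j < n; ++j) {
    saccum += data[j];
  }

  // finalizePacket equivalent: saccum plus the horizontal sum of vaccum.
  float result = saccum;
  for (std::size_t k = 0; k < packetSize; ++k) {
    result += vaccum.lane[k];
  }
  std::cout << result << "\n";  // prints 55
  return 0;
}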
} // end namespace internal
@@ -52,8 +121,8 @@ class TensorReductionOp : public TensorBase<TensorReductionOp<Op, Dims, XprType>
typedef typename Eigen::internal::traits<TensorReductionOp>::Scalar Scalar;
typedef typename Eigen::internal::traits<TensorReductionOp>::Packet Packet;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
typedef typename Eigen::internal::nested<TensorReductionOp>::type Nested;
typedef typename Eigen::internal::traits<TensorReductionOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorReductionOp>::Index Index;
@@ -85,20 +154,27 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
typedef typename XprType::Index Index;
static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
static const int NumReducedDims = internal::array_size<Dims>::value;
static const int NumOutputDims = (NumInputDims==NumReducedDims) ? 1 : NumInputDims - NumReducedDims;
typedef DSizes<Index, NumOutputDims> Dimensions;
typedef typename XprType::Scalar Scalar;
typedef TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device> Self;
static const bool InputPacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess;
enum {
IsAligned = false,
PacketAccess = Self::InputPacketAccess && Op::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
};
static const bool ReducingInnerMostDims = internal::are_inner_most_dims<Dims, NumInputDims, Layout>::value;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_reducer(op.reducer())
{
EIGEN_STATIC_ASSERT(NumInputDims >= NumReducedDims, YOU_MADE_A_PROGRAMMING_MISTAKE);
// Bitmap indicating if an input dimension is reduced or not.
array<bool, NumInputDims> reduced;
for (int i = 0; i < NumInputDims; ++i) {
reduced[i] = false;
@@ -122,24 +198,41 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
}
}
// Precompute output strides.
if (Layout == ColMajor) {
m_outputStrides[0] = 1;
for (int i = 1; i < NumOutputDims; ++i) {
m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
}
} else {
m_outputStrides[NumOutputDims - 1] = 1;
for (int i = NumOutputDims - 2; i >= 0; --i) {
m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
}
}
// Precompute input strides.
array<Index, NumInputDims> input_strides;
if (Layout == ColMajor) {
input_strides[0] = 1;
for (int i = 1; i < NumInputDims; ++i) {
input_strides[i] = input_strides[i-1] * input_dims[i-1];
}
} else {
input_strides[NumInputDims - 1] = 1;
for (int i = NumInputDims - 2; i >= 0; --i) {
input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
}
}
outputIndex = 0;
reduceIndex = 0;
for (int i = 0; i < NumInputDims; ++i) {
if (reduced[i]) {
m_reducedStrides[reduceIndex] = input_strides[i];
++reduceIndex;
} else {
m_preservedStrides[outputIndex] = input_strides[i];
++outputIndex;
}
}
@@ -147,6 +240,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
// Special case for full reductions
if (NumInputDims == NumReducedDims) {
m_dimensions[0] = 1;
m_preservedStrides[0] = internal::array_prod(input_dims);
}
}
@@ -161,14 +255,22 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
m_impl.cleanup();
}
typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
Op reducer(m_reducer);
if (ReducingInnerMostDims) {
const Index num_values_to_reduce =
(Layout == ColMajor) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
return internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstInput(index),
num_values_to_reduce, reducer);
} else {
typename Self::CoeffReturnType accum = reducer.initialize();
internal::GenericDimReducer<NumReducedDims-1, Self, Op>::reduce(*this, firstInput(index), reducer, &accum);
return reducer.finalize(accum);
}
}
// TODO(bsteiner): provide a more efficient implementation.
@@ -179,9 +281,20 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index + packetSize - 1 < dimensions().TotalSize());
EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
if (ReducingInnerMostDims) {
const Index num_values_to_reduce =
(Layout == ColMajor) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
const Index firstIndex = firstInput(index);
for (Index i = 0; i < packetSize; ++i) {
Op reducer(m_reducer);
values[i] = internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstIndex + i * num_values_to_reduce,
num_values_to_reduce, reducer);
}
} else {
for (int i = 0; i < packetSize; ++i) {
values[i] = coeff(index + i);
}
}
PacketReturnType rslt = internal::pload<PacketReturnType>(values);
return rslt;
@@ -190,34 +303,59 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
Scalar* data() const { return NULL; }
private:
template <int, typename, typename> friend struct internal::GenericDimReducer;
template <typename, typename, bool> friend struct internal::InnerMostDimReducer;
// Returns the Index in the input tensor of the first value that needs to be
// used to compute the reduction at output index "index".
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
if (ReducingInnerMostDims) {
if (Layout == ColMajor) {
return index * m_preservedStrides[0];
} else {
return index * m_preservedStrides[NumOutputDims - 1];
}
}
Index startInput = 0;
if (Layout == ColMajor) {
for (int i = NumOutputDims - 1; i > 0; --i) {
// This is index_i in the output tensor.
const Index idx = index / m_outputStrides[i];
startInput += idx * m_preservedStrides[i];
index -= idx * m_outputStrides[i];
}
startInput += index * m_preservedStrides[0];
} else {
for (int i = 0; i < NumOutputDims - 1; ++i) {
// This is index_i in the output tensor.
const Index idx = index / m_outputStrides[i];
startInput += idx * m_preservedStrides[i];
index -= idx * m_outputStrides[i];
}
startInput += index * m_preservedStrides[NumOutputDims - 1];
}
return startInput;
}
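
A worked example of this index arithmetic, assuming a ColMajor 2x3x4 input with dimension 1 reduced, so the output is 2x4, the preserved input strides are {1, 6} and the reduced stride is 2; the values below are illustrative only:

#include <cassert>

int main() {
  // ColMajor 2x3x4 input, reducing dimension 1; the output is 2x4.
  const int output_strides[2]    = {1, 2};  // strides of the 2x4 output
  const int preserved_strides[2] = {1, 6};  // input strides of the surviving dims 0 and 2
  const int reduced_stride = 2;             // input stride of the reduced dim 1
  const int reduced_size   = 3;             // size of the reduced dim 1

  // firstInput for output index 5, i.e. output coordinate (1, 2).
  int index = 5;
  int start_input = 0;
  for (int i = 1; i > 0; --i) {             // i runs from NumOutputDims - 1 down to 1
    const int idx = index / output_strides[i];
    start_input += idx * preserved_strides[i];
    index -= idx * output_strides[i];
  }
  start_input += index * preserved_strides[0];
  assert(start_input == 13);                // input coordinate (1, 0, 2)

  // The reduction then visits inputs 13, 15, 17: coordinates (1, j, 2) for j = 0..2.
  for (int j = 0; j < reduced_size; ++j) {
    const int input = start_input + j * reduced_stride;
    (void)input;
  }
  return 0;
}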
// Dimensions of the output of the operation.
Dimensions m_dimensions;
// Precomputed strides for the output tensor.
array<Index, NumOutputDims> m_outputStrides;
// Subset of strides of the input tensor for the non-reduced dimensions.
// Indexed by output dimensions.
array<Index, NumOutputDims> m_preservedStrides;
// Subset of strides of the input tensor for the reduced dimensions.
// Indexed by reduced dimensions.
array<Index, NumReducedDims> m_reducedStrides;
// Size of the input dimensions that are reduced.
// Indexed by reduced dimensions.
array<Index, NumReducedDims> m_reducedDims;
// Evaluator for the input expression.
TensorEvaluator<ArgType, Device> m_impl;
// Operation to apply for computing the reduction.
Op m_reducer;
};

unsupported/test/cxx11_tensor_random.cpp

@@ -0,0 +1,78 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/CXX11/Tensor>
static void test_default()
{
Tensor<float, 1> vec(6);
vec.setRandom();
// Fixme: we should check that the generated numbers follow a uniform
// distribution instead.
for (int i = 1; i < 6; ++i) {
VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
}
}
static void test_normal()
{
Tensor<float, 1> vec(6);
vec.setRandom<Eigen::internal::NormalRandomGenerator<float>>();
// Fixme: we should check that the generated numbers follow a gaussian
// distribution instead.
for (int i = 1; i < 6; ++i) {
VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
}
}
struct MyGenerator {
MyGenerator() { }
MyGenerator(const MyGenerator&) { }
// Return a random value to be used. "element_location" is the
// location of the entry to set in the tensor, it can typically
// be ignored.
int operator()(Eigen::DenseIndex element_location, Eigen::DenseIndex /*unused*/ = 0) const {
return 3 * element_location;
}
// Same as above but generates several numbers at a time.
typename internal::packet_traits<int>::type packetOp(
Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const {
const int packetSize = internal::packet_traits<int>::size;
EIGEN_ALIGN_DEFAULT int values[packetSize];
for (int i = 0; i < packetSize; ++i) {
values[i] = 3 * (packet_location + i);
}
return internal::pload<typename internal::packet_traits<int>::type>(values);
}
};
static void test_custom()
{
Tensor<int, 1> vec(6);
vec.setRandom<MyGenerator>();
for (int i = 0; i < 6; ++i) {
VERIFY_IS_EQUAL(vec(i), 3*i);
}
}
void test_cxx11_tensor_random()
{
CALL_SUBTEST(test_default());
CALL_SUBTEST(test_normal());
CALL_SUBTEST(test_custom());
}