Reworked the expression evaluation mechanism to make it possible to efficiently compute convolutions and contractions in the future:

* The scheduling of computation is moved out of the assignment code and into a new TensorExecutor class
* The assignment itself is now a regular node on the expression tree (a usage sketch follows the commit summary below)
* The expression evaluators start by recursively evaluating all their subexpressions if needed
Benoit Steiner 2014-06-13 09:56:51 -07:00
parent aa664eabb9
commit 38ab7e6ed0
14 changed files with 695 additions and 174 deletions
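
A minimal sketch of the new flow (not part of the commit, and simplified; it assumes only the classes touched here and a C++11 compiler):

// What a plain tensor assignment now expands to, roughly: the assignment is built
// as a TensorAssignOp node on the expression tree and handed to a TensorExecutor,
// which schedules its evaluation on a device.
#include <unsupported/Eigen/CXX11/Tensor>

void assign_sketch() {
  Eigen::Tensor<float, 2> a(64, 64), b(64, 64), result(64, 64);

  // User-visible form: result = a + b;  -- what operator= does underneath:
  const auto expr = a + b;                     // expression node, nothing evaluated yet
  typedef Eigen::TensorAssignOp<Eigen::Tensor<float, 2>, const decltype(expr)> Assign;
  Assign assign(result, expr);                 // the assignment is itself a regular node
  Eigen::internal::TensorExecutor<const Assign, Eigen::DefaultDevice>::run(
      assign, Eigen::DefaultDevice());         // the executor drives the evaluation
}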

unsupported/Eigen/CXX11/Tensor

@@ -42,8 +42,12 @@
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h"
+#include "unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h"
 #include "unsupported/Eigen/CXX11/src/Tensor/Tensor.h"

unsupported/Eigen/CXX11/src/Tensor/Tensor.h

@@ -236,7 +236,9 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >
       // FIXME: we need to resize the tensor to fix the dimensions of the other.
       // Unfortunately this isn't possible yet when the rhs is an expression.
       // resize(other.dimensions());
-      internal::TensorAssign<Tensor, const OtherDerived>::run(*this, other);
+      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
+      Assign assign(*this, other);
+      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
       return *this;
     }

unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h

@@ -10,10 +10,6 @@
 #ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
 #define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
 
-#ifdef EIGEN_USE_THREADS
-#include <future>
-#endif
-
 namespace Eigen {
 
 /** \class TensorAssign
@@ -21,172 +17,134 @@ namespace Eigen {
  *
  * \brief The tensor assignment class.
  *
- * This class is responsible for triggering the evaluation of the expressions
- * used on the lhs and rhs of an assignment operator and copy the result of
- * the evaluation of the rhs expression at the address computed during the
- * evaluation lhs expression.
- *
- * TODO: vectorization. For now the code only uses scalars
- * TODO: parallelisation using multithreading on cpu, or kernels on gpu.
+ * This class represents the assignment of the values resulting from the evaluation of
+ * the rhs expression to the memory locations denoted by the lhs expression.
  */
 namespace internal {
 
-// Default strategy: the expressions are evaluated with a single cpu thread.
-template<typename Derived1, typename Derived2, typename Device = DefaultDevice, bool Vectorizable = TensorEvaluator<Derived1, Device>::PacketAccess & TensorEvaluator<Derived2, Device>::PacketAccess>
-struct TensorAssign
-{
-  typedef typename Derived1::Index Index;
-  EIGEN_DEVICE_FUNC
-  static inline void run(Derived1& dst, const Derived2& src, const Device& device = Device())
-  {
-    TensorEvaluator<Derived1, Device> evalDst(dst, device);
-    TensorEvaluator<Derived2, Device> evalSrc(src, device);
-    const Index size = dst.size();
-    for (Index i = 0; i < size; ++i) {
-      evalDst.coeffRef(i) = evalSrc.coeff(i);
-    }
-  }
-};
-
-template<typename Derived1, typename Derived2, typename Device>
-struct TensorAssign<Derived1, Derived2, Device, true>
-{
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1& dst, const Derived2& src, const Device& device = Device())
-  {
-    TensorEvaluator<Derived1, Device> evalDst(dst, device);
-    TensorEvaluator<Derived2, Device> evalSrc(src, device);
-    const Index size = dst.size();
-    static const int LhsStoreMode = TensorEvaluator<Derived1, Device>::IsAligned ? Aligned : Unaligned;
-    static const int RhsLoadMode = TensorEvaluator<Derived2, Device>::IsAligned ? Aligned : Unaligned;
-    static const int PacketSize = unpacket_traits<typename TensorEvaluator<Derived1, Device>::PacketReturnType>::size;
-    const int VectorizedSize = (size / PacketSize) * PacketSize;
-    for (Index i = 0; i < VectorizedSize; i += PacketSize) {
-      evalDst.template writePacket<LhsStoreMode>(i, evalSrc.template packet<RhsLoadMode>(i));
-    }
-    for (Index i = VectorizedSize; i < size; ++i) {
-      evalDst.coeffRef(i) = evalSrc.coeff(i);
-    }
-  }
-};
-
-// Multicore strategy: the index space is partitioned and each core is assigned to a partition
-#ifdef EIGEN_USE_THREADS
-template <typename LhsEval, typename RhsEval, typename Index, bool Vectorizable = LhsEval::PacketAccess & RhsEval::PacketAccess>
-struct EvalRange {
-  static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) {
-    eigen_assert(last > first);
-    for (Index i = first; i < last; ++i) {
-      dst.coeffRef(i) = src.coeff(i);
-    }
-  }
-};
-
-template <typename LhsEval, typename RhsEval, typename Index>
-struct EvalRange<LhsEval, RhsEval, Index, true> {
-  static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) {
-    eigen_assert(last > first);
-    Index i = first;
-    static const int PacketSize = unpacket_traits<typename LhsEval::PacketReturnType>::size;
-    if (last - first > PacketSize) {
-      static const int LhsStoreMode = LhsEval::IsAligned ? Aligned : Unaligned;
-      static const int RhsLoadMode = RhsEval::IsAligned ? Aligned : Unaligned;
-      eigen_assert(first % PacketSize == 0);
-      Index lastPacket = last - (last % PacketSize);
-      for (; i < lastPacket; i += PacketSize) {
-        dst.template writePacket<LhsStoreMode>(i, src.template packet<RhsLoadMode>(i));
-      }
-    }
-    for (; i < last; ++i) {
-      dst.coeffRef(i) = src.coeff(i);
-    }
-  }
-};
-
-template<typename Derived1, typename Derived2>
-struct TensorAssignMultiThreaded
-{
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1& dst, const Derived2& src, const ThreadPoolDevice& device)
-  {
-    TensorEvaluator<Derived1, DefaultDevice> evalDst(dst, DefaultDevice());
-    TensorEvaluator<Derived2, DefaultDevice> evalSrc(src, DefaultDevice());
-    const Index size = dst.size();
-    static const bool Vectorizable = TensorEvaluator<Derived1, DefaultDevice>::PacketAccess & TensorEvaluator<Derived2, DefaultDevice>::PacketAccess;
-    static const int PacketSize = Vectorizable ? unpacket_traits<typename TensorEvaluator<Derived1, DefaultDevice>::PacketReturnType>::size : 1;
-    int blocksz = static_cast<int>(ceil(static_cast<float>(size)/device.numThreads()) + PacketSize - 1);
-    const Index blocksize = std::max<Index>(PacketSize, (blocksz - (blocksz % PacketSize)));
-    const Index numblocks = size / blocksize;
-    Index i = 0;
-    std::vector<std::future<void> > results;
-    results.reserve(numblocks);
-    for (int i = 0; i < numblocks; ++i) {
-      results.push_back(std::async(std::launch::async, &EvalRange<TensorEvaluator<Derived1, DefaultDevice>, TensorEvaluator<Derived2, DefaultDevice>, Index>::run, evalDst, evalSrc, i*blocksize, (i+1)*blocksize));
-    }
-    for (int i = 0; i < numblocks; ++i) {
-      results[i].get();
-    }
-    if (numblocks * blocksize < size) {
-      EvalRange<TensorEvaluator<Derived1, DefaultDevice>, TensorEvaluator<Derived2, DefaultDevice>, Index>::run(evalDst, evalSrc, numblocks * blocksize, size);
-    }
-  }
-};
-#endif
-
-// GPU: the evaluation of the expressions is offloaded to a GPU.
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
-template <typename LhsEvaluator, typename RhsEvaluator>
-__global__ void EigenMetaKernelNoCheck(LhsEvaluator evalDst, const RhsEvaluator evalSrc) {
-  const int index = blockIdx.x * blockDim.x + threadIdx.x;
-  evalDst.coeffRef(index) = evalSrc.coeff(index);
-}
-template <typename LhsEvaluator, typename RhsEvaluator>
-__global__ void EigenMetaKernelPeel(LhsEvaluator evalDst, const RhsEvaluator evalSrc, int peel_start_offset, int size) {
-  const int index = peel_start_offset + blockIdx.x * blockDim.x + threadIdx.x;
-  if (index < size) {
-    evalDst.coeffRef(index) = evalSrc.coeff(index);
-  }
-}
-
-template<typename Derived1, typename Derived2>
-struct TensorAssignGpu
-{
-  typedef typename Derived1::Index Index;
-  static inline void run(Derived1& dst, const Derived2& src, const GpuDevice& device)
-  {
-    TensorEvaluator<Derived1, GpuDevice> evalDst(dst, device);
-    TensorEvaluator<Derived2, GpuDevice> evalSrc(src, device);
-    const Index size = dst.size();
-    const int block_size = std::min<int>(size, 32*32);
-    const int num_blocks = size / block_size;
-    EigenMetaKernelNoCheck<TensorEvaluator<Derived1, GpuDevice>, TensorEvaluator<Derived2, GpuDevice> > <<<num_blocks, block_size, 0, device.stream()>>>(evalDst, evalSrc);
-    const int remaining_items = size % block_size;
-    if (remaining_items > 0) {
-      const int peel_start_offset = num_blocks * block_size;
-      const int peel_block_size = std::min<int>(size, 32);
-      const int peel_num_blocks = (remaining_items + peel_block_size - 1) / peel_block_size;
-      EigenMetaKernelPeel<TensorEvaluator<Derived1, GpuDevice>, TensorEvaluator<Derived2, GpuDevice> > <<<peel_num_blocks, peel_block_size, 0, device.stream()>>>(evalDst, evalSrc, peel_start_offset, size);
-    }
-  }
-};
-#endif
-
-} // end namespace internal
+template<typename LhsXprType, typename RhsXprType>
+struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
+{
+  typedef typename LhsXprType::Scalar Scalar;
+  typedef typename internal::packet_traits<Scalar>::type Packet;
+  typedef typename traits<LhsXprType>::StorageKind StorageKind;
+  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
+                                      typename traits<RhsXprType>::Index>::type Index;
+  typedef typename LhsXprType::Nested LhsNested;
+  typedef typename RhsXprType::Nested RhsNested;
+  typedef typename remove_reference<LhsNested>::type _LhsNested;
+  typedef typename remove_reference<RhsNested>::type _RhsNested;
+
+  enum {
+    Flags = 0,
+  };
+};
+
+template<typename LhsXprType, typename RhsXprType>
+struct eval<TensorAssignOp<LhsXprType, RhsXprType>, Eigen::Dense>
+{
+  typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
+};
+
+template<typename LhsXprType, typename RhsXprType>
+struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
+{
+  typedef TensorAssignOp<LhsXprType, RhsXprType> type;
+};
+
+} // end namespace internal
+
+template<typename LhsXprType, typename RhsXprType>
+class TensorAssignOp : public TensorBase<TensorAssignOp<LhsXprType, RhsXprType> >
+{
+  public:
+    typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar Scalar;
+    typedef typename Eigen::internal::traits<TensorAssignOp>::Packet Packet;
+    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+    typedef typename LhsXprType::CoeffReturnType CoeffReturnType;
+    typedef typename LhsXprType::PacketReturnType PacketReturnType;
+    typedef typename Eigen::internal::nested<TensorAssignOp>::type Nested;
+    typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
+    typedef typename Eigen::internal::traits<TensorAssignOp>::Index Index;
+
+    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs)
+      : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}
+
+    /** \returns the nested expressions */
+    EIGEN_DEVICE_FUNC
+    typename internal::remove_all<typename LhsXprType::Nested>::type&
+    lhsExpression() const { return *((typename internal::remove_all<typename LhsXprType::Nested>::type*)&m_lhs_xpr); }
+
+    EIGEN_DEVICE_FUNC
+    const typename internal::remove_all<typename RhsXprType::Nested>::type&
+    rhsExpression() const { return m_rhs_xpr; }
+
+  protected:
+    typename internal::remove_all<typename LhsXprType::Nested>::type& m_lhs_xpr;
+    const typename internal::remove_all<typename RhsXprType::Nested>::type& m_rhs_xpr;
+};
+
+template<typename LeftArgType, typename RightArgType, typename Device>
+struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
+{
+  typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
+
+  enum {
+    IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
+    PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+  };
+
+  EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
+      m_leftImpl(op.lhsExpression(), device),
+      m_rightImpl(op.rhsExpression(), device)
+  { }
+
+  typedef typename XprType::Index Index;
+  typedef typename XprType::CoeffReturnType CoeffReturnType;
+  typedef typename XprType::PacketReturnType PacketReturnType;
+  typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
+
+  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
+  {
+    // TODO: use left impl instead if right impl dimensions are known at compile time.
+    return m_rightImpl.dimensions();
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+    m_leftImpl.evalSubExprsIfNeeded();
+    m_rightImpl.evalSubExprsIfNeeded();
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_leftImpl.cleanup();
+    m_rightImpl.cleanup();
+  }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
+    m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
+    static const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ? Aligned : Unaligned;
+    static const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ? Aligned : Unaligned;
+    m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
+  }
+
+  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
+  {
+    return m_leftImpl.coeff(index);
+  }
+  template<int LoadMode>
+  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+  {
+    return m_leftImpl.template packet<LoadMode>(index);
+  }
+
+ private:
+  TensorEvaluator<LeftArgType, Device> m_leftImpl;
+  TensorEvaluator<RightArgType, Device> m_rightImpl;
+};
 
 } // end namespace Eigen
 
 #endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
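
To make the new evaluation protocol concrete, here is a hand-driven walk over the assign evaluator (a sketch, not part of the commit; TensorExecutor performs exactly these steps for the single-threaded scalar path):

#include <unsupported/Eigen/CXX11/Tensor>

void assign_evaluator_sketch() {
  Eigen::Tensor<float, 1> a(128), b(128), out(128);
  Eigen::DefaultDevice device;

  const auto expr = (a + a) + b;   // cwise tree: Binary(Binary(a, a), b)
  typedef Eigen::TensorAssignOp<Eigen::Tensor<float, 1>, const decltype(expr)> Assign;
  Assign assign(out, expr);

  Eigen::TensorEvaluator<const Assign, Eigen::DefaultDevice> eval(assign, device);
  eval.evalSubExprsIfNeeded();     // recurses through both binary-op evaluators down to the leaves (no-ops here)
  for (int i = 0; i < 128; ++i) {
    eval.evalScalar(i);            // out.coeffRef(i) = expr.coeff(i)
  }
  eval.cleanup();                  // same recursion on the way out; nothing to release for cwise nodes
}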

unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h

@@ -184,6 +184,14 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
       buffer[i] += coeff(i);
     }
   }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+    m_leftImpl.evalSubExprsIfNeeded();
+    m_rightImpl.evalSubExprsIfNeeded();
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_leftImpl.cleanup();
+    m_rightImpl.cleanup();
+  }
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
   {

unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h

@@ -153,6 +153,15 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
   const Dimensions& dimensions() const { return m_dimensions; }
 
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+    m_inputImpl.evalSubExprsIfNeeded();
+    m_kernelImpl.evalSubExprsIfNeeded();
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_inputImpl.cleanup();
+    m_kernelImpl.cleanup();
+  }
+
   void evalTo(typename XprType::Scalar* buffer) const {
     for (int i = 0; i < dimensions().TotalSize(); ++i) {
       buffer[i] += coeff(i);

unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h

@@ -31,7 +31,10 @@ template <typename ExpressionType, typename DeviceType> class TensorDevice {
     template<typename OtherDerived>
     EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) {
-      internal::TensorAssign<ExpressionType, const OtherDerived, DeviceType>::run(m_expression, other, m_device);
+      typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
+      Assign assign(m_expression, other);
+      static const bool Vectorize = TensorEvaluator<const Assign, DeviceType>::PacketAccess;
+      internal::TensorExecutor<const Assign, DeviceType, Vectorize>::run(assign, m_device);
       return *this;
     }

@@ -48,7 +51,10 @@ template <typename ExpressionType> class TensorDevice<ExpressionType, ThreadPool
     template<typename OtherDerived>
     EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) {
-      internal::TensorAssignMultiThreaded<ExpressionType, const OtherDerived>::run(m_expression, other, m_device);
+      typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
+      Assign assign(m_expression, other);
+      static const bool Vectorize = TensorEvaluator<const Assign, ThreadPoolDevice>::PacketAccess;
+      internal::TensorExecutor<const Assign, ThreadPoolDevice, Vectorize>::run(assign, m_device);
       return *this;
     }

@@ -67,13 +73,15 @@ template <typename ExpressionType> class TensorDevice<ExpressionType, GpuDevice>
     template<typename OtherDerived>
     EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) {
-      internal::TensorAssignGpu<ExpressionType, const OtherDerived>::run(m_expression, other, m_device);
+      typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
+      Assign assign(m_expression, other);
+      internal::TensorExecutor<const Assign, GpuDevice, false>::run(assign, m_device);
       return *this;
     }
 
   protected:
     const GpuDevice& m_device;
-    ExpressionType& m_expression;
+    ExpressionType m_expression;
 };
 #endif
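
Usage sketch for the device API above (not part of the commit; it assumes EIGEN_USE_THREADS and a ThreadPoolDevice constructed from a thread count, which is how the device type looks at this point):

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>

void device_sketch() {
  Eigen::Tensor<float, 2> a(256, 256), b(256, 256), c(256, 256);

  // Default path: operator= builds a TensorAssignOp and runs the DefaultDevice executor.
  c = a + b;

  // Thread-pool path: TensorDevice::operator= dispatches the same TensorAssignOp
  // to TensorExecutor<const Assign, ThreadPoolDevice, Vectorize>.
  Eigen::ThreadPoolDevice pool(4 /* thread count; assumed constructor */);
  c.device(pool) = a + b;
}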

unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h

@@ -0,0 +1,146 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
#define EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
namespace Eigen {
/** \class TensorEvalTo
* \ingroup CXX11_Tensor_Module
*
* \brief Evaluates an expression and writes the result into a preallocated buffer.
*
*
*/
namespace internal {
template<typename XprType>
struct traits<TensorEvalToOp<XprType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::StorageKind StorageKind;
typedef typename traits<XprType>::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
enum {
Flags = 0,
};
};
template<typename XprType>
struct eval<TensorEvalToOp<XprType>, Eigen::Dense>
{
typedef const TensorEvalToOp<XprType>& type;
};
template<typename XprType>
struct nested<TensorEvalToOp<XprType>, 1, typename eval<TensorEvalToOp<XprType> >::type>
{
typedef TensorEvalToOp<XprType> type;
};
} // end namespace internal
template<typename XprType>
class TensorEvalToOp : public TensorBase<TensorEvalToOp<XprType> >
{
public:
typedef typename Eigen::internal::traits<TensorEvalToOp>::Scalar Scalar;
typedef typename Eigen::internal::traits<TensorEvalToOp>::Packet Packet;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType;
typedef typename Eigen::internal::nested<TensorEvalToOp>::type Nested;
typedef typename Eigen::internal::traits<TensorEvalToOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorEvalToOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(Scalar* buffer, const XprType& expr)
: m_xpr(expr), m_buffer(buffer) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
EIGEN_DEVICE_FUNC Scalar* buffer() const { return m_buffer; }
protected:
typename XprType::Nested m_xpr;
Scalar* m_buffer;
};
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorEvalToOp<ArgType>, Device>
{
typedef TensorEvalToOp<ArgType> XprType;
typedef typename ArgType::Scalar Scalar;
typedef typename ArgType::Packet Packet;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
enum {
IsAligned = true,
PacketAccess = true,
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_device(device), m_buffer(op.buffer())
{ }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~TensorEvaluator() {
}
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
m_impl.evalSubExprsIfNeeded();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
m_buffer[i] = m_impl.coeff(i);
}
EIGEN_STRONG_INLINE void evalPacket(Index i) {
internal::pstoret<Scalar, Packet, Aligned>(m_buffer + i, m_impl.template packet<TensorEvaluator<ArgType, Device>::IsAligned ? Aligned : Unaligned>(i));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
m_impl.cleanup();
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return m_buffer[index];
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return internal::ploadt<Packet, LoadMode>(m_buffer + index);
}
private:
TensorEvaluator<ArgType, Device> m_impl;
const Device& m_device;
Scalar* m_buffer;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H
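
A sketch of what TensorEvalToOp is for (not part of the commit): it evaluates an arbitrary expression directly into a buffer owned by the caller, which is exactly how TensorForcedEvalOp below materializes its temporary.

#include <unsupported/Eigen/CXX11/Tensor>
#include <vector>

void eval_to_sketch() {
  Eigen::Tensor<float, 1> a(1024), b(1024);
  std::vector<float> buffer(1024);             // caller-owned destination storage

  const auto expr = a + b;
  typedef Eigen::TensorEvalToOp<const decltype(expr)> EvalTo;
  EvalTo eval_to(buffer.data(), expr);

  // Force the scalar path: the evaluator advertises aligned packet stores, and a
  // plain std::vector buffer is not guaranteed to satisfy packet alignment.
  Eigen::internal::TensorExecutor<const EvalTo, Eigen::DefaultDevice, false>::run(
      eval_to, Eigen::DefaultDevice());
  // buffer now holds the 1024 coefficients of a + b.
}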

unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h

@@ -38,27 +38,32 @@ struct TensorEvaluator
     PacketAccess = Derived::PacketAccess,
   };
 
-  EIGEN_DEVICE_FUNC TensorEvaluator(Derived& m, const Device&)
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(Derived& m, const Device&)
       : m_data(const_cast<Scalar*>(m.data())), m_dims(m.dimensions())
   { }
 
-  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dims; }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }
 
-  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
+    eigen_assert(m_data);
     return m_data[index];
   }
 
-  EIGEN_DEVICE_FUNC Scalar& coeffRef(Index index) {
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
+    eigen_assert(m_data);
     return m_data[index];
   }
 
-  template<int LoadMode>
+  template<int LoadMode> EIGEN_STRONG_INLINE
   PacketReturnType packet(Index index) const
   {
     return internal::ploadt<Packet, LoadMode>(m_data + index);
   }
 
-  template <int StoreMode>
+  template <int StoreMode> EIGEN_STRONG_INLINE
   void writePacket(Index index, const Packet& x)
   {
     return internal::pstoret<Scalar, Packet, StoreMode>(m_data + index, x);

@@ -95,13 +100,16 @@ struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
   EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
 
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { }
+
   EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
   {
     return m_functor(index);
   }
 
   template<int LoadMode>
-  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+  EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
   {
     return m_functor.packetOp(index);
   }

@@ -137,13 +145,20 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
   EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }
 
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+    m_argImpl.evalSubExprsIfNeeded();
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_argImpl.cleanup();
+  }
+
   EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
   {
     return m_functor(m_argImpl.coeff(index));
   }
 
   template<int LoadMode>
-  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+  EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
   {
     return m_functor.packetOp(m_argImpl.template packet<LoadMode>(index));
   }

@@ -184,12 +199,21 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
     return m_leftImpl.dimensions();
   }
 
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+    m_leftImpl.evalSubExprsIfNeeded();
+    m_rightImpl.evalSubExprsIfNeeded();
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_leftImpl.cleanup();
+    m_rightImpl.cleanup();
+  }
+
   EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
   {
     return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index));
   }
 
   template<int LoadMode>
-  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+  EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
   {
     return m_functor.packetOp(m_leftImpl.template packet<LoadMode>(index), m_rightImpl.template packet<LoadMode>(index));
   }

@@ -230,12 +254,24 @@ struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>
     // TODO: use then or else impl instead if they happen to be known at compile time.
     return m_condImpl.dimensions();
   }
 
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+    m_condImpl.evalSubExprsIfNeeded();
+    m_thenImpl.evalSubExprsIfNeeded();
+    m_elseImpl.evalSubExprsIfNeeded();
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_condImpl.cleanup();
+    m_thenImpl.cleanup();
+    m_elseImpl.cleanup();
+  }
+
   EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
   {
     return m_condImpl.coeff(index) ? m_thenImpl.coeff(index) : m_elseImpl.coeff(index);
   }
 
   template<int LoadMode>
-  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+  PacketReturnType packet(Index index) const
   {
     static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
     internal::Selector<PacketSize> select;
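
The leaf evaluator above is the piece the executor ultimately talks to; the following sketch (not part of the commit) exercises its scalar and packet access paths directly:

#include <unsupported/Eigen/CXX11/Tensor>

void leaf_evaluator_sketch() {
  // The leaf evaluator wraps raw tensor storage; TensorExecutor picks between
  // the scalar and packet paths based on the PacketAccess flag.
  Eigen::Tensor<float, 1> t(64);
  Eigen::DefaultDevice device;
  Eigen::TensorEvaluator<Eigen::Tensor<float, 1>, Eigen::DefaultDevice> eval(t, device);

  typedef Eigen::internal::packet_traits<float>::type Packet;
  const int PacketSize = Eigen::internal::unpacket_traits<Packet>::size;

  eval.coeffRef(0) = 1.0f;                                       // scalar store
  eval.writePacket<Eigen::Aligned>(PacketSize,                   // packet store (tensor storage is aligned)
                                   Eigen::internal::pset1<Packet>(2.0f));
  float x = eval.coeff(PacketSize);                              // scalar load, reads 2.0f back
  (void)x;
}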

unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h

@@ -0,0 +1,194 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
#ifdef EIGEN_USE_THREADS
#include <future>
#endif
namespace Eigen {
/** \class TensorExecutor
* \ingroup CXX11_Tensor_Module
*
* \brief The tensor executor class.
*
* This class is responsible for launching the evaluation of the expression on
* the specified computing device.
*/
namespace internal {
// Default strategy: the expression is evaluated with a single cpu thread.
template<typename Expression, typename Device = DefaultDevice, bool Vectorizable = TensorEvaluator<Expression, Device>::PacketAccess>
struct TensorExecutor
{
typedef typename Expression::Index Index;
EIGEN_DEVICE_FUNC
static inline void run(const Expression& expr, const Device& device = Device())
{
TensorEvaluator<Expression, Device> evaluator(expr, device);
evaluator.evalSubExprsIfNeeded();
const Index size = evaluator.dimensions().TotalSize();
for (Index i = 0; i < size; ++i) {
evaluator.evalScalar(i);
}
evaluator.cleanup();
}
};
template<typename Expression>
struct TensorExecutor<Expression, DefaultDevice, true>
{
typedef typename Expression::Index Index;
static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice())
{
TensorEvaluator<Expression, DefaultDevice> evaluator(expr, device);
evaluator.evalSubExprsIfNeeded();
const Index size = evaluator.dimensions().TotalSize();
static const int PacketSize = unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::PacketReturnType>::size;
const int VectorizedSize = (size / PacketSize) * PacketSize;
for (Index i = 0; i < VectorizedSize; i += PacketSize) {
evaluator.evalPacket(i);
}
for (Index i = VectorizedSize; i < size; ++i) {
evaluator.evalScalar(i);
}
evaluator.cleanup();
}
};
// Multicore strategy: the index space is partitioned and each partition is executed on a single core
#ifdef EIGEN_USE_THREADS
template <typename Evaluator, typename Index, bool Vectorizable = Evaluator::PacketAccess>
struct EvalRange {
static void run(Evaluator& evaluator, const Index first, const Index last) {
eigen_assert(last > first);
for (Index i = first; i < last; ++i) {
evaluator.evalScalar(i);
}
}
};
template <typename Evaluator, typename Index>
struct EvalRange<Evaluator, Index, true> {
static void run(Evaluator& evaluator, const Index first, const Index last) {
eigen_assert(last > first);
Index i = first;
static const int PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size;
if (last - first > PacketSize) {
eigen_assert(first % PacketSize == 0);
Index lastPacket = last - (last % PacketSize);
for (; i < lastPacket; i += PacketSize) {
evaluator.evalPacket(i);
}
}
for (; i < last; ++i) {
evaluator.evalScalar(i);
}
}
};
template<typename Expression, bool Vectorizable>
struct TensorExecutor<Expression, ThreadPoolDevice, Vectorizable>
{
typedef typename Expression::Index Index;
static inline void run(const Expression& expr, const ThreadPoolDevice& device)
{
TensorEvaluator<Expression, ThreadPoolDevice> evaluator(expr, device);
evaluator.evalSubExprsIfNeeded();
const Index size = evaluator.dimensions().TotalSize();
static const int PacketSize = Vectorizable ? unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::PacketReturnType>::size : 1;
int blocksz = static_cast<int>(std::ceil(static_cast<float>(size)/device.numThreads())) + PacketSize - 1;
const Index blocksize = std::max<Index>(PacketSize, (blocksz - (blocksz % PacketSize)));
const Index numblocks = size / blocksize;
TensorEvaluator<Expression, DefaultDevice> single_threaded_eval(expr, DefaultDevice());
Index i = 0;
std::vector<std::future<void> > results;
results.reserve(numblocks);
for (int i = 0; i < numblocks; ++i) {
results.push_back(std::async(std::launch::async, &EvalRange<TensorEvaluator<Expression, DefaultDevice>, Index>::run, single_threaded_eval, i*blocksize, (i+1)*blocksize));
}
for (int i = 0; i < numblocks; ++i) {
results[i].get();
}
if (numblocks * blocksize < size) {
EvalRange<TensorEvaluator<Expression, DefaultDevice>, Index>::run(single_threaded_eval, numblocks * blocksize, size);
}
evaluator.cleanup();
}
};
#endif
// GPU: the evaluation of the expression is offloaded to a GPU.
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
template <typename Evaluator>
__global__ void EigenMetaKernelNoCheck(Evaluator eval) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
eval.evalScalar(index);
}
template <typename Evaluator>
__global__ void EigenMetaKernelPeel(Evaluator eval, int peel_start_offset, int size) {
const int index = peel_start_offset + blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
eval.evalScalar(index);
}
}
template<typename Expression, bool Vectorizable>
struct TensorExecutor<Expression, GpuDevice, Vectorizable>
{
typedef typename Expression::Index Index;
static inline void run(const Expression& expr, const GpuDevice& device)
{
TensorEvaluator<Expression, GpuDevice> evaluator(expr, device);
evaluator.evalSubExprsIfNeeded();
const Index size = evaluator.dimensions().TotalSize();
const int block_size = std::min<int>(size, 32*32);
const int num_blocks = size / block_size;
EigenMetaKernelNoCheck<TensorEvaluator<Expression, GpuDevice> > <<<num_blocks, block_size, 0, device.stream()>>>(evaluator);
const int remaining_items = size % block_size;
if (remaining_items > 0) {
const int peel_start_offset = num_blocks * block_size;
const int peel_block_size = std::min<int>(size, 32);
const int peel_num_blocks = (remaining_items + peel_block_size - 1) / peel_block_size;
EigenMetaKernelPeel<TensorEvaluator<Expression, GpuDevice> > <<<peel_num_blocks, peel_block_size, 0, device.stream()>>>(evaluator, peel_start_offset, size);
}
evaluator.cleanup();
}
};
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
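
To make the thread-pool partitioning above concrete, here is the block arithmetic from run() with assumed numbers (1000 coefficients, 4 threads, a packet size of 4); this only illustrates the formula, it is not library code:

#include <algorithm>
#include <cmath>

void partition_sketch() {
  const int size = 1000, num_threads = 4, PacketSize = 4;   // assumed values

  int blocksz = static_cast<int>(std::ceil(static_cast<float>(size) / num_threads)) + PacketSize - 1;  // 250 + 3 = 253
  const int blocksize = std::max(PacketSize, blocksz - (blocksz % PacketSize));                         // 252, a multiple of PacketSize
  const int numblocks = size / blocksize;                                                               // 3 blocks handed to std::async

  // Workers evaluate [0,252), [252,504), [504,756); the caller finishes the
  // tail [756,1000) through EvalRange::run, which peels the final partial packet.
  (void)numblocks;
}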

unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h

@@ -200,7 +200,9 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
     {
       // FIXME: check that the dimensions of other match the dimensions of *this.
      // Unfortunately this isn't possible yet when the rhs is an expression.
-      internal::TensorAssign<TensorFixedSize, const OtherDerived>::run(*this, other);
+      typedef TensorAssignOp<Self, const OtherDerived> Assign;
+      Assign assign(*this, other);
+      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
       return *this;
     }

unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h

@@ -0,0 +1,142 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
namespace Eigen {
/** \class TensorForcedEval
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor forced-evaluation class: materializes an expression into a temporary buffer.
*
*
*/
namespace internal {
template<typename XprType>
struct traits<TensorForcedEvalOp<XprType> >
{
// Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::StorageKind StorageKind;
typedef typename traits<XprType>::Index Index;
typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested;
enum {
Flags = 0,
};
};
template<typename XprType>
struct eval<TensorForcedEvalOp<XprType>, Eigen::Dense>
{
typedef const TensorForcedEvalOp<XprType>& type;
};
template<typename XprType>
struct nested<TensorForcedEvalOp<XprType>, 1, typename eval<TensorForcedEvalOp<XprType> >::type>
{
typedef TensorForcedEvalOp<XprType> type;
};
} // end namespace internal
template<typename XprType>
class TensorForcedEvalOp : public TensorBase<TensorForcedEvalOp<XprType> >
{
public:
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Packet Packet;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType;
typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr)
: m_xpr(expr) {}
EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; }
protected:
typename XprType::Nested m_xpr;
};
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
{
typedef TensorForcedEvalOp<ArgType> XprType;
typedef typename ArgType::Scalar Scalar;
typedef typename ArgType::Packet Packet;
typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
enum {
IsAligned = true,
PacketAccess = true,
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL)
{ }
EIGEN_DEVICE_FUNC ~TensorEvaluator() {
eigen_assert(!m_buffer);
}
typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
m_impl.evalSubExprsIfNeeded();
m_buffer = (Scalar*)m_device.allocate(m_impl.dimensions().TotalSize() * sizeof(Scalar));
typedef TensorEvalToOp<const ArgType> EvalTo;
EvalTo evalToTmp(m_buffer, m_op);
internal::TensorExecutor<const EvalTo, Device, TensorEvaluator<ArgType, Device>::PacketAccess>::run(evalToTmp, m_device);
m_impl.cleanup();
}
EIGEN_STRONG_INLINE void cleanup() {
m_device.deallocate(m_buffer);
m_buffer = NULL;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{
return m_buffer[index];
}
template<int LoadMode>
EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
return internal::ploadt<Packet, LoadMode>(m_buffer + index);
}
private:
TensorEvaluator<ArgType, Device> m_impl;
const ArgType m_op;
const Device& m_device;
Scalar* m_buffer;
};
} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
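
A sketch of the forced-evaluation flow (not part of the commit; it names TensorForcedEvalOp directly rather than going through the public tensor API, and assumes the device type provides the allocate()/deallocate() calls used by the evaluator above):

#include <unsupported/Eigen/CXX11/Tensor>

void forced_eval_sketch() {
  Eigen::Tensor<float, 1> a(256), b(256), out(256);

  const auto expr = a + b;
  typedef Eigen::TensorForcedEvalOp<const decltype(expr)> Forced;
  Forced forced(expr);

  typedef Eigen::TensorAssignOp<Eigen::Tensor<float, 1>, const Forced> Assign;
  Assign assign(out, forced);
  Eigen::internal::TensorExecutor<const Assign, Eigen::DefaultDevice>::run(
      assign, Eigen::DefaultDevice());
  // evalSubExprsIfNeeded() on the forced-eval evaluator materialized a + b into a
  // device-allocated buffer; the assignment loop then simply copied from that buffer.
}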

unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h

@@ -25,13 +25,16 @@ template<typename XprType> class TensorReductionOp;
 template<typename Dimensions, typename LeftXprType, typename RightXprType> class TensorContractionOp;
 template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp;
 template<typename NewDimensions, typename XprType> class TensorReshapingOp;
+template<typename LeftXprType, typename RightXprType> class TensorAssignOp;
+template<typename XprType> class TensorEvalToOp;
 template<typename XprType> class TensorForcedEvalOp;
 
 template<typename ExpressionType, typename DeviceType> class TensorDevice;
 template<typename Derived, typename Device> struct TensorEvaluator;
 
 namespace internal {
-template<typename Derived, typename OtherDerived, typename Device, bool Vectorizable> struct TensorAssign;
+template<typename Expression, typename Device, bool Vectorizable> class TensorExecutor;
 } // end namespace internal
 
 } // end namespace Eigen

unsupported/Eigen/CXX11/src/Tensor/TensorMap.h

@@ -246,7 +246,9 @@ template<typename PlainObjectType, int Options_> class TensorMap : public Tensor
     EIGEN_DEVICE_FUNC
     Self& operator=(const OtherDerived& other)
     {
-      internal::TensorAssign<Self, const OtherDerived>::run(*this, other);
+      typedef TensorAssignOp<Self, const OtherDerived> Assign;
+      Assign assign(*this, other);
+      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
       return *this;
     }

unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h

@@ -98,6 +98,13 @@ struct TensorEvaluator<const TensorReshapingOp<ArgType, NewDimensions>, Device>
   const Dimensions& dimensions() const { return m_dimensions; }
 
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+    m_impl.evalSubExprsIfNeeded();
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+    m_impl.cleanup();
+  }
+
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
   {
     return m_impl.coeff(index);