diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor index c67020581..7e504b302 100644 --- a/unsupported/Eigen/CXX11/Tensor +++ b/unsupported/Eigen/CXX11/Tensor @@ -42,8 +42,12 @@ #include "unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h" #include "unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h" #include "unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h" +#include "unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h" +#include "unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h" #include "unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h" + #include "unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h" +#include "unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h" #include "unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h" #include "unsupported/Eigen/CXX11/src/Tensor/Tensor.h" diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h index 7f614bbe8..09601fc7d 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h @@ -236,7 +236,9 @@ class Tensor : public TensorBase > // FIXME: we need to resize the tensor to fix the dimensions of the other. // Unfortunately this isn't possible yet when the rhs is an expression. // resize(other.dimensions()); - internal::TensorAssign::run(*this, other); + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); return *this; } diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h index 633a7a31b..a2a925775 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h @@ -10,10 +10,6 @@ #ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H #define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H -#ifdef EIGEN_USE_THREADS -#include -#endif - namespace Eigen { /** \class TensorAssign @@ -21,172 +17,134 @@ namespace Eigen { * * \brief The tensor assignment class. * - * This class is responsible for triggering the evaluation of the expressions - * used on the lhs and rhs of an assignment operator and copy the result of - * the evaluation of the rhs expression at the address computed during the - * evaluation lhs expression. - * - * TODO: vectorization. For now the code only uses scalars - * TODO: parallelisation using multithreading on cpu, or kernels on gpu. + * This class is represents the assignment of the values resulting from the evaluation of + * the rhs expression to the memory locations denoted by the lhs expression. */ namespace internal { - -// Default strategy: the expressions are evaluated with a single cpu thread. 
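The operator= change in Tensor.h above captures the new assignment path: instead of calling internal::TensorAssign::run directly, the expression is wrapped in a TensorAssignOp node and handed to internal::TensorExecutor, which drives the evaluation on a device. A minimal sketch of that pattern, with the template arguments written out as assumptions:

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
    {
      // Build an assignment node tying the destination tensor to the rhs expression...
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other);
      // ...and let the executor evaluate it on the default (single threaded CPU) device.
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
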
-template::PacketAccess & TensorEvaluator::PacketAccess> -struct TensorAssign +template +struct traits > { - typedef typename Derived1::Index Index; - EIGEN_DEVICE_FUNC - static inline void run(Derived1& dst, const Derived2& src, const Device& device = Device()) - { - TensorEvaluator evalDst(dst, device); - TensorEvaluator evalSrc(src, device); - const Index size = dst.size(); - for (Index i = 0; i < size; ++i) { - evalDst.coeffRef(i) = evalSrc.coeff(i); - } - } + typedef typename LhsXprType::Scalar Scalar; + typedef typename internal::packet_traits::type Packet; + typedef typename traits::StorageKind StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + + enum { + Flags = 0, + }; }; - -template -struct TensorAssign +template +struct eval, Eigen::Dense> { - typedef typename Derived1::Index Index; - static inline void run(Derived1& dst, const Derived2& src, const Device& device = Device()) - { - TensorEvaluator evalDst(dst, device); - TensorEvaluator evalSrc(src, device); - const Index size = dst.size(); - - static const int LhsStoreMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; - static const int RhsLoadMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; - static const int PacketSize = unpacket_traits::PacketReturnType>::size; - const int VectorizedSize = (size / PacketSize) * PacketSize; - - for (Index i = 0; i < VectorizedSize; i += PacketSize) { - evalDst.template writePacket(i, evalSrc.template packet(i)); - } - for (Index i = VectorizedSize; i < size; ++i) { - evalDst.coeffRef(i) = evalSrc.coeff(i); - } - } + typedef const TensorAssignOp& type; }; - - -// Multicore strategy: the index space is partitioned and each core is assigned to a partition -#ifdef EIGEN_USE_THREADS -template -struct EvalRange { - static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) { - eigen_assert(last > first); - for (Index i = first; i < last; ++i) { - dst.coeffRef(i) = src.coeff(i); - } - } -}; - -template -struct EvalRange { - static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) { - eigen_assert(last > first); - - Index i = first; - static const int PacketSize = unpacket_traits::size; - if (last - first > PacketSize) { - static const int LhsStoreMode = LhsEval::IsAligned ? Aligned : Unaligned; - static const int RhsLoadMode = RhsEval::IsAligned ? Aligned : Unaligned; - eigen_assert(first % PacketSize == 0); - Index lastPacket = last - (last % PacketSize); - for (; i < lastPacket; i += PacketSize) { - dst.template writePacket(i, src.template packet(i)); - } - } - - for (; i < last; ++i) { - dst.coeffRef(i) = src.coeff(i); - } - } -}; - -template -struct TensorAssignMultiThreaded +template +struct nested, 1, typename eval >::type> { - typedef typename Derived1::Index Index; - static inline void run(Derived1& dst, const Derived2& src, const ThreadPoolDevice& device) - { - TensorEvaluator evalDst(dst, DefaultDevice()); - TensorEvaluator evalSrc(src, Defaultevice()); - const Index size = dst.size(); - - static const bool Vectorizable = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess; - static const int PacketSize = Vectorizable ? 
unpacket_traits::PacketReturnType>::size : 1; - - int blocksz = static_cast(ceil(static_cast(size)/device.numThreads()) + PacketSize - 1); - const Index blocksize = std::max(PacketSize, (blocksz - (blocksz % PacketSize))); - const Index numblocks = size / blocksize; - - Index i = 0; - vector > results; - results.reserve(numblocks); - for (int i = 0; i < numblocks; ++i) { - results.push_back(std::async(std::launch::async, &EvalRange, TensorEvaluator, Index>::run, evalDst, evalSrc, i*blocksize, (i+1)*blocksize)); - } - - for (int i = 0; i < numblocks; ++i) { - results[i].get(); - } - - if (numblocks * blocksize < size) { - EvalRange, TensorEvaluator, Index>::run(evalDst, evalSrc, numblocks * blocksize, size); - } - } + typedef TensorAssignOp type; }; -#endif + +} // end namespace internal -// GPU: the evaluation of the expressions is offloaded to a GPU. -#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) -template -__global__ void EigenMetaKernelNoCheck(LhsEvaluator evalDst, const RhsEvaluator evalSrc) { - const int index = blockIdx.x * blockDim.x + threadIdx.x; - evalDst.coeffRef(index) = evalSrc.coeff(index); -} -template -__global__ void EigenMetaKernelPeel(LhsEvaluator evalDst, const RhsEvaluator evalSrc, int peel_start_offset, int size) { - const int index = peel_start_offset + blockIdx.x * blockDim.x + threadIdx.x; - if (index < size) { - evalDst.coeffRef(index) = evalSrc.coeff(index); + +template +class TensorAssignOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::internal::traits::Packet Packet; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename LhsXprType::CoeffReturnType CoeffReturnType; + typedef typename LhsXprType::PacketReturnType PacketReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {} + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + typename internal::remove_all::type& + lhsExpression() const { return *((typename internal::remove_all::type*)&m_lhs_xpr); } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename internal::remove_all::type& m_lhs_xpr; + const typename internal::remove_all::type& m_rhs_xpr; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef TensorAssignOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) : + m_leftImpl(op.lhsExpression(), device), + m_rightImpl(op.rhsExpression(), device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketReturnType PacketReturnType; + typedef typename TensorEvaluator::Dimensions Dimensions; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use left impl instead if right impl dimensions are known at compile time. 
+ return m_rightImpl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { + m_leftImpl.evalSubExprsIfNeeded(); + m_rightImpl.evalSubExprsIfNeeded(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { + static const int LhsStoreMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; + static const int RhsLoadMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; + m_leftImpl.template writePacket(i, m_rightImpl.template packet(i)); + } + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_leftImpl.coeff(index); + } + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + { + return m_leftImpl.template packet(index); + } + + private: + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; +}; + } -template -struct TensorAssignGpu -{ - typedef typename Derived1::Index Index; - static inline void run(Derived1& dst, const Derived2& src, const GpuDevice& device) - { - TensorEvaluator evalDst(dst, device); - TensorEvaluator evalSrc(src, device); - const Index size = dst.size(); - const int block_size = std::min(size, 32*32); - const int num_blocks = size / block_size; - EigenMetaKernelNoCheck, TensorEvaluator > <<>>(evalDst, evalSrc); - - const int remaining_items = size % block_size; - if (remaining_items > 0) { - const int peel_start_offset = num_blocks * block_size; - const int peel_block_size = std::min(size, 32); - const int peel_num_blocks = (remaining_items + peel_block_size - 1) / peel_block_size; - EigenMetaKernelPeel, TensorEvaluator > <<>>(evalDst, evalSrc, peel_start_offset, size); - } - } -}; -#endif - -} // end namespace internal - -} // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index cadbabda2..b2e12fd15 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -184,6 +184,14 @@ struct TensorEvaluator class TensorDevice { template EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) { - internal::TensorAssign::run(m_expression, other, m_device); + typedef TensorAssignOp Assign; + Assign assign(m_expression, other); + static const bool Vectorize = TensorEvaluator::PacketAccess; + internal::TensorExecutor::run(assign, m_device); return *this; } @@ -48,7 +51,10 @@ template class TensorDevice EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) { - internal::TensorAssignMultiThreaded::run(m_expression, other, m_device); + typedef TensorAssignOp Assign; + Assign assign(m_expression, other); + static const bool Vectorize = TensorEvaluator::PacketAccess; + internal::TensorExecutor::run(assign, m_device); return *this; } @@ -67,13 +73,15 @@ template class TensorDevice template EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) { - internal::TensorAssignGpu::run(m_expression, other, m_device); + typedef TensorAssignOp Assign; + Assign assign(m_expression, other); + internal::TensorExecutor::run(assign, m_device); return *this; } protected: const GpuDevice& m_device; - ExpressionType& m_expression; + ExpressionType m_expression; }; #endif diff --git 
a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h new file mode 100644 index 000000000..db716a80e --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h @@ -0,0 +1,146 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H + +namespace Eigen { + +/** \class TensorForcedEval + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. + * + * + */ +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename XprType::Scalar Scalar; + typedef typename internal::packet_traits::type Packet; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + + enum { + Flags = 0, + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorEvalToOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorEvalToOp type; +}; + +} // end namespace internal + + + + +template +class TensorEvalToOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::internal::traits::Packet Packet; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketReturnType PacketReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(Scalar* buffer, const XprType& expr) + : m_xpr(expr), m_buffer(buffer) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC Scalar* buffer() const { return m_buffer; } + + protected: + typename XprType::Nested m_xpr; + Scalar* m_buffer; +}; + + + +template +struct TensorEvaluator, Device> +{ + typedef TensorEvalToOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename ArgType::Packet Packet; + typedef typename TensorEvaluator::Dimensions Dimensions; + + enum { + IsAligned = true, + PacketAccess = true, + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_device(device), m_buffer(op.buffer()) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~TensorEvaluator() { + } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketReturnType PacketReturnType; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { + m_impl.evalSubExprsIfNeeded(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_buffer[i] = m_impl.coeff(i); + } + EIGEN_STRONG_INLINE void evalPacket(Index i) { + internal::pstoret(m_buffer + i, m_impl.template packet::IsAligned ? 
Aligned : Unaligned>(i)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_buffer[index]; + } + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return internal::ploadt(m_buffer + index); + } + + private: + TensorEvaluator m_impl; + const Device& m_device; + Scalar* m_buffer; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h index 80fe06957..5c8b079da 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h @@ -38,27 +38,32 @@ struct TensorEvaluator PacketAccess = Derived::PacketAccess, }; - EIGEN_DEVICE_FUNC TensorEvaluator(Derived& m, const Device&) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(Derived& m, const Device&) : m_data(const_cast(m.data())), m_dims(m.dimensions()) { } - EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dims; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; } - EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_data); return m_data[index]; } - EIGEN_DEVICE_FUNC Scalar& coeffRef(Index index) { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + eigen_assert(m_data); return m_data[index]; } - template + template EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { return internal::ploadt(m_data + index); } - template + template EIGEN_STRONG_INLINE void writePacket(Index index, const Packet& x) { return internal::pstoret(m_data + index, x); @@ -95,13 +100,16 @@ struct TensorEvaluator, Device> EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { } + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const { return m_functor(index); } template - EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { return m_functor.packetOp(index); } @@ -137,13 +145,20 @@ struct TensorEvaluator, Device> EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { + m_argImpl.evalSubExprsIfNeeded(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_argImpl.cleanup(); + } + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const { return m_functor(m_argImpl.coeff(index)); } template - EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { return m_functor.packetOp(m_argImpl.template packet(index)); } @@ -184,12 +199,21 @@ struct TensorEvaluator - EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { return m_functor.packetOp(m_leftImpl.template packet(index), m_rightImpl.template packet(index)); } @@ -230,12 +254,24 @@ struct TensorEvaluator // TODO: use then or 
else impl instead if they happen to be known at compile time. return m_condImpl.dimensions(); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { + m_condImpl.evalSubExprsIfNeeded(); + m_thenImpl.evalSubExprsIfNeeded(); + m_elseImpl.evalSubExprsIfNeeded(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_condImpl.cleanup(); + m_thenImpl.cleanup(); + m_elseImpl.cleanup(); + } + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const { return m_condImpl.coeff(index) ? m_thenImpl.coeff(index) : m_elseImpl.coeff(index); } template - EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + PacketReturnType packet(Index index) const { static const int PacketSize = internal::unpacket_traits::size; internal::Selector select; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h new file mode 100644 index 000000000..3e41f3290 --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -0,0 +1,194 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H + +#ifdef EIGEN_USE_THREADS +#include +#endif + +namespace Eigen { + +/** \class TensorExecutor + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor executor class. + * + * This class is responsible for launch the evaluation of the expression on + * the specified computing device. + */ +namespace internal { + +// Default strategy: the expression is evaluated with a single cpu thread. 
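The TensorDevice::operator= hunks earlier in the patch route device-targeted assignments (t.device(d) = expr) through the same mechanism, forwarding the combined evaluator's PacketAccess flag as the executor's Vectorizable argument. A sketch with assumed template arguments, DeviceType standing for the concrete device:

    template<typename OtherDerived>
    EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) {
      typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign;
      Assign assign(m_expression, other);
      // Vectorize only when the assign evaluator exposes packet access on this device.
      static const bool Vectorize = TensorEvaluator<const Assign, DeviceType>::PacketAccess;
      internal::TensorExecutor<const Assign, DeviceType, Vectorize>::run(assign, m_device);
      return *this;
    }
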
+template::PacketAccess> +struct TensorExecutor +{ + typedef typename Expression::Index Index; + EIGEN_DEVICE_FUNC + static inline void run(const Expression& expr, const Device& device = Device()) + { + TensorEvaluator evaluator(expr, device); + evaluator.evalSubExprsIfNeeded(); + + const Index size = evaluator.dimensions().TotalSize(); + for (Index i = 0; i < size; ++i) { + evaluator.evalScalar(i); + } + + evaluator.cleanup(); + } +}; + + +template +struct TensorExecutor +{ + typedef typename Expression::Index Index; + static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice()) + { + TensorEvaluator evaluator(expr, device); + evaluator.evalSubExprsIfNeeded(); + + const Index size = evaluator.dimensions().TotalSize(); + static const int PacketSize = unpacket_traits::PacketReturnType>::size; + const int VectorizedSize = (size / PacketSize) * PacketSize; + + for (Index i = 0; i < VectorizedSize; i += PacketSize) { + evaluator.evalPacket(i); + } + for (Index i = VectorizedSize; i < size; ++i) { + evaluator.evalScalar(i); + } + + evaluator.cleanup(); + } +}; + + + +// Multicore strategy: the index space is partitioned and each partition is executed on a single core +#ifdef EIGEN_USE_THREADS +template +struct EvalRange { + static void run(Evaluator& evaluator, const Index first, const Index last) { + eigen_assert(last > first); + for (Index i = first; i < last; ++i) { + evaluator.evalScalar(i); + } + } +}; + +template +struct EvalRange { + static void run(Evaluator& evaluator, const Index first, const Index last,) { + eigen_assert(last > first); + + Index i = first; + static const int PacketSize = unpacket_traits::size; + if (last - first > PacketSize) { + eigen_assert(first % PacketSize == 0); + Index lastPacket = last - (last % PacketSize); + for (; i < lastPacket; i += PacketSize) { + evaluator.evalPacket(i); + } + } + + for (; i < last; ++i) { + evaluator.evalScalar(i); + } + } +}; + +template +struct TensorExecutor +{ + typedef typename Expression::Index Index; + static inline void run(const Expression& expr, const ThreadPoolDevice& device) + { + TensorEvaluator evaluator(expr, device); + evaluator.evalSubExprsIfNeeded(); + + const Index size = evaluator.dimensions().TotalSize(); + + static const int PacketSize = Vectorizable ? unpacket_traits::PacketReturnType>::size : 1; + + int blocksz = std::ceil(static_cast(size)/device.numThreads()) + PacketSize - 1; + const Index blocksize = std::max(PacketSize, (blocksz - (blocksz % PacketSize))); + const Index numblocks = size / blocksize; + + TensorEvaluator single_threaded_eval(expr, DefaultDevice()); + + Index i = 0; + vector > results; + results.reserve(numblocks); + for (int i = 0; i < numblocks; ++i) { + results.push_back(std::async(std::launch::async, &EvalRange, Index>::run, single_threaded_eval, i*blocksize, (i+1)*blocksize)); + } + + for (int i = 0; i < numblocks; ++i) { + results[i].get(); + } + + if (numblocks * blocksize < size) { + EvalRange, Index>::run(single_threaded_eval, numblocks * blocksize, size, nullptr); + } + + evaluator.cleanup(); + } +}; +#endif + + +// GPU: the evaluation of the expression is offloaded to a GPU. 
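The ThreadPoolDevice executor above partitions the flat index space into packet-aligned blocks, launches one std::async task per block through EvalRange, and evaluates any leftover tail on the calling thread. For example, with size = 1000, 4 threads and PacketSize = 4: blocksz = ceil(1000/4) + 3 = 253, blocksize = 252, numblocks = 3, so three tasks cover [0, 756) and the remainder [756, 1000) is handled inline. A sketch of the vectorized range evaluation, an illustration of the intent rather than the exact Eigen declaration:

    // Evaluate [first, last): whole packets first, then a scalar tail.
    // Assumes `first` is packet aligned, as asserted in the patch.
    template <typename Evaluator, typename Index>
    void evalRangeVectorized(Evaluator& evaluator, Index first, Index last) {
      static const int PacketSize =
          Eigen::internal::unpacket_traits<typename Evaluator::PacketReturnType>::size;
      Index i = first;
      if (last - first > PacketSize) {
        eigen_assert(first % PacketSize == 0);
        const Index lastPacket = last - (last % PacketSize);
        for (; i < lastPacket; i += PacketSize) {
          evaluator.evalPacket(i);   // store one full packet
        }
      }
      for (; i < last; ++i) {
        evaluator.evalScalar(i);     // leftover coefficients
      }
    }
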
+#if defined(EIGEN_USE_GPU) && defined(__CUDACC__) +template +__global__ void EigenMetaKernelNoCheck(Evaluator eval) { + const int index = blockIdx.x * blockDim.x + threadIdx.x; + eval.evalScalar(index); +} +template +__global__ void EigenMetaKernelPeel(Evaluator eval, int peel_start_offset, int size) { + const int index = peel_start_offset + blockIdx.x * blockDim.x + threadIdx.x; + if (index < size) { + eval.evalScalar(index); + } +} + +template +struct TensorExecutor +{ + typedef typename Expression::Index Index; + static inline void run(const Expression& expr, const GpuDevice& device) + { + TensorEvaluator evaluator(expr, device); + evaluator.evalSubExprsIfNeeded(); + + const Index size = evaluator.dimensions().TotalSize(); + const int block_size = std::min(size, 32*32); + const int num_blocks = size / block_size; + EigenMetaKernelNoCheck > <<>>(evaluator); + + const int remaining_items = size % block_size; + if (remaining_items > 0) { + const int peel_start_offset = num_blocks * block_size; + const int peel_block_size = std::min(size, 32); + const int peel_num_blocks = (remaining_items + peel_block_size - 1) / peel_block_size; + EigenMetaKernelPeel > <<>>(evaluator, peel_start_offset, size); + } + evaluator.cleanup(); + } +}; +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h index 789c04238..d42167da9 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h @@ -200,7 +200,9 @@ class TensorFixedSize : public TensorBase::run(*this, other); + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); return *this; } diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h new file mode 100644 index 000000000..6f6641de6 --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h @@ -0,0 +1,142 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H +#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H + +namespace Eigen { + +/** \class TensorForcedEval + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. + * + * + */ +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
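The GpuDevice executor above issues two kernel launches: EigenMetaKernelNoCheck covers the largest multiple of block_size without any bounds check, and EigenMetaKernelPeel mops up the remainder behind an if (index < size) guard. For example, with size = 5000: block_size = min(5000, 32*32) = 1024, num_blocks = 4 (covering indices [0, 4096)), then remaining_items = 904 is peeled with peel_block_size = 32 and peel_num_blocks = 29. A sketch of the peel kernel's indexing, mirroring the patch with an assumed template parameter:

    template <typename Evaluator>
    __global__ void EigenMetaKernelPeel(Evaluator eval, int peel_start_offset, int size) {
      // One thread per remaining coefficient, guarded against overshooting `size`.
      const int index = peel_start_offset + blockIdx.x * blockDim.x + threadIdx.x;
      if (index < size) {
        eval.evalScalar(index);
      }
    }
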
+ typedef typename XprType::Scalar Scalar; + typedef typename internal::packet_traits::type Packet; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + + enum { + Flags = 0, + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorForcedEvalOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorForcedEvalOp type; +}; + +} // end namespace internal + + + +template +class TensorForcedEvalOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::internal::traits::Packet Packet; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketReturnType PacketReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef TensorForcedEvalOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename ArgType::Packet Packet; + typedef typename TensorEvaluator::Dimensions Dimensions; + + enum { + IsAligned = true, + PacketAccess = true, + }; + + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL) + { } + + EIGEN_DEVICE_FUNC ~TensorEvaluator() { + eigen_assert(!m_buffer); + } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename XprType::PacketReturnType PacketReturnType; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { + m_impl.evalSubExprsIfNeeded(); + m_buffer = (Scalar*)m_device.allocate(m_impl.dimensions().TotalSize() * sizeof(Scalar)); + + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(m_buffer, m_op); + internal::TensorExecutor::PacketAccess>::run(evalToTmp, m_device); + m_impl.cleanup(); + } + EIGEN_STRONG_INLINE void cleanup() { + m_device.deallocate(m_buffer); + m_buffer = NULL; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_buffer[index]; + } + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return internal::ploadt(m_buffer + index); + } + + private: + TensorEvaluator m_impl; + const ArgType m_op; + const Device& m_device; + Scalar* m_buffer; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h index 27bfe1d73..c0dffbd0c 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h @@ -25,13 +25,16 @@ template class TensorReductionOp; template class TensorContractionOp; template class TensorConvolutionOp; template class TensorReshapingOp; +template class TensorAssignOp; + +template class 
TensorEvalToOp; template class TensorForcedEvalOp; template class TensorDevice; template struct TensorEvaluator; namespace internal { -template struct TensorAssign; +template class TensorExecutor; } // end namespace internal } // end namespace Eigen diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h index 3a06170fa..c97135b63 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h @@ -246,7 +246,9 @@ template class TensorMap : public Tensor EIGEN_DEVICE_FUNC Self& operator=(const OtherDerived& other) { - internal::TensorAssign::run(*this, other); + typedef TensorAssignOp Assign; + Assign assign(*this, other); + internal::TensorExecutor::run(assign, DefaultDevice()); return *this; } diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h index e9e74581f..764bba4e6 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h @@ -98,6 +98,13 @@ struct TensorEvaluator, Device> const Dimensions& dimensions() const { return m_dimensions; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() { + m_impl.evalSubExprsIfNeeded(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_impl.coeff(index);
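Taken together, the patch makes every tensor assignment flow through TensorExecutor, with the device chosen either implicitly (DefaultDevice) or explicitly via TensorDevice. A usage sketch of the three execution paths; the device constructor arguments and the setter calls are illustrative assumptions, not part of the patch:

    #define EIGEN_USE_THREADS
    #include <unsupported/Eigen/CXX11/Tensor>

    void example() {
      Eigen::Tensor<float, 2> a(100, 100), b(100, 100), c(100, 100);
      a.setConstant(1.0f);
      b.setConstant(2.0f);

      // Default device: single threaded CPU, vectorized when PacketAccess allows it.
      c = a + b;

      // Thread pool: the index space is split into packet-aligned blocks,
      // each evaluated by a std::async task (EvalRange).
      Eigen::ThreadPoolDevice thread_pool_device(4 /* number of threads, assumed ctor */);
      c.device(thread_pool_device) = a + b;

    #if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
      // GPU: evaluation is offloaded to the EigenMetaKernel* kernels.
      Eigen::GpuDevice gpu_device;
      c.device(gpu_device) = a + b;
    #endif
    }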