From d55efa6f0f9ab9ec758c6b40204be476c01b7528 Mon Sep 17 00:00:00 2001 From: Eugene Zhulenev Date: Mon, 23 Jul 2018 15:50:55 -0700 Subject: [PATCH 1/3] TensorBlockIO --- .../Eigen/CXX11/src/Tensor/TensorBlock.h | 546 +++++++++++- .../test/cxx11_tensor_block_access.cpp | 793 +++++++++++++++++- 2 files changed, 1304 insertions(+), 35 deletions(-) diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h index 59535cd91..8ffc9d093 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h @@ -14,6 +14,32 @@ namespace Eigen { namespace internal { +namespace { + +// Helper template to choose between ColMajor and RowMajor values. +template +struct cond; + +template <> +struct cond { + template + EIGEN_STRONG_INLINE const T& operator()(const T& col, + const T& /*row*/) const { + return col; + } +}; + +template <> +struct cond { + template + EIGEN_STRONG_INLINE const T& operator()(const T& /*col*/, + const T& row) const { + return row; + } +}; + +} // namespace + /** * \class TensorBlockShapeType * \ingroup CXX11_Tensor_Module @@ -82,6 +108,512 @@ class TensorBlock { Scalar* m_data; // Not owned. }; +template +struct TensorBlockCopyOp { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Index num_coeff_to_copy, const Index dst_index, + const Index dst_stride, Scalar* EIGEN_RESTRICT dst_data, + const Index src_index, const Index src_stride, + const Scalar* EIGEN_RESTRICT src_data) { + for (Index i = 0; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i * dst_stride] = + src_data[src_index + i * src_stride]; + } + } +}; + +// NOTE: Benchmarks run on an implementation of this that broke each of the +// loops in these conditionals into it's own template specialization (to +// avoid conditionals in the caller's loop) did not show an improvement. 
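+// Illustrative sketch of the scalar path above (hypothetical data; template
+// arguments omitted): gather every third coefficient of `src` into a
+// contiguous `dst`.
+//
+//   float src[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+//   float dst[3];
+//   TensorBlockCopyOp::Run(/*num_coeff_to_copy=*/3,
+//                          /*dst_index=*/0, /*dst_stride=*/1, dst,
+//                          /*src_index=*/0, /*src_stride=*/3, src);
+//   // dst == {0, 3, 6}
+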
+template +struct TensorBlockCopyOp { + typedef typename packet_traits::type Packet; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Index num_coeff_to_copy, const Index dst_index, + const Index dst_stride, Scalar* EIGEN_RESTRICT dst_data, + const Index src_index, const Index src_stride, + const Scalar* EIGEN_RESTRICT src_data) { + if (src_stride == 1) { + const Index packet_size = internal::unpacket_traits::size; + const Index vectorized_size = + (num_coeff_to_copy / packet_size) * packet_size; + if (dst_stride == 1) { + // LINEAR + for (Index i = 0; i < vectorized_size; i += packet_size) { + Packet p = internal::ploadu(src_data + src_index + i); + internal::pstoreu(dst_data + dst_index + i, p); + } + for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i] = src_data[src_index + i]; + } + } else { + // SCATTER + for (Index i = 0; i < vectorized_size; i += packet_size) { + Packet p = internal::ploadu(src_data + src_index + i); + internal::pscatter( + dst_data + dst_index + i * dst_stride, p, dst_stride); + } + for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i * dst_stride] = src_data[src_index + i]; + } + } + } else if (src_stride == 0) { + const Index packet_size = internal::unpacket_traits::size; + const Index vectorized_size = + (num_coeff_to_copy / packet_size) * packet_size; + if (dst_stride == 1) { + // LINEAR + for (Index i = 0; i < vectorized_size; i += packet_size) { + Packet p = internal::pload1(src_data + src_index); + internal::pstoreu(dst_data + dst_index + i, p); + } + for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i] = src_data[src_index]; + } + } else { + // SCATTER + for (Index i = 0; i < vectorized_size; i += packet_size) { + Packet p = internal::pload1(src_data + src_index); + internal::pscatter( + dst_data + dst_index + i * dst_stride, p, dst_stride); + } + for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i * dst_stride] = src_data[src_index]; + } + } + } else { + if (dst_stride == 1) { + // GATHER + const Index packet_size = internal::unpacket_traits::size; + const Index vectorized_size = + (num_coeff_to_copy / packet_size) * packet_size; + for (Index i = 0; i < vectorized_size; i += packet_size) { + Packet p = internal::pgather( + src_data + src_index + i * src_stride, src_stride); + internal::pstoreu(dst_data + dst_index + i, p); + } + for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i] = src_data[src_index + i * src_stride]; + } + } else { + // RANDOM + for (Index i = 0; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i * dst_stride] = + src_data[src_index + i * src_stride]; + } + } + } + } +}; + +/** + * \class TensorBlockIO + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor block IO class. + * + * This class is responsible for copying data between a tensor and a tensor + * block. 
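+ *
+ * Typical usage goes through the TensorBlockReader / TensorBlockWriter
+ * subclasses below; an illustrative round trip (mirroring the unit test,
+ * template arguments omitted) looks like:
+ *
+ *   TensorBlockMapper block_mapper(dims, shape_type, target_size);
+ *   std::vector<float> buffer(block_mapper.block_dims_total_size());
+ *   TensorBlock block = block_mapper.GetBlockForIndex(0, buffer.data());
+ *   TensorBlockReader::Run(&block, src_data);   // tensor -> block
+ *   TensorBlockWriter::Run(block, dst_data);    // block -> tensor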
+ */ +template +class TensorBlockIO { + public: + typedef typename internal::TensorBlock + TensorBlock; + typedef typename internal::TensorBlockCopyOp + TensorBlockCopyOp; + + protected: + struct BlockIteratorState { + Index input_stride; + Index output_stride; + Index input_span; + Index output_span; + Index size; + Index count; + }; + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Copy( + const TensorBlock& block, Index first_coeff_index, + const array& tensor_to_block_dim_map, + const array& tensor_strides, const Scalar* src_data, + Scalar* dst_data) { + // Find the innermost tensor dimension whose size is not 1. This is the + // effective inner dim. If all dimensions are of size 1, then fallback to + // using the actual innermost dim to avoid out-of-bound access. + Index num_size_one_inner_dims = 0; + for (int i = 0; i < NumDims; ++i) { + const int dim = cond()(i, NumDims - i - 1); + if (block.block_sizes()[tensor_to_block_dim_map[dim]] != 1) { + num_size_one_inner_dims = i; + break; + } + } + // Calculate strides and dimensions. + const Index tensor_stride1_dim = cond()( + num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1); + const Index block_dim_for_tensor_stride1_dim = + NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim]; + size_t block_inner_dim_size = + NumDims == 0 ? 1 + : block.block_sizes()[block_dim_for_tensor_stride1_dim]; + for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) { + const int dim = cond()(i, NumDims - i - 1); + const Index block_stride = + block.block_strides()[tensor_to_block_dim_map[dim]]; + if (block_inner_dim_size == block_stride && + block_stride == tensor_strides[dim]) { + block_inner_dim_size *= + block.block_sizes()[tensor_to_block_dim_map[dim]]; + ++num_size_one_inner_dims; + } else { + break; + } + } + + Index inputIndex; + Index outputIndex; + Index input_stride; + Index output_stride; + + // Setup strides to read/write along the tensor's stride1 dimension. + if (BlockRead) { + inputIndex = first_coeff_index; + outputIndex = 0; + input_stride = NumDims == 0 ? 1 : tensor_strides[tensor_stride1_dim]; + output_stride = + NumDims == 0 + ? 1 + : block.block_strides()[block_dim_for_tensor_stride1_dim]; + } else { + inputIndex = 0; + outputIndex = first_coeff_index; + input_stride = + NumDims == 0 + ? 1 + : block.block_strides()[block_dim_for_tensor_stride1_dim]; + output_stride = NumDims == 0 ? 1 : tensor_strides[tensor_stride1_dim]; + } + + const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1; + array block_iter_state; + + // Initialize block iterator state. Squeeze away any dimension of size 1. 
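+    // Dimensions already folded into block_inner_dim_size above are not
+    // iterated here; each remaining dimension of size > 1 contributes a
+    // stride/span pair used below to advance and wrap inputIndex/outputIndex.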
+ int num_squeezed_dims = 0; + for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) { + const int dim = cond()(i + 1, NumDims - i - 2); + const Index size = block.block_sizes()[tensor_to_block_dim_map[dim]]; + if (size == 1) { + continue; + } + block_iter_state[num_squeezed_dims].size = size; + if (BlockRead) { + block_iter_state[num_squeezed_dims].input_stride = tensor_strides[dim]; + block_iter_state[num_squeezed_dims].output_stride = + block.block_strides()[tensor_to_block_dim_map[dim]]; + } else { + block_iter_state[num_squeezed_dims].input_stride = + block.block_strides()[tensor_to_block_dim_map[dim]]; + block_iter_state[num_squeezed_dims].output_stride = tensor_strides[dim]; + } + block_iter_state[num_squeezed_dims].input_span = + block_iter_state[num_squeezed_dims].input_stride * + (block_iter_state[num_squeezed_dims].size - 1); + block_iter_state[num_squeezed_dims].output_span = + block_iter_state[num_squeezed_dims].output_stride * + (block_iter_state[num_squeezed_dims].size - 1); + block_iter_state[num_squeezed_dims].count = 0; + ++num_squeezed_dims; + } + + // Iterate copying data from src to dst. + const Index block_total_size = + NumDims == 0 ? 1 : block.block_sizes().TotalSize(); + for (Index i = 0; i < block_total_size; i += block_inner_dim_size) { + TensorBlockCopyOp::Run(block_inner_dim_size, outputIndex, output_stride, + dst_data, inputIndex, input_stride, src_data); + // Update index. + for (int j = 0; j < num_squeezed_dims; ++j) { + if (++block_iter_state[j].count < block_iter_state[j].size) { + inputIndex += block_iter_state[j].input_stride; + outputIndex += block_iter_state[j].output_stride; + break; + } + block_iter_state[j].count = 0; + inputIndex -= block_iter_state[j].input_span; + outputIndex -= block_iter_state[j].output_span; + } + } + } +}; + +/** + * \class TensorBlockReader + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor block reader class. + * + * This class is responsible for reading a tensor block. + * + */ +template +class TensorBlockReader + : public TensorBlockIO { + public: + typedef typename internal::TensorBlock + TensorBlock; + typedef TensorBlockIO + Base; + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + TensorBlock* block, const Scalar* src_data) { + array tensor_to_block_dim_map; + for (int i = 0; i < NumDims; ++i) { + tensor_to_block_dim_map[i] = i; + } + Base::Copy(*block, block->first_coeff_index(), tensor_to_block_dim_map, + block->tensor_strides(), src_data, block->data()); + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + TensorBlock* block, Index first_coeff_index, + const array& tensor_to_block_dim_map, + const array& tensor_strides, const Scalar* src_data) { + Base::Copy(*block, first_coeff_index, tensor_to_block_dim_map, + tensor_strides, src_data, block->data()); + } +}; + +/** + * \class TensorBlockWriter + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor block writer class. + * + * This class is responsible for writing a tensor block. 
+ * + */ +template +class TensorBlockWriter : public TensorBlockIO { + public: + typedef typename internal::TensorBlock + TensorBlock; + typedef TensorBlockIO + Base; + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const TensorBlock& block, Scalar* dst_data) { + array tensor_to_block_dim_map; + for (int i = 0; i < NumDims; ++i) { + tensor_to_block_dim_map[i] = i; + } + Base::Copy(block, block.first_coeff_index(), tensor_to_block_dim_map, + block.tensor_strides(), block.data(), dst_data); + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const TensorBlock& block, Index first_coeff_index, + const array& tensor_to_block_dim_map, + const array& tensor_strides, Scalar* dst_data) { + Base::Copy(block, first_coeff_index, tensor_to_block_dim_map, + tensor_strides, block.data(), dst_data); + } +}; + +/** + * \class TensorBlockCwiseBinaryOp + * \ingroup CXX11_Tensor_Module + * + * \brief Carries out a cwise binary op on a number of coefficients. + * + * This class reads strided inputs from left and right operands, and writes the + * result of the cwise binary op to the strided output array. + * + */ +template +struct TensorBlockCwiseBinaryOp { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const BinaryFunctor& functor, const Index num_coeff, + const Index output_index, const Index output_stride, + OutputScalar* output_data, const Index left_index, + const Index left_stride, const LeftScalar* left_data, + const Index right_index, const Index right_stride, + const RightScalar* right_data) { + for (Index i = 0; i < num_coeff; ++i) { + output_data[output_index + i * output_stride] = + functor(left_data[left_index + i * left_stride], + right_data[right_index + i * right_stride]); + } + } +}; + +template <> +struct TensorBlockCwiseBinaryOp { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const BinaryFunctor& functor, const Index num_coeff, + const Index output_index, const Index output_stride, + OutputScalar* output_data, const Index left_index, + const Index left_stride, const LeftScalar* left_data, + const Index right_index, const Index right_stride, + const RightScalar* right_data) { + EIGEN_STATIC_ASSERT(functor_traits::PacketAccess, + YOU_MADE_A_PROGRAMMING_MISTAKE); + typedef typename packet_traits::type OutputPacket; + typedef typename packet_traits::type LeftPacket; + typedef typename packet_traits::type RightPacket; + const Index packet_size = unpacket_traits::size; + EIGEN_STATIC_ASSERT(unpacket_traits::size == packet_size, + YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(unpacket_traits::size == packet_size, + YOU_MADE_A_PROGRAMMING_MISTAKE); + const Index vectorized_size = (num_coeff / packet_size) * packet_size; + if (output_stride != 1 || left_stride != 1 || right_stride != 1) { + TensorBlockCwiseBinaryOp::Run( + functor, num_coeff, output_index, output_stride, output_data, + left_index, left_stride, left_data, right_index, right_stride, + right_data); + return; + } + // Vectorization for the most common case. 
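+    // All three strides are 1 here, so both operands can be loaded with
+    // unaligned packet loads and combined via the functor's packetOp(); the
+    // scalar loop below handles the (num_coeff % packet_size) tail.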
+ for (Index i = 0; i < vectorized_size; i += packet_size) { + LeftPacket l = internal::ploadu(left_data + left_index + i); + RightPacket r = + internal::ploadu(right_data + right_index + i); + OutputPacket p = functor.packetOp(l, r); + internal::pstoreu( + output_data + output_index + i, p); + } + for (Index i = vectorized_size; i < num_coeff; ++i) { + output_data[output_index + i] = + functor(left_data[left_index + i], right_data[right_index + i]); + } + } +}; + +/** + * \class TensorBlockCwiseBinaryIO + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor block IO class for carrying out cwise binary ops. + * + * This class carries out the binary op on given blocks. + * + */ +template +struct TensorBlockCwiseBinaryIO { + typedef typename internal::TensorBlock::Dimensions Dimensions; + typedef internal::TensorBlockCwiseBinaryOp< + functor_traits::PacketAccess> + TensorBlockCwiseBinaryOp; + + struct BlockIteratorState { + Index output_stride, output_span; + Index left_stride, left_span; + Index right_stride, right_span; + Index size, count; + }; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const BinaryFunctor& functor, const Dimensions& block_sizes, + const Dimensions& block_strides, OutputScalar* output_data, + const array& left_strides, const LeftScalar* left_data, + const array& right_strides, + const RightScalar* right_data) { + // Find the innermost dimension whose size is not 1. This is the effective + // inner dim. If all dimensions are of size 1, fallback to using the actual + // innermost dim to avoid out-of-bound access. + int num_size_one_inner_dims = 0; + for (int i = 0; i < NumDims; ++i) { + const int dim = cond()(i, NumDims - i - 1); + if (block_sizes[dim] != 1) { + num_size_one_inner_dims = i; + break; + } + } + // Calculate strides and dimensions. + const int inner_dim = + NumDims == 0 ? 1 + : cond()(num_size_one_inner_dims, + NumDims - num_size_one_inner_dims - 1); + Index inner_dim_size = NumDims == 0 ? 1 : block_sizes[inner_dim]; + for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) { + const int dim = cond()(i, NumDims - i - 1); + // Merge multiple inner dims into one for larger inner dim size (i.e. + // fewer calls to TensorBlockCwiseBinaryOp::Run()). + if (inner_dim_size == block_strides[dim] && + block_strides[dim] == left_strides[dim] && + block_strides[dim] == right_strides[dim]) { + inner_dim_size *= block_sizes[dim]; + ++num_size_one_inner_dims; + } else { + break; + } + } + + Index output_index = 0, left_index = 0, right_index = 0; + const Index output_stride = NumDims == 0 ? 1 : block_strides[inner_dim]; + const Index left_stride = NumDims == 0 ? 1 : left_strides[inner_dim]; + const Index right_stride = NumDims == 0 ? 1 : right_strides[inner_dim]; + + const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1; + array block_iter_state; + + // Initialize block iterator state. Squeeze away any dimension of size 1. 
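+    // As in TensorBlockIO, dimensions merged into inner_dim_size are skipped;
+    // every remaining dimension of size > 1 gets a stride/span entry used to
+    // advance and wrap the output/left/right indices in the loop below.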
+ int num_squeezed_dims = 0; + for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) { + const int dim = cond()(i + 1, NumDims - i - 2); + const Index size = block_sizes[dim]; + if (size == 1) { + continue; + } + auto& state = block_iter_state[num_squeezed_dims]; + state.output_stride = block_strides[dim]; + state.left_stride = left_strides[dim]; + state.right_stride = right_strides[dim]; + state.size = size; + state.output_span = state.output_stride * (size - 1); + state.left_span = state.left_stride * (size - 1); + state.right_span = state.right_stride * (size - 1); + state.count = 0; + ++num_squeezed_dims; + } + + // Compute cwise binary op. + const Index block_total_size = NumDims == 0 ? 1 : block_sizes.TotalSize(); + for (Index i = 0; i < block_total_size; i += inner_dim_size) { + TensorBlockCwiseBinaryOp::Run(functor, inner_dim_size, output_index, + output_stride, output_data, left_index, + left_stride, left_data, right_index, + right_stride, right_data); + // Update index. + for (int j = 0; j < num_squeezed_dims; ++j) { + auto& state = block_iter_state[j]; + if (++state.count < state.size) { + output_index += state.output_stride; + left_index += state.left_stride; + right_index += state.right_stride; + break; + } + state.count = 0; + output_index -= state.output_span; + left_index -= state.left_span; + right_index -= state.right_span; + } + } + } +}; + /** * \class TensorBlockMapper * \ingroup CXX11_Tensor_Module @@ -90,7 +622,7 @@ class TensorBlock { * * This class is responsible for iterating over the blocks of a tensor. */ -template +template class TensorBlockMapper { public: typedef typename internal::TensorBlock @@ -190,10 +722,6 @@ class TensorBlockMapper { } private: - static int InnerDimIndex(Index i) { - return Layout == static_cast(ColMajor) ? i : NumDims - i - 1; - } - static Dimensions BlockDimensions(const Dimensions& tensor_dims, const TensorBlockShapeType block_shape, size_t min_target_size) { @@ -228,7 +756,7 @@ class TensorBlockMapper { // Add any un-allocated coefficients to inner dimension(s). Index total_size = block_dim_sizes.TotalSize(); for (int i = 0; i < NumDims; ++i) { - const int dim = InnerDimIndex(i); + const int dim = cond()(i, NumDims - i - 1); if (block_dim_sizes[dim] < tensor_dims[dim]) { const Index total_size_other_dims = total_size / block_dim_sizes[dim]; @@ -245,7 +773,7 @@ class TensorBlockMapper { } else if (block_shape == TensorBlockShapeType::kSkewedInnerDims) { Index coeff_to_allocate = min_target_size; for (int i = 0; i < NumDims; ++i) { - const int dim = InnerDimIndex(i); + const int dim = cond()(i, NumDims - i - 1); block_dim_sizes[dim] = numext::mini(coeff_to_allocate, tensor_dims[dim]); coeff_to_allocate = @@ -284,7 +812,7 @@ class TensorBlockMapper { * processed together. 
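 *
 * For example (an illustrative sketch mirroring the unit test; template
 * arguments omitted):
 *
 *   TensorSliceBlockMapper mapper(tensor_dims, slice_offsets, slice_extents,
 *                                 block_sizes, DimensionList());
 *   TensorBlock block = mapper.GetBlockForIndex(block_index, data);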
* */ -template +template class TensorSliceBlockMapper { public: typedef typename internal::TensorBlock @@ -360,7 +888,7 @@ class TensorSliceBlockMapper { prev_dim = curr_dim; } } else { - for (int i = 0; i < static_cast(NumDims) - 1; ++i) { + for (int i = 0; i < NumDims - 1; ++i) { const Index idx = block_index / m_block_strides[i]; coords[i] = m_tensor_slice_offsets[i] + idx * m_block_dim_sizes[i]; sizes[i] = numext::mini( diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp index 66e61aef1..15f2392a3 100644 --- a/unsupported/test/cxx11_tensor_block_access.cpp +++ b/unsupported/test/cxx11_tensor_block_access.cpp @@ -19,11 +19,33 @@ using Eigen::Index; using Eigen::RowMajor; using Eigen::ColMajor; +using internal::TensorBlockShapeType; + template static const T& choose(int layout, const T& col, const T& row) { return layout == ColMajor ? col : row; } +static const TensorBlockShapeType RandomShape() { + return internal::random() + ? internal::TensorBlockShapeType::kUniformAllDims + : internal::TensorBlockShapeType::kSkewedInnerDims; +} + +template +static std::size_t RandomTargetSize(const DSizes& dims) { + return internal::random(1, dims.TotalSize()); +} + +template +static T* GenerateRandomData(const Index& size) { + T* data = new T[size]; + for (int i = 0; i < size; ++i) { + data[i] = internal::random(); + } + return data; +} + template static void test_block_mapper_sanity() { @@ -75,9 +97,7 @@ static void test_block_mapper_sanity() template static void UpdateCoeffSet( const internal::TensorBlock& block, - Index first_coeff_index, - int dim_index, - std::set* visited_coeffs) { + Index first_coeff_index, int dim_index, std::set* visited_coeffs) { const DSizes block_sizes = block.block_sizes(); const DSizes tensor_strides = block.tensor_strides(); @@ -103,18 +123,11 @@ static void test_block_mapper_maps_every_element() DSizes dims(5, 7, 11, 17); - auto total_coeffs = static_cast(dims.TotalSize()); - // Keep track of elements indices available via block access. std::set coeff_set; // Try different combinations of block types and sizes. - auto block_shape_type = - internal::random() - ? internal::TensorBlockShapeType::kUniformAllDims - : internal::TensorBlockShapeType::kSkewedInnerDims; - auto block_target_size = internal::random(1, total_coeffs); - TensorBlockMapper block_mapper(dims, block_shape_type, block_target_size); + TensorBlockMapper block_mapper(dims, RandomShape(), RandomTargetSize(dims)); for (int i = 0; i < block_mapper.total_block_count(); ++i) { TensorBlock block = block_mapper.GetBlockForIndex(i, nullptr); @@ -124,6 +137,7 @@ static void test_block_mapper_maps_every_element() // Verify that every coefficient in the original Tensor is accessible through // TensorBlock only once. + auto total_coeffs = static_cast(dims.TotalSize()); VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs); VERIFY_IS_EQUAL(*coeff_set.begin(), static_cast(0)); VERIFY_IS_EQUAL(*coeff_set.rbegin(), static_cast(total_coeffs - 1)); @@ -146,13 +160,6 @@ static void test_slice_block_mapper_maps_every_element() auto total_coeffs = static_cast(tensor_slice_extents.TotalSize()); - // Try different combinations of block types and sizes. - auto block_shape_type = - internal::random() - ? internal::TensorBlockShapeType::kUniformAllDims - : internal::TensorBlockShapeType::kSkewedInnerDims; - auto block_target_size = internal::random(1, total_coeffs); - // Pick a random dimension sizes for the tensor blocks. 
DSizes block_sizes; for (int i = 0; i < 4; ++i) { @@ -164,7 +171,7 @@ static void test_slice_block_mapper_maps_every_element() DimensionList()); for (int i = 0; i < block_mapper.total_block_count(); ++i) { - TensorBlock block = block_mapper.GetBlockForIndex(i, NULL); + TensorBlock block = block_mapper.GetBlockForIndex(i, nullptr); UpdateCoeffSet(block, block.first_coeff_index(), choose(Layout, 3, 0), &coeff_set); } @@ -172,11 +179,745 @@ static void test_slice_block_mapper_maps_every_element() VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs); } -EIGEN_DECLARE_TEST(cxx11_tensor_assign) { - CALL_SUBTEST(test_block_mapper_sanity()); - CALL_SUBTEST(test_block_mapper_sanity()); - CALL_SUBTEST(test_block_mapper_maps_every_element()); - CALL_SUBTEST(test_block_mapper_maps_every_element()); - CALL_SUBTEST(test_slice_block_mapper_maps_every_element()); - CALL_SUBTEST(test_slice_block_mapper_maps_every_element()); +template +static void test_block_io_copy_data_from_source_to_target() +{ + using T = float; + + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockMapper TensorBlockMapper; + + typedef internal::TensorBlockReader + TensorBlockReader; + typedef internal::TensorBlockWriter + TensorBlockWriter; + + typedef std::vector> DataVector; + + DSizes input_tensor_dims(5, 7, 11, 17, 3); + const auto input_tensor_size = input_tensor_dims.TotalSize(); + DataVector input_data(input_tensor_size, 0); + for (int i = 0; i < input_tensor_size; ++i) { + input_data[i] = internal::random(); + } + + DataVector output_data(input_tensor_size, 0); + + TensorBlockMapper block_mapper(input_tensor_dims, RandomShape(), + RandomTargetSize(input_tensor_dims)); + + DataVector block_data(block_mapper.block_dims_total_size(), 0); + for (int i = 0; i < block_mapper.total_block_count(); ++i) { + TensorBlock block = block_mapper.GetBlockForIndex(i, block_data.data()); + TensorBlockReader::Run(&block, input_data.data()); + TensorBlockWriter::Run(block, output_data.data()); + } + + for (int i = 0; i < input_tensor_size; ++i) { + VERIFY_IS_EQUAL(input_data[i], output_data[i]); + } } + +template +static int GetInputIndex(Index output_index, + const array& output_to_input_dim_map, + const array& input_strides, + const array& output_strides) { + int input_index = 0; + if (Layout == ColMajor) { + for (int i = NumDims - 1; i > 0; --i) { + const int idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[0]]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const int idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[NumDims - 1]]; + } +} + +template +static array ComputeStrides( + const array& sizes) { + array strides; + if (Layout == ColMajor) { + strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + strides[i] = strides[i - 1] * sizes[i - 1]; + } + } else { + strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + strides[i] = strides[i + 1] * sizes[i + 1]; + } + } + return strides; +} + +template +static void test_block_io_copy_using_reordered_dimensions() { + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockMapper + TensorBlockMapper; + + typedef internal::TensorBlockReader + TensorBlockReader; + typedef 
internal::TensorBlockWriter + TensorBlockWriter; + + DSizes input_tensor_dims(5, 7, 11, 17, 3); + const auto input_tensor_size = input_tensor_dims.TotalSize(); + + // Create a random input tensor. + auto* input_data = GenerateRandomData(input_tensor_size); + + // Create a random dimension re-ordering/shuffle. + std::vector shuffle = {0, 1, 2, 3, 4}; + std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937()); + + DSizes output_tensor_dims; + array input_to_output_dim_map; + array output_to_input_dim_map; + for (Index i = 0; i < 5; ++i) { + output_tensor_dims[shuffle[i]] = input_tensor_dims[i]; + input_to_output_dim_map[i] = shuffle[i]; + output_to_input_dim_map[shuffle[i]] = i; + } + + // Random block shape and size. + TensorBlockMapper block_mapper(output_tensor_dims, RandomShape(), + RandomTargetSize(input_tensor_dims)); + + auto* block_data = new float[block_mapper.block_dims_total_size()]; + auto* output_data = new float[input_tensor_size]; + + array input_tensor_strides = + ComputeStrides(input_tensor_dims); + array output_tensor_strides = + ComputeStrides(output_tensor_dims); + + for (Index i = 0; i < block_mapper.total_block_count(); ++i) { + TensorBlock block = block_mapper.GetBlockForIndex(i, block_data); + const Index first_coeff_index = GetInputIndex( + block.first_coeff_index(), output_to_input_dim_map, + input_tensor_strides, output_tensor_strides); + TensorBlockReader::Run(&block, first_coeff_index, input_to_output_dim_map, + input_tensor_strides, input_data); + TensorBlockWriter::Run(block, first_coeff_index, input_to_output_dim_map, + input_tensor_strides, output_data); + } + + for (int i = 0; i < input_tensor_size; ++i) { + VERIFY_IS_EQUAL(input_data[i], output_data[i]); + } + + delete[] input_data; + delete[] block_data; + delete[] output_data; +} + +template +static void test_block_io_zero_stride() +{ + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockReader + TensorBlockReader; + typedef internal::TensorBlockWriter + TensorBlockWriter; + + DSizes input_tensor_dims(1, 2, 1, 3, 1); + const auto input_tensor_size = input_tensor_dims.TotalSize(); + + // Create a random input tensor. + auto* input_data = GenerateRandomData(input_tensor_size); + + DSizes output_tensor_dims(3, 2, 3, 3, 2); + + DSizes input_tensor_strides( + ComputeStrides(input_tensor_dims)); + DSizes output_tensor_strides( + ComputeStrides(output_tensor_dims)); + + DSizes input_tensor_strides_with_zeros(input_tensor_strides); + input_tensor_strides_with_zeros[0] = 0; + input_tensor_strides_with_zeros[2] = 0; + input_tensor_strides_with_zeros[4] = 0; + + // Verify that data was correctly read/written from/into the block. 
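+  // A zero stride makes every index along that dimension map to the same
+  // input coefficient, so the size-1 input dims are effectively broadcast to
+  // the larger output dims; hence the checks below index input_data modulo
+  // the input dimensions.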
+ const auto verify_is_equal = [&](const float* output_data) { + for (int i = 0; i < output_tensor_dims[0]; ++i) { + for (int j = 0; j < output_tensor_dims[1]; ++j) { + for (int k = 0; k < output_tensor_dims[2]; ++k) { + for (int l = 0; l < output_tensor_dims[3]; ++l) { + for (int m = 0; m < output_tensor_dims[4]; ++m) { + const Index output_offset = + i * output_tensor_strides[0] + j * output_tensor_strides[1] + + k * output_tensor_strides[2] + l * output_tensor_strides[3] + + m * output_tensor_strides[4]; + const Index input_offset = + i % input_tensor_dims[0] * input_tensor_strides[0] + + j % input_tensor_dims[1] * input_tensor_strides[1] + + k % input_tensor_dims[2] * input_tensor_strides[2] + + l % input_tensor_dims[3] * input_tensor_strides[3] + + m % input_tensor_dims[4] * input_tensor_strides[4]; + VERIFY_IS_EQUAL(output_data[output_offset], + input_data[input_offset]); + } + } + } + } + } + }; + + { + auto* output_data = new float[output_tensor_dims.TotalSize()]; + TensorBlock read_block(0, output_tensor_dims, output_tensor_strides, + input_tensor_strides_with_zeros, output_data); + TensorBlockReader::Run(&read_block, input_data); + verify_is_equal(output_data); + delete[] output_data; + } + + { + auto* output_data = new float[output_tensor_dims.TotalSize()]; + TensorBlock write_block(0, output_tensor_dims, + input_tensor_strides_with_zeros, + output_tensor_strides, input_data); + TensorBlockWriter::Run(write_block, output_data); + verify_is_equal(output_data); + delete[] output_data; + } + + delete[] input_data; +} + +template +static void test_block_io_squeeze_ones() { + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockReader + TensorBlockReader; + typedef internal::TensorBlockWriter + TensorBlockWriter; + + // Total size > 1. + { + DSizes block_sizes(1, 2, 1, 2, 1); + const auto total_size = block_sizes.TotalSize(); + + // Create a random input tensor. + auto* input_data = GenerateRandomData(total_size); + DSizes strides(ComputeStrides(block_sizes)); + + { + auto* output_data = new float[block_sizes.TotalSize()]; + TensorBlock read_block(0, block_sizes, strides, strides, output_data); + TensorBlockReader::Run(&read_block, input_data); + for (int i = 0; i < total_size; ++i) { + VERIFY_IS_EQUAL(output_data[i], input_data[i]); + } + delete[] output_data; + } + + { + auto* output_data = new float[block_sizes.TotalSize()]; + TensorBlock write_block(0, block_sizes, strides, strides, input_data); + TensorBlockWriter::Run(write_block, output_data); + for (int i = 0; i < total_size; ++i) { + VERIFY_IS_EQUAL(output_data[i], input_data[i]); + } + delete[] output_data; + } + } + + // Total size == 1. + { + DSizes block_sizes(1, 1, 1, 1, 1); + const auto total_size = block_sizes.TotalSize(); + + // Create a random input tensor. 
+ auto* input_data = GenerateRandomData(total_size); + DSizes strides(ComputeStrides(block_sizes)); + + { + auto* output_data = new float[block_sizes.TotalSize()]; + TensorBlock read_block(0, block_sizes, strides, strides, output_data); + TensorBlockReader::Run(&read_block, input_data); + for (int i = 0; i < total_size; ++i) { + VERIFY_IS_EQUAL(output_data[i], input_data[i]); + } + delete[] output_data; + } + + { + auto* output_data = new float[block_sizes.TotalSize()]; + TensorBlock write_block(0, block_sizes, strides, strides, input_data); + TensorBlockWriter::Run(write_block, output_data); + for (int i = 0; i < total_size; ++i) { + VERIFY_IS_EQUAL(output_data[i], input_data[i]); + } + delete[] output_data; + } + } +} + +template +static void test_block_cwise_binary_io_basic() { + typedef internal::scalar_sum_op BinaryFunctor; + typedef internal::TensorBlockCwiseBinaryIO + TensorBlockCwiseBinaryIO; + + DSizes block_sizes(2, 3, 5, 7, 11); + DSizes strides(ComputeStrides(block_sizes)); + + const auto total_size = block_sizes.TotalSize(); + + // Create a random input tensors. + auto* left_data = GenerateRandomData(total_size); + auto* right_data = GenerateRandomData(total_size); + + auto* output_data = new float[total_size]; + BinaryFunctor functor; + TensorBlockCwiseBinaryIO::Run(functor, block_sizes, strides, output_data, + strides, left_data, strides, right_data); + for (int i = 0; i < total_size; ++i) { + VERIFY_IS_EQUAL(output_data[i], functor(left_data[i], right_data[i])); + } + + delete[] left_data; + delete[] right_data; + delete[] output_data; +} + +template +static void test_block_cwise_binary_io_squeeze_ones() { + typedef internal::scalar_sum_op BinaryFunctor; + typedef internal::TensorBlockCwiseBinaryIO + TensorBlockCwiseBinaryIO; + + DSizes block_sizes(1, 2, 1, 3, 1); + DSizes strides(ComputeStrides(block_sizes)); + + const auto total_size = block_sizes.TotalSize(); + + // Create a random input tensors. + auto* left_data = GenerateRandomData(total_size); + auto* right_data = GenerateRandomData(total_size); + + auto* output_data = new float[total_size]; + BinaryFunctor functor; + TensorBlockCwiseBinaryIO::Run(functor, block_sizes, strides, output_data, + strides, left_data, strides, right_data); + for (int i = 0; i < total_size; ++i) { + VERIFY_IS_EQUAL(output_data[i], functor(left_data[i], right_data[i])); + } + + delete[] left_data; + delete[] right_data; + delete[] output_data; +} + +template +static void test_block_cwise_binary_io_zero_strides() { + typedef internal::scalar_sum_op BinaryFunctor; + typedef internal::TensorBlockCwiseBinaryIO + TensorBlockCwiseBinaryIO; + + DSizes left_sizes(1, 3, 1, 7, 1); + DSizes left_strides(ComputeStrides(left_sizes)); + left_strides[0] = 0; + left_strides[2] = 0; + left_strides[4] = 0; + + DSizes right_sizes(2, 1, 5, 1, 11); + DSizes right_strides(ComputeStrides(right_sizes)); + right_strides[1] = 0; + right_strides[3] = 0; + + // Generate random data. 
+ auto* left_data = GenerateRandomData(left_sizes.TotalSize()); + auto* right_data = GenerateRandomData(right_sizes.TotalSize()); + + DSizes output_sizes(2, 3, 5, 7, 11); + DSizes output_strides(ComputeStrides(output_sizes)); + + const auto output_total_size = output_sizes.TotalSize(); + auto* output_data = new float[output_total_size]; + + BinaryFunctor functor; + TensorBlockCwiseBinaryIO::Run(functor, output_sizes, output_strides, + output_data, left_strides, left_data, + right_strides, right_data); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + for (int m = 0; m < 11; ++m) { + Index output_index = i * output_strides[0] + j * output_strides[1] + + k * output_strides[2] + l * output_strides[3] + + m * output_strides[4]; + Index left_index = i * left_strides[0] + j * left_strides[1] + + k * left_strides[2] + l * left_strides[3] + + m * left_strides[4]; + Index right_index = i * right_strides[0] + j * right_strides[1] + + k * right_strides[2] + l * right_strides[3] + + m * right_strides[4]; + VERIFY_IS_EQUAL( + output_data[output_index], + functor(left_data[left_index], right_data[right_index])); + } + } + } + } + } + + delete[] left_data; + delete[] right_data; + delete[] output_data; +} + +template +static void test_uniform_block_shape() +{ + using T = int; + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockMapper TensorBlockMapper; + + { + // Test shape 'UniformAllDims' with uniform 'max_coeff count'. + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 5 * 5 * 5 * 5 * 5; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + for (int i = 0; i < 5; ++i) { + VERIFY_IS_EQUAL(5, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with larger 'max_coeff count' which spills + // partially into first inner-most dimension. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 7 * 5 * 5 * 5 * 5; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(5, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 5 * 5 * 5 * 5 * 6; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(6, block.block_sizes()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(5, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with larger 'max_coeff count' which spills + // fully into first inner-most dimension. 
+ if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 11 * 5 * 5 * 5 * 5; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(11, block.block_sizes()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(5, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 5 * 5 * 5 * 5 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(5, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with larger 'max_coeff count' which spills + // fully into first few inner-most dimensions. + if (Layout == ColMajor) { + DSizes dims(7, 5, 6, 17, 7); + const size_t max_coeff_count = 7 * 5 * 6 * 7 * 5; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[0]); + VERIFY_IS_EQUAL(5, block.block_sizes()[1]); + VERIFY_IS_EQUAL(6, block.block_sizes()[2]); + VERIFY_IS_EQUAL(7, block.block_sizes()[3]); + VERIFY_IS_EQUAL(5, block.block_sizes()[4]); + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(7, 5, 6, 9, 7); + const size_t max_coeff_count = 5 * 5 * 5 * 6 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + VERIFY_IS_EQUAL(6, block.block_sizes()[3]); + VERIFY_IS_EQUAL(5, block.block_sizes()[2]); + VERIFY_IS_EQUAL(5, block.block_sizes()[1]); + VERIFY_IS_EQUAL(5, block.block_sizes()[0]); + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with full allocation to all dims. 
+ if (Layout == ColMajor) { + DSizes dims(7, 5, 6, 17, 7); + const size_t max_coeff_count = 7 * 5 * 6 * 17 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[0]); + VERIFY_IS_EQUAL(5, block.block_sizes()[1]); + VERIFY_IS_EQUAL(6, block.block_sizes()[2]); + VERIFY_IS_EQUAL(17, block.block_sizes()[3]); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(7, 5, 6, 9, 7); + const size_t max_coeff_count = 7 * 5 * 6 * 9 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kUniformAllDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + VERIFY_IS_EQUAL(9, block.block_sizes()[3]); + VERIFY_IS_EQUAL(6, block.block_sizes()[2]); + VERIFY_IS_EQUAL(5, block.block_sizes()[1]); + VERIFY_IS_EQUAL(7, block.block_sizes()[0]); + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } +} + +template +static void test_skewed_inner_dim_block_shape() +{ + using T = int; + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockMapper TensorBlockMapper; + + // Test shape 'SkewedInnerDims' with partial allocation to inner-most dim. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 10 * 1 * 1 * 1 * 1; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(10, block.block_sizes()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 1 * 1 * 1 * 1 * 6; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(6, block.block_sizes()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to inner-most dim. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 11 * 1 * 1 * 1 * 1; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(11, block.block_sizes()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 1 * 1 * 1 * 1 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to inner-most dim, + // and partial allocation to second inner-dim. 
+ if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 11 * 3 * 1 * 1 * 1; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(11, block.block_sizes()[0]); + VERIFY_IS_EQUAL(3, block.block_sizes()[1]); + for (int i = 2; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 1 * 1 * 1 * 15 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + VERIFY_IS_EQUAL(15, block.block_sizes()[3]); + for (int i = 2; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to inner-most dim, + // and partial allocation to third inner-dim. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 11 * 5 * 5 * 1 * 1; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(11, block.block_sizes()[0]); + VERIFY_IS_EQUAL(5, block.block_sizes()[1]); + VERIFY_IS_EQUAL(5, block.block_sizes()[2]); + for (int i = 3; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 1 * 1 * 5 * 17 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + VERIFY_IS_EQUAL(17, block.block_sizes()[3]); + VERIFY_IS_EQUAL(5, block.block_sizes()[2]); + for (int i = 1; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.block_sizes()[i]); + } + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to all dims. 
+ if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(11, block.block_sizes()[0]); + VERIFY_IS_EQUAL(5, block.block_sizes()[1]); + VERIFY_IS_EQUAL(6, block.block_sizes()[2]); + VERIFY_IS_EQUAL(17, block.block_sizes()[3]); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const size_t max_coeff_count = 11 * 5 * 6 * 17 * 7; + TensorBlockMapper block_mapper(dims, TensorBlockShapeType::kSkewedInnerDims, + max_coeff_count); + TensorBlock block = block_mapper.GetBlockForIndex(0, nullptr); + VERIFY_IS_EQUAL(7, block.block_sizes()[4]); + VERIFY_IS_EQUAL(17, block.block_sizes()[3]); + VERIFY_IS_EQUAL(6, block.block_sizes()[2]); + VERIFY_IS_EQUAL(5, block.block_sizes()[1]); + VERIFY_IS_EQUAL(11, block.block_sizes()[0]); + VERIFY(block.block_sizes().TotalSize() <= max_coeff_count); + } +} + +template +static void test_empty_dims(const internal::TensorBlockShapeType block_shape) +{ + using T = int; + + // Test blocking of tensors with zero dimensions: + // - we must not crash on asserts and divisions by zero + // - we must not return block with zero dimensions + // (recipe for overflows/underflows, divisions by zero and NaNs later) + // - total block count must be zero + { + typedef internal::TensorBlockMapper TensorBlockMapper; + DSizes dims(0); + for (int max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) { + TensorBlockMapper block_mapper(dims, block_shape, max_coeff_count); + VERIFY_IS_EQUAL(block_mapper.total_block_count(), 0); + VERIFY(block_mapper.block_dims_total_size() >= 1); + } + } + + { + typedef internal::TensorBlockMapper TensorBlockMapper; + for (int dim1 = 0; dim1 < 3; ++dim1) { + for (int dim2 = 0; dim2 < 3; ++dim2) { + DSizes dims(dim1, dim2); + for (int max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) { + TensorBlockMapper block_mapper(dims, block_shape, max_coeff_count); + if (dim1 * dim2 == 0) { + VERIFY_IS_EQUAL(block_mapper.total_block_count(), 0); + } + VERIFY(block_mapper.block_dims_total_size() >= 1); + } + } + } + } +} + +#define CALL_SUBTEST_LAYOUTS(NAME) \ + CALL_SUBTEST(NAME()); \ + CALL_SUBTEST(NAME()) + +#define CALL_SUBTEST_LAYOUTS_WITH_ARG(NAME, ARG) \ + CALL_SUBTEST(NAME(ARG)); \ + CALL_SUBTEST(NAME(ARG)) + +EIGEN_DECLARE_TEST(cxx11_tensor_assign) { + CALL_SUBTEST_LAYOUTS(test_block_mapper_sanity); + CALL_SUBTEST_LAYOUTS(test_block_mapper_maps_every_element); + CALL_SUBTEST_LAYOUTS(test_slice_block_mapper_maps_every_element); + CALL_SUBTEST_LAYOUTS(test_block_io_copy_data_from_source_to_target); + CALL_SUBTEST_LAYOUTS(test_block_io_copy_using_reordered_dimensions); + CALL_SUBTEST_LAYOUTS(test_block_io_zero_stride); + CALL_SUBTEST_LAYOUTS(test_block_io_squeeze_ones); + CALL_SUBTEST_LAYOUTS(test_block_cwise_binary_io_basic); + CALL_SUBTEST_LAYOUTS(test_block_cwise_binary_io_squeeze_ones); + CALL_SUBTEST_LAYOUTS(test_block_cwise_binary_io_zero_strides); + CALL_SUBTEST_LAYOUTS(test_uniform_block_shape); + CALL_SUBTEST_LAYOUTS(test_skewed_inner_dim_block_shape); + + CALL_SUBTEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kUniformAllDims); + CALL_SUBTEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kSkewedInnerDims); +} + +#undef CALL_SUBTEST_LAYOUTS +#undef CALL_SUBTEST_LAYOUTS_WITH_ARG \ 
No newline at end of file From 6913221c43c6ad41b1fbfc0d263d2764abd11ad2 Mon Sep 17 00:00:00 2001 From: Eugene Zhulenev Date: Wed, 25 Jul 2018 13:51:10 -0700 Subject: [PATCH 2/3] Add tiled evaluation support to TensorExecutor --- unsupported/Eigen/CXX11/Tensor | 2 +- .../Eigen/CXX11/src/Tensor/TensorAssign.h | 44 ++- .../Eigen/CXX11/src/Tensor/TensorBlock.h | 113 +++++++- .../CXX11/src/Tensor/TensorBroadcasting.h | 2 + .../Eigen/CXX11/src/Tensor/TensorChipping.h | 2 + .../CXX11/src/Tensor/TensorConcatenation.h | 2 + .../CXX11/src/Tensor/TensorContraction.h | 1 + .../Eigen/CXX11/src/Tensor/TensorConversion.h | 1 + .../CXX11/src/Tensor/TensorConvolution.h | 10 +- .../CXX11/src/Tensor/TensorConvolutionSycl.h | 1 + .../Eigen/CXX11/src/Tensor/TensorDimensions.h | 16 ++ .../Eigen/CXX11/src/Tensor/TensorEvalTo.h | 1 + .../Eigen/CXX11/src/Tensor/TensorEvaluator.h | 101 ++++++- .../Eigen/CXX11/src/Tensor/TensorExecutor.h | 266 +++++++++++++----- .../Eigen/CXX11/src/Tensor/TensorFixedSize.h | 2 + .../Eigen/CXX11/src/Tensor/TensorForcedEval.h | 1 + .../src/Tensor/TensorForwardDeclarations.h | 8 +- .../Eigen/CXX11/src/Tensor/TensorImagePatch.h | 1 + .../Eigen/CXX11/src/Tensor/TensorLayoutSwap.h | 2 + .../Eigen/CXX11/src/Tensor/TensorMorphing.h | 8 +- .../Eigen/CXX11/src/Tensor/TensorPadding.h | 1 + .../Eigen/CXX11/src/Tensor/TensorPatch.h | 1 + .../Eigen/CXX11/src/Tensor/TensorReduction.h | 1 + .../Eigen/CXX11/src/Tensor/TensorRef.h | 3 + .../Eigen/CXX11/src/Tensor/TensorReverse.h | 2 + .../Eigen/CXX11/src/Tensor/TensorShuffling.h | 2 + .../Eigen/CXX11/src/Tensor/TensorStriding.h | 2 + .../Eigen/CXX11/src/Tensor/TensorTrace.h | 7 +- unsupported/test/CMakeLists.txt | 1 + .../test/cxx11_tensor_block_access.cpp | 2 +- .../cxx11_tensor_complex_cwise_ops_gpu.cu | 2 +- unsupported/test/cxx11_tensor_complex_gpu.cu | 2 +- unsupported/test/cxx11_tensor_executor.cpp | 81 ++++++ 33 files changed, 598 insertions(+), 93 deletions(-) create mode 100644 unsupported/test/cxx11_tensor_executor.cpp diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor index 397d55f76..47514703a 100644 --- a/unsupported/Eigen/CXX11/Tensor +++ b/unsupported/Eigen/CXX11/Tensor @@ -112,13 +112,13 @@ typedef unsigned __int64 uint64_t; #include "src/Tensor/TensorGlobalFunctions.h" #include "src/Tensor/TensorBase.h" +#include "src/Tensor/TensorBlock.h" #include "src/Tensor/TensorEvaluator.h" #include "src/Tensor/TensorExpr.h" #include "src/Tensor/TensorReduction.h" #include "src/Tensor/TensorReductionGpu.h" #include "src/Tensor/TensorArgMax.h" -#include "src/Tensor/TensorBlock.h" #include "src/Tensor/TensorConcatenation.h" #include "src/Tensor/TensorContractionMapper.h" #include "src/Tensor/TensorContractionBlocking.h" diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h index 027305586..199ddb123 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h @@ -68,6 +68,8 @@ class TensorAssignOp : public TensorBase typedef typename Eigen::internal::traits::StorageKind StorageKind; typedef typename Eigen::internal::traits::Index Index; + static const int NumDims = Eigen::internal::traits::NumDimensions; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs) : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {} @@ -95,20 +97,33 @@ struct TensorEvaluator, Device> typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType::type PacketReturnType; typedef 
typename TensorEvaluator::Dimensions Dimensions; + static const int PacketSize = internal::unpacket_traits::size; + static const int NumDims = XprType::NumDims; enum { - IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, - PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, - Layout = TensorEvaluator::Layout, - RawAccess = TensorEvaluator::RawAccess + IsAligned = TensorEvaluator::IsAligned & + TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & + TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess & + TensorEvaluator::BlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = TensorEvaluator::RawAccess }; + typedef typename internal::TensorBlock< + typename internal::remove_const::type, Index, NumDims, Layout> + TensorBlock; + EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device) { - EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT( + (static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); } EIGEN_DEVICE_FUNC const Dimensions& dimensions() const @@ -164,6 +179,25 @@ struct TensorEvaluator, Device> TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements( + std::vector* resources) const { + m_leftImpl.getResourceRequirements(resources); + m_rightImpl.getResourceRequirements(resources); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalBlock(TensorBlock* block) { + if (TensorEvaluator::RawAccess && + m_leftImpl.data() != nullptr) { + TensorBlock left_block(block->first_coeff_index(), block->block_sizes(), + block->tensor_strides(), block->tensor_strides(), + m_leftImpl.data() + block->first_coeff_index()); + m_rightImpl.block(&left_block); + } else { + m_rightImpl.block(block); + m_leftImpl.writeBlock(*block); + } + } + /// required by sycl in order to extract the accessor const TensorEvaluator& left_impl() const { return m_leftImpl; } /// required by sycl in order to extract the accessor diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h index 8ffc9d093..5321acecf 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h @@ -65,6 +65,40 @@ enum class TensorBlockShapeType { kSkewedInnerDims, }; +struct TensorOpResourceRequirements { + TensorBlockShapeType block_shape; + std::size_t block_total_size; + // TODO(andydavis) Add 'target_num_threads' to support communication of + // thread-resource requirements. This will allow ops deep in the + // expression tree (like reductions) to communicate resources + // requirements based on local state (like the total number of reductions + // to be computed). + TensorOpResourceRequirements(internal::TensorBlockShapeType shape, + const std::size_t size) + : block_shape(shape), block_total_size(size) {} +}; + +// Tries to merge multiple resource requirements. +EIGEN_STRONG_INLINE void MergeResourceRequirements( + const std::vector& resources, + TensorBlockShapeType* block_shape, std::size_t* block_total_size) { + if (resources.empty()) { + return; + } + // TODO(andydavis) Implement different policies (i.e. revert to a default + // policy if block shapes/sizes conflict). 
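+  // Current policy: kSkewedInnerDims takes precedence over kUniformAllDims,
+  // and the merged block_total_size is the maximum of the requested sizes.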
+ *block_shape = resources[0].block_shape; + *block_total_size = resources[0].block_total_size; + for (int i = 1; i < resources.size(); ++i) { + if (resources[i].block_shape == TensorBlockShapeType::kSkewedInnerDims && + *block_shape != TensorBlockShapeType::kSkewedInnerDims) { + *block_shape = TensorBlockShapeType::kSkewedInnerDims; + } + *block_total_size = + numext::maxi(*block_total_size, resources[i].block_total_size); + } +} + /** * \class TensorBlock * \ingroup CXX11_Tensor_Module @@ -74,7 +108,7 @@ enum class TensorBlockShapeType { * This class represents a tensor block specified by the index of the * first block coefficient, and the size of the block in each dimension. */ -template +template class TensorBlock { public: typedef DSizes Dimensions; @@ -614,6 +648,83 @@ struct TensorBlockCwiseBinaryIO { } }; +/** + * \class TensorBlockView + * \ingroup CXX11_Tensor_Module + * + * \brief Read-only view into a block of data. + * + * This class provides read-only access to a block of data in impl. It may need + * to allocate space for holding the intermediate result. + * + */ +template +struct TensorBlockView { + typedef TensorEvaluator Impl; + typedef typename Impl::Index Index; + typedef typename remove_const::type Scalar; + static const int NumDims = array_size::value; + typedef DSizes Dimensions; + + // Constructs a TensorBlockView for `impl`. `block` is only used for for + // specifying the start offset, shape, and strides of the block. + template + TensorBlockView(const Device& device, + const TensorEvaluator& impl, + const OtherTensorBlock& block) + : m_device(device), + m_block_sizes(block.block_sizes()), + m_data(NULL), + m_allocated_data(NULL) { + if (Impl::RawAccess && impl.data() != NULL) { + m_data = impl.data() + block.first_coeff_index(); + m_block_strides = block.tensor_strides(); + } else { + // Actually make a copy. + + // TODO(wuke): This sometimes put a lot pressure on the heap allocator. + // Consider allowing ops to request additional temporary block memory in + // TensorOpResourceRequirements. + m_allocated_data = static_cast( + m_device.allocate(m_block_sizes.TotalSize() * sizeof(Scalar))); + m_data = m_allocated_data; + if (NumDims > 0) { + if (static_cast(Impl::Layout) == static_cast(ColMajor)) { + m_block_strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_block_strides[i] = m_block_strides[i - 1] * m_block_sizes[i - 1]; + } + } else { + m_block_strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_block_strides[i] = m_block_strides[i + 1] * m_block_sizes[i + 1]; + } + } + } + TensorBlock input_block( + block.first_coeff_index(), m_block_sizes, m_block_strides, + block.tensor_strides(), m_allocated_data); + impl.block(&input_block); + } + } + + ~TensorBlockView() { + if (m_allocated_data != NULL) { + m_device.deallocate(m_allocated_data); + } + } + + const Dimensions& block_sizes() const { return m_block_sizes; } + const Dimensions& block_strides() const { return m_block_strides; } + const Scalar* data() const { return m_data; } + + private: + const Device& m_device; + Dimensions m_block_sizes, m_block_strides; + const Scalar* m_data; // Not owned. + Scalar* m_allocated_data; // Owned. 
+}; + /** * \class TensorBlockMapper * \ingroup CXX11_Tensor_Module diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h index 278689915..7ff0d323b 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h @@ -1,4 +1,5 @@ // This file is part of Eigen, a lightweight C++ template library +// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner @@ -110,6 +111,7 @@ struct TensorEvaluator, Device> enum { IsAligned = true, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, RawAccess = false }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h index 21ffa2872..085c05f3d 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h @@ -146,6 +146,7 @@ struct TensorEvaluator, Device> // slice offsets. IsAligned = false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -343,6 +344,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, RawAccess = false }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h index a7c1380b8..9f0321880 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h @@ -122,6 +122,7 @@ struct TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, RawAccess = false }; @@ -306,6 +307,7 @@ template::PacketAccess & TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, RawAccess = false }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h index 0e69cd40c..12cfa8df3 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -240,6 +240,7 @@ struct TensorContractionEvaluatorBase enum { IsAligned = true, PacketAccess = (internal::unpacket_traits::size > 1), + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = true diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h index 182bef918..e0cbbb315 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h @@ -195,6 +195,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = true, + BlockAccess = false, Layout = TensorEvaluator::Layout, RawAccess = false }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h index 25131600d..1ec5819a7 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h @@ -307,6 +307,7 @@ struct TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, 
CoordAccess = false, // to be implemented RawAccess = false @@ -577,11 +578,11 @@ __global__ void EigenConvolutionKernel1D( const float* __restrict kernel, const int numPlanes, const int numX, const int maxX, const int kernelSize, float* buffer) { #if defined(EIGEN_HIPCC) - HIP_DYNAMIC_SHARED(float, s) + HIP_DYNAMIC_SHARED(float, s) #else extern __shared__ float s[]; #endif - + const int first_x = blockIdx.x * maxX; const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; const int num_x_input = last_x - first_x + GetKernelSize()(kernelSize); @@ -630,7 +631,7 @@ __global__ void EigenConvolutionKernel2D( const int maxX, const int numY, const int maxY, const int kernelSizeX, const int kernelSizeY, float* buffer) { #if defined(EIGEN_HIPCC) - HIP_DYNAMIC_SHARED(float, s) + HIP_DYNAMIC_SHARED(float, s) #else extern __shared__ float s[]; #endif @@ -702,7 +703,7 @@ __global__ void EigenConvolutionKernel3D( const size_t maxZ, const size_t kernelSizeX, const size_t kernelSizeY, const size_t kernelSizeZ, float* buffer) { #if defined(EIGEN_HIPCC) - HIP_DYNAMIC_SHARED(float, s) + HIP_DYNAMIC_SHARED(float, s) #else extern __shared__ float s[]; #endif @@ -778,6 +779,7 @@ struct TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, PacketAccess = false, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h index 65403905a..d301d0c01 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h @@ -242,6 +242,7 @@ struct TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, PacketAccess = false, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h index 192d4aa7b..5ca47cca7 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h @@ -290,6 +290,22 @@ struct DSizes : array { } } +#ifndef EIGEN_EMULATE_CXX11_META_H + template + EIGEN_DEVICE_FUNC DSizes(const Sizes& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } +#else + template + EIGEN_DEVICE_FUNC DSizes(const Sizes& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } +#endif + #if EIGEN_HAS_VARIADIC_TEMPLATES template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... 
otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) { diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h index d0c027890..af39daa91 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h @@ -107,6 +107,7 @@ struct TensorEvaluator, Device> enum { IsAligned = TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = true diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h index fe62ff1ea..ba02802d2 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h @@ -41,11 +41,24 @@ struct TensorEvaluator enum { IsAligned = Derived::IsAligned, PacketAccess = (internal::unpacket_traits::size > 1), + BlockAccess = internal::is_arithmetic::type>::value, Layout = Derived::Layout, CoordAccess = NumCoords > 0, RawAccess = true }; + typedef typename internal::TensorBlock< + typename internal::remove_const::type, Index, NumCoords, Layout> + TensorBlock; + typedef typename internal::TensorBlockReader< + typename internal::remove_const::type, Index, NumCoords, Layout, + PacketAccess> + TensorBlockReader; + typedef typename internal::TensorBlockWriter< + typename internal::remove_const::type, Index, NumCoords, Layout, + PacketAccess> + TensorBlockWriter; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) : m_data(const_cast::template MakePointer::Type>(m.data())), m_dims(m.dimensions()), m_device(device), m_impl(m) { } @@ -113,6 +126,20 @@ struct TensorEvaluator internal::unpacket_traits::size); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements( + std::vector* resources) const {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(TensorBlock* block) const { + assert(m_data != NULL); + TensorBlockReader::Run(block, m_data); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlock& block) { + assert(m_data != NULL); + TensorBlockWriter::Run(block, m_data); + } + EIGEN_DEVICE_FUNC typename internal::traits::template MakePointer::Type data() const { return m_data; } /// required by sycl in order to construct sycl buffer from raw pointer @@ -167,11 +194,20 @@ struct TensorEvaluator enum { IsAligned = Derived::IsAligned, PacketAccess = (internal::unpacket_traits::size > 1), + BlockAccess = internal::is_arithmetic::type>::value, Layout = Derived::Layout, CoordAccess = NumCoords > 0, RawAccess = true }; + typedef typename internal::TensorBlock< + typename internal::remove_const::type, Index, NumCoords, Layout> + TensorBlock; + typedef typename internal::TensorBlockReader< + typename internal::remove_const::type, Index, NumCoords, Layout, + PacketAccess> + TensorBlockReader; + // Used for accessor extraction in SYCL Managed TensorMap: const Derived& derived() const { return m_impl; } @@ -219,6 +255,14 @@ struct TensorEvaluator internal::unpacket_traits::size); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements( + std::vector* resources) const {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(TensorBlock* block) const { + assert(m_data != NULL); + TensorBlockReader::Run(block, m_data); + } + EIGEN_DEVICE_FUNC typename internal::traits::template MakePointer::Type data() const { return 
m_data; } /// added for sycl in order to construct the buffer from the sycl device @@ -244,6 +288,7 @@ struct TensorEvaluator, Device> enum { IsAligned = true, PacketAccess = internal::functor_traits::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -308,7 +353,9 @@ struct TensorEvaluator, Device> enum { IsAligned = TensorEvaluator::IsAligned, - PacketAccess = TensorEvaluator::PacketAccess & internal::functor_traits::PacketAccess, + PacketAccess = TensorEvaluator::PacketAccess & + internal::functor_traits::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -375,16 +422,21 @@ struct TensorEvaluator XprType; enum { - IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, - PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & + IsAligned = TensorEvaluator::IsAligned & + TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & + TensorEvaluator::PacketAccess & internal::functor_traits::PacketAccess, - Layout = TensorEvaluator::Layout, - CoordAccess = false, // to be implemented - RawAccess = false + BlockAccess = TensorEvaluator::BlockAccess & + TensorEvaluator::BlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false }; EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) - : m_functor(op.functor()), + : m_device(device), + m_functor(op.functor()), m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device) { @@ -399,6 +451,14 @@ struct TensorEvaluator::size; typedef typename TensorEvaluator::Dimensions Dimensions; + static const int NumDims = internal::array_size< + typename TensorEvaluator::Dimensions>::value; + + typedef internal::TensorBlock< + typename internal::remove_const::type, Index, NumDims, + TensorEvaluator::Layout> + TensorBlock; + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { // TODO: use right impl instead if right impl dimensions are known at compile time. 
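For context on the plumbing above: every evaluator now exposes getResourceRequirements(), forwarding the call to its children, and the block-based executors later combine the collected entries with MergeResourceRequirements(). Below is a minimal sketch of how that merge policy behaves. It is an illustration rather than part of the patch: it assumes a kUniformAllDims enumerator (only kSkewedInnerDims is visible in this diff) and uses the std::size_t form of block_total_size introduced at this point in the series (a later patch switches it to Index).

#include <cstddef>
#include <vector>
#include <unsupported/Eigen/CXX11/Tensor>

// Illustration only: kUniformAllDims is assumed here; only kSkewedInnerDims
// appears in this patch.
void merge_requirements_sketch() {
  using namespace Eigen::internal;

  std::vector<TensorOpResourceRequirements> resources;
  // Two ops deeper in the expression tree report different preferences.
  resources.emplace_back(TensorBlockShapeType::kUniformAllDims, 1024);
  resources.emplace_back(TensorBlockShapeType::kSkewedInnerDims, 4096);

  TensorBlockShapeType shape = TensorBlockShapeType::kUniformAllDims;
  std::size_t size = 0;
  MergeResourceRequirements(resources, &shape, &size);
  // shape == kSkewedInnerDims: a skewed request wins over a uniform one.
  // size  == 4096:             the largest requested block size wins.
}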
@@ -433,6 +493,30 @@ struct TensorEvaluator* resources) const { + m_leftImpl.getResourceRequirements(resources); + m_rightImpl.getResourceRequirements(resources); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block( + TensorBlock* output_block) const { + if (NumDims <= 0) { + output_block->data()[0] = coeff(0); + return; + } + internal::TensorBlockView left_block( + m_device, m_leftImpl, *output_block); + internal::TensorBlockView right_block( + m_device, m_rightImpl, *output_block); + internal::TensorBlockCwiseBinaryIO< + BinaryOp, Index, typename internal::remove_const::type, NumDims, + Layout>::Run(m_functor, output_block->block_sizes(), + output_block->block_strides(), output_block->data(), + left_block.block_strides(), left_block.data(), + right_block.block_strides(), right_block.data()); + } + EIGEN_DEVICE_FUNC typename Eigen::internal::traits::PointerType data() const { return NULL; } /// required by sycl in order to extract the accessor const TensorEvaluator& left_impl() const { return m_leftImpl; } @@ -442,6 +526,7 @@ struct TensorEvaluator m_leftImpl; TensorEvaluator m_rightImpl; @@ -458,6 +543,7 @@ struct TensorEvaluator::IsAligned & TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & internal::functor_traits::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -562,6 +648,7 @@ struct TensorEvaluator IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess & TensorEvaluator::PacketAccess & internal::packet_traits::HasBlend, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h index 53640c6aa..024de3696 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -12,29 +12,37 @@ namespace Eigen { -/** \class TensorExecutor - * \ingroup CXX11_Tensor_Module - * - * \brief The tensor executor class. - * - * This class is responsible for launch the evaluation of the expression on - * the specified computing device. - */ +/** + * \class TensorExecutor + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor executor class. + * + * This class is responsible for launch the evaluation of the expression on + * the specified computing device. + * + * @tparam Vectorizable can use packet math (SSE/AVX/etc... registers and + * instructions) + * @tparam Tileable can use block based tensor evaluation + * (see TensorBlock.h) + */ namespace internal { -// Default strategy: the expression is evaluated with a single cpu thread. -template -class TensorExecutor -{ +/** + * Default strategy: the expression is evaluated sequentially with a single cpu + * thread, without vectorization and block evaluation. 
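Since this is the entry point most readers will care about, here is a minimal sketch (not part of the patch) of driving the executor directly for this sequential specialization; it mirrors the pattern used by the cxx11_tensor_executor test added elsewhere in this series.

#include <unsupported/Eigen/CXX11/Tensor>

void run_with_default_executor() {
  Eigen::DefaultDevice device;

  Eigen::Tensor<float, 2> lhs(64, 64), rhs(64, 64), dst(64, 64);
  lhs.setRandom();
  rhs.setRandom();

  const auto expr = lhs + rhs;
  using Assign = Eigen::TensorAssignOp<decltype(dst), const decltype(expr)>;
  // Vectorizable = false, Tileable = false selects the sequential,
  // coefficient-by-coefficient strategy documented above.
  using Executor =
      Eigen::internal::TensorExecutor<const Assign, Eigen::DefaultDevice,
                                      /*Vectorizable=*/false,
                                      /*Tileable=*/false>;
  Executor::run(Assign(dst, expr), device);
}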
+ */ +template +class TensorExecutor { public: typedef typename Expression::Index Index; EIGEN_DEVICE_FUNC - static inline void run(const Expression& expr, const Device& device = Device()) - { + static inline void run(const Expression& expr, + const Device& device = Device()) { TensorEvaluator evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); - if (needs_assign) - { + if (needs_assign) { const Index size = array_prod(evaluator.dimensions()); for (Index i = 0; i < size; ++i) { evaluator.evalScalar(i); @@ -44,12 +52,14 @@ class TensorExecutor } }; - -template -class TensorExecutor -{ +/** + * Process all the data with a single cpu thread, using vectorized instructions. + */ +template +class TensorExecutor { public: typedef typename Expression::Index Index; + EIGEN_DEVICE_FUNC static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice()) { @@ -58,9 +68,11 @@ class TensorExecutor if (needs_assign) { const Index size = array_prod(evaluator.dimensions()); - const int PacketSize = unpacket_traits::PacketReturnType>::size; - // Give the compiler a strong hint to unroll the loop. But don't insist - // on unrolling, because if the function is expensive the compiler should not + const int PacketSize = unpacket_traits::PacketReturnType>::size; + + // Give compiler a strong possibility to unroll the loop. But don't insist + // on unrolling, because if the function is expensive compiler should not // unroll the loop at the expense of inlining. const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize; for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) { @@ -80,9 +92,75 @@ class TensorExecutor } }; +/** + * Process all the data with a single cpu thread, using blocks of data. By + * sizing a block to fit L1 cache we get better cache performance. + */ +template +class TensorExecutor { + public: + typedef typename Expression::Index Index; + EIGEN_DEVICE_FUNC + static inline void run(const Expression& expr, + const DefaultDevice& device = DefaultDevice()) { + using Evaluator = TensorEvaluator; -// Multicore strategy: the index space is partitioned and each partition is executed on a single core + using Index = typename traits::Index; + const int NumDims = traits::NumDimensions; + + using Scalar = typename traits::Scalar; + using ScalarNoConst = typename remove_const::type; + + using TensorBlock = + TensorBlock; + using TensorBlockMapper = + TensorBlockMapper; + + Evaluator evaluator(expr, device); + std::size_t total_size = array_prod(evaluator.dimensions()); + std::size_t cache_size = device.firstLevelCacheSize() / sizeof(Scalar); + + if (total_size < cache_size) { + // TODO(andydavis) Reduce block management overhead for small tensors. + // TODO(wuke) Do not do this when evaluating TensorBroadcastingOp. + internal::TensorExecutor::run(expr, device); + return; + } + + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + if (needs_assign) { + // Size tensor blocks to fit in cache (or requested target block size). + size_t block_total_size = numext::mini(cache_size, total_size); + TensorBlockShapeType block_shape = TensorBlockShapeType::kSkewedInnerDims; + // Query expression tree for desired block size/shape. 
+ std::vector resources; + evaluator.getResourceRequirements(&resources); + MergeResourceRequirements(resources, &block_shape, &block_total_size); + + TensorBlockMapper block_mapper(evaluator.dimensions(), block_shape, + block_total_size); + block_total_size = block_mapper.block_dims_total_size(); + + Scalar* data = static_cast( + device.allocate(block_total_size * sizeof(Scalar))); + + const Index total_block_count = block_mapper.total_block_count(); + for (Index i = 0; i < total_block_count; ++i) { + TensorBlock block = block_mapper.GetBlockForIndex(i, data); + evaluator.evalBlock(&block); + } + device.deallocate(data); + } + evaluator.cleanup(); + } +}; + +/** + * Multicore strategy: the index space is partitioned and each partition is + * executed on a single core. + */ #ifdef EIGEN_USE_THREADS template struct EvalRange { @@ -100,7 +178,7 @@ struct EvalRange { }; template -struct EvalRange { +struct EvalRange { static const int PacketSize = unpacket_traits::size; static void run(Evaluator* evaluator_in, const Index first, const Index last) { @@ -110,8 +188,8 @@ struct EvalRange { if (last - first >= PacketSize) { eigen_assert(first % PacketSize == 0); Index last_chunk_offset = last - 4 * PacketSize; - // Give the compiler a strong hint to unroll the loop. But don't insist - // on unrolling, because if the function is expensive the compiler should not + // Give compiler a strong possibility to unroll the loop. But don't insist + // on unrolling, because if the function is expensive compiler should not // unroll the loop at the expense of inlining. for (; i <= last_chunk_offset; i += 4*PacketSize) { for (Index j = 0; j < 4; j++) { @@ -138,55 +216,113 @@ struct EvalRange { } }; -template -class TensorExecutor { +template +class TensorExecutor { public: typedef typename Expression::Index Index; - static inline void run(const Expression& expr, const ThreadPoolDevice& device) - { - typedef TensorEvaluator Evaluator; - Evaluator evaluator(expr, device); - const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); - if (needs_assign) - { - const Index size = array_prod(evaluator.dimensions()); - size_t num_threads = device.numThreads(); - if (num_threads > 1) { - num_threads = TensorCostModel::numThreads( - size, evaluator.costPerCoeff(Vectorizable), num_threads); - } - if (num_threads == 1) { - EvalRange::run(&evaluator, 0, size); - } else { - const Index PacketSize = Vectorizable ? unpacket_traits::size : 1; - Index blocksz = std::ceil(static_cast(size)/num_threads) + PacketSize - 1; - const Index blocksize = numext::maxi(PacketSize, (blocksz - (blocksz % PacketSize))); - const Index numblocks = size / blocksize; - Barrier barrier(numblocks); - for (int i = 0; i < numblocks; ++i) { - device.enqueue_with_barrier( - &barrier, &EvalRange::run, - &evaluator, i * blocksize, (i + 1) * blocksize); - } - if (numblocks * blocksize < size) { - EvalRange::run( - &evaluator, numblocks * blocksize, size); - } - barrier.Wait(); - } + static inline void run(const Expression& expr, + const ThreadPoolDevice& device) { + typedef TensorEvaluator Evaluator; + typedef EvalRange EvalRange; + + Evaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + const Index PacketSize = + Vectorizable + ? 
unpacket_traits::size + : 1; + const Index size = array_prod(evaluator.dimensions()); + device.parallelFor(size, evaluator.costPerCoeff(Vectorizable), + EvalRange::alignBlockSize, + [&evaluator](Index first, Index last) { + EvalRange::run(&evaluator, first, last); + }); } evaluator.cleanup(); } }; + +template +class TensorExecutor { + public: + typedef typename Expression::Index Index; + + static inline void run(const Expression& expr, + const ThreadPoolDevice& device) { + typedef TensorEvaluator Evaluator; + typedef typename internal::remove_const< + typename traits::Scalar>::type Scalar; + typedef typename traits::Index Index; + + static const int NumDims = traits::NumDimensions; + + typedef TensorBlock TensorBlock; + typedef TensorBlockMapper + TensorBlockMapper; + + Evaluator evaluator(expr, device); + std::size_t total_size = array_prod(evaluator.dimensions()); + std::size_t cache_size = device.firstLevelCacheSize() / sizeof(Scalar); + if (total_size < cache_size) { + // TODO(andydavis) Reduce block management overhead for small tensors. + internal::TensorExecutor::run(expr, device); + evaluator.cleanup(); + return; + } + + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + TensorBlockShapeType block_shape = TensorBlockShapeType::kSkewedInnerDims; + size_t block_total_size = 0; + // Query expression tree for desired block size/shape. + std::vector resources; + evaluator.getResourceRequirements(&resources); + MergeResourceRequirements(resources, &block_shape, &block_total_size); + int num_threads = device.numThreads(); + + // Estimate minimum block size based on cost. + TensorOpCost cost = evaluator.costPerCoeff(Vectorizable); + double taskSize = TensorCostModel::taskSize(1, cost); + size_t block_size = static_cast(1.0 / taskSize); + TensorBlockMapper block_mapper(evaluator.dimensions(), block_shape, + block_size); + block_size = block_mapper.block_dims_total_size(); + const size_t aligned_blocksize = + EIGEN_MAX_ALIGN_BYTES * + divup(block_size * sizeof(Scalar), EIGEN_MAX_ALIGN_BYTES); + void* buf = device.allocate((num_threads + 1) * aligned_blocksize); + device.parallelFor( + block_mapper.total_block_count(), cost * block_size, + [=, &device, &evaluator, &block_mapper](Index first, Index last) { + // currentThreadId() returns -1 if called from a thread not in the + // threadpool, such as the main thread dispatching Eigen + // expressions. + const int thread_idx = device.currentThreadId(); + eigen_assert(thread_idx >= -1 && thread_idx < num_threads); + Scalar* thread_buf = reinterpret_cast( + static_cast(buf) + aligned_blocksize * (thread_idx + 1)); + for (Index i = first; i < last; ++i) { + auto block = block_mapper.GetBlockForIndex(i, thread_buf); + evaluator.evalBlock(&block); + } + }); + device.deallocate(buf); + } + evaluator.cleanup(); + } +}; + #endif // EIGEN_USE_THREADS // GPU: the evaluation of the expression is offloaded to a GPU. 
#if defined(EIGEN_USE_GPU) -template -class TensorExecutor { +template +class TensorExecutor { public: typedef typename Expression::Index Index; static void run(const Expression& expr, const GpuDevice& device); @@ -236,8 +372,8 @@ EigenMetaKernel(Evaluator eval, Index size) { } /*static*/ -template -inline void TensorExecutor::run( +template +inline void TensorExecutor::run( const Expression& expr, const GpuDevice& device) { TensorEvaluator evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h index e943757ad..1342e47a6 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h @@ -40,6 +40,8 @@ class TensorFixedSize : public TensorBase0), + PacketAccess = (internal::packet_traits::size > 1), + BlockAccess = false, Layout = Options_ & RowMajor ? RowMajor : ColMajor, CoordAccess = true, RawAccess = true diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h index b8f0bc798..fdb31928f 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h @@ -98,6 +98,7 @@ struct TensorEvaluator, Device> enum { IsAligned = true, PacketAccess = (PacketSize > 1), + BlockAccess = false, Layout = TensorEvaluator::Layout, RawAccess = true }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h index 19e456e19..8ed1796df 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h @@ -129,8 +129,14 @@ struct IsVectorizable { TensorEvaluator::IsAligned; }; +template +struct IsTileable { + static const bool value = TensorEvaluator::BlockAccess; +}; + template ::value> + bool Vectorizable = IsVectorizable::value, + bool Tileable = IsTileable::value> class TensorExecutor; } // end namespace internal diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h index f0f7c7826..72cb2d15f 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h @@ -186,6 +186,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h index 4e384f9b9..e3165fa10 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h @@ -119,6 +119,7 @@ struct TensorEvaluator, Device> enum { IsAligned = TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? RowMajor : ColMajor, CoordAccess = false, // to be implemented RawAccess = TensorEvaluator::RawAccess @@ -181,6 +182,7 @@ template enum { IsAligned = TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? 
RowMajor : ColMajor, CoordAccess = false // to be implemented }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h index cda49f8fe..498488649 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h @@ -105,6 +105,7 @@ struct TensorEvaluator, Device> enum { IsAligned = TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = TensorEvaluator::RawAccess @@ -170,6 +171,7 @@ template enum { IsAligned = TensorEvaluator::IsAligned, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = TensorEvaluator::RawAccess @@ -325,6 +327,7 @@ struct TensorEvaluator, Devi // slice offsets and sizes. IsAligned = /*TensorEvaluator::IsAligned*/false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, RawAccess = false @@ -557,6 +560,7 @@ struct TensorEvaluator, Device> enum { IsAligned = /*TensorEvaluator::IsAligned*/false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, RawAccess = (NumDims == 1) & TensorEvaluator::RawAccess @@ -716,7 +720,6 @@ struct TensorEvaluator::value; typedef typename XprType::Index Index; typedef typename XprType::Scalar Scalar; - typedef typename internal::remove_const::type ScalarNonConst; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType::type PacketReturnType; typedef Strides Dimensions; @@ -858,7 +861,7 @@ struct TensorEvaluator::type ScalarNonConst; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType::type PacketReturnType; typedef Strides Dimensions; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h index 5956e513d..ffa22f31e 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h @@ -96,6 +96,7 @@ struct TensorEvaluator, Device enum { IsAligned = true, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = true, RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h b/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h index 9e0a20abf..950ac32af 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h @@ -94,6 +94,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h index ce573d730..375fc0802 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h @@ -412,6 +412,7 @@ struct TensorEvaluator, enum { IsAligned = false, PacketAccess = Self::InputPacketAccess && Op::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h 
index b2b4fd8d3..a6cade50f 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h @@ -136,6 +136,7 @@ template class TensorRef : public TensorBase, Device> enum { IsAligned = false, PacketAccess = false, + BlockAccess = false, Layout = TensorRef::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -411,6 +413,7 @@ struct TensorEvaluator, Device> : public TensorEvaluator, Device enum { IsAligned = false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -253,6 +254,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h index 0697fd1ce..6b54f40ad 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h @@ -112,6 +112,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = (internal::packet_traits::size > 1), + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -240,6 +241,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = (internal::packet_traits::size > 1), + BlockAccess = false, RawAccess = false }; diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h index a7eea99b6..c09513c10 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h @@ -112,6 +112,7 @@ struct TensorEvaluator, Device> enum { IsAligned = /*TensorEvaluator::IsAligned*/false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false @@ -273,6 +274,7 @@ struct TensorEvaluator, Device> enum { IsAligned = /*TensorEvaluator::IsAligned*/false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, // to be implemented RawAccess = false diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h index 2b1968de1..c8b2fad1e 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h @@ -95,6 +95,7 @@ struct TensorEvaluator, Device> enum { IsAligned = false, PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, Layout = TensorEvaluator::Layout, CoordAccess = false, RawAccess = false @@ -110,7 +111,7 @@ struct TensorEvaluator, Device> for (int i = 0; i < NumInputDims; ++i) { m_reduced[i] = false; } - + const Dims& op_dims = op.dims(); for (int i = 0; i < NumReducedDims; ++i) { eigen_assert(op_dims[i] >= 0); @@ -128,7 +129,7 @@ struct TensorEvaluator, Device> eigen_assert(num_distinct_reduce_dims == NumReducedDims); - // Compute the dimensions of the result. + // Compute the dimensions of the result. 
const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); int output_index = 0; @@ -229,7 +230,7 @@ struct TensorEvaluator, Device> result += m_impl.coeff(cur_index); cur_index += index_stride; } - + return result; } diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index fa19b2159..239a80926 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -213,6 +213,7 @@ if(EIGEN_TEST_CXX11) ei_add_test(cxx11_tensor_striding) ei_add_test(cxx11_tensor_notification "-pthread" "${CMAKE_THREAD_LIBS_INIT}") ei_add_test(cxx11_tensor_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + ei_add_test(cxx11_tensor_executor "-pthread" "${CMAKE_THREAD_LIBS_INIT}") ei_add_test(cxx11_tensor_ref) ei_add_test(cxx11_tensor_random) ei_add_test(cxx11_tensor_generator) diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp index 15f2392a3..416b686e4 100644 --- a/unsupported/test/cxx11_tensor_block_access.cpp +++ b/unsupported/test/cxx11_tensor_block_access.cpp @@ -901,7 +901,7 @@ static void test_empty_dims(const internal::TensorBlockShapeType block_shape) CALL_SUBTEST(NAME(ARG)); \ CALL_SUBTEST(NAME(ARG)) -EIGEN_DECLARE_TEST(cxx11_tensor_assign) { +EIGEN_DECLARE_TEST(cxx11_tensor_block_access) { CALL_SUBTEST_LAYOUTS(test_block_mapper_sanity); CALL_SUBTEST_LAYOUTS(test_block_mapper_maps_every_element); CALL_SUBTEST_LAYOUTS(test_slice_block_mapper_maps_every_element); diff --git a/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu b/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu index aa28457b1..f2a2a6cfa 100644 --- a/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu +++ b/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu @@ -93,7 +93,7 @@ void test_cuda_complex_cwise_ops() { } -void test_cxx11_tensor_complex_cwise_ops() +EIGEN_DECLARE_TEST(test_cxx11_tensor_complex_cwise_ops) { CALL_SUBTEST(test_cuda_complex_cwise_ops()); CALL_SUBTEST(test_cuda_complex_cwise_ops()); diff --git a/unsupported/test/cxx11_tensor_complex_gpu.cu b/unsupported/test/cxx11_tensor_complex_gpu.cu index 7cf06aa7a..f8b8ae704 100644 --- a/unsupported/test/cxx11_tensor_complex_gpu.cu +++ b/unsupported/test/cxx11_tensor_complex_gpu.cu @@ -177,7 +177,7 @@ static void test_cuda_product_reductions() { } -void test_cxx11_tensor_complex() +EIGEN_DECLARE_TEST(test_cxx11_tensor_complex) { CALL_SUBTEST(test_cuda_nullary()); CALL_SUBTEST(test_cuda_sum_reductions()); diff --git a/unsupported/test/cxx11_tensor_executor.cpp b/unsupported/test/cxx11_tensor_executor.cpp new file mode 100644 index 000000000..5ae45ac5b --- /dev/null +++ b/unsupported/test/cxx11_tensor_executor.cpp @@ -0,0 +1,81 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Eugene Zhulenev +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_USE_THREADS + +#include "main.h" + +#include + +using Eigen::Index; +using Eigen::Tensor; +using Eigen::RowMajor; +using Eigen::ColMajor; + +// A set of tests to verify that different TensorExecutor strategies yields the +// same results for all the ops, supporting tiled execution. + +template +static void test_execute_binary_expr(Device d) { + // Pick a large enough tensor size to bypass small tensor block evaluation + // optimization. 
+ Tensor lhs(840, 390, 37); + Tensor rhs(840, 390, 37); + Tensor dst(840, 390, 37); + + lhs.setRandom(); + rhs.setRandom(); + + const auto expr = lhs + rhs; + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (int i = 0; i < 840; ++i) { + for (int j = 0; j < 390; ++j) { + for (int k = 0; k < 37; ++k) { + float sum = lhs(i, j, k) + rhs(i, j, k); + VERIFY_IS_EQUAL(sum, dst(i, j, k)); + } + } + } +} + +#define CALL_SUBTEST_COMBINATIONS(NAME) \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(default_device))); \ + CALL_SUBTEST((NAME(tp_device))); \ + CALL_SUBTEST((NAME(tp_device))); \ + CALL_SUBTEST((NAME(tp_device))); \ + CALL_SUBTEST((NAME(tp_device))); \ + CALL_SUBTEST((NAME(tp_device))); \ + CALL_SUBTEST((NAME(tp_device))); \ + CALL_SUBTEST((NAME(tp_device))); \ + CALL_SUBTEST((NAME(tp_device))) + +EIGEN_DECLARE_TEST(cxx11_tensor_executor) { + Eigen::DefaultDevice default_device; + + const auto num_threads = internal::random(1, 24); + Eigen::ThreadPool tp(num_threads); + Eigen::ThreadPoolDevice tp_device(&tp, num_threads); + + CALL_SUBTEST_COMBINATIONS(test_execute_binary_expr); +} + +#undef CALL_SUBTEST_COMBINATIONS From 966c2a7bb62a8b5b9ecd349730ffcd3b5719837d Mon Sep 17 00:00:00 2001 From: Eugene Zhulenev Date: Fri, 27 Jul 2018 12:45:17 -0700 Subject: [PATCH 3/3] Rename Index to StorageIndex + use Eigen::Array and Eigen::Map when possible --- .../Eigen/CXX11/src/Tensor/TensorBlock.h | 418 +++++++----------- .../CXX11/src/Tensor/TensorBroadcasting.h | 1 - .../Eigen/CXX11/src/Tensor/TensorEvaluator.h | 9 +- .../Eigen/CXX11/src/Tensor/TensorExecutor.h | 185 ++++---- .../test/cxx11_tensor_block_access.cpp | 292 +++++++----- unsupported/test/cxx11_tensor_executor.cpp | 20 +- 6 files changed, 447 insertions(+), 478 deletions(-) diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h index 5321acecf..84cf6d216 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h @@ -67,21 +67,21 @@ enum class TensorBlockShapeType { struct TensorOpResourceRequirements { TensorBlockShapeType block_shape; - std::size_t block_total_size; + Index block_total_size; // TODO(andydavis) Add 'target_num_threads' to support communication of // thread-resource requirements. This will allow ops deep in the // expression tree (like reductions) to communicate resources // requirements based on local state (like the total number of reductions // to be computed). TensorOpResourceRequirements(internal::TensorBlockShapeType shape, - const std::size_t size) + const Index size) : block_shape(shape), block_total_size(size) {} }; // Tries to merge multiple resource requirements. EIGEN_STRONG_INLINE void MergeResourceRequirements( const std::vector& resources, - TensorBlockShapeType* block_shape, std::size_t* block_total_size) { + TensorBlockShapeType* block_shape, Index* block_total_size) { if (resources.empty()) { return; } @@ -108,12 +108,12 @@ EIGEN_STRONG_INLINE void MergeResourceRequirements( * This class represents a tensor block specified by the index of the * first block coefficient, and the size of the block in each dimension. 
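A short sketch (illustration only, not part of the patch) of what a TensorBlock describes, with the StorageIndex parameter introduced by this patch instantiated as Eigen::Index: the 2x3 sub-block starting at coordinates (1, 2) of a column-major 5x7 float tensor, with a separate scratch buffer holding the block's coefficients.

#include <unsupported/Eigen/CXX11/Tensor>

void tensor_block_sketch() {
  using Block =
      Eigen::internal::TensorBlock<float, Eigen::Index, 2, Eigen::ColMajor>;

  float scratch[6];  // Destination storage for the 2 * 3 block coefficients.

  Eigen::DSizes<Eigen::Index, 2> block_sizes(2, 3);
  Eigen::DSizes<Eigen::Index, 2> block_strides(1, 2);   // Dense column-major scratch.
  Eigen::DSizes<Eigen::Index, 2> tensor_strides(1, 5);  // Source tensor is 5x7.

  // first_coeff_index = 1 * 1 + 2 * 5 = 11 into the flat source buffer.
  Block block(/*first_coeff_index=*/11, block_sizes, block_strides,
              tensor_strides, scratch);
  // A TensorBlockReader/Writer or a block-aware evaluator would now
  // operate on `block`.
}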
*/ -template +template class TensorBlock { public: - typedef DSizes Dimensions; + typedef DSizes Dimensions; - TensorBlock(const Index first_coeff_index, const Dimensions& block_sizes, + TensorBlock(const StorageIndex first_coeff_index, const Dimensions& block_sizes, const Dimensions& block_strides, const Dimensions& tensor_strides, Scalar* data) : m_first_coeff_index(first_coeff_index), @@ -122,7 +122,7 @@ class TensorBlock { m_tensor_strides(tensor_strides), m_data(data) {} - Index first_coeff_index() const { return m_first_coeff_index; } + StorageIndex first_coeff_index() const { return m_first_coeff_index; } const Dimensions& block_sizes() const { return m_block_sizes; } @@ -135,108 +135,33 @@ class TensorBlock { const Scalar* data() const { return m_data; } private: - Index m_first_coeff_index; + StorageIndex m_first_coeff_index; Dimensions m_block_sizes; Dimensions m_block_strides; Dimensions m_tensor_strides; Scalar* m_data; // Not owned. }; -template +template struct TensorBlockCopyOp { static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( - const Index num_coeff_to_copy, const Index dst_index, - const Index dst_stride, Scalar* EIGEN_RESTRICT dst_data, - const Index src_index, const Index src_stride, + const StorageIndex num_coeff_to_copy, const StorageIndex dst_index, + const StorageIndex dst_stride, Scalar* EIGEN_RESTRICT dst_data, + const StorageIndex src_index, const StorageIndex src_stride, const Scalar* EIGEN_RESTRICT src_data) { - for (Index i = 0; i < num_coeff_to_copy; ++i) { - dst_data[dst_index + i * dst_stride] = - src_data[src_index + i * src_stride]; - } - } -}; + const Scalar* src_base = &src_data[src_index]; + Scalar* dst_base = &dst_data[dst_index]; -// NOTE: Benchmarks run on an implementation of this that broke each of the -// loops in these conditionals into it's own template specialization (to -// avoid conditionals in the caller's loop) did not show an improvement. 
-template -struct TensorBlockCopyOp { - typedef typename packet_traits::type Packet; - static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( - const Index num_coeff_to_copy, const Index dst_index, - const Index dst_stride, Scalar* EIGEN_RESTRICT dst_data, - const Index src_index, const Index src_stride, - const Scalar* EIGEN_RESTRICT src_data) { - if (src_stride == 1) { - const Index packet_size = internal::unpacket_traits::size; - const Index vectorized_size = - (num_coeff_to_copy / packet_size) * packet_size; - if (dst_stride == 1) { - // LINEAR - for (Index i = 0; i < vectorized_size; i += packet_size) { - Packet p = internal::ploadu(src_data + src_index + i); - internal::pstoreu(dst_data + dst_index + i, p); - } - for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { - dst_data[dst_index + i] = src_data[src_index + i]; - } - } else { - // SCATTER - for (Index i = 0; i < vectorized_size; i += packet_size) { - Packet p = internal::ploadu(src_data + src_index + i); - internal::pscatter( - dst_data + dst_index + i * dst_stride, p, dst_stride); - } - for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { - dst_data[dst_index + i * dst_stride] = src_data[src_index + i]; - } - } - } else if (src_stride == 0) { - const Index packet_size = internal::unpacket_traits::size; - const Index vectorized_size = - (num_coeff_to_copy / packet_size) * packet_size; - if (dst_stride == 1) { - // LINEAR - for (Index i = 0; i < vectorized_size; i += packet_size) { - Packet p = internal::pload1(src_data + src_index); - internal::pstoreu(dst_data + dst_index + i, p); - } - for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { - dst_data[dst_index + i] = src_data[src_index]; - } - } else { - // SCATTER - for (Index i = 0; i < vectorized_size; i += packet_size) { - Packet p = internal::pload1(src_data + src_index); - internal::pscatter( - dst_data + dst_index + i * dst_stride, p, dst_stride); - } - for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { - dst_data[dst_index + i * dst_stride] = src_data[src_index]; - } - } - } else { - if (dst_stride == 1) { - // GATHER - const Index packet_size = internal::unpacket_traits::size; - const Index vectorized_size = - (num_coeff_to_copy / packet_size) * packet_size; - for (Index i = 0; i < vectorized_size; i += packet_size) { - Packet p = internal::pgather( - src_data + src_index + i * src_stride, src_stride); - internal::pstoreu(dst_data + dst_index + i, p); - } - for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { - dst_data[dst_index + i] = src_data[src_index + i * src_stride]; - } - } else { - // RANDOM - for (Index i = 0; i < num_coeff_to_copy; ++i) { - dst_data[dst_index + i * dst_stride] = - src_data[src_index + i * src_stride]; - } - } - } + using Src = const Eigen::Array; + using Dst = Eigen::Array; + + using SrcMap = Eigen::Map>; + using DstMap = Eigen::Map>; + + const SrcMap src(src_base, num_coeff_to_copy, InnerStride<>(src_stride)); + DstMap dst(dst_base, num_coeff_to_copy, InnerStride<>(dst_stride)); + + dst = src; } }; @@ -249,34 +174,34 @@ struct TensorBlockCopyOp { * This class is responsible for copying data between a tensor and a tensor * block. 
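To make the copy direction concrete, here is a round trip through the reader and writer defined next (an illustration, not part of the patch), reusing the 2x3 block layout from the sketch above; the PacketAccess template argument is set to false for simplicity.

#include <unsupported/Eigen/CXX11/Tensor>

void tensor_block_io_sketch() {
  namespace ei = Eigen::internal;
  using Block  = ei::TensorBlock<float, Eigen::Index, 2, Eigen::ColMajor>;
  using Reader = ei::TensorBlockReader<float, Eigen::Index, 2, Eigen::ColMajor,
                                       /*PacketAccess=*/false>;
  using Writer = ei::TensorBlockWriter<float, Eigen::Index, 2, Eigen::ColMajor,
                                       /*PacketAccess=*/false>;

  Eigen::Tensor<float, 2> src(5, 7), dst(5, 7);
  src.setRandom();
  dst.setZero();

  float scratch[6];
  Block block(/*first_coeff_index=*/11,
              /*block_sizes=*/Eigen::DSizes<Eigen::Index, 2>(2, 3),
              /*block_strides=*/Eigen::DSizes<Eigen::Index, 2>(1, 2),
              /*tensor_strides=*/Eigen::DSizes<Eigen::Index, 2>(1, 5), scratch);

  Reader::Run(&block, src.data());  // tensor -> block, gathers into `scratch`.
  Writer::Run(block, dst.data());   // block -> tensor, scatters back out.
  // dst now matches src on the 2x3 patch anchored at (1, 2), zeros elsewhere.
}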
*/ -template +template class TensorBlockIO { public: - typedef typename internal::TensorBlock + typedef typename internal::TensorBlock TensorBlock; - typedef typename internal::TensorBlockCopyOp + typedef typename internal::TensorBlockCopyOp TensorBlockCopyOp; protected: struct BlockIteratorState { - Index input_stride; - Index output_stride; - Index input_span; - Index output_span; - Index size; - Index count; + StorageIndex input_stride; + StorageIndex output_stride; + StorageIndex input_span; + StorageIndex output_span; + StorageIndex size; + StorageIndex count; }; static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Copy( - const TensorBlock& block, Index first_coeff_index, - const array& tensor_to_block_dim_map, - const array& tensor_strides, const Scalar* src_data, + const TensorBlock& block, StorageIndex first_coeff_index, + const array& tensor_to_block_dim_map, + const array& tensor_strides, const Scalar* src_data, Scalar* dst_data) { // Find the innermost tensor dimension whose size is not 1. This is the // effective inner dim. If all dimensions are of size 1, then fallback to // using the actual innermost dim to avoid out-of-bound access. - Index num_size_one_inner_dims = 0; + StorageIndex num_size_one_inner_dims = 0; for (int i = 0; i < NumDims; ++i) { const int dim = cond()(i, NumDims - i - 1); if (block.block_sizes()[tensor_to_block_dim_map[dim]] != 1) { @@ -285,16 +210,16 @@ class TensorBlockIO { } } // Calculate strides and dimensions. - const Index tensor_stride1_dim = cond()( + const StorageIndex tensor_stride1_dim = cond()( num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1); - const Index block_dim_for_tensor_stride1_dim = + const StorageIndex block_dim_for_tensor_stride1_dim = NumDims == 0 ? 1 : tensor_to_block_dim_map[tensor_stride1_dim]; size_t block_inner_dim_size = NumDims == 0 ? 1 : block.block_sizes()[block_dim_for_tensor_stride1_dim]; for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) { const int dim = cond()(i, NumDims - i - 1); - const Index block_stride = + const StorageIndex block_stride = block.block_strides()[tensor_to_block_dim_map[dim]]; if (block_inner_dim_size == block_stride && block_stride == tensor_strides[dim]) { @@ -306,10 +231,10 @@ class TensorBlockIO { } } - Index inputIndex; - Index outputIndex; - Index input_stride; - Index output_stride; + StorageIndex inputIndex; + StorageIndex outputIndex; + StorageIndex input_stride; + StorageIndex output_stride; // Setup strides to read/write along the tensor's stride1 dimension. if (BlockRead) { @@ -337,7 +262,7 @@ class TensorBlockIO { int num_squeezed_dims = 0; for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) { const int dim = cond()(i + 1, NumDims - i - 2); - const Index size = block.block_sizes()[tensor_to_block_dim_map[dim]]; + const StorageIndex size = block.block_sizes()[tensor_to_block_dim_map[dim]]; if (size == 1) { continue; } @@ -362,9 +287,9 @@ class TensorBlockIO { } // Iterate copying data from src to dst. - const Index block_total_size = + const StorageIndex block_total_size = NumDims == 0 ? 1 : block.block_sizes().TotalSize(); - for (Index i = 0; i < block_total_size; i += block_inner_dim_size) { + for (StorageIndex i = 0; i < block_total_size; i += block_inner_dim_size) { TensorBlockCopyOp::Run(block_inner_dim_size, outputIndex, output_stride, dst_data, inputIndex, input_stride, src_data); // Update index. @@ -391,19 +316,18 @@ class TensorBlockIO { * This class is responsible for reading a tensor block. 
* */ -template -class TensorBlockReader - : public TensorBlockIO { +template +class TensorBlockReader : public TensorBlockIO { public: - typedef typename internal::TensorBlock + typedef typename internal::TensorBlock TensorBlock; - typedef TensorBlockIO + typedef TensorBlockIO Base; static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( TensorBlock* block, const Scalar* src_data) { - array tensor_to_block_dim_map; + array tensor_to_block_dim_map; for (int i = 0; i < NumDims; ++i) { tensor_to_block_dim_map[i] = i; } @@ -412,9 +336,9 @@ class TensorBlockReader } static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( - TensorBlock* block, Index first_coeff_index, - const array& tensor_to_block_dim_map, - const array& tensor_strides, const Scalar* src_data) { + TensorBlock* block, StorageIndex first_coeff_index, + const array& tensor_to_block_dim_map, + const array& tensor_strides, const Scalar* src_data) { Base::Copy(*block, first_coeff_index, tensor_to_block_dim_map, tensor_strides, src_data, block->data()); } @@ -429,19 +353,18 @@ class TensorBlockReader * This class is responsible for writing a tensor block. * */ -template -class TensorBlockWriter : public TensorBlockIO { +template +class TensorBlockWriter : public TensorBlockIO { public: - typedef typename internal::TensorBlock + typedef typename internal::TensorBlock TensorBlock; - typedef TensorBlockIO + typedef TensorBlockIO Base; static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( const TensorBlock& block, Scalar* dst_data) { - array tensor_to_block_dim_map; + array tensor_to_block_dim_map; for (int i = 0; i < NumDims; ++i) { tensor_to_block_dim_map[i] = i; } @@ -450,9 +373,9 @@ class TensorBlockWriter : public TensorBlockIO& tensor_to_block_dim_map, - const array& tensor_strides, Scalar* dst_data) { + const TensorBlock& block, StorageIndex first_coeff_index, + const array& tensor_to_block_dim_map, + const array& tensor_strides, Scalar* dst_data) { Base::Copy(block, first_coeff_index, tensor_to_block_dim_map, tensor_strides, block.data(), dst_data); } @@ -468,67 +391,34 @@ class TensorBlockWriter : public TensorBlockIO struct TensorBlockCwiseBinaryOp { - template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( - const BinaryFunctor& functor, const Index num_coeff, - const Index output_index, const Index output_stride, - OutputScalar* output_data, const Index left_index, - const Index left_stride, const LeftScalar* left_data, - const Index right_index, const Index right_stride, + const BinaryFunctor& functor, const StorageIndex num_coeff, + const StorageIndex output_index, const StorageIndex output_stride, + OutputScalar* output_data, const StorageIndex left_index, + const StorageIndex left_stride, const LeftScalar* left_data, + const StorageIndex right_index, const StorageIndex right_stride, const RightScalar* right_data) { - for (Index i = 0; i < num_coeff; ++i) { - output_data[output_index + i * output_stride] = - functor(left_data[left_index + i * left_stride], - right_data[right_index + i * right_stride]); - } - } -}; + using Lhs = const Eigen::Array; + using Rhs = const Eigen::Array; + using Out = Eigen::Array; -template <> -struct TensorBlockCwiseBinaryOp { - template - static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( - const BinaryFunctor& functor, const Index num_coeff, - const Index output_index, const Index output_stride, - OutputScalar* output_data, const Index left_index, - const Index left_stride, const LeftScalar* left_data, - const Index right_index, const Index right_stride, - const RightScalar* 
right_data) { - EIGEN_STATIC_ASSERT(functor_traits::PacketAccess, - YOU_MADE_A_PROGRAMMING_MISTAKE); - typedef typename packet_traits::type OutputPacket; - typedef typename packet_traits::type LeftPacket; - typedef typename packet_traits::type RightPacket; - const Index packet_size = unpacket_traits::size; - EIGEN_STATIC_ASSERT(unpacket_traits::size == packet_size, - YOU_MADE_A_PROGRAMMING_MISTAKE); - EIGEN_STATIC_ASSERT(unpacket_traits::size == packet_size, - YOU_MADE_A_PROGRAMMING_MISTAKE); - const Index vectorized_size = (num_coeff / packet_size) * packet_size; - if (output_stride != 1 || left_stride != 1 || right_stride != 1) { - TensorBlockCwiseBinaryOp::Run( - functor, num_coeff, output_index, output_stride, output_data, - left_index, left_stride, left_data, right_index, right_stride, - right_data); - return; - } - // Vectorization for the most common case. - for (Index i = 0; i < vectorized_size; i += packet_size) { - LeftPacket l = internal::ploadu(left_data + left_index + i); - RightPacket r = - internal::ploadu(right_data + right_index + i); - OutputPacket p = functor.packetOp(l, r); - internal::pstoreu( - output_data + output_index + i, p); - } - for (Index i = vectorized_size; i < num_coeff; ++i) { - output_data[output_index + i] = - functor(left_data[left_index + i], right_data[right_index + i]); - } + using LhsMap = Eigen::Map>; + using RhsMap = Eigen::Map>; + using OutMap = Eigen::Map>; + + const LeftScalar* lhs_base = &left_data[left_index]; + const RightScalar* rhs_base = &right_data[right_index]; + OutputScalar* out_base = &output_data[output_index]; + + const LhsMap lhs(lhs_base, num_coeff, InnerStride<>(left_stride)); + const RhsMap rhs(rhs_base, num_coeff, InnerStride<>(right_stride)); + OutMap out(out_base, num_coeff, InnerStride<>(output_stride)); + + out = + Eigen::CwiseBinaryOp(lhs, rhs, functor); } }; @@ -541,28 +431,26 @@ struct TensorBlockCwiseBinaryOp { * This class carries out the binary op on given blocks. * */ -template struct TensorBlockCwiseBinaryIO { - typedef typename internal::TensorBlock::Dimensions Dimensions; - typedef internal::TensorBlockCwiseBinaryOp< - functor_traits::PacketAccess> - TensorBlockCwiseBinaryOp; struct BlockIteratorState { - Index output_stride, output_span; - Index left_stride, left_span; - Index right_stride, right_span; - Index size, count; + StorageIndex output_stride, output_span; + StorageIndex left_stride, left_span; + StorageIndex right_stride, right_span; + StorageIndex size, count; }; template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( const BinaryFunctor& functor, const Dimensions& block_sizes, const Dimensions& block_strides, OutputScalar* output_data, - const array& left_strides, const LeftScalar* left_data, - const array& right_strides, + const array& left_strides, + const LeftScalar* left_data, + const array& right_strides, const RightScalar* right_data) { // Find the innermost dimension whose size is not 1. This is the effective // inner dim. If all dimensions are of size 1, fallback to using the actual @@ -580,7 +468,7 @@ struct TensorBlockCwiseBinaryIO { NumDims == 0 ? 1 : cond()(num_size_one_inner_dims, NumDims - num_size_one_inner_dims - 1); - Index inner_dim_size = NumDims == 0 ? 1 : block_sizes[inner_dim]; + StorageIndex inner_dim_size = NumDims == 0 ? 1 : block_sizes[inner_dim]; for (int i = num_size_one_inner_dims + 1; i < NumDims; ++i) { const int dim = cond()(i, NumDims - i - 1); // Merge multiple inner dims into one for larger inner dim size (i.e. 
@@ -595,10 +483,12 @@ struct TensorBlockCwiseBinaryIO { } } - Index output_index = 0, left_index = 0, right_index = 0; - const Index output_stride = NumDims == 0 ? 1 : block_strides[inner_dim]; - const Index left_stride = NumDims == 0 ? 1 : left_strides[inner_dim]; - const Index right_stride = NumDims == 0 ? 1 : right_strides[inner_dim]; + StorageIndex output_index = 0, left_index = 0, right_index = 0; + const StorageIndex output_stride = + NumDims == 0 ? 1 : block_strides[inner_dim]; + const StorageIndex left_stride = NumDims == 0 ? 1 : left_strides[inner_dim]; + const StorageIndex right_stride = + NumDims == 0 ? 1 : right_strides[inner_dim]; const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1; array block_iter_state; @@ -607,7 +497,7 @@ struct TensorBlockCwiseBinaryIO { int num_squeezed_dims = 0; for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) { const int dim = cond()(i + 1, NumDims - i - 2); - const Index size = block_sizes[dim]; + const StorageIndex size = block_sizes[dim]; if (size == 1) { continue; } @@ -624,8 +514,9 @@ struct TensorBlockCwiseBinaryIO { } // Compute cwise binary op. - const Index block_total_size = NumDims == 0 ? 1 : block_sizes.TotalSize(); - for (Index i = 0; i < block_total_size; i += inner_dim_size) { + const StorageIndex block_total_size = + NumDims == 0 ? 1 : block_sizes.TotalSize(); + for (StorageIndex i = 0; i < block_total_size; i += inner_dim_size) { TensorBlockCwiseBinaryOp::Run(functor, inner_dim_size, output_index, output_stride, output_data, left_index, left_stride, left_data, right_index, @@ -661,10 +552,10 @@ struct TensorBlockCwiseBinaryIO { template struct TensorBlockView { typedef TensorEvaluator Impl; - typedef typename Impl::Index Index; + typedef typename Impl::Index StorageIndex; typedef typename remove_const::type Scalar; static const int NumDims = array_size::value; - typedef DSizes Dimensions; + typedef DSizes Dimensions; // Constructs a TensorBlockView for `impl`. `block` is only used for for // specifying the start offset, shape, and strides of the block. @@ -701,7 +592,7 @@ struct TensorBlockView { } } } - TensorBlock input_block( + TensorBlock input_block( block.first_coeff_index(), m_block_sizes, m_block_strides, block.tensor_strides(), m_allocated_data); impl.block(&input_block); @@ -733,21 +624,21 @@ struct TensorBlockView { * * This class is responsible for iterating over the blocks of a tensor. */ -template +template class TensorBlockMapper { public: - typedef typename internal::TensorBlock + typedef typename internal::TensorBlock TensorBlock; - typedef DSizes Dimensions; + typedef DSizes Dimensions; TensorBlockMapper(const Dimensions& dims, const TensorBlockShapeType block_shape, - size_t min_target_size) + Index min_target_size) : m_dimensions(dims), m_block_dim_sizes(BlockDimensions(dims, block_shape, min_target_size)) { // Calculate block counts by dimension and total block count. 
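      // Worked example (illustrative numbers, not from this patch): for dims ==
      // {11, 7} and block dim sizes {4, 7}, divup yields block_count == {3, 1},
      // so the mapper produces three blocks in total; the last block along
      // dimension 0 covers only the remaining 3 coefficients.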
- DSizes block_count; - for (size_t i = 0; i < block_count.rank(); ++i) { + DSizes block_count; + for (Index i = 0; i < block_count.rank(); ++i) { block_count[i] = divup(m_dimensions[i], m_block_dim_sizes[i]); } m_total_block_count = array_prod(block_count); @@ -773,15 +664,15 @@ class TensorBlockMapper { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock - GetBlockForIndex(Index block_index, Scalar* data) const { - Index first_coeff_index = 0; - DSizes coords; - DSizes sizes; - DSizes strides; + GetBlockForIndex(StorageIndex block_index, Scalar* data) const { + StorageIndex first_coeff_index = 0; + DSizes coords; + DSizes sizes; + DSizes strides; if (NumDims > 0) { if (static_cast(Layout) == static_cast(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { - const Index idx = block_index / m_block_strides[i]; + const StorageIndex idx = block_index / m_block_strides[i]; coords[i] = idx * m_block_dim_sizes[i]; sizes[i] = numext::mini((m_dimensions[i] - coords[i]), m_block_dim_sizes[i]); @@ -799,7 +690,7 @@ class TensorBlockMapper { } } else { for (int i = 0; i < NumDims - 1; ++i) { - const Index idx = block_index / m_block_strides[i]; + const StorageIndex idx = block_index / m_block_strides[i]; coords[i] = idx * m_block_dim_sizes[i]; sizes[i] = numext::mini((m_dimensions[i] - coords[i]), m_block_dim_sizes[i]); @@ -824,19 +715,20 @@ class TensorBlockMapper { data); } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index total_block_count() const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex total_block_count() const { return m_total_block_count; } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index block_dims_total_size() const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex + block_dims_total_size() const { return m_block_dim_sizes.TotalSize(); } private: static Dimensions BlockDimensions(const Dimensions& tensor_dims, const TensorBlockShapeType block_shape, - size_t min_target_size) { - min_target_size = numext::maxi(1, min_target_size); + Index min_target_size) { + min_target_size = numext::maxi(1, min_target_size); // If tensor fully fits into the target size, we'll treat it a single block. Dimensions block_dim_sizes = tensor_dims; @@ -865,14 +757,14 @@ class TensorBlockMapper { dim_size_target, static_cast(tensor_dims[i])); } // Add any un-allocated coefficients to inner dimension(s). - Index total_size = block_dim_sizes.TotalSize(); + StorageIndex total_size = block_dim_sizes.TotalSize(); for (int i = 0; i < NumDims; ++i) { const int dim = cond()(i, NumDims - i - 1); if (block_dim_sizes[dim] < tensor_dims[dim]) { - const Index total_size_other_dims = + const StorageIndex total_size_other_dims = total_size / block_dim_sizes[dim]; - const Index alloc_avail = - divup(min_target_size, total_size_other_dims); + const StorageIndex alloc_avail = + divup(min_target_size, total_size_other_dims); if (alloc_avail == block_dim_sizes[dim]) { // Insufficient excess coefficients to allocate. 
break; @@ -882,14 +774,14 @@ class TensorBlockMapper { } } } else if (block_shape == TensorBlockShapeType::kSkewedInnerDims) { - Index coeff_to_allocate = min_target_size; + StorageIndex coeff_to_allocate = min_target_size; for (int i = 0; i < NumDims; ++i) { const int dim = cond()(i, NumDims - i - 1); block_dim_sizes[dim] = numext::mini(coeff_to_allocate, tensor_dims[dim]); - coeff_to_allocate = - divup(coeff_to_allocate, - numext::maxi(static_cast(1), block_dim_sizes[dim])); + coeff_to_allocate = divup( + coeff_to_allocate, + numext::maxi(static_cast(1), block_dim_sizes[dim])); } eigen_assert(coeff_to_allocate == 1); } else { @@ -908,7 +800,7 @@ class TensorBlockMapper { Dimensions m_block_dim_sizes; Dimensions m_block_strides; Dimensions m_tensor_strides; - Index m_total_block_count; + StorageIndex m_total_block_count; }; /** @@ -923,12 +815,12 @@ class TensorBlockMapper { * processed together. * */ -template +template class TensorSliceBlockMapper { public: - typedef typename internal::TensorBlock + typedef typename internal::TensorBlock TensorBlock; - typedef DSizes Dimensions; + typedef DSizes Dimensions; TensorSliceBlockMapper(const Dimensions& tensor_dims, const Dimensions& tensor_slice_offsets, @@ -942,7 +834,7 @@ class TensorSliceBlockMapper { m_block_stride_order(block_stride_order), m_total_block_count(1) { // Calculate block counts by dimension and total block count. - DSizes block_count; + DSizes block_count; for (size_t i = 0; i < block_count.rank(); ++i) { block_count[i] = divup(m_tensor_slice_extents[i], m_block_dim_sizes[i]); } @@ -969,11 +861,11 @@ class TensorSliceBlockMapper { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock - GetBlockForIndex(Index block_index, Scalar* data) const { - Index first_coeff_index = 0; - DSizes coords; - DSizes sizes; - DSizes strides; + GetBlockForIndex(StorageIndex block_index, Scalar* data) const { + StorageIndex first_coeff_index = 0; + DSizes coords; + DSizes sizes; + DSizes strides; if (static_cast(Layout) == static_cast(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { const Index idx = block_index / m_block_strides[i]; @@ -991,16 +883,16 @@ class TensorSliceBlockMapper { m_block_dim_sizes[0]); first_coeff_index += coords[0] * m_tensor_strides[0]; - Index prev_dim = m_block_stride_order[0]; + StorageIndex prev_dim = m_block_stride_order[0]; strides[prev_dim] = 1; for (int i = 1; i < NumDims; ++i) { - const Index curr_dim = m_block_stride_order[i]; + const StorageIndex curr_dim = m_block_stride_order[i]; strides[curr_dim] = strides[prev_dim] * sizes[prev_dim]; prev_dim = curr_dim; } } else { for (int i = 0; i < NumDims - 1; ++i) { - const Index idx = block_index / m_block_strides[i]; + const StorageIndex idx = block_index / m_block_strides[i]; coords[i] = m_tensor_slice_offsets[i] + idx * m_block_dim_sizes[i]; sizes[i] = numext::mini( m_tensor_slice_offsets[i] + m_tensor_slice_extents[i] - coords[i], @@ -1016,10 +908,10 @@ class TensorSliceBlockMapper { m_block_dim_sizes[NumDims - 1]); first_coeff_index += coords[NumDims - 1] * m_tensor_strides[NumDims - 1]; - Index prev_dim = m_block_stride_order[NumDims - 1]; + StorageIndex prev_dim = m_block_stride_order[NumDims - 1]; strides[prev_dim] = 1; for (int i = NumDims - 2; i >= 0; --i) { - const Index curr_dim = m_block_stride_order[i]; + const StorageIndex curr_dim = m_block_stride_order[i]; strides[curr_dim] = strides[prev_dim] * sizes[prev_dim]; prev_dim = curr_dim; } @@ -1029,7 +921,7 @@ class TensorSliceBlockMapper { data); } - EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index 
total_block_count() const { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE StorageIndex total_block_count() const { return m_total_block_count; } @@ -1041,7 +933,7 @@ class TensorSliceBlockMapper { Dimensions m_block_dim_sizes; Dimensions m_block_stride_order; Dimensions m_block_strides; - Index m_total_block_count; + StorageIndex m_total_block_count; }; } // namespace internal diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h index 7ff0d323b..343ab6269 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h @@ -1,5 +1,4 @@ // This file is part of Eigen, a lightweight C++ template library -// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h index ba02802d2..f9a1bd68c 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h @@ -51,12 +51,10 @@ struct TensorEvaluator typename internal::remove_const::type, Index, NumCoords, Layout> TensorBlock; typedef typename internal::TensorBlockReader< - typename internal::remove_const::type, Index, NumCoords, Layout, - PacketAccess> + typename internal::remove_const::type, Index, NumCoords, Layout> TensorBlockReader; typedef typename internal::TensorBlockWriter< - typename internal::remove_const::type, Index, NumCoords, Layout, - PacketAccess> + typename internal::remove_const::type, Index, NumCoords, Layout> TensorBlockWriter; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) @@ -204,8 +202,7 @@ struct TensorEvaluator typename internal::remove_const::type, Index, NumCoords, Layout> TensorBlock; typedef typename internal::TensorBlockReader< - typename internal::remove_const::type, Index, NumCoords, Layout, - PacketAccess> + typename internal::remove_const::type, Index, NumCoords, Layout> TensorBlockReader; // Used for accessor extraction in SYCL Managed TensorMap: diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h index 024de3696..ac5afd891 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -36,15 +36,16 @@ template class TensorExecutor { public: - typedef typename Expression::Index Index; + using StorageIndex = typename Expression::Index; + EIGEN_DEVICE_FUNC static inline void run(const Expression& expr, const Device& device = Device()) { TensorEvaluator evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) { - const Index size = array_prod(evaluator.dimensions()); - for (Index i = 0; i < size; ++i) { + const StorageIndex size = array_prod(evaluator.dimensions()); + for (StorageIndex i = 0; i < size; ++i) { evaluator.evalScalar(i); } } @@ -56,35 +57,36 @@ class TensorExecutor { * Process all the data with a single cpu thread, using vectorized instructions. 
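 * As an illustration (assumed numbers, not taken from this patch): with
 * size == 103 and a packet size of 4, the unrolled loop covers the first 96
 * coefficients (six iterations of four packets each), one extra packet covers
 * [96, 100), and the last three coefficients fall back to evalScalar.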
*/ template -class TensorExecutor { +class TensorExecutor { public: - typedef typename Expression::Index Index; + using StorageIndex = typename Expression::Index; EIGEN_DEVICE_FUNC - static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice()) - { + static inline void run(const Expression& expr, + const DefaultDevice& device = DefaultDevice()) { TensorEvaluator evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); - if (needs_assign) - { - const Index size = array_prod(evaluator.dimensions()); + if (needs_assign) { + const StorageIndex size = array_prod(evaluator.dimensions()); const int PacketSize = unpacket_traits::PacketReturnType>::size; // Give compiler a strong possibility to unroll the loop. But don't insist // on unrolling, because if the function is expensive compiler should not // unroll the loop at the expense of inlining. - const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize; - for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) { - for (Index j = 0; j < 4; j++) { + const StorageIndex UnrolledSize = + (size / (4 * PacketSize)) * 4 * PacketSize; + for (StorageIndex i = 0; i < UnrolledSize; i += 4 * PacketSize) { + for (StorageIndex j = 0; j < 4; j++) { evaluator.evalPacket(i + j * PacketSize); } } - const Index VectorizedSize = (size / PacketSize) * PacketSize; - for (Index i = UnrolledSize; i < VectorizedSize; i += PacketSize) { + const StorageIndex VectorizedSize = (size / PacketSize) * PacketSize; + for (StorageIndex i = UnrolledSize; i < VectorizedSize; i += PacketSize) { evaluator.evalPacket(i); } - for (Index i = VectorizedSize; i < size; ++i) { + for (StorageIndex i = VectorizedSize; i < size; ++i) { evaluator.evalScalar(i); } } @@ -97,42 +99,41 @@ class TensorExecutor -class TensorExecutor { +class TensorExecutor { public: - typedef typename Expression::Index Index; + using Scalar = typename traits::Scalar; + using ScalarNoConst = typename remove_const::type; + + using Evaluator = TensorEvaluator; + using StorageIndex = typename traits::Index; + + static const int NumDims = traits::NumDimensions; EIGEN_DEVICE_FUNC static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice()) { - using Evaluator = TensorEvaluator; - - using Index = typename traits::Index; - const int NumDims = traits::NumDimensions; - - using Scalar = typename traits::Scalar; - using ScalarNoConst = typename remove_const::type; - using TensorBlock = - TensorBlock; - using TensorBlockMapper = - TensorBlockMapper; + TensorBlock; + using TensorBlockMapper = TensorBlockMapper; Evaluator evaluator(expr, device); - std::size_t total_size = array_prod(evaluator.dimensions()); - std::size_t cache_size = device.firstLevelCacheSize() / sizeof(Scalar); + Index total_size = array_prod(evaluator.dimensions()); + Index cache_size = device.firstLevelCacheSize() / sizeof(Scalar); if (total_size < cache_size) { // TODO(andydavis) Reduce block management overhead for small tensors. // TODO(wuke) Do not do this when evaluating TensorBroadcastingOp. internal::TensorExecutor::run(expr, device); + /*Tileable*/ false>::run(expr, device); return; } const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) { // Size tensor blocks to fit in cache (or requested target block size). 
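      // Illustrative numbers (assumed cache size, not part of the patch): with
      // a 32 KB first-level cache and sizeof(Scalar) == 4, cache_size is 8192
      // coefficients. A 100x100 float tensor (10000 coefficients) is therefore
      // tiled; kSkewedInnerDims keeps the ColMajor inner dimension whole and
      // produces 100x82 blocks, so the tensor is evaluated in two blocks (the
      // second one clipped to 100x18).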
- size_t block_total_size = numext::mini(cache_size, total_size); + Index block_total_size = numext::mini(cache_size, total_size); TensorBlockShapeType block_shape = TensorBlockShapeType::kSkewedInnerDims; // Query expression tree for desired block size/shape. std::vector resources; @@ -146,8 +147,8 @@ class TensorExecutor Scalar* data = static_cast( device.allocate(block_total_size * sizeof(Scalar))); - const Index total_block_count = block_mapper.total_block_count(); - for (Index i = 0; i < total_block_count; ++i) { + const StorageIndex total_block_count = block_mapper.total_block_count(); + for (StorageIndex i = 0; i < total_block_count; ++i) { TensorBlock block = block_mapper.GetBlockForIndex(i, data); evaluator.evalBlock(&block); } @@ -162,37 +163,38 @@ class TensorExecutor * executed on a single core. */ #ifdef EIGEN_USE_THREADS -template +template struct EvalRange { - static void run(Evaluator* evaluator_in, const Index first, const Index last) { + static void run(Evaluator* evaluator_in, const StorageIndex first, + const StorageIndex last) { Evaluator evaluator = *evaluator_in; eigen_assert(last >= first); - for (Index i = first; i < last; ++i) { + for (StorageIndex i = first; i < last; ++i) { evaluator.evalScalar(i); } } - static Index alignBlockSize(Index size) { - return size; - } + static StorageIndex alignBlockSize(StorageIndex size) { return size; } }; -template -struct EvalRange { - static const int PacketSize = unpacket_traits::size; +template +struct EvalRange { + static const int PacketSize = + unpacket_traits::size; - static void run(Evaluator* evaluator_in, const Index first, const Index last) { + static void run(Evaluator* evaluator_in, const StorageIndex first, + const StorageIndex last) { Evaluator evaluator = *evaluator_in; eigen_assert(last >= first); - Index i = first; + StorageIndex i = first; if (last - first >= PacketSize) { eigen_assert(first % PacketSize == 0); - Index last_chunk_offset = last - 4 * PacketSize; + StorageIndex last_chunk_offset = last - 4 * PacketSize; // Give compiler a strong possibility to unroll the loop. But don't insist // on unrolling, because if the function is expensive compiler should not // unroll the loop at the expense of inlining. - for (; i <= last_chunk_offset; i += 4*PacketSize) { - for (Index j = 0; j < 4; j++) { + for (; i <= last_chunk_offset; i += 4 * PacketSize) { + for (StorageIndex j = 0; j < 4; j++) { evaluator.evalPacket(i + j * PacketSize); } } @@ -206,7 +208,7 @@ struct EvalRange { } } - static Index alignBlockSize(Index size) { + static StorageIndex alignBlockSize(StorageIndex size) { // Align block size to packet size and account for unrolling in run above. if (size >= 16 * PacketSize) { return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1); @@ -219,24 +221,24 @@ struct EvalRange { template class TensorExecutor { public: - typedef typename Expression::Index Index; + using StorageIndex = typename Expression::Index; static inline void run(const Expression& expr, const ThreadPoolDevice& device) { typedef TensorEvaluator Evaluator; - typedef EvalRange EvalRange; + typedef EvalRange EvalRange; Evaluator evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); if (needs_assign) { - const Index PacketSize = + const StorageIndex PacketSize = Vectorizable ? 
unpacket_traits::size : 1; - const Index size = array_prod(evaluator.dimensions()); + const StorageIndex size = array_prod(evaluator.dimensions()); device.parallelFor(size, evaluator.costPerCoeff(Vectorizable), EvalRange::alignBlockSize, - [&evaluator](Index first, Index last) { + [&evaluator](StorageIndex first, StorageIndex last) { EvalRange::run(&evaluator, first, last); }); } @@ -247,24 +249,24 @@ class TensorExecutor { template class TensorExecutor { public: - typedef typename Expression::Index Index; + using Scalar = typename traits::Scalar; + using ScalarNoConst = typename remove_const::type; + + using Evaluator = TensorEvaluator; + using StorageIndex = typename traits::Index; + + static const int NumDims = traits::NumDimensions; static inline void run(const Expression& expr, const ThreadPoolDevice& device) { - typedef TensorEvaluator Evaluator; - typedef typename internal::remove_const< - typename traits::Scalar>::type Scalar; - typedef typename traits::Index Index; - - static const int NumDims = traits::NumDimensions; - - typedef TensorBlock TensorBlock; - typedef TensorBlockMapper - TensorBlockMapper; + using TensorBlock = + TensorBlock; + using TensorBlockMapper = + TensorBlockMapper; Evaluator evaluator(expr, device); - std::size_t total_size = array_prod(evaluator.dimensions()); - std::size_t cache_size = device.firstLevelCacheSize() / sizeof(Scalar); + StorageIndex total_size = array_prod(evaluator.dimensions()); + StorageIndex cache_size = device.firstLevelCacheSize() / sizeof(Scalar); if (total_size < cache_size) { // TODO(andydavis) Reduce block management overhead for small tensors. internal::TensorExecutor resources; evaluator.getResourceRequirements(&resources); @@ -296,15 +298,16 @@ class TensorExecutor= -1 && thread_idx < num_threads); Scalar* thread_buf = reinterpret_cast( static_cast(buf) + aligned_blocksize * (thread_idx + 1)); - for (Index i = first; i < last; ++i) { + for (StorageIndex i = first; i < last; ++i) { auto block = block_mapper.GetBlockForIndex(i, thread_buf); evaluator.evalBlock(&block); } @@ -324,51 +327,51 @@ class TensorExecutor class TensorExecutor { public: - typedef typename Expression::Index Index; + typedef typename Expression::Index StorageIndex; static void run(const Expression& expr, const GpuDevice& device); }; #if defined(EIGEN_GPUCC) -template +template struct EigenMetaKernelEval { static __device__ EIGEN_ALWAYS_INLINE - void run(Evaluator& eval, Index first, Index last, Index step_size) { - for (Index i = first; i < last; i += step_size) { + void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) { + for (StorageIndex i = first; i < last; i += step_size) { eval.evalScalar(i); } } }; -template -struct EigenMetaKernelEval { +template +struct EigenMetaKernelEval { static __device__ EIGEN_ALWAYS_INLINE - void run(Evaluator& eval, Index first, Index last, Index step_size) { - const Index PacketSize = unpacket_traits::size; - const Index vectorized_size = (last / PacketSize) * PacketSize; - const Index vectorized_step_size = step_size * PacketSize; + void run(Evaluator& eval, StorageIndex first, StorageIndex last, StorageIndex step_size) { + const StorageIndex PacketSize = unpacket_traits::size; + const StorageIndex vectorized_size = (last / PacketSize) * PacketSize; + const StorageIndex vectorized_step_size = step_size * PacketSize; // Use the vector path - for (Index i = first * PacketSize; i < vectorized_size; + for (StorageIndex i = first * PacketSize; i < vectorized_size; i += vectorized_step_size) { 
eval.evalPacket(i); } - for (Index i = vectorized_size + first; i < last; i += step_size) { + for (StorageIndex i = vectorized_size + first; i < last; i += step_size) { eval.evalScalar(i); } } }; -template +template __global__ void __launch_bounds__(1024) -EigenMetaKernel(Evaluator eval, Index size) { +EigenMetaKernel(Evaluator eval, StorageIndex size) { - const Index first_index = blockIdx.x * blockDim.x + threadIdx.x; - const Index step_size = blockDim.x * gridDim.x; + const StorageIndex first_index = blockIdx.x * blockDim.x + threadIdx.x; + const StorageIndex step_size = blockDim.x * gridDim.x; const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned; - EigenMetaKernelEval::run(eval, first_index, size, step_size); + EigenMetaKernelEval::run(eval, first_index, size, step_size); } /*static*/ @@ -382,12 +385,12 @@ inline void TensorExecutor::run( const int block_size = device.maxGpuThreadsPerBlock(); const int max_blocks = device.getNumGpuMultiProcessors() * device.maxGpuThreadsPerMultiProcessor() / block_size; - const Index size = array_prod(evaluator.dimensions()); + const StorageIndex size = array_prod(evaluator.dimensions()); // Create a least one block to ensure we won't crash when tensorflow calls with tensors of size 0. const int num_blocks = numext::maxi(numext::mini(max_blocks, divup(size, block_size)), 1); LAUNCH_GPU_KERNEL( - (EigenMetaKernel, Index>), + (EigenMetaKernel, StorageIndex>), num_blocks, block_size, 0, device, evaluator, size); } evaluator.cleanup(); diff --git a/unsupported/test/cxx11_tensor_block_access.cpp b/unsupported/test/cxx11_tensor_block_access.cpp index 416b686e4..6feeff231 100644 --- a/unsupported/test/cxx11_tensor_block_access.cpp +++ b/unsupported/test/cxx11_tensor_block_access.cpp @@ -37,6 +37,31 @@ static std::size_t RandomTargetSize(const DSizes& dims) { return internal::random(1, dims.TotalSize()); } +template +static DSizes RandomDims() { + array dims; + for (int i = 0; i < NumDims; ++i) { + dims[i] = internal::random(1, 20); + } + return DSizes(dims); +}; + +/** Dummy data type to test TensorBlock copy ops. */ +struct Data { + Data() : Data(0) {} + explicit Data(int v) { value = v; } + int value; +}; + +bool operator==(const Data& lhs, const Data& rhs) { + return lhs.value == rhs.value; +} + +std::ostream& operator<<(std::ostream& os, const Data& d) { + os << "Data: value=" << d.value; + return os; +} + template static T* GenerateRandomData(const Index& size) { T* data = new T[size]; @@ -46,6 +71,23 @@ static T* GenerateRandomData(const Index& size) { return data; } +template <> +Data* GenerateRandomData(const Index& size) { + Data* data = new Data[size]; + for (int i = 0; i < size; ++i) { + data[i] = Data(internal::random(1, 100)); + } + return data; +} + +template +static void Debug(DSizes dims) { + for (int i = 0; i < NumDims; ++i) { + std::cout << dims[i] << "; "; + } + std::cout << std::endl; +} + template static void test_block_mapper_sanity() { @@ -96,7 +138,7 @@ static void test_block_mapper_sanity() // index in the visited set. Verify that every coeff accessed only once. 
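// For example (illustrative numbers): a ColMajor 2x3 block whose first
// coefficient sits at tensor index 10 in a tensor with strides {1, 5} visits
// exactly {10, 11, 15, 16, 20, 21}; the union of these sets over all blocks
// must therefore be the contiguous range [0, dims.TotalSize()).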
template static void UpdateCoeffSet( - const internal::TensorBlock& block, + const internal::TensorBlock& block, Index first_coeff_index, int dim_index, std::set* visited_coeffs) { const DSizes block_sizes = block.block_sizes(); const DSizes tensor_strides = block.tensor_strides(); @@ -114,14 +156,13 @@ static void UpdateCoeffSet( } } -template -static void test_block_mapper_maps_every_element() -{ - using T = int; - using TensorBlock = internal::TensorBlock; - using TensorBlockMapper = internal::TensorBlockMapper; +template +static void test_block_mapper_maps_every_element() { + using TensorBlock = internal::TensorBlock; + using TensorBlockMapper = + internal::TensorBlockMapper; - DSizes dims(5, 7, 11, 17); + DSizes dims = RandomDims(); // Keep track of elements indices available via block access. std::set coeff_set; @@ -131,29 +172,36 @@ static void test_block_mapper_maps_every_element() for (int i = 0; i < block_mapper.total_block_count(); ++i) { TensorBlock block = block_mapper.GetBlockForIndex(i, nullptr); - UpdateCoeffSet(block, block.first_coeff_index(), - choose(Layout, 3, 0), &coeff_set); + UpdateCoeffSet(block, block.first_coeff_index(), + choose(Layout, NumDims - 1, 0), + &coeff_set); } // Verify that every coefficient in the original Tensor is accessible through // TensorBlock only once. - auto total_coeffs = static_cast(dims.TotalSize()); + Index total_coeffs = dims.TotalSize(); VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs); - VERIFY_IS_EQUAL(*coeff_set.begin(), static_cast(0)); - VERIFY_IS_EQUAL(*coeff_set.rbegin(), static_cast(total_coeffs - 1)); + VERIFY_IS_EQUAL(*coeff_set.begin(), 0); + VERIFY_IS_EQUAL(*coeff_set.rbegin(), total_coeffs - 1); } -template -static void test_slice_block_mapper_maps_every_element() -{ - using T = int; - using TensorBlock = internal::TensorBlock; +template +static void test_slice_block_mapper_maps_every_element() { + using TensorBlock = internal::TensorBlock; using TensorSliceBlockMapper = - internal::TensorSliceBlockMapper; + internal::TensorSliceBlockMapper; - DSizes tensor_dims(5,7,11,17); - DSizes tensor_slice_offsets(1,3,5,7); - DSizes tensor_slice_extents(3,2,4,5); + DSizes tensor_dims = RandomDims(); + DSizes tensor_slice_offsets = RandomDims(); + DSizes tensor_slice_extents = RandomDims(); + + // Make sure that tensor offsets + extents do not overflow. + for (int i = 0; i < NumDims; ++i) { + tensor_slice_offsets[i] = + numext::mini(tensor_dims[i] - 1, tensor_slice_offsets[i]); + tensor_slice_extents[i] = numext::mini( + tensor_slice_extents[i], tensor_dims[i] - tensor_slice_offsets[i]); + } // Keep track of elements indices available via block access. std::set coeff_set; @@ -161,61 +209,59 @@ static void test_slice_block_mapper_maps_every_element() auto total_coeffs = static_cast(tensor_slice_extents.TotalSize()); // Pick a random dimension sizes for the tensor blocks. 
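  // Illustrative slice example (assumed numbers): for tensor_dims {10, 10},
  // slice offsets {2, 3} and extents {4, 5}, every produced block stays inside
  // the 4x5 slice, so coeff_set must end up with exactly 20 tensor indices.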
- DSizes block_sizes; - for (int i = 0; i < 4; ++i) { + DSizes block_sizes; + for (int i = 0; i < NumDims; ++i) { block_sizes[i] = internal::random(1, tensor_slice_extents[i]); } TensorSliceBlockMapper block_mapper(tensor_dims, tensor_slice_offsets, tensor_slice_extents, block_sizes, - DimensionList()); + DimensionList()); for (int i = 0; i < block_mapper.total_block_count(); ++i) { TensorBlock block = block_mapper.GetBlockForIndex(i, nullptr); - UpdateCoeffSet(block, block.first_coeff_index(), - choose(Layout, 3, 0), &coeff_set); + UpdateCoeffSet(block, block.first_coeff_index(), + choose(Layout, NumDims - 1, 0), + &coeff_set); } VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs); } -template -static void test_block_io_copy_data_from_source_to_target() -{ - using T = float; +template +static void test_block_io_copy_data_from_source_to_target() { + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockMapper + TensorBlockMapper; - typedef internal::TensorBlock TensorBlock; - typedef internal::TensorBlockMapper TensorBlockMapper; - - typedef internal::TensorBlockReader + typedef internal::TensorBlockReader TensorBlockReader; - typedef internal::TensorBlockWriter + typedef internal::TensorBlockWriter TensorBlockWriter; - typedef std::vector> DataVector; - - DSizes input_tensor_dims(5, 7, 11, 17, 3); + DSizes input_tensor_dims = RandomDims(); const auto input_tensor_size = input_tensor_dims.TotalSize(); - DataVector input_data(input_tensor_size, 0); - for (int i = 0; i < input_tensor_size; ++i) { - input_data[i] = internal::random(); - } - DataVector output_data(input_tensor_size, 0); + T* input_data = GenerateRandomData(input_tensor_size); + T* output_data = new T[input_tensor_size]; TensorBlockMapper block_mapper(input_tensor_dims, RandomShape(), RandomTargetSize(input_tensor_dims)); + T* block_data = new T[block_mapper.block_dims_total_size()]; - DataVector block_data(block_mapper.block_dims_total_size(), 0); for (int i = 0; i < block_mapper.total_block_count(); ++i) { - TensorBlock block = block_mapper.GetBlockForIndex(i, block_data.data()); - TensorBlockReader::Run(&block, input_data.data()); - TensorBlockWriter::Run(block, output_data.data()); + TensorBlock block = block_mapper.GetBlockForIndex(i, block_data); + TensorBlockReader::Run(&block, input_data); + TensorBlockWriter::Run(block, output_data); } for (int i = 0; i < input_tensor_size; ++i) { VERIFY_IS_EQUAL(input_data[i], output_data[i]); } + + delete[] input_data; + delete[] output_data; + delete[] block_data; } template @@ -261,31 +307,32 @@ static array ComputeStrides( return strides; } -template +template static void test_block_io_copy_using_reordered_dimensions() { - typedef internal::TensorBlock TensorBlock; - typedef internal::TensorBlockMapper + typedef internal::TensorBlock TensorBlock; + typedef internal::TensorBlockMapper TensorBlockMapper; - typedef internal::TensorBlockReader + typedef internal::TensorBlockReader TensorBlockReader; - typedef internal::TensorBlockWriter + typedef internal::TensorBlockWriter TensorBlockWriter; - DSizes input_tensor_dims(5, 7, 11, 17, 3); + DSizes input_tensor_dims = RandomDims(); const auto input_tensor_size = input_tensor_dims.TotalSize(); // Create a random input tensor. - auto* input_data = GenerateRandomData(input_tensor_size); + T* input_data = GenerateRandomData(input_tensor_size); // Create a random dimension re-ordering/shuffle. 
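  // For instance (illustrative): with NumDims == 3 and shuffle == {2, 0, 1},
  // input dimension i maps to output dimension shuffle[i], so
  // output_tensor_dims == {in[1], in[2], in[0]}, input_to_output_dim_map ==
  // {2, 0, 1} and output_to_input_dim_map == {1, 2, 0}.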
- std::vector shuffle = {0, 1, 2, 3, 4}; + std::vector shuffle; + for (int i = 0; i < NumDims; ++i) shuffle.push_back(i); std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937()); - DSizes output_tensor_dims; - array input_to_output_dim_map; - array output_to_input_dim_map; - for (Index i = 0; i < 5; ++i) { + DSizes output_tensor_dims; + array input_to_output_dim_map; + array output_to_input_dim_map; + for (Index i = 0; i < NumDims; ++i) { output_tensor_dims[shuffle[i]] = input_tensor_dims[i]; input_to_output_dim_map[i] = shuffle[i]; output_to_input_dim_map[shuffle[i]] = i; @@ -295,17 +342,17 @@ static void test_block_io_copy_using_reordered_dimensions() { TensorBlockMapper block_mapper(output_tensor_dims, RandomShape(), RandomTargetSize(input_tensor_dims)); - auto* block_data = new float[block_mapper.block_dims_total_size()]; - auto* output_data = new float[input_tensor_size]; + auto* block_data = new T[block_mapper.block_dims_total_size()]; + auto* output_data = new T[input_tensor_size]; - array input_tensor_strides = - ComputeStrides(input_tensor_dims); - array output_tensor_strides = - ComputeStrides(output_tensor_dims); + array input_tensor_strides = + ComputeStrides(input_tensor_dims); + array output_tensor_strides = + ComputeStrides(output_tensor_dims); for (Index i = 0; i < block_mapper.total_block_count(); ++i) { TensorBlock block = block_mapper.GetBlockForIndex(i, block_data); - const Index first_coeff_index = GetInputIndex( + const Index first_coeff_index = GetInputIndex( block.first_coeff_index(), output_to_input_dim_map, input_tensor_strides, output_tensor_strides); TensorBlockReader::Run(&block, first_coeff_index, input_to_output_dim_map, @@ -327,18 +374,21 @@ template static void test_block_io_zero_stride() { typedef internal::TensorBlock TensorBlock; - typedef internal::TensorBlockReader + typedef internal::TensorBlockReader TensorBlockReader; - typedef internal::TensorBlockWriter + typedef internal::TensorBlockWriter TensorBlockWriter; - DSizes input_tensor_dims(1, 2, 1, 3, 1); - const auto input_tensor_size = input_tensor_dims.TotalSize(); + DSizes rnd_dims = RandomDims<5>(); - // Create a random input tensor. + DSizes input_tensor_dims = rnd_dims; + input_tensor_dims[0] = 1; + input_tensor_dims[2] = 1; + input_tensor_dims[4] = 1; + const auto input_tensor_size = input_tensor_dims.TotalSize(); auto* input_data = GenerateRandomData(input_tensor_size); - DSizes output_tensor_dims(3, 2, 3, 3, 2); + DSizes output_tensor_dims = rnd_dims; DSizes input_tensor_strides( ComputeStrides(input_tensor_dims)); @@ -401,9 +451,9 @@ static void test_block_io_zero_stride() template static void test_block_io_squeeze_ones() { typedef internal::TensorBlock TensorBlock; - typedef internal::TensorBlockReader + typedef internal::TensorBlockReader TensorBlockReader; - typedef internal::TensorBlockWriter + typedef internal::TensorBlockWriter TensorBlockWriter; // Total size > 1. @@ -467,23 +517,23 @@ static void test_block_io_squeeze_ones() { } } -template +template static void test_block_cwise_binary_io_basic() { - typedef internal::scalar_sum_op BinaryFunctor; - typedef internal::TensorBlockCwiseBinaryIO BinaryFunctor; + typedef internal::TensorBlockCwiseBinaryIO TensorBlockCwiseBinaryIO; - DSizes block_sizes(2, 3, 5, 7, 11); - DSizes strides(ComputeStrides(block_sizes)); + DSizes block_sizes = RandomDims(); + DSizes strides(ComputeStrides(block_sizes)); const auto total_size = block_sizes.TotalSize(); // Create a random input tensors. 
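  // Aside (illustrative, not part of this test): after this patch the block
  // cwise binary op maps each strided 1-D slice with Eigen::Map plus a runtime
  // InnerStride and evaluates a single Eigen expression. A minimal standalone
  // sketch of that pattern, with assumed buffer sizes and stride:
  //
  //   float lhs_buf[12] = {}, rhs_buf[12] = {}, out_buf[12] = {};
  //   using Array1D = Eigen::Array<float, Eigen::Dynamic, 1>;
  //   using ConstMap = Eigen::Map<const Array1D, 0, Eigen::InnerStride<> >;
  //   using MutMap = Eigen::Map<Array1D, 0, Eigen::InnerStride<> >;
  //   ConstMap lhs(lhs_buf, 4, Eigen::InnerStride<>(3));  // elements 0, 3, 6, 9
  //   ConstMap rhs(rhs_buf, 4, Eigen::InnerStride<>(3));
  //   MutMap out(out_buf, 4, Eigen::InnerStride<>(3));
  //   out = lhs + rhs;  // replaces a hand-written strided loop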
- auto* left_data = GenerateRandomData(total_size); - auto* right_data = GenerateRandomData(total_size); + T* left_data = GenerateRandomData(total_size); + T* right_data = GenerateRandomData(total_size); - auto* output_data = new float[total_size]; + T* output_data = new T[total_size]; BinaryFunctor functor; TensorBlockCwiseBinaryIO::Run(functor, block_sizes, strides, output_data, strides, left_data, strides, right_data); @@ -532,13 +582,22 @@ static void test_block_cwise_binary_io_zero_strides() { Layout> TensorBlockCwiseBinaryIO; - DSizes left_sizes(1, 3, 1, 7, 1); + DSizes rnd_dims = RandomDims<5>(); + + DSizes left_sizes = rnd_dims; + left_sizes[0] = 1; + left_sizes[2] = 1; + left_sizes[4] = 1; + DSizes left_strides(ComputeStrides(left_sizes)); left_strides[0] = 0; left_strides[2] = 0; left_strides[4] = 0; - DSizes right_sizes(2, 1, 5, 1, 11); + DSizes right_sizes = rnd_dims; + right_sizes[1] = 0; + right_sizes[3] = 0; + DSizes right_strides(ComputeStrides(right_sizes)); right_strides[1] = 0; right_strides[3] = 0; @@ -547,7 +606,7 @@ static void test_block_cwise_binary_io_zero_strides() { auto* left_data = GenerateRandomData(left_sizes.TotalSize()); auto* right_data = GenerateRandomData(right_sizes.TotalSize()); - DSizes output_sizes(2, 3, 5, 7, 11); + DSizes output_sizes = rnd_dims; DSizes output_strides(ComputeStrides(output_sizes)); const auto output_total_size = output_sizes.TotalSize(); @@ -557,11 +616,11 @@ static void test_block_cwise_binary_io_zero_strides() { TensorBlockCwiseBinaryIO::Run(functor, output_sizes, output_strides, output_data, left_strides, left_data, right_strides, right_data); - for (int i = 0; i < 2; ++i) { - for (int j = 0; j < 3; ++j) { - for (int k = 0; k < 5; ++k) { - for (int l = 0; l < 7; ++l) { - for (int m = 0; m < 11; ++m) { + for (int i = 0; i < rnd_dims[0]; ++i) { + for (int j = 0; j < rnd_dims[1]; ++j) { + for (int k = 0; k < rnd_dims[2]; ++k) { + for (int l = 0; l < rnd_dims[3]; ++l) { + for (int m = 0; m < rnd_dims[4]; ++m) { Index output_index = i * output_strides[0] + j * output_strides[1] + k * output_strides[2] + l * output_strides[3] + m * output_strides[4]; @@ -893,31 +952,44 @@ static void test_empty_dims(const internal::TensorBlockShapeType block_shape) } } -#define CALL_SUBTEST_LAYOUTS(NAME) \ +#define TEST_LAYOUTS(NAME) \ CALL_SUBTEST(NAME()); \ CALL_SUBTEST(NAME()) -#define CALL_SUBTEST_LAYOUTS_WITH_ARG(NAME, ARG) \ +#define TEST_LAYOUTS_AND_DIMS(TYPE, NAME) \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())) + +#define TEST_LAYOUTS_WITH_ARG(NAME, ARG) \ CALL_SUBTEST(NAME(ARG)); \ CALL_SUBTEST(NAME(ARG)) EIGEN_DECLARE_TEST(cxx11_tensor_block_access) { - CALL_SUBTEST_LAYOUTS(test_block_mapper_sanity); - CALL_SUBTEST_LAYOUTS(test_block_mapper_maps_every_element); - CALL_SUBTEST_LAYOUTS(test_slice_block_mapper_maps_every_element); - CALL_SUBTEST_LAYOUTS(test_block_io_copy_data_from_source_to_target); - CALL_SUBTEST_LAYOUTS(test_block_io_copy_using_reordered_dimensions); - CALL_SUBTEST_LAYOUTS(test_block_io_zero_stride); - CALL_SUBTEST_LAYOUTS(test_block_io_squeeze_ones); - CALL_SUBTEST_LAYOUTS(test_block_cwise_binary_io_basic); - CALL_SUBTEST_LAYOUTS(test_block_cwise_binary_io_squeeze_ones); - CALL_SUBTEST_LAYOUTS(test_block_cwise_binary_io_zero_strides); - CALL_SUBTEST_LAYOUTS(test_uniform_block_shape); - 
CALL_SUBTEST_LAYOUTS(test_skewed_inner_dim_block_shape); - - CALL_SUBTEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kUniformAllDims); - CALL_SUBTEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kSkewedInnerDims); + TEST_LAYOUTS(test_block_mapper_sanity); + TEST_LAYOUTS_AND_DIMS(float, test_block_mapper_maps_every_element); + TEST_LAYOUTS_AND_DIMS(float, test_slice_block_mapper_maps_every_element); + TEST_LAYOUTS_AND_DIMS(float, test_block_io_copy_data_from_source_to_target); + TEST_LAYOUTS_AND_DIMS(Data, test_block_io_copy_data_from_source_to_target); + TEST_LAYOUTS_AND_DIMS(float, test_block_io_copy_using_reordered_dimensions); + TEST_LAYOUTS_AND_DIMS(Data, test_block_io_copy_using_reordered_dimensions); + TEST_LAYOUTS(test_block_io_zero_stride); + TEST_LAYOUTS(test_block_io_squeeze_ones); + TEST_LAYOUTS_AND_DIMS(float, test_block_cwise_binary_io_basic); + TEST_LAYOUTS(test_block_cwise_binary_io_squeeze_ones); + TEST_LAYOUTS(test_block_cwise_binary_io_zero_strides); + TEST_LAYOUTS(test_uniform_block_shape); + TEST_LAYOUTS(test_skewed_inner_dim_block_shape); + TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kUniformAllDims); + TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kSkewedInnerDims); } -#undef CALL_SUBTEST_LAYOUTS -#undef CALL_SUBTEST_LAYOUTS_WITH_ARG \ No newline at end of file +#undef TEST_LAYOUTS +#undef TEST_LAYOUTS_WITH_ARG \ No newline at end of file diff --git a/unsupported/test/cxx11_tensor_executor.cpp b/unsupported/test/cxx11_tensor_executor.cpp index 5ae45ac5b..274f901ce 100644 --- a/unsupported/test/cxx11_tensor_executor.cpp +++ b/unsupported/test/cxx11_tensor_executor.cpp @@ -13,7 +13,6 @@ #include -using Eigen::Index; using Eigen::Tensor; using Eigen::RowMajor; using Eigen::ColMajor; @@ -25,9 +24,16 @@ template static void test_execute_binary_expr(Device d) { // Pick a large enough tensor size to bypass small tensor block evaluation // optimization. - Tensor lhs(840, 390, 37); - Tensor rhs(840, 390, 37); - Tensor dst(840, 390, 37); + int d0 = internal::random(100, 200); + int d1 = internal::random(100, 200); + int d2 = internal::random(100, 200); + + static constexpr int Options = 0; + using IndexType = int; + + Tensor lhs(d0, d1, d2); + Tensor rhs(d0, d1, d2); + Tensor dst(d0, d1, d2); lhs.setRandom(); rhs.setRandom(); @@ -40,9 +46,9 @@ static void test_execute_binary_expr(Device d) { Executor::run(Assign(dst, expr), d); - for (int i = 0; i < 840; ++i) { - for (int j = 0; j < 390; ++j) { - for (int k = 0; k < 37; ++k) { + for (int i = 0; i < d0; ++i) { + for (int j = 0; j < d1; ++j) { + for (int k = 0; k < d2; ++k) { float sum = lhs(i, j, k) + rhs(i, j, k); VERIFY_IS_EQUAL(sum, dst(i, j, k)); }
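
// Aside (illustrative sketch; it mirrors the pattern this test exercises
// rather than adding anything new): evaluating dst = lhs + rhs through an
// explicit internal TensorExecutor specialization instead of relying on
// Tensor::operator=. The Vectorizable/Tileable flags select the scalar,
// packet, or block-based code path introduced above. The helper name and the
// tensor sizes below are placeholders, not part of the patch.

#include <unsupported/Eigen/CXX11/Tensor>

template <bool Vectorizable, bool Tileable>
static void run_binary_expr_sketch() {
  Eigen::Tensor<float, 3> lhs(64, 32, 8), rhs(64, 32, 8), dst(64, 32, 8);
  lhs.setRandom();
  rhs.setRandom();

  auto expr = lhs + rhs;

  // Build the assignment expression and hand it to a specific executor.
  using Assign = Eigen::TensorAssignOp<decltype(dst), const decltype(expr)>;
  using Executor = Eigen::internal::TensorExecutor<const Assign, Eigen::DefaultDevice,
                                                   Vectorizable, Tileable>;

  Executor::run(Assign(dst, expr), Eigen::DefaultDevice());
}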