Initial support of TensorBlock

Eugene Zhulenev 2018-07-20 17:37:20 -07:00
parent 2c2de9da7d
commit 34a75c3c5c
5 changed files with 610 additions and 8 deletions

unsupported/Eigen/CXX11/Tensor

@@ -118,6 +118,7 @@ typedef unsigned __int64 uint64_t;
#include "src/Tensor/TensorReduction.h"
#include "src/Tensor/TensorReductionGpu.h"
#include "src/Tensor/TensorArgMax.h"
#include "src/Tensor/TensorBlock.h"
#include "src/Tensor/TensorConcatenation.h"
#include "src/Tensor/TensorContractionMapper.h"
#include "src/Tensor/TensorContractionBlocking.h"

unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h

@@ -0,0 +1,412 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2018 Andy Davis <andydavis@google.com>
// Copyright (C) 2018 Eugene Zhulenev <ezhulenev@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H
#define EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H
namespace Eigen {
namespace internal {
/**
* \class TensorBlockShapeType
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor block shape type.
*
* The tensor block shape type defines the shape preference for blocks
* extracted from a larger tensor.
*
* Example:
*
* Suppose we want to extract blocks of 100 elements from a 100x100 tensor:
* - tensor: 100x100
* - target_block_size: 100
*
* TensorBlockShapeType:
* - kUniformAllDims: 100 blocks of size 10x10
* - kSkewedInnerDims: 100 blocks of size 100x1 (or 1x100, depending on
* column-major or row-major layout)
*/
enum class TensorBlockShapeType {
kUniformAllDims,
kSkewedInnerDims,
};
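// For illustration, with the 100x100 tensor and target_block_size == 100 from
// the example above (a sketch of the arithmetic, not code from this commit):
//  - kUniformAllDims: the per-dimension size target is pow(100, 1/2) == 10,
//    so blocks are 10x10 and there are (100/10) * (100/10) == 100 of them.
//  - kSkewedInnerDims: the innermost dimension is filled first, so blocks are
//    100x1 (ColMajor) or 1x100 (RowMajor), again 100 of them.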
/**
* \class TensorBlock
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor block class.
*
* This class represents a tensor block specified by the index of the
* first block coefficient, and the size of the block in each dimension.
*/
template <typename Scalar, typename Index, std::size_t NumDims, int Layout>
class TensorBlock {
public:
typedef DSizes<Index, NumDims> Dimensions;
TensorBlock(const Index first_coeff_index, const Dimensions& block_sizes,
const Dimensions& block_strides, const Dimensions& tensor_strides,
Scalar* data)
: m_first_coeff_index(first_coeff_index),
m_block_sizes(block_sizes),
m_block_strides(block_strides),
m_tensor_strides(tensor_strides),
m_data(data) {}
Index first_coeff_index() const { return m_first_coeff_index; }
const Dimensions& block_sizes() const { return m_block_sizes; }
const Dimensions& block_strides() const { return m_block_strides; }
const Dimensions& tensor_strides() const { return m_tensor_strides; }
Scalar* data() { return m_data; }
const Scalar* data() const { return m_data; }
private:
Index m_first_coeff_index;
Dimensions m_block_sizes;
Dimensions m_block_strides;
Dimensions m_tensor_strides;
Scalar* m_data; // Not owned.
};
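// For illustration (an assumed example, not code from this commit): in a
// ColMajor 100x100 tensor, tensor_strides == {1, 100}. A 10x10 block whose
// first coefficient lives at coordinates (20, 30) has
// first_coeff_index == 20 * 1 + 30 * 100 == 3020, block_sizes == {10, 10},
// and block_strides == {1, 10} (the strides within the block's own buffer).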
/**
* \class TensorBlockMapper
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor block mapper class.
*
* This class is responsible for iterating over the blocks of a tensor.
*/
template <typename Scalar, typename Index, std::size_t NumDims, int Layout>
class TensorBlockMapper {
public:
typedef typename internal::TensorBlock<Scalar, Index, NumDims, Layout>
TensorBlock;
typedef DSizes<Index, NumDims> Dimensions;
TensorBlockMapper(const Dimensions& dims,
const TensorBlockShapeType block_shape,
size_t min_target_size)
: m_dimensions(dims),
m_block_dim_sizes(BlockDimensions(dims, block_shape, min_target_size)) {
// Calculate block counts by dimension and total block count.
DSizes<Index, NumDims> block_count;
for (size_t i = 0; i < block_count.rank(); ++i) {
block_count[i] = divup(m_dimensions[i], m_block_dim_sizes[i]);
}
m_total_block_count = array_prod(block_count);
// Calculate block strides (used for enumerating blocks).
if (NumDims > 0) {
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_block_strides[0] = 1;
m_tensor_strides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_block_strides[i] = m_block_strides[i - 1] * block_count[i - 1];
m_tensor_strides[i] = m_tensor_strides[i - 1] * m_dimensions[i - 1];
}
} else {
m_block_strides[NumDims - 1] = 1;
m_tensor_strides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_block_strides[i] = m_block_strides[i + 1] * block_count[i + 1];
m_tensor_strides[i] = m_tensor_strides[i + 1] * m_dimensions[i + 1];
}
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
GetBlockForIndex(Index block_index, Scalar* data) const {
Index first_coeff_index = 0;
DSizes<Index, NumDims> coords;
DSizes<Index, NumDims> sizes;
DSizes<Index, NumDims> strides;
if (NumDims > 0) {
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = block_index / m_block_strides[i];
coords[i] = idx * m_block_dim_sizes[i];
sizes[i] =
numext::mini((m_dimensions[i] - coords[i]), m_block_dim_sizes[i]);
block_index -= idx * m_block_strides[i];
first_coeff_index += coords[i] * m_tensor_strides[i];
}
coords[0] = block_index * m_block_dim_sizes[0];
sizes[0] =
numext::mini((m_dimensions[0] - coords[0]), m_block_dim_sizes[0]);
first_coeff_index += coords[0] * m_tensor_strides[0];
strides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
strides[i] = strides[i - 1] * sizes[i - 1];
}
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = block_index / m_block_strides[i];
coords[i] = idx * m_block_dim_sizes[i];
sizes[i] =
numext::mini((m_dimensions[i] - coords[i]), m_block_dim_sizes[i]);
block_index -= idx * m_block_strides[i];
first_coeff_index += coords[i] * m_tensor_strides[i];
}
coords[NumDims - 1] = block_index * m_block_dim_sizes[NumDims - 1];
sizes[NumDims - 1] =
numext::mini((m_dimensions[NumDims - 1] - coords[NumDims - 1]),
m_block_dim_sizes[NumDims - 1]);
first_coeff_index +=
coords[NumDims - 1] * m_tensor_strides[NumDims - 1];
strides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
strides[i] = strides[i + 1] * sizes[i + 1];
}
}
}
return TensorBlock(first_coeff_index, sizes, strides, m_tensor_strides,
data);
}
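// For illustration, a trace of the ColMajor branch above (assumed example:
// a 100x100 tensor split into 10x10 blocks, so m_block_strides == {1, 10}
// and m_tensor_strides == {1, 100}); for block_index == 23:
//   i == 1: idx = 23 / 10 = 2, coords[1] = 20, block_index becomes 3
//   i == 0: coords[0] = 3 * 10 = 30
// so first_coeff_index == 30 * 1 + 20 * 100 == 2030 and sizes == {10, 10}.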
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index total_block_count() const {
return m_total_block_count;
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index block_dims_total_size() const {
return m_block_dim_sizes.TotalSize();
}
private:
static int InnerDimIndex(Index i) {
return Layout == static_cast<int>(ColMajor) ? i : NumDims - i - 1;
}
static Dimensions BlockDimensions(const Dimensions& tensor_dims,
const TensorBlockShapeType block_shape,
size_t min_target_size) {
min_target_size = numext::maxi<size_t>(1, min_target_size);
// If the tensor fully fits into the target size, treat it as a single block.
Dimensions block_dim_sizes = tensor_dims;
if (tensor_dims.TotalSize() == 0) {
// Corner case: one of the dimensions is zero. The logic below is too
// complex to handle this case in general; just use a unit block size.
// Note: we must not yield blocks with zero dimensions (recipe for
// overflows/underflows, divisions by zero and NaNs later).
for (int i = 0; i < NumDims; ++i) {
block_dim_sizes[i] = 1;
}
} else if (block_dim_sizes.TotalSize() > min_target_size) {
if (block_shape == TensorBlockShapeType::kUniformAllDims) {
// The tensor will not fit within the 'min_target_size' budget: calculate
// block dimension sizes based on "square" dimension size target.
const size_t dim_size_target = static_cast<size_t>(
std::pow(static_cast<float>(min_target_size),
1.0 / static_cast<float>(block_dim_sizes.rank())));
for (size_t i = 0; i < block_dim_sizes.rank(); ++i) {
// TODO(andydavis) Adjust the inner most 'block_dim_size' to make it
// a multiple of the packet size. Note that reducing
// 'block_dim_size' in this manner can increase the number of
// blocks, and so will amplify any per-block overhead.
block_dim_sizes[i] = numext::mini(
dim_size_target, static_cast<size_t>(tensor_dims[i]));
}
// Add any un-allocated coefficients to inner dimension(s).
Index total_size = block_dim_sizes.TotalSize();
for (int i = 0; i < NumDims; ++i) {
const int dim = InnerDimIndex(i);
if (block_dim_sizes[dim] < tensor_dims[dim]) {
const Index total_size_other_dims =
total_size / block_dim_sizes[dim];
const Index alloc_avail =
divup<Index>(min_target_size, total_size_other_dims);
if (alloc_avail == block_dim_sizes[dim]) {
// Insufficient excess coefficients to allocate.
break;
}
block_dim_sizes[dim] = numext::mini(tensor_dims[dim], alloc_avail);
total_size = total_size_other_dims * block_dim_sizes[dim];
}
}
} else if (block_shape == TensorBlockShapeType::kSkewedInnerDims) {
Index coeff_to_allocate = min_target_size;
for (int i = 0; i < NumDims; ++i) {
const int dim = InnerDimIndex(i);
block_dim_sizes[dim] =
numext::mini(coeff_to_allocate, tensor_dims[dim]);
coeff_to_allocate =
divup(coeff_to_allocate,
numext::maxi(static_cast<Index>(1), block_dim_sizes[dim]));
}
eigen_assert(coeff_to_allocate == 1);
} else {
eigen_assert(false); // someone added a new block shape type
}
}
eigen_assert(
block_dim_sizes.TotalSize() >=
numext::mini<size_t>(min_target_size, tensor_dims.TotalSize()));
return block_dim_sizes;
}
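// For illustration, a trace of the kSkewedInnerDims branch above (assumed
// example): tensor_dims == {5, 7, 11, 17}, ColMajor, min_target_size == 100:
//   dim 0: size = min(100, 5) = 5, coeff_to_allocate = divup(100, 5) = 20
//   dim 1: size = min(20, 7)  = 7, coeff_to_allocate = divup(20, 7)  = 3
//   dim 2: size = min(3, 11)  = 3, coeff_to_allocate = divup(3, 3)   = 1
//   dim 3: size = min(1, 17)  = 1
// yielding 5x7x3x1 blocks of 105 coefficients (>= the 100 requested).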
Dimensions m_dimensions;
Dimensions m_block_dim_sizes;
Dimensions m_block_strides;
Dimensions m_tensor_strides;
Index m_total_block_count;
};
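// A minimal usage sketch (illustrative only, not part of this commit): a
// caller would allocate a scratch buffer of block_dims_total_size()
// coefficients and enumerate all blocks by index:
//
//   DSizes<Index, 2> dims(100, 100);
//   TensorBlockMapper<float, Index, 2, ColMajor> mapper(
//       dims, TensorBlockShapeType::kUniformAllDims, /*min_target_size=*/100);
//   std::vector<float> scratch(mapper.block_dims_total_size());
//   for (Index i = 0; i < mapper.total_block_count(); ++i) {
//     auto block = mapper.GetBlockForIndex(i, scratch.data());
//     // Copy coefficients in/out using block.first_coeff_index() and
//     // block.tensor_strides().
//   }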
/**
* \class TensorSliceBlockMapper
* \ingroup CXX11_Tensor_Module
*
* \brief Tensor slice block mapper class.
*
* This class is responsible for iterating over the blocks of
* a slice of a tensor. Supports shuffling of the block strides
* for callers that want to reduce strides for dimensions to be
* processed together.
*
*/
template <typename Scalar, typename Index, std::size_t NumDims, int Layout>
class TensorSliceBlockMapper {
public:
typedef typename internal::TensorBlock<Scalar, Index, NumDims, Layout>
TensorBlock;
typedef DSizes<Index, NumDims> Dimensions;
TensorSliceBlockMapper(const Dimensions& tensor_dims,
const Dimensions& tensor_slice_offsets,
const Dimensions& tensor_slice_extents,
const Dimensions& block_dim_sizes,
const Dimensions& block_stride_order)
: m_tensor_dimensions(tensor_dims),
m_tensor_slice_offsets(tensor_slice_offsets),
m_tensor_slice_extents(tensor_slice_extents),
m_block_dim_sizes(block_dim_sizes),
m_block_stride_order(block_stride_order),
m_total_block_count(1) {
// Calculate block counts by dimension and total block count.
DSizes<Index, NumDims> block_count;
for (size_t i = 0; i < block_count.rank(); ++i) {
block_count[i] = divup(m_tensor_slice_extents[i], m_block_dim_sizes[i]);
}
m_total_block_count = array_prod(block_count);
// Calculate block strides (used for enumerating blocks).
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
m_block_strides[0] = 1;
m_tensor_strides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_block_strides[i] = m_block_strides[i - 1] * block_count[i - 1];
m_tensor_strides[i] =
m_tensor_strides[i - 1] * m_tensor_dimensions[i - 1];
}
} else {
m_block_strides[NumDims - 1] = 1;
m_tensor_strides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_block_strides[i] = m_block_strides[i + 1] * block_count[i + 1];
m_tensor_strides[i] =
m_tensor_strides[i + 1] * m_tensor_dimensions[i + 1];
}
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
GetBlockForIndex(Index block_index, Scalar* data) const {
Index first_coeff_index = 0;
DSizes<Index, NumDims> coords;
DSizes<Index, NumDims> sizes;
DSizes<Index, NumDims> strides;
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = block_index / m_block_strides[i];
coords[i] = m_tensor_slice_offsets[i] + idx * m_block_dim_sizes[i];
sizes[i] = numext::mini(
m_tensor_slice_offsets[i] + m_tensor_slice_extents[i] - coords[i],
m_block_dim_sizes[i]);
block_index -= idx * m_block_strides[i];
first_coeff_index += coords[i] * m_tensor_strides[i];
}
coords[0] =
m_tensor_slice_offsets[0] + block_index * m_block_dim_sizes[0];
sizes[0] = numext::mini(
m_tensor_slice_offsets[0] + m_tensor_slice_extents[0] - coords[0],
m_block_dim_sizes[0]);
first_coeff_index += coords[0] * m_tensor_strides[0];
Index prev_dim = m_block_stride_order[0];
strides[prev_dim] = 1;
for (int i = 1; i < NumDims; ++i) {
const Index curr_dim = m_block_stride_order[i];
strides[curr_dim] = strides[prev_dim] * sizes[prev_dim];
prev_dim = curr_dim;
}
} else {
for (int i = 0; i < static_cast<int>(NumDims) - 1; ++i) {
const Index idx = block_index / m_block_strides[i];
coords[i] = m_tensor_slice_offsets[i] + idx * m_block_dim_sizes[i];
sizes[i] = numext::mini(
m_tensor_slice_offsets[i] + m_tensor_slice_extents[i] - coords[i],
m_block_dim_sizes[i]);
block_index -= idx * m_block_strides[i];
first_coeff_index += coords[i] * m_tensor_strides[i];
}
coords[NumDims - 1] = m_tensor_slice_offsets[NumDims - 1] +
block_index * m_block_dim_sizes[NumDims - 1];
sizes[NumDims - 1] = numext::mini(
m_tensor_slice_offsets[NumDims - 1] +
m_tensor_slice_extents[NumDims - 1] - coords[NumDims - 1],
m_block_dim_sizes[NumDims - 1]);
first_coeff_index += coords[NumDims - 1] * m_tensor_strides[NumDims - 1];
Index prev_dim = m_block_stride_order[NumDims - 1];
strides[prev_dim] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
const Index curr_dim = m_block_stride_order[i];
strides[curr_dim] = strides[prev_dim] * sizes[prev_dim];
prev_dim = curr_dim;
}
}
return TensorBlock(first_coeff_index, sizes, strides, m_tensor_strides,
data);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index total_block_count() const {
return m_total_block_count;
}
private:
Dimensions m_tensor_dimensions;
Dimensions m_tensor_slice_offsets;
Dimensions m_tensor_slice_extents;
Dimensions m_tensor_strides;
Dimensions m_block_dim_sizes;
Dimensions m_block_stride_order;
Dimensions m_block_strides;
Index m_total_block_count;
};
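// A minimal usage sketch (illustrative only; mirrors the unit test added in
// this commit): map 2x2x2x2 blocks over the slice at offsets (1, 3, 5, 7)
// with extents (3, 2, 4, 5) of a 5x7x11x17 tensor, keeping the natural block
// stride order:
//
//   DSizes<Index, 4> dims(5, 7, 11, 17);
//   DSizes<Index, 4> offsets(1, 3, 5, 7);
//   DSizes<Index, 4> extents(3, 2, 4, 5);
//   DSizes<Index, 4> block_sizes(2, 2, 2, 2);
//   TensorSliceBlockMapper<int, Index, 4, ColMajor> mapper(
//       dims, offsets, extents, block_sizes, DimensionList<Index, 4>());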
} // namespace internal
} // namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H

unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h

@@ -284,6 +284,12 @@ struct DSizes : array<DenseIndex, NumDims> {
(*this)[0] = i0;
}
EIGEN_DEVICE_FUNC DSizes(const DimensionList<DenseIndex, NumDims>& a) {
for (int i = 0; i < NumDims; ++i) {
(*this)[i] = a[i];
}
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes> EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) {

unsupported/test/CMakeLists.txt

@@ -130,6 +130,7 @@ if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
ei_add_test(cxx11_tensor_dimension)
ei_add_test(cxx11_tensor_map)
ei_add_test(cxx11_tensor_assign)
ei_add_test(cxx11_tensor_block_access)
ei_add_test(cxx11_tensor_comparisons)
ei_add_test(cxx11_tensor_forced_eval)
ei_add_test(cxx11_tensor_math)

unsupported/test/cxx11_tensor_block_access.cpp

@@ -0,0 +1,182 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2018 Andy Davis <andydavis@google.com>
// Copyright (C) 2018 Eugene Zhulenev <ezhulenev@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <set>
#include <Eigen/CXX11/Tensor>
using Eigen::Tensor;
using Eigen::Index;
using Eigen::RowMajor;
using Eigen::ColMajor;
template<typename T>
static const T& choose(int layout, const T& col, const T& row) {
return layout == ColMajor ? col : row;
}
template <int Layout>
static void test_block_mapper_sanity()
{
using T = int;
using TensorBlock = internal::TensorBlock<T, Index, 2, Layout>;
using TensorBlockMapper = internal::TensorBlockMapper<T, Index, 2, Layout>;
DSizes<Index, 2> tensor_dims(100, 100);
// Test uniform blocks.
TensorBlockMapper uniform_block_mapper(
tensor_dims, internal::TensorBlockShapeType::kUniformAllDims, 100);
VERIFY_IS_EQUAL(uniform_block_mapper.total_block_count(), 100);
VERIFY_IS_EQUAL(uniform_block_mapper.block_dims_total_size(), 100);
// 10x10 blocks
auto uniform_b0 = uniform_block_mapper.GetBlockForIndex(0, nullptr);
VERIFY_IS_EQUAL(uniform_b0.block_sizes().at(0), 10);
VERIFY_IS_EQUAL(uniform_b0.block_sizes().at(1), 10);
// Depending on the layout we stride by cols or rows.
VERIFY_IS_EQUAL(uniform_b0.block_strides().at(0), choose(Layout, 1, 10));
VERIFY_IS_EQUAL(uniform_b0.block_strides().at(1), choose(Layout, 10, 1));
// Tensor strides depend only on the layout, not on the block size.
VERIFY_IS_EQUAL(uniform_b0.tensor_strides().at(0), choose(Layout, 1, 100));
VERIFY_IS_EQUAL(uniform_b0.tensor_strides().at(1), choose(Layout, 100, 1));
// Test skewed to inner dims blocks.
TensorBlockMapper skewed_block_mapper(
tensor_dims, internal::TensorBlockShapeType::kSkewedInnerDims, 100);
VERIFY_IS_EQUAL(skewed_block_mapper.total_block_count(), 100);
VERIFY_IS_EQUAL(skewed_block_mapper.block_dims_total_size(), 100);
// 100x1 (or 1x100) blocks depending on the tensor layout.
auto skewed_b0 = skewed_block_mapper.GetBlockForIndex(0, nullptr);
VERIFY_IS_EQUAL(skewed_b0.block_sizes().at(0), choose(Layout, 100, 1));
VERIFY_IS_EQUAL(skewed_b0.block_sizes().at(1), choose(Layout, 1, 100));
// Depending on the layout we stride by cols or rows.
VERIFY_IS_EQUAL(skewed_b0.block_strides().at(0), choose(Layout, 1, 100));
VERIFY_IS_EQUAL(skewed_b0.block_strides().at(1), choose(Layout, 100, 1));
// Tensor strides depend only on the layout, not on the block size.
VERIFY_IS_EQUAL(skewed_b0.tensor_strides().at(0), choose(Layout, 1, 100));
VERIFY_IS_EQUAL(skewed_b0.tensor_strides().at(1), choose(Layout, 100, 1));
}
// Given a TensorBlock, visit every element accessible through it, and insert
// its index into the visited set. Verify that every coeff is accessed only once.
template <typename T, int Layout, int NumDims>
static void UpdateCoeffSet(
const internal::TensorBlock<T, Index, NumDims, Layout>& block,
Index first_coeff_index,
int dim_index,
std::set<Index>* visited_coeffs) {
const DSizes<Index, NumDims> block_sizes = block.block_sizes();
const DSizes<Index, NumDims> tensor_strides = block.tensor_strides();
for (int i = 0; i < block_sizes[dim_index]; ++i) {
if (tensor_strides[dim_index] == 1) {
auto inserted = visited_coeffs->insert(first_coeff_index + i);
VERIFY_IS_EQUAL(inserted.second, true);
} else {
int next_dim_index = dim_index + choose(Layout, -1, 1);
UpdateCoeffSet<T, Layout, NumDims>(block, first_coeff_index,
next_dim_index, visited_coeffs);
first_coeff_index += tensor_strides[dim_index];
}
}
}
template <int Layout>
static void test_block_mapper_maps_every_element()
{
using T = int;
using TensorBlock = internal::TensorBlock<T, Index, 4, Layout>;
using TensorBlockMapper = internal::TensorBlockMapper<T, Index, 4, Layout>;
DSizes<Index, 4> dims(5, 7, 11, 17);
auto total_coeffs = static_cast<int>(dims.TotalSize());
// Keep track of element indices available via block access.
std::set<Index> coeff_set;
// Try different combinations of block types and sizes.
auto block_shape_type =
internal::random<bool>()
? internal::TensorBlockShapeType::kUniformAllDims
: internal::TensorBlockShapeType::kSkewedInnerDims;
auto block_target_size = internal::random<int>(1, total_coeffs);
TensorBlockMapper block_mapper(dims, block_shape_type, block_target_size);
for (int i = 0; i < block_mapper.total_block_count(); ++i) {
TensorBlock block = block_mapper.GetBlockForIndex(i, nullptr);
UpdateCoeffSet<T, Layout, 4>(block, block.first_coeff_index(),
choose(Layout, 3, 0), &coeff_set);
}
// Verify that every coefficient in the original Tensor is accessible through
// a TensorBlock exactly once.
VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
VERIFY_IS_EQUAL(*coeff_set.begin(), static_cast<Index>(0));
VERIFY_IS_EQUAL(*coeff_set.rbegin(), static_cast<Index>(total_coeffs - 1));
}
template <int Layout>
static void test_slice_block_mapper_maps_every_element()
{
using T = int;
using TensorBlock = internal::TensorBlock<T, Index, 4, Layout>;
using TensorSliceBlockMapper =
internal::TensorSliceBlockMapper<T, Index, 4, Layout>;
DSizes<Index, 4> tensor_dims(5, 7, 11, 17);
DSizes<Index, 4> tensor_slice_offsets(1, 3, 5, 7);
DSizes<Index, 4> tensor_slice_extents(3, 2, 4, 5);
// Keep track of element indices available via block access.
std::set<Index> coeff_set;
auto total_coeffs = static_cast<int>(tensor_slice_extents.TotalSize());
// Try different combinations of block types and sizes.
auto block_shape_type =
internal::random<bool>()
? internal::TensorBlockShapeType::kUniformAllDims
: internal::TensorBlockShapeType::kSkewedInnerDims;
auto block_target_size = internal::random<int>(1, total_coeffs);
// Pick random dimension sizes for the tensor blocks.
DSizes<Index, 4> block_sizes;
for (int i = 0; i < 4; ++i) {
block_sizes[i] = internal::random<int>(1, tensor_slice_extents[i]);
}
TensorSliceBlockMapper block_mapper(tensor_dims, tensor_slice_offsets,
tensor_slice_extents, block_sizes,
DimensionList<Index, 4>());
for (int i = 0; i < block_mapper.total_block_count(); ++i) {
TensorBlock block = block_mapper.GetBlockForIndex(i, nullptr);
UpdateCoeffSet<T, Layout, 4>(block, block.first_coeff_index(),
choose(Layout, 3, 0), &coeff_set);
}
VERIFY_IS_EQUAL(coeff_set.size(), total_coeffs);
}
EIGEN_DECLARE_TEST(cxx11_tensor_block_access) {
CALL_SUBTEST(test_block_mapper_sanity<ColMajor>());
CALL_SUBTEST(test_block_mapper_sanity<RowMajor>());
CALL_SUBTEST(test_block_mapper_maps_every_element<ColMajor>());
CALL_SUBTEST(test_block_mapper_maps_every_element<RowMajor>());
CALL_SUBTEST(test_slice_block_mapper_maps_every_element<ColMajor>());
CALL_SUBTEST(test_slice_block_mapper_maps_every_element<RowMajor>());
}