From f73c95c032895aaaa8f5de120d1c01e75d0f0081 Mon Sep 17 00:00:00 2001
From: cpp977
Date: Tue, 16 Nov 2021 17:36:58 +0000
Subject: [PATCH] Reimplemented the Tensor stream output.
---
 unsupported/Eigen/CXX11/Tensor                |   4 +-
 unsupported/Eigen/CXX11/src/Tensor/README.md  |  39 ++
 .../Eigen/CXX11/src/Tensor/TensorBase.h       |   5 +
 unsupported/Eigen/CXX11/src/Tensor/TensorIO.h | 405 +++++++++++++++---
 unsupported/test/cxx11_tensor_io.cpp          | 211 +++++----
 5 files changed, 498 insertions(+), 166 deletions(-)

diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor
index 1b8b33f89..f88ddc514 100644
--- a/unsupported/Eigen/CXX11/Tensor
+++ b/unsupported/Eigen/CXX11/Tensor
@@ -76,6 +76,8 @@
 #include "src/Tensor/TensorIntDiv.h"
 #include "src/Tensor/TensorGlobalFunctions.h"
+#include "src/Tensor/TensorIO.h"
+
 #include "src/Tensor/TensorBase.h"
 #include "src/Tensor/TensorBlock.h"
@@ -129,7 +131,7 @@
 #include "src/Tensor/TensorMap.h"
 #include "src/Tensor/TensorRef.h"
-#include "src/Tensor/TensorIO.h"
+
 #include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"

diff --git a/unsupported/Eigen/CXX11/src/Tensor/README.md b/unsupported/Eigen/CXX11/src/Tensor/README.md
index d4d3d5986..b6abf2f87 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/README.md
+++ b/unsupported/Eigen/CXX11/src/Tensor/README.md
@@ -1794,6 +1794,45 @@ but you can easily cast the tensors to floats to do the division:
 TODO
+## Tensor Printing
+Tensors can be printed to a stream object (e.g. `std::cout`) using different formatting options.
+
+    Eigen::Tensor<int, 3> tensor3d = {4, 3, 2};
+    tensor3d.setValues( {{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}} );
+    std::cout << tensor3d.format(Eigen::TensorIOFormat::Plain()) << std::endl;
+    ==>
+     1  2
+     3  4
+     5  6
+
+     7  8
+     9 10
+    11 12
+
+    13 14
+    15 16
+    17 18
+
+    19 20
+    21 22
+    23 24
+
+
+In the example, we used the predefined format `Eigen::TensorIOFormat::Plain()`.
+Here is the list of all predefined formats from which you can choose:
+- `Eigen::TensorIOFormat::Plain()` for a plain output without braces. Different submatrices are separated by a blank line.
+- `Eigen::TensorIOFormat::Numpy()` for numpy-like output.
+- `Eigen::TensorIOFormat::Native()` for a C++-like output which can be copy-pasted directly into `setValues()`.
+- `Eigen::TensorIOFormat::Legacy()` for backwards-compatible printing of tensors.
+
+If you send the tensor directly to the stream, the default format `Eigen::TensorIOFormat::Plain()` is used.
+
+You can define your own format by explicitly providing an `Eigen::TensorIOFormat` instance; see the example below. Here, you can specify:
+- The overall prefix and suffix with `std::string tenPrefix` and `std::string tenSuffix`.
+- The prefix, separator and suffix for each new element, row, matrix, 3d subtensor, ... with `std::vector<std::string> prefix`, `std::vector<std::string> separator` and `std::vector<std::string> suffix`. Note that the first entry in each of these vectors refers to the last dimension of the tensor, e.g. `separator[0]` is printed between adjacent elements, `separator[1]` between adjacent rows, `separator[2]` between adjacent matrices, and so on.
+- `char fill`: the fill character used for padding when the elements are aligned.
+- `int precision`: the output precision, or `Eigen::StreamPrecision` / `Eigen::FullPrecision`.
+- `int flags`: an OR-ed combination of flags; the default value is 0. The only currently available flag is `Eigen::DontAlignCols`, which disables the alignment of columns and results in faster output.
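+
+For illustration, here is a minimal sketch of a custom format built from these options. It mimics
+the `Numpy()` preset, but separates elements with `"; "` and uses parentheses instead of square
+brackets; the exact strings are arbitrary. The constructor takes the separator, prefix and suffix
+vectors first, followed by the precision, the flags and the overall tensor prefix and suffix:
+
+    // a hypothetical custom format: "; " between elements, one row per line,
+    // nested parentheses instead of the Numpy preset's square brackets
+    std::vector<std::string> my_separator = {"; ", "\n"};
+    std::vector<std::string> my_prefix    = {"", "("};
+    std::vector<std::string> my_suffix    = {"", ")"};
+    Eigen::TensorIOFormat my_format(my_separator, my_prefix, my_suffix,
+                                    Eigen::StreamPrecision, /*flags=*/0,
+                                    /*tenPrefix=*/"(", /*tenSuffix=*/")");
+    std::cout << tensor3d.format(my_format) << std::endl;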
## Representation of scalar values diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h index 68aced516..9c356f497 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h @@ -962,6 +962,11 @@ class TensorBase return TensorForcedEvalOp(derived()); } + // Returns a formatted tensor ready for printing to a stream + inline const TensorWithFormat format(const TensorIOFormat& fmt) const { + return TensorWithFormat(derived(), fmt); + } + #ifdef EIGEN_READONLY_TENSORBASE_PLUGIN #include EIGEN_READONLY_TENSORBASE_PLUGIN #endif diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h index f47973b74..3e95f95fa 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h @@ -14,68 +14,361 @@ namespace Eigen { +struct TensorIOFormat; + namespace internal { - -// Print the tensor as a 2d matrix -template -struct TensorPrinter { - static void run (std::ostream& os, const Tensor& tensor) { - typedef typename internal::remove_const::type Scalar; - typedef typename Tensor::Index Index; - const Index total_size = internal::array_prod(tensor.dimensions()); - if (total_size > 0) { - const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions()); - static const int layout = Tensor::Layout; - Map > matrix(const_cast(tensor.data()), first_dim, total_size/first_dim); - os << matrix; - } - } -}; - - -// Print the tensor as a vector -template -struct TensorPrinter { - static void run (std::ostream& os, const Tensor& tensor) { - typedef typename internal::remove_const::type Scalar; - typedef typename Tensor::Index Index; - const Index total_size = internal::array_prod(tensor.dimensions()); - if (total_size > 0) { - Map > array(const_cast(tensor.data()), total_size); - os << array; - } - } -}; - - -// Print the tensor as a scalar -template -struct TensorPrinter { - static void run (std::ostream& os, const Tensor& tensor) { - os << tensor.coeff(0); - } -}; +template +struct TensorPrinter; } +struct TensorIOFormat { + TensorIOFormat(const std::vector& _separator, const std::vector& _prefix, + const std::vector& _suffix, int _precision = StreamPrecision, int _flags = 0, + const std::string& _tenPrefix = "", const std::string& _tenSuffix = "", const char _fill = ' ') + : tenPrefix(_tenPrefix), + tenSuffix(_tenSuffix), + prefix(_prefix), + suffix(_suffix), + separator(_separator), + fill(_fill), + precision(_precision), + flags(_flags) { + init_spacer(); + } + + TensorIOFormat(int _precision = StreamPrecision, int _flags = 0, const std::string& _tenPrefix = "", + const std::string& _tenSuffix = "", const char _fill = ' ') + : tenPrefix(_tenPrefix), tenSuffix(_tenSuffix), fill(_fill), precision(_precision), flags(_flags) { + // default values of prefix, suffix and separator + prefix = {"", "["}; + suffix = {"", "]"}; + separator = {", ", "\n"}; + + init_spacer(); + } + + void init_spacer() { + if ((flags & DontAlignCols)) return; + spacer.resize(prefix.size()); + spacer[0] = ""; + int i = int(tenPrefix.length()) - 1; + while (i >= 0 && tenPrefix[i] != '\n') { + spacer[0] += ' '; + i--; + } + + for (std::size_t k = 1; k < prefix.size(); k++) { + int i = int(prefix[k].length()) - 1; + while (i >= 0 && prefix[k][i] != '\n') { + spacer[k] += ' '; + i--; + } + } + } + + static inline const TensorIOFormat Numpy() { + std::vector prefix = {"", "["}; + std::vector suffix = {"", "]"}; + std::vector 
separator = {" ", "\n"}; + return TensorIOFormat(separator, prefix, suffix, StreamPrecision, 0, "[", "]"); + } + + static inline const TensorIOFormat Plain() { + std::vector separator = {" ", "\n", "\n", ""}; + std::vector prefix = {""}; + std::vector suffix = {""}; + return TensorIOFormat(separator, prefix, suffix, StreamPrecision, 0, "", "", ' '); + } + + static inline const TensorIOFormat Native() { + std::vector separator = {", ", ",\n", "\n"}; + std::vector prefix = {"", "{"}; + std::vector suffix = {"", "}"}; + return TensorIOFormat(separator, prefix, suffix, StreamPrecision, 0, "{", "}", ' '); + } + + static inline const TensorIOFormat Legacy() { + TensorIOFormat LegacyFormat(StreamPrecision, 0, "", "", ' '); + LegacyFormat.legacy_bit = true; + return LegacyFormat; + } + + std::string tenPrefix; + std::string tenSuffix; + std::vector prefix; + std::vector suffix; + std::vector separator; + char fill; + int precision; + int flags; + std::vector spacer{}; + bool legacy_bit = false; +}; + +template +class TensorWithFormat; +// specialize for Layout=ColMajor, Layout=RowMajor and rank=0. +template +class TensorWithFormat { + public: + TensorWithFormat(const T& tensor, const TensorIOFormat& format) : t_tensor(tensor), t_format(format) {} + + friend std::ostream& operator<<(std::ostream& os, const TensorWithFormat& wf) { + // Evaluate the expression if needed + typedef TensorEvaluator, DefaultDevice> Evaluator; + TensorForcedEvalOp eval = wf.t_tensor.eval(); + Evaluator tensor(eval, DefaultDevice()); + tensor.evalSubExprsIfNeeded(NULL); + internal::TensorPrinter::run(os, tensor, wf.t_format); + // Cleanup. + tensor.cleanup(); + return os; + } + + protected: + T t_tensor; + TensorIOFormat t_format; +}; + +template +class TensorWithFormat { + public: + TensorWithFormat(const T& tensor, const TensorIOFormat& format) : t_tensor(tensor), t_format(format) {} + + friend std::ostream& operator<<(std::ostream& os, const TensorWithFormat& wf) { + // Switch to RowMajor storage and print afterwards + typedef typename T::Index Index; + std::array shuffle; + std::array id; + std::iota(id.begin(), id.end(), Index(0)); + std::copy(id.begin(), id.end(), shuffle.rbegin()); + auto tensor_row_major = wf.t_tensor.swap_layout().shuffle(shuffle); + + // Evaluate the expression if needed + typedef TensorEvaluator, DefaultDevice> Evaluator; + TensorForcedEvalOp eval = tensor_row_major.eval(); + Evaluator tensor(eval, DefaultDevice()); + tensor.evalSubExprsIfNeeded(NULL); + internal::TensorPrinter::run(os, tensor, wf.t_format); + // Cleanup. + tensor.cleanup(); + return os; + } + + protected: + T t_tensor; + TensorIOFormat t_format; +}; + template -std::ostream& operator << (std::ostream& os, const TensorBase& expr) { - typedef TensorEvaluator, DefaultDevice> Evaluator; - typedef typename Evaluator::Dimensions Dimensions; +class TensorWithFormat { + public: + TensorWithFormat(const T& tensor, const TensorIOFormat& format) : t_tensor(tensor), t_format(format) {} - // Evaluate the expression if needed - TensorForcedEvalOp eval = expr.eval(); - Evaluator tensor(eval, DefaultDevice()); - tensor.evalSubExprsIfNeeded(NULL); + friend std::ostream& operator<<(std::ostream& os, const TensorWithFormat& wf) { + // Evaluate the expression if needed + typedef TensorEvaluator, DefaultDevice> Evaluator; + TensorForcedEvalOp eval = wf.t_tensor.eval(); + Evaluator tensor(eval, DefaultDevice()); + tensor.evalSubExprsIfNeeded(NULL); + internal::TensorPrinter::run(os, tensor, wf.t_format); + // Cleanup. 
+ tensor.cleanup(); + return os; + } - // Print the result - static const int rank = internal::array_size::value; - internal::TensorPrinter::run(os, tensor); + protected: + T t_tensor; + TensorIOFormat t_format; +}; - // Cleanup. - tensor.cleanup(); - return os; +namespace internal { +template +struct TensorPrinter { + static void run(std::ostream& s, const Tensor& _t, const TensorIOFormat& fmt) { + typedef typename Tensor::Scalar Scalar; + typedef typename Tensor::Index Index; + static const int layout = Tensor::Layout; + // backwards compatibility case: print tensor after reshaping to matrix of size dim(0) x + // (dim(1)*dim(2)*...*dim(rank-1)). + if (fmt.legacy_bit) { + const Index total_size = internal::array_prod(_t.dimensions()); + if (total_size > 0) { + const Index first_dim = Eigen::internal::array_get<0>(_t.dimensions()); + Map > matrix(const_cast(_t.data()), first_dim, + total_size / first_dim); + s << matrix; + return; + } + } + + assert(layout == RowMajor); + typedef typename conditional::value || is_same::value || + is_same::value || is_same::value, + int, + typename conditional >::value || + is_same >::value || + is_same >::value || + is_same >::value, + std::complex, const Scalar&>::type>::type PrintType; + + const Index total_size = array_prod(_t.dimensions()); + + std::streamsize explicit_precision; + if (fmt.precision == StreamPrecision) { + explicit_precision = 0; + } else if (fmt.precision == FullPrecision) { + if (NumTraits::IsInteger) { + explicit_precision = 0; + } else { + explicit_precision = significant_decimals_impl::run(); + } + } else { + explicit_precision = fmt.precision; + } + + std::streamsize old_precision = 0; + if (explicit_precision) old_precision = s.precision(explicit_precision); + + Index width = 0; + + bool align_cols = !(fmt.flags & DontAlignCols); + if (align_cols) { + // compute the largest width + for (Index i = 0; i < total_size; i++) { + std::stringstream sstr; + sstr.copyfmt(s); + sstr << static_cast(_t.data()[i]); + width = std::max(width, Index(sstr.str().length())); + } + } + std::streamsize old_width = s.width(); + char old_fill_character = s.fill(); + + s << fmt.tenPrefix; + for (Index i = 0; i < total_size; i++) { + std::array is_at_end{}; + std::array is_at_begin{}; + + // is the ith element the end of an coeff (always true), of a row, of a matrix, ...? + for (std::size_t k = 0; k < rank; k++) { + if ((i + 1) % (std::accumulate(_t.dimensions().rbegin(), _t.dimensions().rbegin() + k, 1, + std::multiplies())) == + 0) { + is_at_end[k] = true; + } + } + + // is the ith element the begin of an coeff (always true), of a row, of a matrix, ...? + for (std::size_t k = 0; k < rank; k++) { + if (i % (std::accumulate(_t.dimensions().rbegin(), _t.dimensions().rbegin() + k, 1, + std::multiplies())) == + 0) { + is_at_begin[k] = true; + } + } + + // do we have a line break? + bool is_at_begin_after_newline = false; + for (std::size_t k = 0; k < rank; k++) { + if (is_at_begin[k]) { + std::size_t separator_index = (k < fmt.separator.size()) ? k : fmt.separator.size() - 1; + if (fmt.separator[separator_index].find('\n') != std::string::npos) { + is_at_begin_after_newline = true; + } + } + } + + bool is_at_end_before_newline = false; + for (std::size_t k = 0; k < rank; k++) { + if (is_at_end[k]) { + std::size_t separator_index = (k < fmt.separator.size()) ? 
k : fmt.separator.size() - 1; + if (fmt.separator[separator_index].find('\n') != std::string::npos) { + is_at_end_before_newline = true; + } + } + } + + std::stringstream suffix, prefix, separator; + for (std::size_t k = 0; k < rank; k++) { + std::size_t suffix_index = (k < fmt.suffix.size()) ? k : fmt.suffix.size() - 1; + if (is_at_end[k]) { + suffix << fmt.suffix[suffix_index]; + } + } + for (std::size_t k = 0; k < rank; k++) { + std::size_t separator_index = (k < fmt.separator.size()) ? k : fmt.separator.size() - 1; + if (is_at_end[k] and + (!is_at_end_before_newline or fmt.separator[separator_index].find('\n') != std::string::npos)) { + separator << fmt.separator[separator_index]; + } + } + for (std::size_t k = 0; k < rank; k++) { + std::size_t spacer_index = (k < fmt.spacer.size()) ? k : fmt.spacer.size() - 1; + if (i != 0 and is_at_begin_after_newline and (!is_at_begin[k] or k == 0)) { + prefix << fmt.spacer[spacer_index]; + } + } + for (int k = rank - 1; k >= 0; k--) { + std::size_t prefix_index = (static_cast(k) < fmt.prefix.size()) ? k : fmt.prefix.size() - 1; + if (is_at_begin[k]) { + prefix << fmt.prefix[prefix_index]; + } + } + + s << prefix.str(); + if (width) { + s.fill(fmt.fill); + s.width(width); + s << std::right; + } + s << _t.data()[i]; + s << suffix.str(); + if (i < total_size - 1) { + s << separator.str(); + } + } + s << fmt.tenSuffix; + if (explicit_precision) s.precision(old_precision); + if (width) { + s.fill(old_fill_character); + s.width(old_width); + } + } +}; + +template +struct TensorPrinter { + static void run(std::ostream& s, const Tensor& _t, const TensorIOFormat& fmt) { + typedef typename Tensor::Scalar Scalar; + + std::streamsize explicit_precision; + if (fmt.precision == StreamPrecision) { + explicit_precision = 0; + } else if (fmt.precision == FullPrecision) { + if (NumTraits::IsInteger) { + explicit_precision = 0; + } else { + explicit_precision = significant_decimals_impl::run(); + } + } else { + explicit_precision = fmt.precision; + } + + std::streamsize old_precision = 0; + if (explicit_precision) old_precision = s.precision(explicit_precision); + + s << fmt.tenPrefix << _t.coeff(0) << fmt.tenSuffix; + if (explicit_precision) s.precision(old_precision); + } +}; + +} // end namespace internal +template +std::ostream& operator<<(std::ostream& s, const TensorBase& t) { + s << t.format(TensorIOFormat::Plain()); + return s; } +} // end namespace Eigen -} // end namespace Eigen - -#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H +#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H diff --git a/unsupported/test/cxx11_tensor_io.cpp b/unsupported/test/cxx11_tensor_io.cpp index 2c638f9bf..34fcbc3ff 100644 --- a/unsupported/test/cxx11_tensor_io.cpp +++ b/unsupported/test/cxx11_tensor_io.cpp @@ -6,131 +6,124 @@ // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- #include "main.h" + #include -#include #include +template +struct test_tensor_ostream_impl {}; -template -static void test_output_0d() -{ - Tensor tensor; - tensor() = 123; - - std::stringstream os; - os << tensor; - - std::string expected("123"); - VERIFY_IS_EQUAL(std::string(os.str()), expected); -} - - -template -static void test_output_1d() -{ - Tensor tensor(5); - for (int i = 0; i < 5; ++i) { - tensor(i) = i; +template +struct test_tensor_ostream_impl { + static void run() { + Eigen::Tensor t; + t.setValues(1); + std::ostringstream os; + os << t.format(Eigen::TensorIOFormat::Plain()); + VERIFY(os.str() == "1"); } +}; - std::stringstream os; - os << tensor; - - std::string expected("0\n1\n2\n3\n4"); - VERIFY_IS_EQUAL(std::string(os.str()), expected); - - Eigen::Tensor empty_tensor(0); - std::stringstream empty_os; - empty_os << empty_tensor; - std::string empty_string; - VERIFY_IS_EQUAL(std::string(empty_os.str()), empty_string); -} - - -template -static void test_output_2d() -{ - Tensor tensor(5, 3); - for (int i = 0; i < 5; ++i) { - for (int j = 0; j < 3; ++j) { - tensor(i, j) = i*j; - } +template +struct test_tensor_ostream_impl { + static void run() { + Eigen::Tensor t = {3}; + t.setValues({1, 2, 3}); + std::ostringstream os; + os << t.format(Eigen::TensorIOFormat::Plain()); + VERIFY(os.str() == "1 2 3"); } +}; - std::stringstream os; - os << tensor; - - std::string expected("0 0 0\n0 1 2\n0 2 4\n0 3 6\n0 4 8"); - VERIFY_IS_EQUAL(std::string(os.str()), expected); -} - - -template -static void test_output_expr() -{ - Tensor tensor1(5); - Tensor tensor2(5); - for (int i = 0; i < 5; ++i) { - tensor1(i) = i; - tensor2(i) = 7; +template +struct test_tensor_ostream_impl { + static void run() { + Eigen::Tensor t = {3, 2}; + t.setValues({{1, 2}, {3, 4}, {5, 6}}); + std::ostringstream os; + os << t.format(Eigen::TensorIOFormat::Plain()); + VERIFY(os.str() == "1 2\n3 4\n5 6"); } +}; - std::stringstream os; - os << tensor1 + tensor2; - - std::string expected(" 7\n 8\n 9\n10\n11"); - VERIFY_IS_EQUAL(std::string(os.str()), expected); -} - - -template -static void test_output_string() -{ - Tensor tensor(5, 3); - tensor.setConstant(std::string("foo")); - - std::cout << tensor << std::endl; - - std::stringstream os; - os << tensor; - - std::string expected("foo foo foo\nfoo foo foo\nfoo foo foo\nfoo foo foo\nfoo foo foo"); - VERIFY_IS_EQUAL(std::string(os.str()), expected); -} - - -template -static void test_output_const() -{ - Tensor tensor(5); - for (int i = 0; i < 5; ++i) { - tensor(i) = i; +template +struct test_tensor_ostream_impl { + static void run() { + Eigen::Tensor t = {4, 3, 2}; + t.setValues({{{1, 2}, {3, 4}, {5, 6}}, + {{7, 8}, {9, 10}, {11, 12}}, + {{13, 14}, {15, 16}, {17, 18}}, + {{19, 20}, {21, 22}, {23, 24}}}); + std::ostringstream os; + os << t.format(Eigen::TensorIOFormat::Plain()); + VERIFY(os.str() == " 1 2\n 3 4\n 5 6\n\n 7 8\n 9 10\n11 12\n\n13 14\n15 16\n17 18\n\n19 20\n21 22\n23 24"); } +}; - TensorMap > tensor_map(tensor.data(), 5); +template +struct test_tensor_ostream_impl { + static void run() { + Eigen::Tensor t = {3, 2}; + t.setValues({{false, true}, {true, false}, {false, false}}); + std::ostringstream os; + os << t.format(Eigen::TensorIOFormat::Plain()); + VERIFY(os.str() == "0 1\n1 0\n0 0"); + } +}; - std::stringstream os; - os << tensor_map; +template +struct test_tensor_ostream_impl, 2, Layout> { + static void run() { + Eigen::Tensor, 2> t = {3, 2}; + t.setValues({{std::complex(1, 2), std::complex(12, 3)}, + {std::complex(-4, 2), std::complex(0, 5)}, + 
{std::complex(-1, 4), std::complex(5, 27)}}); + std::ostringstream os; + os << t.format(Eigen::TensorIOFormat::Plain()); + VERIFY(os.str() == " (1,2) (12,3)\n(-4,2) (0,5)\n(-1,4) (5,27)"); + } +}; - std::string expected("0\n1\n2\n3\n4"); - VERIFY_IS_EQUAL(std::string(os.str()), expected); +template +void test_tensor_ostream() { + test_tensor_ostream_impl::run(); } +EIGEN_DECLARE_TEST(cxx11_tensor_io) { + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); -EIGEN_DECLARE_TEST(cxx11_tensor_io) -{ - CALL_SUBTEST(test_output_0d()); - CALL_SUBTEST(test_output_0d()); - CALL_SUBTEST(test_output_1d()); - CALL_SUBTEST(test_output_1d()); - CALL_SUBTEST(test_output_2d()); - CALL_SUBTEST(test_output_2d()); - CALL_SUBTEST(test_output_expr()); - CALL_SUBTEST(test_output_expr()); - CALL_SUBTEST(test_output_string()); - CALL_SUBTEST(test_output_string()); - CALL_SUBTEST(test_output_const()); - CALL_SUBTEST(test_output_const()); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + + CALL_SUBTEST((test_tensor_ostream())); + CALL_SUBTEST((test_tensor_ostream())); + + CALL_SUBTEST((test_tensor_ostream, 2, Eigen::ColMajor>())); + CALL_SUBTEST((test_tensor_ostream, 2, Eigen::ColMajor>())); }