From 62b5474032c298446ec819e288612e1bfe76e984 Mon Sep 17 00:00:00 2001 From: Pascal Getreuer Date: Mon, 29 Jan 2024 08:12:43 +0000 Subject: [PATCH] Fix busted formatting in Eigen::Tensor README.md. --- unsupported/Eigen/CXX11/src/Tensor/README.md | 981 ++++++++++--------- 1 file changed, 495 insertions(+), 486 deletions(-) diff --git a/unsupported/Eigen/CXX11/src/Tensor/README.md b/unsupported/Eigen/CXX11/src/Tensor/README.md index 7f16fde9c..443500aff 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/README.md +++ b/unsupported/Eigen/CXX11/src/Tensor/README.md @@ -1,4 +1,4 @@ -#Eigen Tensors{#eigen_tensors } +# Eigen Tensors {#eigen_tensors} Tensors are multidimensional arrays of elements. Elements are typically scalars, but more complex types such as strings are also supported. @@ -29,8 +29,8 @@ dimensions. // memory to hold 24 floating point values (24 = 2 x 3 x 4). Tensor t_3d(2, 3, 4); -// Resize t_3d by assigning a tensor of different sizes, but same rank. -t_3d = Tensor(3, 4, 3); + // Resize t_3d by assigning a tensor of different sizes, but same rank. + t_3d = Tensor(3, 4, 3); #### Constructor Tensor(size_array) @@ -72,101 +72,100 @@ large enough to hold all the data. // Map a tensor of ints on top of stack-allocated storage. int storage[128]; // 2 x 4 x 2 x 8 = 128 -TensorMap> t_4d(storage, 2, 4, 2, 8); + TensorMap> t_4d(storage, 2, 4, 2, 8); -// The same storage can be viewed as a different tensor. -// You can also pass the sizes as an array. -TensorMap> t_2d(storage, 16, 8); + // The same storage can be viewed as a different tensor. + // You can also pass the sizes as an array. + TensorMap> t_2d(storage, 16, 8); -// You can also map fixed-size tensors. Here we get a 1d view of -// the 2d fixed-size tensor. -TensorFixedSize> t_4x3; -TensorMap> t_12(t_4x3.data(), 12); + // You can also map fixed-size tensors. Here we get a 1d view of + // the 2d fixed-size tensor. + TensorFixedSize> t_4x3; + TensorMap> t_12(t_4x3.data(), 12); -####Class TensorRef - See Assigning to a TensorRef below - . +#### Class TensorRef - ##Accessing Tensor Elements +See Assigning to a `TensorRef` below. - #### - tensor(index0, index1...) +## Accessing Tensor Elements - Return the element at position `(index0, index1...)` in tensor -`tensor`.You must pass as many parameters as the rank of `tensor`.The expression can be used as an l - - value to set the value of the element at the specified position - .The value returned is of the datatype of the tensor. +#### tensor(index0, index1...) - // Set the value of the element at position (0, 1, 0); - Tensor t_3d(2, 3, 4); -t_3d(0, 1, 0) = 12.0f; +Return the element at position `(index0, index1...)` in tensor +`tensor`. You must pass as many parameters as the rank of `tensor`. +The expression can be used as an l-value to set the value of the element at the +specified position. The value returned is of the datatype of the tensor. -// Initialize all elements to random values. -for (int i = 0; i < 2; ++i) { - for (int j = 0; j < 3; ++j) { - for (int k = 0; k < 4; ++k) { - t_3d(i, j, k) = ... some random value...; + // Set the value of the element at position (0, 1, 0); + Tensor t_3d(2, 3, 4); + t_3d(0, 1, 0) = 12.0f; + + // Initialize all elements to random values. + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 4; ++k) { + t_3d(i, j, k) = ...some random value...; + } + } } - } -} -// Print elements of a tensor. -for (int i = 0; i < 2; ++i) { - LOG(INFO) << t_3d(i, 0, 0); -} + // Print elements of a tensor. 
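    // (Aside, not part of the original example: LOG(INFO) below is assumed to
    // come from an external logging library such as glog; Eigen itself does
    // not provide it, and plain std::cout works just as well here.)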
+ for (int i = 0; i < 2; ++i) { + LOG(INFO) << t_3d(i, 0, 0); + } -##TensorLayout - The tensor library supports 2 layouts : `ColMajor` (the default) and +## TensorLayout + +The tensor library supports 2 layouts: `ColMajor` (the default) and `RowMajor`. - The layout of a tensor is - optionally specified as part of its type.If not specified explicitly column major is assumed. +The layout of a tensor is optionally specified as part of its type. If not +specified explicitly column major is assumed. - Tensor col_major; // equivalent to Tensor -TensorMap> row_major(data, ...); + Tensor col_major; // equivalent to Tensor + TensorMap > row_major(data, ...); -All the arguments to an expression must use the same - layout.Attempting to mix different layouts will result in a compilation error. +All the arguments to an expression must use the same layout. Attempting to mix +different layouts will result in a compilation error. - It is possible to change the layout of a tensor or - an expression using the -`swap_layout()` method.Note that this will also reverse the order of the dimensions. +It is possible to change the layout of a tensor or an expression using the +`swap_layout()` method. Note that this will also reverse the order of the +dimensions. Tensor col_major(2, 4); -Tensor row_major(2, 4); + Tensor row_major(2, 4); -Tensor col_major_result = col_major; // ok, layouts match -Tensor col_major_result = row_major; // will not compile + Tensor col_major_result = col_major; // ok, layouts match + Tensor col_major_result = row_major; // will not compile -// Simple layout swap -col_major_result = row_major.swap_layout(); -eigen_assert(col_major_result.dimension(0) == 4); -eigen_assert(col_major_result.dimension(1) == 2); + // Simple layout swap + col_major_result = row_major.swap_layout(); + eigen_assert(col_major_result.dimension(0) == 4); + eigen_assert(col_major_result.dimension(1) == 2); -// Swap the layout and preserve the order of the dimensions -array shuffle(1, 0); -col_major_result = row_major.swap_layout().shuffle(shuffle); -eigen_assert(col_major_result.dimension(0) == 2); -eigen_assert(col_major_result.dimension(1) == 4); + // Swap the layout and preserve the order of the dimensions + array shuffle(1, 0); + col_major_result = row_major.swap_layout().shuffle(shuffle); + eigen_assert(col_major_result.dimension(0) == 2); + eigen_assert(col_major_result.dimension(1) == 4); -##Tensor Operations - The Eigen Tensor library provides a vast library of operations on Tensors - : numerical operations such as addition and multiplication, - geometry operations such as slicing and shuffling, - etc.These operations are available as methods of the Tensor classes, - and in some cases as operator overloads.For example the following code computes the elementwise addition of two - tensors : +## Tensor Operations - Tensor - t1(2, 3, 4); -... set some values in t1... Tensor t2(2, 3, 4); -... set some values in t2... +The Eigen Tensor library provides a vast library of operations on Tensors: +numerical operations such as addition and multiplication, geometry operations +such as slicing and shuffling, etc. These operations are available as methods +of the Tensor classes, and in some cases as operator overloads. For example +the following code computes the elementwise addition of two tensors: + + Tensor t1(2, 3, 4); + ...set some values in t1... + Tensor t2(2, 3, 4); + ...set some values in t2... 
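    // One concrete way to fill in the "...set some values..." placeholders
    // above (illustrative only, not from the original text; any element-wise
    // initialization would do):
    t1.setRandom();
    t2.setRandom();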
// Set t3 to the element wise sum of t1 and t2 - Tensor - t3 = t1 + t2; + Tensor t3 = t1 + t2; While the code above looks easy enough, it is important to understand that the expression `t1 + t2` is not actually adding the values of the tensors. The @@ -192,17 +191,19 @@ Because Tensor operations create tensor operators, the C++ `auto` keyword does not have its intuitive meaning. Consider these 2 lines of code: Tensor t3 = t1 + t2; -auto t4 = t1 + t2; + auto t4 = t1 + t2; -In the first line we allocate the tensor `t3` and it will contain the result of the addition of `t1` and `t2`.In the second line, `t4` is actually the tree of tensor operators that will compute the addition of -`t1` and `t2`.In fact, `t4` is * not *a tensor and you cannot get the values of its elements : +In the first line we allocate the tensor `t3` and it will contain the +result of the addition of `t1` and `t2`. In the second line, `t4` +is actually the tree of tensor operators that will compute the addition of +`t1` and `t2`. In fact, `t4` is *not* a tensor and you cannot get +the values of its elements: - Tensor - t3 = t1 + t2; -cout << t3(0, 0, 0); // OK prints the value of t1(0, 0, 0) + t2(0, 0, 0) + Tensor t3 = t1 + t2; + cout << t3(0, 0, 0); // OK prints the value of t1(0, 0, 0) + t2(0, 0, 0) -auto t4 = t1 + t2; -cout << t4(0, 0, 0); // Compilation error! + auto t4 = t1 + t2; + cout << t4(0, 0, 0); // Compilation error! When you use `auto` you do not get a Tensor as a result but instead a non-evaluated expression. So only use `auto` to delay evaluation. @@ -216,16 +217,16 @@ result to a Tensor that will be capable of holding onto them. This can be either a normal Tensor, a fixed size Tensor, or a TensorMap on an existing piece of memory. All the following will work: -auto t4 = t1 + t2; + auto t4 = t1 + t2; -Tensor result = t4; // Could also be: result(t4); -cout << result(0, 0, 0); + Tensor result = t4; // Could also be: result(t4); + cout << result(0, 0, 0); -TensorMap result(, , ...) = t4; -cout << result(0, 0, 0); + TensorMap result(, , ...) = t4; + cout << result(0, 0, 0); -TensorFixedSize> result = t4; -cout << result(0, 0, 0); + TensorFixedSize> result = t4; + cout << result(0, 0, 0); Until you need the results, you can keep the operation around, and even reuse it for additional operations. As long as you keep the expression as an @@ -233,108 +234,109 @@ operation, no computation is performed. // One way to compute exp((t1 + t2) * 0.2f); auto t3 = t1 + t2; -auto t4 = t3 * 0.2f; -auto t5 = t4.exp(); -Tensor result = t5; + auto t4 = t3 * 0.2f; + auto t5 = t4.exp(); + Tensor result = t5; -// Another way, exactly as efficient as the previous one: -Tensor result = ((t1 + t2) * 0.2f).exp(); + // Another way, exactly as efficient as the previous one: + Tensor result = ((t1 + t2) * 0.2f).exp(); -## #Controlling When Expression are Evaluated +### Controlling When Expression are Evaluated - There are several ways to control when expressions are evaluated : +There are several ways to control when expressions are evaluated: - *Assignment to a Tensor, - TensorFixedSize, - or TensorMap.*Use of the eval() method.* - Assignment to a TensorRef. +* Assignment to a Tensor, TensorFixedSize, or TensorMap. +* Use of the eval() method. +* Assignment to a TensorRef. - ####Assigning to a Tensor, - TensorFixedSize, - or TensorMap - . +#### Assigning to a Tensor, TensorFixedSize, or TensorMap. 
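As a quick sketch of the rule this subsection describes (using the `t1` and
`t2` tensors from the earlier example; the `buffer` array is introduced here
purely for illustration), assigning an Operation to a `TensorMap` evaluates it
directly into caller-provided memory:

    float buffer[24];  // 2 x 3 x 4 values, matching t1 and t2
    Eigen::TensorMap<Eigen::Tensor<float, 3>> result_map(buffer, 2, 3, 4);
    result_map = t1 + t2;  // the expression is evaluated here, into buffer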
- The most common way to evaluate an expression is to assign it to a Tensor.In the example below, - the `auto` declarations make the intermediate values "Operations", - not Tensors, - and do not cause the - expressions to be evaluated.The assignment to the Tensor `result` causes the evaluation of all the operations. +The most common way to evaluate an expression is to assign it to a Tensor. In +the example below, the `auto` declarations make the intermediate values +"Operations", not Tensors, and do not cause the expressions to be evaluated. +The assignment to the Tensor `result` causes the evaluation of all the +operations. - auto t3 = t1 + t2; // t3 is an Operation. -auto t4 = t3 * 0.2f; // t4 is an Operation. -auto t5 = t4.exp(); // t5 is an Operation. -Tensor result = t5; // The operations are evaluated. + auto t3 = t1 + t2; // t3 is an Operation. + auto t4 = t3 * 0.2f; // t4 is an Operation. + auto t5 = t4.exp(); // t5 is an Operation. + Tensor result = t5; // The operations are evaluated. -If you know the ranks and sizes of the Operation value you can assign the Operation to a TensorFixedSize instead of a - Tensor, - which is a bit more efficient. +If you know the ranks and sizes of the Operation value you can assign the +Operation to a TensorFixedSize instead of a Tensor, which is a bit more +efficient. // We know that the result is a 4x4x2 tensor! - TensorFixedSize> - result = t5; + TensorFixedSize> result = t5; -Simiarly, assigning an expression to a TensorMap causes its evaluation.Like tensors of type TensorFixedSize, - TensorMaps cannot be resized so they have to have the rank and sizes of the expression that are assigned to them - . +Simiarly, assigning an expression to a TensorMap causes its evaluation. Like +tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to +have the rank and sizes of the expression that are assigned to them. - ####Calling - eval() - . +#### Calling eval(). - When you compute large composite expressions, - you sometimes want to tell Eigen that an intermediate value in the expression tree is worth - evaluating ahead of time.This is done by inserting a call to the `eval()` method of the expression Operation. +When you compute large composite expressions, you sometimes want to tell Eigen +that an intermediate value in the expression tree is worth evaluating ahead of +time. This is done by inserting a call to the `eval()` method of the +expression Operation. // The previous example could have been written: Tensor result = ((t1 + t2) * 0.2f).exp(); -// If you want to compute (t1 + t2) once ahead of time you can write: -Tensor result = ((t1 + t2).eval() * 0.2f).exp(); + // If you want to compute (t1 + t2) once ahead of time you can write: + Tensor result = ((t1 + t2).eval() * 0.2f).exp(); -Semantically, calling `eval()` is equivalent to materializing the value of the - expression in a temporary Tensor of the right size.The code above in effect does : +Semantically, calling `eval()` is equivalent to materializing the value of +the expression in a temporary Tensor of the right size. The code above in +effect does: // .eval() knows the size! TensorFixedSize> tmp = t1 + t2; -Tensor result = (tmp * 0.2f).exp(); + Tensor result = (tmp * 0.2f).exp(); -Note that the return value of `eval()` is itself an Operation, so the following code does not do what you may think : +Note that the return value of `eval()` is itself an Operation, so the +following code does not do what you may think: // Here t3 is an evaluation Operation. 
t3 has not been evaluated yet. auto t3 = (t1 + t2).eval(); -// You can use t3 in another expression. Still no evaluation. -auto t4 = (t3 * 0.2f).exp(); + // You can use t3 in another expression. Still no evaluation. + auto t4 = (t3 * 0.2f).exp(); -// The value is evaluated when you assign the Operation to a Tensor, using -// an intermediate tensor to represent t3.x -Tensor result = t4; + // The value is evaluated when you assign the Operation to a Tensor, using + // an intermediate tensor to represent t3.x + Tensor result = t4; -While in the examples above calling `eval()` does not make a difference in performance, - in other cases it can make a huge difference.In the - expression below the `broadcast()` expression causes the `X.maximum()` expression to be evaluated many times : +While in the examples above calling `eval()` does not make a difference in +performance, in other cases it can make a huge difference. In the expression +below the `broadcast()` expression causes the `X.maximum()` expression +to be evaluated many times: - Tensor<...> X...; -Tensor<...> Y = ((X - X.maximum(depth_dim).reshape(dims2d).broadcast(bcast)) * beta).exp(); + Tensor<...> X ...; + Tensor<...> Y = ((X - X.maximum(depth_dim).reshape(dims2d).broadcast(bcast)) + * beta).exp(); Inserting a call to `eval()` between the `maximum()` and -`reshape()` calls guarantees that maximum() is only computed once and greatly speeds - up execution : +`reshape()` calls guarantees that maximum() is only computed once and +greatly speeds-up execution: - Tensor<...> Y = ((X - X.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast)) * beta).exp(); + Tensor<...> Y = + ((X - X.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast)) + * beta).exp(); -In the other example below, - the tensor `Y` is both used in the expression and its assignment.This is an aliasing problem and if the evaluation - is not done in the right order Y will be updated incrementally during the evaluation resulting in bogus results - : +In the other example below, the tensor `Y` is both used in the expression +and its assignment. This is an aliasing problem and if the evaluation is not +done in the right order Y will be updated incrementally during the evaluation +resulting in bogus results: - Tensor<...> - Y...; -Y = Y / (Y.sum(depth_dim).reshape(dims2d).broadcast(bcast)); + Tensor<...> Y ...; + Y = Y / (Y.sum(depth_dim).reshape(dims2d).broadcast(bcast)); -Inserting a call to `eval()` between the `sum()` and `reshape()` expressions ensures that the sum is computed before any - updates to `Y` are done. +Inserting a call to `eval()` between the `sum()` and `reshape()` +expressions ensures that the sum is computed before any updates to `Y` are +done. - Y = Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast)); + Y = Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast)); Note that an eval around the full right hand side expression is not needed because the generated has to compute the i-th value of the right hand side @@ -347,6 +349,7 @@ call for the right hand side: Y.shuffle(...) = (Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast))).eval(); + #### Assigning to a TensorRef. If you need to access only a few elements from the value of an expression you @@ -361,10 +364,10 @@ not provide a way to access individual elements. // evaluated yet. TensorRef > ref = ((t1 + t2) * 0.2f).exp(); -// Use "ref" to access individual elements. The expression is evaluated -// on the fly. 
-float at_0 = ref(0, 0, 0); -cout << ref(0, 1, 0); + // Use "ref" to access individual elements. The expression is evaluated + // on the fly. + float at_0 = ref(0, 0, 0); + cout << ref(0, 1, 0); Only use TensorRef when you need a subset of the values of the expression. TensorRef only computes the values you access. However note that if you are @@ -395,8 +398,8 @@ For example, the following code adds two tensors using the default single-threaded CPU implementation: Tensor a(30, 40); -Tensor b(30, 40); -Tensor c = a + b; + Tensor b(30, 40); + Tensor c = a + b; To choose a different implementation you have to insert a `device()` call before the assignment of the result. For technical C++ reasons this requires @@ -404,25 +407,23 @@ that the Tensor for the result be declared on its own. This means that you have to know the size of the result. Eigen::Tensor c(30, 40); -c.device(...) = a + b; + c.device(...) = a + b; -The call to `device()` must be the last call on the left of the operator= - . +The call to `device()` must be the last call on the left of the operator=. - You must pass to the `device()` call an Eigen device object.There are presently three devices you can use - : DefaultDevice, - ThreadPoolDevice and GpuDevice - . +You must pass to the `device()` call an Eigen device object. There are +presently three devices you can use: DefaultDevice, ThreadPoolDevice and +GpuDevice. - ####Evaluating With the DefaultDevice - This is exactly the same as - not inserting a `device()` call. +#### Evaluating With the DefaultDevice - DefaultDevice my_device; -c.device(my_device) = a + b; +This is exactly the same as not inserting a `device()` call. -####Evaluating with a Thread Pool + DefaultDevice my_device; + c.device(my_device) = a + b; + +#### Evaluating with a Thread Pool // Create the Eigen ThreadPool Eigen::ThreadPool pool(8 /* number of threads in pool */) @@ -430,9 +431,10 @@ c.device(my_device) = a + b; // Create the Eigen ThreadPoolDevice. Eigen::ThreadPoolDevice my_device(&pool, 4 /* number of threads to use */); -// Now just use the device when evaluating expressions. -Eigen::Tensor c(30, 50); -c.device(my_device) = a.contract(b, dot_product_dims); + // Now just use the device when evaluating expressions. + Eigen::Tensor c(30, 50); + c.device(my_device) = a.contract(b, dot_product_dims); + #### Evaluating On GPU @@ -465,7 +467,7 @@ Represents the datatype of individual tensor elements. For example, for a `Tensor`, `Scalar` is the type `float`. See `setConstant()`. -#### +#### (Operation) We use this pseudo type to indicate that a tensor Operation is returned by a method. We indicate in the text the type and dimensions of the tensor that the @@ -491,151 +493,150 @@ Constant value indicating the number of dimensions of a Tensor. This is also known as the tensor "rank". Eigen::Tensor a(3, 4); -cout << "Dims " << a.NumDimensions; -= > Dims 2 + cout << "Dims " << a.NumDimensions; + => Dims 2 - ## #Dimensions dimensions() +### Dimensions dimensions() - Returns an array - - like object representing the - dimensions of the tensor.The actual type of the `dimensions()` result is `::``Dimensions`. 
- - Eigen::Tensor a(3, 4); -const Eigen::Tensor::Dimensions& d = a.dimensions(); -cout << "Dim size: " << d.size << ", dim 0: " << d[0] << ", dim 1: " << d[1]; -= > Dim size : 2, dim 0 : 3, - dim 1 : 4 - - If you use a C++ 11 compiler, - you can use `auto` to simplify the code : - - const auto &d = a.dimensions(); -cout << "Dim size: " << d.size << ", dim 0: " << d[0] << ", dim 1: " << d[1]; -= > Dim size : 2, dim 0 : 3, - dim 1 : 4 - - ## #Index dimension(Index n) - - Returns the n - - th dimension of the tensor.The actual type of the -`dimension()` result is `::``Index`, - but you can always use it like an int. +Returns an array-like object representing the dimensions of the tensor. +The actual type of the `dimensions()` result is `::``Dimensions`. Eigen::Tensor a(3, 4); -int dim1 = a.dimension(1); -cout << "Dim 1: " << dim1; -= > Dim 1 : 4 + const Eigen::Tensor::Dimensions& d = a.dimensions(); + cout << "Dim size: " << d.size << ", dim 0: " << d[0] + << ", dim 1: " << d[1]; + => Dim size: 2, dim 0: 3, dim 1: 4 - ## #Index size() +If you use a C++11 compiler, you can use `auto` to simplify the code: - Returns the total number of elements in the tensor.This is the product of all the tensor - dimensions.The actual type of the `size()` result is -`::``Index`, - but you can always use it like an int. + const auto& d = a.dimensions(); + cout << "Dim size: " << d.size << ", dim 0: " << d[0] + << ", dim 1: " << d[1]; + => Dim size: 2, dim 0: 3, dim 1: 4 + +### Index dimension(Index n) + +Returns the n-th dimension of the tensor. The actual type of the +`dimension()` result is `::``Index`, but you can +always use it like an int. + + Eigen::Tensor a(3, 4); + int dim1 = a.dimension(1); + cout << "Dim 1: " << dim1; + => Dim 1: 4 + +### Index size() + +Returns the total number of elements in the tensor. This is the product of all +the tensor dimensions. The actual type of the `size()` result is +`::``Index`, but you can always use it like an int. Eigen::Tensor a(3, 4); -cout << "Size: " << a.size(); -= > Size : 12 + cout << "Size: " << a.size(); + => Size: 12 - ## #Getting Dimensions From An Operation - A few operations provide `dimensions()` directly, - e.g. `TensorReslicingOp`.Most operations defer calculating - dimensions until the operation is being evaluated.If you need access to the dimensions of a deferred operation, - you can wrap it in a TensorRef(see Assigning to a TensorRef above), - which provides `dimensions()` and `dimension()` as above. +### Getting Dimensions From An Operation - TensorRef can also wrap the plain Tensor types, - so this is a useful idiom in templated contexts where the underlying object could be either a raw Tensor - or some deferred operation(e.g.a slice of a Tensor).In this case, - the template code can wrap the object in a TensorRef and reason about its dimensionality - while remaining agnostic to the underlying type - . +A few operations provide `dimensions()` directly, +e.g. `TensorReslicingOp`. Most operations defer calculating dimensions +until the operation is being evaluated. If you need access to the dimensions +of a deferred operation, you can wrap it in a TensorRef (see Assigning to a +TensorRef above), which provides `dimensions()` and `dimension()` as +above. - ##Constructors +TensorRef can also wrap the plain Tensor types, so this is a useful idiom in +templated contexts where the underlying object could be either a raw Tensor +or some deferred operation (e.g. a slice of a Tensor). 
In this case, the +template code can wrap the object in a TensorRef and reason about its +dimensionality while remaining agnostic to the underlying type. - ## #Tensor - Creates a tensor of the specified size.The number of arguments must be equal to the rank of the tensor - .The content of the tensor is not initialized. +## Constructors + +### Tensor + +Creates a tensor of the specified size. The number of arguments must be equal +to the rank of the tensor. The content of the tensor is not initialized. Eigen::Tensor a(3, 4); - cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; = > NumRows: -3 NumCols : 4 + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 - ## #TensorFixedSize +### TensorFixedSize - Creates a tensor of the specified - size.The number of arguments in the Sizes<> template parameter determines the rank of the - tensor.The content of the tensor is not initialized. +Creates a tensor of the specified size. The number of arguments in the Sizes<> +template parameter determines the rank of the tensor. The content of the tensor +is not initialized. - Eigen::TensorFixedSize> - a; -cout << "Rank: " << a.rank() << endl; -= > Rank : 2 cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; -= > NumRows : 3 NumCols : 4 + Eigen::TensorFixedSize> a; + cout << "Rank: " << a.rank() << endl; + => Rank: 2 + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 - ## #TensorMap +### TensorMap - Creates a tensor mapping an existing array of data.The data must not be freed until the TensorMap is discarded, - and the size of the data must be large enough to accommodate the coefficients of the tensor. +Creates a tensor mapping an existing array of data. The data must not be freed +until the TensorMap is discarded, and the size of the data must be large enough +to accommodate the coefficients of the tensor. - float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; -Eigen::TensorMap> a(data, 3, 4); -cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; -= > NumRows : 3 NumCols : 4 cout << "a(1, 2): " << a(1, 2) << endl; -= > a(1, 2) : 7 + float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + Eigen::TensorMap> a(data, 3, 4); + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 + cout << "a(1, 2): " << a(1, 2) << endl; + => a(1, 2): 7 - ##Contents Initialization - When a new Tensor - or a new TensorFixedSize are created, - memory is allocated to hold all the tensor elements, but the memory is not initialized.Similarly, - when a new TensorMap is created on top of non - - initialized memory the memory its contents are not initialized - . +## Contents Initialization - You can use one of the methods below to initialize the tensor memory - .These have an immediate effect on the tensor and return the tensor itself as a result - .These are not tensor Operations which delay evaluation - . +When a new Tensor or a new TensorFixedSize are created, memory is allocated to +hold all the tensor elements, but the memory is not initialized. Similarly, +when a new TensorMap is created on top of non-initialized memory the memory its +contents are not initialized. - ## # setConstant(const Scalar& val) +You can use one of the methods below to initialize the tensor memory. These +have an immediate effect on the tensor and return the tensor itself as a +result. 
These are not tensor Operations which delay evaluation. - Sets all elements of the tensor to the constant value `val` - . `Scalar` is the type of data stored in the tensor.You can pass any value that is convertible to that type - . +### setConstant(const Scalar& val) - Returns the tensor itself in case you want to chain another call - . +Sets all elements of the tensor to the constant value `val`. `Scalar` +is the type of data stored in the tensor. You can pass any value that is +convertible to that type. - a.setConstant(12.3f); - cout << "Constant: " << endl - << a << endl - << endl; - = > Constant: -12.3 12.3 12.3 12.3 12.3 12.3 12.3 12.3 12.3 12.3 12.3 12.3 +Returns the tensor itself in case you want to chain another call. - Note that `setConstant()` can be used on any tensor where the element type has a copy constructor and an ` - operator=()`: + a.setConstant(12.3f); + cout << "Constant: " << endl << a << endl << endl; + => + Constant: + 12.3 12.3 12.3 12.3 + 12.3 12.3 12.3 12.3 + 12.3 12.3 12.3 12.3 + +Note that `setConstant()` can be used on any tensor where the element type +has a copy constructor and an `operator=()`: Eigen::Tensor a(2, 3); -a.setConstant("yolo"); -cout << "String tensor: " << endl << a << endl << endl; -= > String tensor : yolo yolo yolo yolo yolo yolo + a.setConstant("yolo"); + cout << "String tensor: " << endl << a << endl << endl; + => + String tensor: + yolo yolo yolo + yolo yolo yolo - ## # - setZero() - Fills the tensor with zeros.Equivalent to `setConstant(Scalar(0))` - .Returns the tensor itself in case you want to chain another call - . +### setZero() - a.setZero(); - cout << "Zeros: " << endl - << a << endl - << endl; - = > Zeros: +Fills the tensor with zeros. Equivalent to `setConstant(Scalar(0))`. +Returns the tensor itself in case you want to chain another call. + + a.setZero(); + cout << "Zeros: " << endl << a << endl << endl; + => + Zeros: 0 0 0 0 0 0 0 0 0 0 0 0 @@ -686,12 +687,14 @@ want to chain another call. cout << "Random: " << endl << a << endl << endl; => Random: - 0.680375 0.59688 - 0.329554 0.10794 - 0.211234 0.823295 0.536459 - 0.0452059 0.566198 - 0.604897 - - 0.444451 0.257742 + 0.680375 0.59688 -0.329554 0.10794 + -0.211234 0.823295 0.536459 -0.0452059 + 0.566198 -0.604897 -0.444451 0.257742 - You can customize `setRandom()` by providing your own random number generator as a template argument : +You can customize `setRandom()` by providing your own random number +generator as a template argument: - a.setRandom(); + a.setRandom(); Here, `MyRandomGenerator` must be a struct with the following member functions, where Scalar and Index are the same as `::``Scalar` @@ -701,23 +704,24 @@ See `struct UniformRandomGenerator` in TensorFunctors.h for an example. // Custom number generator for use with setRandom(). struct MyRandomGenerator { - // Default and copy constructors. Both are needed - MyRandomGenerator() {} - MyRandomGenerator(const MyRandomGenerator&) {} + // Default and copy constructors. Both are needed + MyRandomGenerator() { } + MyRandomGenerator(const MyRandomGenerator& ) { } - // Return a random value to be used. "element_location" is the - // location of the entry to set in the tensor, it can typically - // be ignored. - Scalar operator()(Eigen::DenseIndex element_location, Eigen::DenseIndex /*unused*/ = 0) const { - return ; - } + // Return a random value to be used. "element_location" is the + // location of the entry to set in the tensor, it can typically + // be ignored. 
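      // (Illustrative note, not part of the original text: for a
      // floating-point Scalar, one minimal possibility for the body below is
      //   return static_cast<Scalar>(std::rand()) / static_cast<Scalar>(RAND_MAX);
      // any source of random Scalar values is equally valid.)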
+ Scalar operator()(Eigen::DenseIndex element_location, + Eigen::DenseIndex /*unused*/ = 0) const { + return ; + } - // Same as above but generates several numbers at a time. - typename internal::packet_traits::type packetOp(Eigen::DenseIndex packet_location, - Eigen::DenseIndex /*unused*/ = 0) const { - return ; - } -}; + // Same as above but generates several numbers at a time. + typename internal::packet_traits::type packetOp( + Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const { + return ; + } + }; You can also use one of the 2 random number generators that are part of the tensor library: @@ -753,9 +757,9 @@ Eigen Tensor code with other libraries. Scalar is the type of data stored in the tensor. Eigen::Tensor a(3, 4); -float* a_data = a.data(); -a_data[0] = 123.45f; -cout << "a(0, 0): " << a(0, 0); + float* a_data = a.data(); + a_data[0] = 123.45f; + cout << "a(0, 0): " << a(0, 0); => a(0, 0): 123.45 @@ -769,7 +773,7 @@ The chain of Operation is evaluated lazily, typically when it is assigned to a tensor. See "Controlling when Expression are Evaluated" for more details about their evaluation. -### constant(const Scalar& val) +### (Operation) constant(const Scalar& val) Returns a tensor of the same type and dimensions as the original tensor but where all elements have the value `val`. @@ -797,7 +801,7 @@ tensor, or multiply every element of a tensor by a scalar. 0.6 0.6 0.6 0.6 0.6 0.6 -### random() +### (Operation) random() Returns a tensor of the same type and dimensions as the current tensor but where all elements have random values. @@ -811,108 +815,107 @@ as for `setRandom()`. Eigen::Tensor b = a + a.random(); cout << "a" << endl << a << endl << endl; cout << "b" << endl << b << endl << endl; - = > a 1 1 1 1 1 1 + => + a + 1 1 1 + 1 1 1 - b 1.68038 1.5662 1.82329 0.788766 1.59688 0.395103 + b + 1.68038 1.5662 1.82329 + 0.788766 1.59688 0.395103 - ##Unary Element Wise Operations - All these operations take a single input tensor as argument and return a tensor of the same type and - dimensions as the tensor to which they are - applied.The requested operations are applied to each element independently. +## Unary Element Wise Operations - ## # - operator-() +All these operations take a single input tensor as argument and return a tensor +of the same type and dimensions as the tensor to which they are applied. The +requested operations are applied to each element independently. - Returns a tensor of the same type and - dimensions as the original tensor containing the opposite values of the original tensor. +### (Operation) operator-() - Eigen::Tensor - a(2, 3); +Returns a tensor of the same type and dimensions as the original tensor +containing the opposite values of the original tensor. + + Eigen::Tensor a(2, 3); a.setConstant(1.0f); Eigen::Tensor b = -a; cout << "a" << endl << a << endl << endl; cout << "b" << endl << b << endl << endl; - = > a 1 1 1 1 1 1 + => + a + 1 1 1 + 1 1 1 - b - - 1 - 1 - 1 - 1 - 1 - - 1 + b + -1 -1 -1 + -1 -1 -1 - ## # - sqrt() +### (Operation) sqrt() - Returns a tensor of the same type and dimensions as the original tensor - containing the square roots of the original tensor - . +Returns a tensor of the same type and dimensions as the original tensor +containing the square roots of the original tensor. - ## # rsqrt() +### (Operation) rsqrt() - Returns a tensor of the same type and dimensions as the original tensor - containing the inverse square roots of the original tensor - . 
+Returns a tensor of the same type and dimensions as the original tensor +containing the inverse square roots of the original tensor. - ## # square() +### (Operation) square() - Returns a tensor of the same type and dimensions as the original tensor - containing the squares of the original tensor values - . +Returns a tensor of the same type and dimensions as the original tensor +containing the squares of the original tensor values. - ## # inverse() +### (Operation) inverse() - Returns a tensor of the same type and dimensions as the original tensor - containing the inverse of the original tensor values - . +Returns a tensor of the same type and dimensions as the original tensor +containing the inverse of the original tensor values. - ## # exp() +### (Operation) exp() - Returns a tensor of the same type and dimensions as the original tensor containing the - exponential of the original tensor - . +Returns a tensor of the same type and dimensions as the original tensor +containing the exponential of the original tensor. - ## # log() +### (Operation) log() - Returns a tensor of the same type and dimensions as the original tensor containing the natural - logarithms of the original tensor - . +Returns a tensor of the same type and dimensions as the original tensor +containing the natural logarithms of the original tensor. - ## # abs() +### (Operation) abs() - Returns a tensor of the same type and dimensions as the original tensor - containing the absolute values of the original tensor - . +Returns a tensor of the same type and dimensions as the original tensor +containing the absolute values of the original tensor. - ## # arg() +### (Operation) arg() - Returns a tensor with the same dimensions as the original tensor containing the complex - argument(phase angle) of the values of the original tensor - . +Returns a tensor with the same dimensions as the original tensor +containing the complex argument (phase angle) of the values of the +original tensor. - ## # real() +### (Operation) real() - Returns a tensor with the same dimensions as the original tensor - containing the real part of the complex values of the original tensor - . +Returns a tensor with the same dimensions as the original tensor +containing the real part of the complex values of the original tensor. - ## # imag() +### (Operation) imag() - Returns a tensor with the same dimensions as the orginal tensor - containing the imaginary part of the complex values of the original tensor - . +Returns a tensor with the same dimensions as the orginal tensor +containing the imaginary part of the complex values of the original +tensor. - ## # pow(Scalar exponent) +### (Operation) pow(Scalar exponent) - Returns a tensor of the same type and dimensions as the original tensor containing the - coefficients of the original tensor to the power of the exponent. +Returns a tensor of the same type and dimensions as the original tensor +containing the coefficients of the original tensor to the power of the +exponent. - The type of the exponent, - Scalar, is always the same as the type of the tensor coefficients.For example, - only integer exponents can be used in conjuntion with tensors of integer values - . +The type of the exponent, Scalar, is always the same as the type of the +tensor coefficients. For example, only integer exponents can be used in +conjuntion with tensors of integer values. - You can use cast() to lift this restriction.For example this computes cubic roots of an int Tensor : +You can use cast() to lift this restriction. 
For example this computes +cubic roots of an int Tensor: - Eigen::Tensor a(2, 3); + Eigen::Tensor a(2, 3); a.setValues({{0, 1, 8}, {27, 64, 125}}); Eigen::Tensor b = a.cast().pow(1.0 / 3.0); cout << "a" << endl << a << endl << endl; @@ -926,17 +929,17 @@ as for `setRandom()`. 0 1 2 3 4 5 -### operator * (Scalar scale) +### (Operation) operator * (Scalar scale) Multiplies all the coefficients of the input tensor by the provided scale. -### cwiseMax(Scalar threshold) +### (Operation) cwiseMax(Scalar threshold) TODO -### cwiseMin(Scalar threshold) +### (Operation) cwiseMin(Scalar threshold) TODO -### unaryExpr(const CustomUnaryOp& func) +### (Operation) unaryExpr(const CustomUnaryOp& func) TODO @@ -948,50 +951,50 @@ dimensions as the tensors to which they are applied, and unless otherwise specified it is also of the same type. The requested operations are applied to each pair of elements independently. -### operator+(const OtherDerived& other) +### (Operation) operator+(const OtherDerived& other) Returns a tensor of the same type and dimensions as the input tensors containing the coefficient wise sums of the inputs. -### operator-(const OtherDerived& other) +### (Operation) operator-(const OtherDerived& other) Returns a tensor of the same type and dimensions as the input tensors containing the coefficient wise differences of the inputs. -### operator*(const OtherDerived& other) +### (Operation) operator*(const OtherDerived& other) Returns a tensor of the same type and dimensions as the input tensors containing the coefficient wise products of the inputs. -### operator/(const OtherDerived& other) +### (Operation) operator/(const OtherDerived& other) Returns a tensor of the same type and dimensions as the input tensors containing the coefficient wise quotients of the inputs. This operator is not supported for integer types. -### cwiseMax(const OtherDerived& other) +### (Operation) cwiseMax(const OtherDerived& other) Returns a tensor of the same type and dimensions as the input tensors containing the coefficient wise maximums of the inputs. -### cwiseMin(const OtherDerived& other) +### (Operation) cwiseMin(const OtherDerived& other) Returns a tensor of the same type and dimensions as the input tensors containing the coefficient wise mimimums of the inputs. -### Logical operators +### (Operation) Logical operators The following logical operators are supported as well: -* operator&&(const OtherDerived& other) -* operator||(const OtherDerived& other) -* operator<(const OtherDerived& other) -* operator<=(const OtherDerived& other) -* operator>(const OtherDerived& other) -* operator>=(const OtherDerived& other) -* operator==(const OtherDerived& other) -* operator!=(const OtherDerived& other) + * `operator&&(const OtherDerived& other)` + * `operator||(const OtherDerived& other)` + * `operator<(const OtherDerived& other)` + * `operator<=(const OtherDerived& other)` + * `operator>(const OtherDerived& other)` + * `operator>=(const OtherDerived& other)` + * `operator==(const OtherDerived& other)` + * `operator!=(const OtherDerived& other)` They all return a tensor of boolean values. @@ -1138,50 +1141,50 @@ scalar, represented as a zero-dimension tensor. 276 -### sum(const Dimensions& new_dims) -### sum() +### (Operation) sum(const Dimensions& new_dims) +### (Operation) sum() Reduce a tensor using the sum() operator. The resulting values are the sum of the reduced values. 
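As a concrete illustration (a minimal sketch; the 2x3 shape and the values
below are assumptions, not taken from the original text), `sum()` can reduce
along selected dimensions or over the whole tensor:

    Eigen::Tensor<int, 2> a(2, 3);
    a.setValues({{1, 2, 3}, {4, 5, 6}});

    // Reduce along dimension 0: the result is a 1-d tensor of size 3.
    Eigen::array<int, 1> dims({0});
    Eigen::Tensor<int, 1> sums = a.sum(dims);
    // sums contains {5, 7, 9}.

    // Reduce along all dimensions: the result is a 0-d tensor (a scalar).
    Eigen::Tensor<int, 0> total = a.sum();
    // total() == 21.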
-### mean(const Dimensions& new_dims) -### mean() +### (Operation) mean(const Dimensions& new_dims) +### (Operation) mean() Reduce a tensor using the mean() operator. The resulting values are the mean of the reduced values. -### maximum(const Dimensions& new_dims) -### maximum() +### (Operation) maximum(const Dimensions& new_dims) +### (Operation) maximum() Reduce a tensor using the maximum() operator. The resulting values are the largest of the reduced values. -### minimum(const Dimensions& new_dims) -### minimum() +### (Operation) minimum(const Dimensions& new_dims) +### (Operation) minimum() Reduce a tensor using the minimum() operator. The resulting values are the smallest of the reduced values. -### prod(const Dimensions& new_dims) -### prod() +### (Operation) prod(const Dimensions& new_dims) +### (Operation) prod() Reduce a tensor using the prod() operator. The resulting values are the product of the reduced values. -### all(const Dimensions& new_dims) -### all() +### (Operation) all(const Dimensions& new_dims) +### (Operation) all() Reduce a tensor using the all() operator. Casts tensor to bool and then checks whether all elements are true. Runs through all elements rather than short-circuiting, so may be significantly inefficient. -### any(const Dimensions& new_dims) -### any() +### (Operation) any(const Dimensions& new_dims) +### (Operation) any() Reduce a tensor using the any() operator. Casts tensor to bool and then checks whether any element is true. Runs through all elements rather than short-circuiting, so may be significantly inefficient. -### reduce(const Dimensions& new_dims, const Reducer& reducer) +### (Operation) reduce(const Dimensions& new_dims, const Reducer& reducer) Reduce a tensor using a user-defined reduction operator. See `SumReducer` in TensorFunctors.h for information on how to implement a reduction operator. @@ -1217,8 +1220,8 @@ Example: Trace along 2 dimensions. 15 -### trace(const Dimensions& new_dims) -### trace() +### (Operation) trace(const Dimensions& new_dims) +### (Operation) trace() As a special case, if no parameter is passed to the operation, trace is computed along *all* dimensions of the input tensor. @@ -1268,18 +1271,18 @@ dd a comment to this line 1 3 6 4 9 15 -### cumsum(const Index& axis) +### (Operation) cumsum(const Index& axis) Perform a scan by summing consecutive entries. -### cumprod(const Index& axis) +### (Operation) cumprod(const Index& axis) Perform a scan by multiplying consecutive entries. ## Convolutions -### convolve(const Kernel& kernel, const Dimensions& dims) +### (Operation) convolve(const Kernel& kernel, const Dimensions& dims) Returns a tensor that is the output of the convolution of the input tensor with the kernel, along the specified dimensions of the input tensor. The dimension size for dimensions of the output tensor @@ -1301,17 +1304,19 @@ for the last dimension). 
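    // (Illustrative note, not in the original text: each convolved dimension
    // shrinks to input_size - kernel_size + 1, which is where the output
    // sizes 2 = 3 - 2 + 1 and 6 = 7 - 2 + 1 declared above come from;
    // dimensions not listed in `dims` keep their original size.)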
output = input.convolve(kernel, dims); for (int i = 0; i < 3; ++i) { - for (int j = 0; j < 2; ++j) { - for (int k = 0; k < 6; ++k) { - for (int l = 0; l < 11; ++l) { - const float result = output(i, j, k, l); - const float expected = input(i, j + 0, k + 0, l) * kernel(0, 0) + input(i, j + 1, k + 0, l) * kernel(1, 0) + - input(i, j + 0, k + 1, l) * kernel(0, 1) + input(i, j + 1, k + 1, l) * kernel(1, 1); - VERIFY_IS_APPROX(result, expected); + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 6; ++k) { + for (int l = 0; l < 11; ++l) { + const float result = output(i,j,k,l); + const float expected = input(i,j+0,k+0,l) * kernel(0,0) + + input(i,j+1,k+0,l) * kernel(1,0) + + input(i,j+0,k+1,l) * kernel(0,1) + + input(i,j+1,k+1,l) * kernel(1,1); + VERIFY_IS_APPROX(result, expected); + } + } } } - } - } ## Geometrical Operations @@ -1320,7 +1325,7 @@ These operations return a Tensor with different dimensions than the original Tensor. They can be used to access slices of tensors, see them with different dimensions, or pad tensors with additional data. -### reshape(const Dimensions& new_dims) +### (Operation) reshape(const Dimensions& new_dims) Returns a view of the input tensor that has been reshaped to the specified new dimensions. The argument new_dims is an array of Index values. The @@ -1399,7 +1404,7 @@ Note that "b" itself was not reshaped but that instead the assignment is done to the reshape view of b. -### shuffle(const Shuffle& shuffle) +### (Operation) shuffle(const Shuffle& shuffle) Returns a copy of the input tensor whose dimensions have been reordered according to the specified permutation. The argument shuffle @@ -1440,7 +1445,7 @@ Let's rewrite the previous example to take advantage of this feature: output.shuffle({2, 0, 1}) = input; -### stride(const Strides& strides) +### (Operation) stride(const Strides& strides) Returns a view of the input tensor that strides (skips stride-1 elements) along each of the dimensions. The argument strides is an @@ -1466,7 +1471,7 @@ It is possible to assign a tensor to a stride: output.stride({2, 3, 4}) = input; -### slice(const StartIndices& offsets, const Sizes& extents) +### (Operation) slice(const StartIndices& offsets, const Sizes& extents) Returns a sub-tensor of the given tensor. For each dimension i, the slice is made of the coefficients stored between offset[i] and offset[i] + extents[i] in @@ -1492,7 +1497,7 @@ the input tensor. 600 700 -### chip(const Index offset, const Index dim) +### (Operation) chip(const Index offset, const Index dim) A chip is a special kind of slice. It is the subtensor at the given offset in the dimension dim. The returned tensor has one fewer dimension than the input @@ -1543,7 +1548,7 @@ lvalue. For example: 0 0 0 -### reverse(const ReverseDimensions& reverse) +### (Operation) reverse(const ReverseDimensions& reverse) Returns a view of the input tensor that reverses the order of the coefficients along a subset of the dimensions. The argument reverse is an array of boolean @@ -1573,7 +1578,7 @@ of a 2D tensor: 0 100 200 -### broadcast(const Broadcast& broadcast) +### (Operation) broadcast(const Broadcast& broadcast) Returns a view of the input tensor in which the input is replicated one to many times. @@ -1597,11 +1602,11 @@ made in each of the dimensions. 
0 100 200 0 100 200 300 400 500 300 400 500 -### concatenate(const OtherDerived& other, Axis axis) +### (Operation) concatenate(const OtherDerived& other, Axis axis) TODO -### pad(const PaddingDimensions& padding) +### (Operation) pad(const PaddingDimensions& padding) Returns a view of the input tensor in which the input is padded with zeros. @@ -1626,7 +1631,7 @@ Returns a view of the input tensor in which the input is padded with zeros. 0 0 0 0 -### extract_patches(const PatchDims& patch_dims) +### (Operation) extract_patches(const PatchDims& patch_dims) Returns a tensor of coefficient patches extracted from the input tensor, where each patch is of dimension specified by 'patch_dims'. The returned tensor has @@ -1657,18 +1662,18 @@ Six 2x2 patches can be extracted and indexed using the following code: patch_dims[1] = 2; patch = tensor.extract_patches(patch_dims); for (int k = 0; k < 6; ++k) { - cout << "patch index: " << k << endl; - for (int i = 0; i < 2; ++i) { - for (int j = 0; j < 2; ++j) { - if (DataLayout == ColMajor) { - cout << patch(i, j, k) << " "; - } else { - cout << patch(k, i, j) << " "; + cout << "patch index: " << k << endl; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + if (DataLayout == ColMajor) { + cout << patch(i, j, k) << " "; + } else { + cout << patch(k, i, j) << " "; + } + } + cout << endl; } } - cout << endl; - } - } This code results in the following output when the data layout is ColMajor: @@ -1713,7 +1718,7 @@ This code results in the following output when the data layout is RowMajor: 6 7 10 11 -### extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type) +### (Operation) extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type) Returns a tensor of coefficient image patches extracted from the input tensor, which is expected to have dimensions ordered as follows (depending on the data @@ -1751,73 +1756,77 @@ sizes: *) 2D patch: ColMajor (patch indexed by second-to-last dimension) Tensor twod_patch; -twod_patch = tensor.extract_image_patches<2, 2>(); -// twod_patch.dimension(0) == 2 -// twod_patch.dimension(1) == 2 -// twod_patch.dimension(2) == 2 -// twod_patch.dimension(3) == 3*5 -// twod_patch.dimension(4) == 7 + twod_patch = tensor.extract_image_patches<2, 2>(); + // twod_patch.dimension(0) == 2 + // twod_patch.dimension(1) == 2 + // twod_patch.dimension(2) == 2 + // twod_patch.dimension(3) == 3*5 + // twod_patch.dimension(4) == 7 *) 2D patch: RowMajor (patch indexed by the second dimension) Tensor twod_patch_row_major; -twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>(); -// twod_patch_row_major.dimension(0) == 7 -// twod_patch_row_major.dimension(1) == 3*5 -// twod_patch_row_major.dimension(2) == 2 -// twod_patch_row_major.dimension(3) == 2 -// twod_patch_row_major.dimension(4) == 2 + twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>(); + // twod_patch_row_major.dimension(0) == 7 + // twod_patch_row_major.dimension(1) == 3*5 + // twod_patch_row_major.dimension(2) == 2 + // twod_patch_row_major.dimension(3) == 2 + // twod_patch_row_major.dimension(4) == 2 -##Special Operations +## Special Operations - ## # - cast() +### (Operation) cast() - Returns a tensor of type T with the same dimensions as the original tensor - .The returned tensor contains the values of the original tensor converted to type T. 
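The `concatenate()` entry above is still marked TODO. As a hedged sketch (the
shapes and values here are assumptions, and the exact behaviour should be
checked against the implementation), it joins two tensors along one axis; the
sizes of all other dimensions must match:

    Eigen::Tensor<int, 2> a(2, 3);
    Eigen::Tensor<int, 2> b(2, 3);
    a.setConstant(1);
    b.setConstant(2);

    // Concatenate along axis 0: the result is a 4x3 tensor whose first two
    // entries along dimension 0 come from a and whose last two come from b.
    Eigen::Tensor<int, 2> c = a.concatenate(b, 0);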
+Returns a tensor of type T with the same dimensions as the original tensor. +The returned tensor contains the values of the original tensor converted to +type T. Eigen::Tensor a(2, 3); -Eigen::Tensor b = a.cast(); + Eigen::Tensor b = a.cast(); This can be useful for example if you need to do element-wise division of Tensors of integers. This is not currently supported by the Tensor library but you can easily cast the tensors to floats to do the division: Eigen::Tensor a(2, 3); -a.setValues({{0, 1, 2}, {3, 4, 5}}); -Eigen::Tensor b = (a.cast() / a.constant(2).cast()).cast(); -cout << "a" << endl << a << endl << endl; -cout << "b" << endl << b << endl << endl; -= > a 0 1 2 3 4 5 + a.setValues({{0, 1, 2}, {3, 4, 5}}); + Eigen::Tensor b = + (a.cast() / a.constant(2).cast()).cast(); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 0 1 2 + 3 4 5 - b 0 0 1 1 2 2 + b + 0 0 1 + 1 2 2 - ## # - eval() - TODO +### (Operation) eval() - ##Tensor Printing Tensors can be printed into a stream object(e.g. `std::cout`) using different formatting options. +TODO - Eigen::Tensor tensor3d = {4, 3, 2}; -tensor3d.setValues({{{1, 2}, {3, 4}, {5, 6}}, - {{7, 8}, {9, 10}, {11, 12}}, - {{13, 14}, {15, 16}, {17, 18}}, - {{19, 20}, {21, 22}, {23, 24}}}); -std::cout << tensor3d.format(Eigen::TensorIOFormat::Plain()) << std::endl; - ==> - 1 2 - 3 4 - 5 6 - - 7 8 +## Tensor Printing +Tensors can be printed into a stream object (e.g. `std::cout`) using different formatting options. + + Eigen::Tensor tensor3d = {4, 3, 2}; + tensor3d.setValues( {{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}} ); + std::cout << tensor3d.format(Eigen::TensorIOFormat::Plain()) << std::endl; + ==> + 1 2 + 3 4 + 5 6 + + 7 8 9 10 11 12 - + 13 14 15 16 17 18 - + 19 20 21 22 23 24
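Streaming a tensor directly, without a `format()` call, also works, as the
earlier examples in this document already do. As a short sketch (the `Numpy()`
preset named below is an assumption about the available presets and may vary
between Eigen versions; `Plain()` is the one shown above):

    Eigen::Tensor<int, 2> t(2, 3);
    t.setValues({{1, 2, 3}, {4, 5, 6}});

    // Default formatting, as used throughout this document.
    std::cout << t << std::endl;

    // An explicitly chosen preset (assumed to exist alongside Plain()).
    std::cout << t.format(Eigen::TensorIOFormat::Numpy()) << std::endl;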