From c0f2cb016e60b7dbde1d5946f42234a709a711f9 Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Mon, 28 Apr 2014 10:32:27 -0700 Subject: [PATCH] Extended support for Tensors: * Added ability to map a region of the memory to a tensor * Added basic support for unary and binary coefficient wise expressions, such as addition or square root * Provided an emulation layer to make it possible to compile the code with compilers (such as nvcc) that don't support cxx11. --- Eigen/src/Core/util/Macros.h | 5 + unsupported/Eigen/CXX11/Core | 14 +- unsupported/Eigen/CXX11/Tensor | 27 ++- .../Eigen/CXX11/src/Core/util/CXX11Meta.h | 24 +-- .../CXX11/src/Core/util/CXX11Workarounds.h | 16 +- .../CXX11/src/Core/util/EmulateCXX11Meta.h | 184 ++++++++++++++++++ unsupported/Eigen/CXX11/src/Tensor/Tensor.h | 156 ++++++++------- .../Eigen/CXX11/src/Tensor/TensorAssign.h | 52 +++++ .../Eigen/CXX11/src/Tensor/TensorBase.h | 82 ++++++++ .../Eigen/CXX11/src/Tensor/TensorEvaluator.h | 127 ++++++++++++ .../Eigen/CXX11/src/Tensor/TensorExpr.h | 161 +++++++++++++++ .../src/Tensor/TensorForwardDeclarations.h | 27 +++ .../Eigen/CXX11/src/Tensor/TensorMap.h | 101 ++++++++++ .../Eigen/CXX11/src/Tensor/TensorStorage.h | 52 ++--- .../Eigen/CXX11/src/Tensor/TensorTraits.h | 122 ++++++++++++ unsupported/test/CMakeLists.txt | 5 +- unsupported/test/cxx11_tensor_simple.cpp | 2 +- 17 files changed, 1028 insertions(+), 129 deletions(-) create mode 100644 unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorBase.h create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorMap.h create mode 100644 unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index bfd6ba7de..3a928001e 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -121,6 +121,11 @@ #define EIGEN_HAVE_RVALUE_REFERENCES #endif +// Does the compiler support variadic templates? +#if __cplusplus > 199711L +#define EIGEN_HAS_VARIADIC_TEMPLATES 1 +#endif + /** Allows to disable some optimizations which might affect the accuracy of the result. * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them. * They currently include: diff --git a/unsupported/Eigen/CXX11/Core b/unsupported/Eigen/CXX11/Core index 4dc4ab224..bba3d578d 100644 --- a/unsupported/Eigen/CXX11/Core +++ b/unsupported/Eigen/CXX11/Core @@ -2,6 +2,7 @@ // for linear algebra. // // Copyright (C) 2013 Christian Seiler +// Copyright (C) 2014 Benoit Steiner // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed @@ -21,20 +22,23 @@ * module. Note that at this stage, you should not need to include * this module directly. * + * It also provides a limited fallback for compilers that don't support + * CXX11 yet, such as nvcc. + * * \code * #include * \endcode */ -#include - +// Emulate the cxx11 functionality that we need if the compiler doesn't support it. 
+#if __cplusplus <= 199711L +#include "src/Core/util/EmulateCXX11Meta.h" +#else #include "src/Core/util/CXX11Workarounds.h" #include "src/Core/util/CXX11Meta.h" +#endif #include #endif // EIGEN_CXX11_CORE_MODULE -/* - * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; - */ diff --git a/unsupported/Eigen/CXX11/Tensor b/unsupported/Eigen/CXX11/Tensor index f2c5129b3..f554c204a 100644 --- a/unsupported/Eigen/CXX11/Tensor +++ b/unsupported/Eigen/CXX11/Tensor @@ -10,9 +10,10 @@ #ifndef EIGEN_CXX11_TENSOR_MODULE #define EIGEN_CXX11_TENSOR_MODULE -#include +#include "Eigen/src/Core/util/StaticAssert.h" +#include "unsupported/Eigen/CXX11/Core" -#include +#include "Eigen/src/Core/util/DisableStupidWarnings.h" /** \defgroup CXX11_Tensor_Module Tensor Module * @@ -27,13 +28,21 @@ #include #include -#include "src/Tensor/TensorStorage.h" -#include "src/Tensor/Tensor.h" +#include "Eigen/Core" -#include +#include "unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h" +#include "unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h" + +#include "unsupported/Eigen/CXX11/src/Tensor/TensorBase.h" + +#include "unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h" +#include "unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h" +#include "unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h" + +#include "unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h" +#include "unsupported/Eigen/CXX11/src/Tensor/Tensor.h" +#include "unsupported/Eigen/CXX11/src/Tensor/TensorMap.h" + +#include "Eigen/src/Core/util/ReenableStupidWarnings.h" #endif // EIGEN_CXX11_TENSOR_MODULE - -/* - * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; - */ diff --git a/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h b/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h index 618e2eb7b..47f06b1b5 100644 --- a/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h +++ b/unsupported/Eigen/CXX11/src/Core/util/CXX11Meta.h @@ -317,7 +317,7 @@ constexpr inline decltype(reduce::run((*((Ts*)0))...)) arg_sum(Ts template constexpr inline Array h_array_reverse(Array arr, numeric_list) { - return {{std_array_get(arr)...}}; + return {{array_get(arr)...}}; } template @@ -335,9 +335,9 @@ constexpr inline std::array array_reverse(std::array arr) // an infinite loop) template struct h_array_reduce { - constexpr static inline auto run(std::array arr) -> decltype(Reducer::run(h_array_reduce::run(arr), std_array_get(arr))) + constexpr static inline auto run(std::array arr) -> decltype(Reducer::run(h_array_reduce::run(arr), array_get(arr))) { - return Reducer::run(h_array_reduce::run(arr), std_array_get(arr)); + return Reducer::run(h_array_reduce::run(arr), array_get(arr)); } }; @@ -346,7 +346,7 @@ struct h_array_reduce { constexpr static inline T run(std::array arr) { - return std_array_get<0>(arr); + return array_get<0>(arr); } }; @@ -375,7 +375,7 @@ constexpr inline auto array_prod(std::array arr) -> decltype(array_reduce< template constexpr inline std::array h_array_zip(std::array a, std::array b, numeric_list) { - return std::array{{ Op::run(std_array_get(a), std_array_get(b))... }}; + return std::array{{ Op::run(array_get(a), array_get(b))... 
}}; } template @@ -387,9 +387,9 @@ constexpr inline std::array array_zip(std::array< /* zip an array and reduce the result */ template -constexpr inline auto h_array_zip_and_reduce(std::array a, std::array b, numeric_list) -> decltype(reduce::type...>::run(Op::run(std_array_get(a), std_array_get(b))...)) +constexpr inline auto h_array_zip_and_reduce(std::array a, std::array b, numeric_list) -> decltype(reduce::type...>::run(Op::run(array_get(a), array_get(b))...)) { - return reduce::type...>::run(Op::run(std_array_get(a), std_array_get(b))...); + return reduce::type...>::run(Op::run(array_get(a), array_get(b))...); } template @@ -403,7 +403,7 @@ constexpr inline auto array_zip_and_reduce(std::array a, std::array template constexpr inline std::array h_array_apply(std::array a, numeric_list) { - return std::array{{ Op::run(std_array_get(a))... }}; + return std::array{{ Op::run(array_get(a))... }}; } template @@ -415,9 +415,9 @@ constexpr inline std::array array_apply(std::array -constexpr inline auto h_array_apply_and_reduce(std::array arr, numeric_list) -> decltype(reduce::type...>::run(Op::run(std_array_get(arr))...)) +constexpr inline auto h_array_apply_and_reduce(std::array arr, numeric_list) -> decltype(reduce::type...>::run(Op::run(array_get(arr))...)) { - return reduce::type...>::run(Op::run(std_array_get(arr))...); + return reduce::type...>::run(Op::run(array_get(arr))...); } template @@ -497,7 +497,3 @@ InstType instantiate_by_c_array(ArrType* arr) } // end namespace Eigen #endif // EIGEN_CXX11META_H - -/* - * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; - */ diff --git a/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h b/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h index 356ae10cf..77207f453 100644 --- a/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h +++ b/unsupported/Eigen/CXX11/src/Core/util/CXX11Workarounds.h @@ -40,8 +40,18 @@ #error This library needs at least a C++11 compliant compiler. If you use g++/clang, please enable the -std=c++11 compiler flag. (-std=c++0x on older versions.) #endif +using std::array; + namespace Eigen { +// Use std::array as Eigen array +/*template +struct array : public std::array { + array() = default; + array(const std::initializer_list& a);// : std::array(a) {}; + array(const std::array& a); +};*/ + namespace internal { /* std::get is only constexpr in C++14, not yet in C++11 @@ -60,9 +70,9 @@ namespace internal { #define STD_GET_ARR_HACK std::template get(a) #endif -template constexpr inline T& std_array_get(std::array& a) { return (T&) STD_GET_ARR_HACK; } -template constexpr inline T&& std_array_get(std::array&& a) { return (T&&) STD_GET_ARR_HACK; } -template constexpr inline T const& std_array_get(std::array const& a) { return (T const&) STD_GET_ARR_HACK; } +template constexpr inline T& array_get(std::array& a) { return (T&) STD_GET_ARR_HACK; } +template constexpr inline T&& array_get(std::array&& a) { return (T&&) STD_GET_ARR_HACK; } +template constexpr inline T const& array_get(std::array const& a) { return (T const&) STD_GET_ARR_HACK; } #undef STD_GET_ARR_HACK diff --git a/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h b/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h new file mode 100644 index 000000000..76fcba5b4 --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Core/util/EmulateCXX11Meta.h @@ -0,0 +1,184 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EMULATE_CXX11_META_H +#define EIGEN_EMULATE_CXX11_META_H + + +namespace Eigen { + +// The array class is only available starting with cxx11. Emulate our own here +// if needed +template class array { + public: + T& operator[] (size_t index) { return values[index]; } + const T& operator[] (size_t index) const { return values[index]; } + + T values[n]; +}; + + +namespace internal { + +/** \internal + * \file CXX11/Core/util/EmulateCXX11Meta.h + * This file emulates a subset of the functionality provided by CXXMeta.h for + * compilers that don't yet support cxx11 such as nvcc. + */ + +struct empty_list { static const std::size_t count = 0; }; + +template struct type_list { + T head; + Tail tail; + static const std::size_t count = 1 + Tail::count; +}; + +struct null_type { }; + +template +struct make_type_list { + typedef typename make_type_list::type tailresult; + + typedef type_list type; +}; + +template<> struct make_type_list<> { + typedef empty_list type; +}; + + + +template +struct type2val { + static const T value = n; +}; + + +template struct gen_numeric_list_repeated; + +template struct gen_numeric_list_repeated { + typedef typename make_type_list >::type type; +}; + +template struct gen_numeric_list_repeated { + typedef typename make_type_list, type2val >::type type; +}; + +template struct gen_numeric_list_repeated { + typedef typename make_type_list, type2val, type2val >::type type; +}; + +template struct gen_numeric_list_repeated { + typedef typename make_type_list, type2val, type2val, type2val >::type type; +}; + +template struct gen_numeric_list_repeated { + typedef typename make_type_list, type2val, type2val, type2val, type2val >::type type; +}; + + + +template +array repeat(t v) { + array array; + array.fill(v); + return array; +} + +template +t array_prod(const array& a) { + t prod = 1; + for (size_t i = 0; i < n; ++i) { prod *= a[i]; } + return prod; +} +template +t array_prod(const array& /*a*/) { + return 0; +} + +template inline T& array_get(array& a) { + return a[I]; +} +template inline const T& array_get(const array& a) { + return a[I]; +} + +struct sum_op { + template static inline bool run(A a, B b) { return a + b; } +}; +struct product_op { + template static inline bool run(A a, B b) { return a * b; } +}; + +struct logical_and_op { + template static inline bool run(A a, B b) { return a && b; } +}; +struct logical_or_op { + template static inline bool run(A a, B b) { return a || b; } +}; + +struct equal_op { + template static inline bool run(A a, B b) { return a == b; } +}; +struct not_equal_op { + template static inline bool run(A a, B b) { return a != b; } +}; +struct lesser_op { + template static inline bool run(A a, B b) { return a < b; } +}; +struct lesser_equal_op { + template static inline bool run(A a, B b) { return a <= b; } +}; + +struct greater_op { + template static inline bool run(A a, B b) { return a > b; } +}; +struct greater_equal_op { + template static inline bool run(A a, B b) { return a >= b; } +}; + +struct not_op { + template static inline bool run(A a) { return !a; } +}; +struct negation_op { + template static inline bool run(A a) { return -a; } +}; +struct greater_equal_zero_op { + template static inline bool run(A a) { return a >= 0; } +}; + + +template +inline bool 
array_apply_and_reduce(const array& a) { + EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE) + bool result = Reducer::run(Op::run(a[0]), Op::run(a[1])); + for (size_t i = 2; i < N; ++i) { + result = Reducer::run(result, Op::run(a[i])); + } + return result; +} + +template +inline bool array_zip_and_reduce(const array& a, const array& b) { + EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE) + bool result = Reducer::run(Op::run(a[0], b[0]), Op::run(a[1], b[1])); + for (size_t i = 2; i < N; ++i) { + result = Reducer::run(result, Op::run(a[i], b[i])); + } + return result; +} + +} // end namespace internal + +} // end namespace Eigen + + + +#endif // EIGEN_EMULATE_CXX11_META_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h index c6216e14c..7b8f14c6d 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h +++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h @@ -57,28 +57,16 @@ namespace Eigen { * * \ref TopicStorageOrders */ -template -class Tensor; namespace internal { -template -struct traits> -{ - typedef Scalar_ Scalar; - typedef Dense StorageKind; - typedef DenseIndex Index; - enum { - Options = Options_ - }; -}; template struct tensor_index_linearization_helper { - constexpr static inline Index run(std::array const& indices, std::array const& dimensions) + static inline Index run(array const& indices, array const& dimensions) { - return std_array_get(indices) + - std_array_get(dimensions) * + return array_get(indices) + + array_get(dimensions) * tensor_index_linearization_helper::run(indices, dimensions); } }; @@ -86,39 +74,40 @@ struct tensor_index_linearization_helper template struct tensor_index_linearization_helper { - constexpr static inline Index run(std::array const& indices, std::array const&) + static inline Index run(array const& indices, array const&) { - return std_array_get(indices); + return array_get(indices); } }; /* Forward-declaration required for the symmetry support. 
*/ template class tensor_symmetry_value_setter; + } // end namespace internal template -class Tensor +class Tensor : public TensorBase > { - static_assert(NumIndices_ >= 1, "A tensor must have at least one index."); - public: typedef Tensor Self; + typedef TensorBase > Base; + typedef typename Eigen::internal::nested::type Nested; typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; - typedef typename internal::traits::Scalar Scalar; + typedef Scalar_ Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; - typedef Self DenseType; + typedef typename Base::CoeffReturnType CoeffReturnType; - constexpr static int Options = Options_; - constexpr static std::size_t NumIndices = NumIndices_; + static const int Options = Options_; + static const std::size_t NumIndices = NumIndices_; protected: TensorStorage m_storage; public: EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; } - EIGEN_STRONG_INLINE std::array dimensions() const { return m_storage.dimensions(); } + EIGEN_STRONG_INLINE array dimensions() const { return m_storage.dimensions(); } EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_storage.dimensions()); } EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } @@ -129,29 +118,17 @@ class Tensor inline Self& base() { return *this; } inline const Self& base() const { return *this; } - void setZero() - { - // FIXME: until we have implemented packet access and the - // expression engine w.r.t. nullary ops, use this - // as a kludge. Only works with POD types, but for - // any standard usage, this shouldn't be a problem - memset((void *)data(), 0, size() * sizeof(Scalar)); - } - - inline Self& operator=(Self const& other) - { - m_storage = other.m_storage; - return *this; - } - +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES template inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const { - static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); - return coeff(std::array{{firstIndex, secondIndex, otherIndices...}}); + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeff(array{{firstIndex, secondIndex, otherIndices...}}); } +#endif - inline const Scalar& coeff(const std::array& indices) const + inline const Scalar& coeff(const array& indices) const { eigen_internal_assert(checkIndexRange(indices)); return m_storage.data()[linearizedIndex(indices)]; @@ -163,14 +140,17 @@ class Tensor return m_storage.data()[index]; } +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES template inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) { - static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); - return coeffRef(std::array{{firstIndex, secondIndex, otherIndices...}}); + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(array{{firstIndex, secondIndex, otherIndices...}}); } +#endif - inline Scalar& coeffRef(const std::array& indices) + inline Scalar& coeffRef(const array& indices) { eigen_internal_assert(checkIndexRange(indices)); return m_storage.data()[linearizedIndex(indices)]; @@ -182,14 +162,17 @@ class Tensor return m_storage.data()[index]; } +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES template inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const { - static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); - return this->operator()(std::array{{firstIndex, secondIndex, otherIndices...}}); + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return this->operator()(array{{firstIndex, secondIndex, otherIndices...}}); } +#endif - inline const Scalar& operator()(const std::array& indices) const + inline const Scalar& operator()(const array& indices) const { eigen_assert(checkIndexRange(indices)); return coeff(indices); @@ -203,18 +186,22 @@ class Tensor inline const Scalar& operator[](Index index) const { - static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead."); + // The bracket operator is only for vectors, use the parenthesis operator instead. + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE); return coeff(index); } +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES template inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) { - static_assert(sizeof...(otherIndices) + 2 == NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); - return operator()(std::array{{firstIndex, secondIndex, otherIndices...}}); + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return operator()(array{{firstIndex, secondIndex, otherIndices...}}); } +#endif - inline Scalar& operator()(const std::array& indices) + inline Scalar& operator()(const array& indices) { eigen_assert(checkIndexRange(indices)); return coeffRef(indices); @@ -228,47 +215,70 @@ class Tensor inline Scalar& operator[](Index index) { - static_assert(NumIndices == 1, "The bracket operator is only for vectors, use the parenthesis operator instead."); + // The bracket operator is only for vectors, use the parenthesis operator instead + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE) return coeffRef(index); } - inline Tensor() + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor() : m_storage() { } - inline Tensor(const Self& other) + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const Self& other) : m_storage(other.m_storage) { } - inline Tensor(Self&& other) - : m_storage(other.m_storage) - { - } +#ifdef EIGEN_HAVE_RVALUE_REFERENCES +// inline Tensor(Self&& other) +// : m_storage(other.m_storage) +// { +// } +#endif +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES template inline Tensor(Index firstDimension, IndexTypes... 
otherDimensions) : m_storage() { - static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to construct a tensor must be equal to the rank of the tensor."); - resize(std::array{{firstDimension, otherDimensions...}}); + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + resize(array{{firstDimension, otherDimensions...}}); } +#endif - inline Tensor(std::array dimensions) + inline Tensor(const array& dimensions) : m_storage(internal::array_prod(dimensions), dimensions) { EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other) + { + // FIXME: we need to resize the tensor to fix the dimensions of the other. + // Unfortunately this isn't possible yet when the rhs is an expression. + // resize(other.dimensions()); + internal::TensorAssign::run(*this, other); + return *this; + } + +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES template void resize(Index firstDimension, IndexTypes... otherDimensions) { - static_assert(sizeof...(otherDimensions) + 1 == NumIndices, "Number of dimensions used to resize a tensor must be equal to the rank of the tensor."); - resize(std::array{{firstDimension, otherDimensions...}}); + // The number of dimensions used to resize a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + resize(array{{firstDimension, otherDimensions...}}); } +#endif - void resize(const std::array& dimensions) + void resize(const array& dimensions) { std::size_t i; Index size = Index(1); @@ -285,20 +295,22 @@ class Tensor #endif } +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES template internal::tensor_symmetry_value_setter symCoeff(const Symmetry_& symmetry, Index firstIndex, IndexTypes... otherIndices) { - return symCoeff(symmetry, std::array{{firstIndex, otherIndices...}}); + return symCoeff(symmetry, array{{firstIndex, otherIndices...}}); } template - internal::tensor_symmetry_value_setter symCoeff(const Symmetry_& symmetry, std::array const& indices) + internal::tensor_symmetry_value_setter symCoeff(const Symmetry_& symmetry, array const& indices) { return internal::tensor_symmetry_value_setter(*this, symmetry, indices); } +#endif protected: - bool checkIndexRange(const std::array& indices) const + bool checkIndexRange(const array& indices) const { using internal::array_apply_and_reduce; using internal::array_zip_and_reduce; @@ -313,7 +325,7 @@ class Tensor array_zip_and_reduce(indices, m_storage.dimensions()); } - inline Index linearizedIndex(const std::array& indices) const + inline Index linearizedIndex(const array& indices) const { return internal::tensor_index_linearization_helper::run(indices, m_storage.dimensions()); } @@ -322,7 +334,3 @@ class Tensor } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_H - -/* - * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; - */ diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h new file mode 100644 index 000000000..f1df827f9 --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h @@ -0,0 +1,52 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H +#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H + + +namespace Eigen { + +/** \class TensorAssign + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor assignment class. + * + * This class is responsible for triggering the evaluation of the expressions + * used on the lhs and rhs of an assignment operator and copy the result of + * the evaluation of the rhs expression at the address computed during the + * evaluation lhs expression. + * + * TODO: vectorization. For now the code only uses scalars + * TODO: parallelisation using multithreading on cpu, or kernels on gpu. + */ +namespace internal { + +template +struct TensorAssign +{ + typedef typename Derived1::Index Index; + EIGEN_DEVICE_FUNC + static inline void run(Derived1& dst, const Derived2& src) + { + TensorEvaluator evalDst(dst); + TensorEvaluator evalSrc(src); + const Index size = dst.size(); + for(Index i = 0; i < size; ++i) { + evalDst.coeffRef(i) = evalSrc.coeff(i); + } + } +}; + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h new file mode 100644 index 000000000..0b9f32f7f --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h @@ -0,0 +1,82 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BASE_H +#define EIGEN_CXX11_TENSOR_TENSOR_BASE_H + +namespace Eigen { + +/** \class TensorBase + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor base class. + * + * This class is the common parent of the Tensor and TensorMap class, thus + * making it possible to use either class interchangably in expressions. + */ + +template +class TensorBase +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::Index Index; + typedef Scalar CoeffReturnType; + + Derived& setZero() { + return setConstant(Scalar(0)); + } + + Derived& setConstant(const Scalar& val) { + Scalar* data = derived().data(); + for (int i = 0; i < derived().size(); ++i) { + data[i] = val; + } + return derived(); + } + + Derived& setRandom() { + Scalar* data = derived().data(); + for (int i = 0; i < derived().size(); ++i) { + data[i] = internal::random_default_impl::run(); + } + return derived(); + } + + // Coefficient-wise unary operators + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + operator-() const { return derived(); } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + cwiseSqrt() const { return derived(); } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + cwiseAbs() const { return derived(); } + + // Coefficient-wise binary operators. 
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator+(const OtherDerived& other) const { + return TensorCwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); + } + + protected: + template friend class TensorBase; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& derived() { return *static_cast(this); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast(this); } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BASE_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h new file mode 100644 index 000000000..f4f10eff5 --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h @@ -0,0 +1,127 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H + +namespace Eigen { + +/** \class TensorEvaluator + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor evaluator classes. + * + * These classes are responsible for the evaluation of the tensor expression. + * + * TODO: add support for more types of expressions, in particular expressions + * leading to lvalues (slicing, reshaping, etc...) + * TODO: add support for vectorization + */ + + +template +struct TensorEvaluator +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar& CoeffReturnType; + //typedef typename Derived::PacketScalar PacketScalar; + typedef TensorEvaluator nestedType; + + TensorEvaluator(Derived& m) + : m_data(const_cast(m.data())) + { } + + CoeffReturnType coeff(Index index) const { + return m_data[index]; + } + + Scalar& coeffRef(Index index) { + return m_data[index]; + } + + // to do: vectorized evaluation. 
+ /* template + PacketReturnType packet(Index index) const + { + return ploadt(m_data + index); + } + + template + void writePacket(Index index, const PacketScalar& x) + { + return pstoret(const_cast(m_data) + index, x); + }*/ + + protected: + Scalar* m_data; +}; + + + + +// -------------------- CwiseUnaryOp -------------------- + +template +struct TensorEvaluator > +{ + typedef TensorCwiseUnaryOp XprType; + typedef TensorEvaluator nestedType; + + TensorEvaluator(const XprType& op) + : m_functor(op.functor()), + m_argImpl(op.nestedExpression()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + CoeffReturnType coeff(Index index) const + { + return m_functor(m_argImpl.coeff(index)); + } + + private: + const UnaryOp m_functor; + typename TensorEvaluator::nestedType m_argImpl; +}; + + +// -------------------- CwiseBinaryOp -------------------- + +template +struct TensorEvaluator > +{ + typedef TensorCwiseBinaryOp XprType; + typedef TensorEvaluator leftType; + typedef TensorEvaluator rightType; + + TensorEvaluator(const XprType& op) + : m_functor(op.functor()), + m_leftImpl(op.lhsExpression()), + m_rightImpl(op.rhsExpression()) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + CoeffReturnType coeff(Index index) const + { + return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index)); + } + + private: + const BinaryOp m_functor; + typename TensorEvaluator::nestedType m_leftImpl; + typename TensorEvaluator::nestedType m_rightImpl; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h new file mode 100644 index 000000000..5a45cec31 --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h @@ -0,0 +1,161 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXPR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXPR_H + +namespace Eigen { + +/** \class TensorExpr + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor expression classes. + * + * The TensorCwiseUnaryOp class represents an expression where a unary operator + * (e.g. cwiseSqrt) is applied to an expression. + * + * The TensorCwiseBinaryOp class represents an expression where a binary operator + * (e.g. addition) is applied to a lhs and a rhs expression. 
+ * + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename result_of< + UnaryOp(typename XprType::Scalar) + >::type Scalar; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseUnaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseUnaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseUnaryOp +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + inline TensorCwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const UnaryOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + typename internal::remove_all::type& + nestedExpression() { return m_xpr.const_cast_derived(); } + + protected: + typename XprType::Nested m_xpr; + const UnaryOp m_functor; +}; + + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename result_of< + BinaryOp( + typename LhsXprType::Scalar, + typename RhsXprType::Scalar + ) + >::type Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseBinaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseBinaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseBinaryOp +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + inline TensorCwiseBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const BinaryOp& func = BinaryOp()) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const BinaryOp& functor() const { return m_functor; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const BinaryOp m_functor; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EXPR_H diff --git 
a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h new file mode 100644 index 000000000..dc97764f0 --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h @@ -0,0 +1,27 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H + +namespace Eigen { + +template class Tensor; +template class TensorMap; +template class TensorBase; + +template class TensorCwiseUnaryOp; +template class TensorCwiseBinaryOp; + +// Move to internal? +template struct TensorEvaluator; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h new file mode 100644 index 000000000..7dec1e08d --- /dev/null +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h @@ -0,0 +1,101 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_MAP_H +#define EIGEN_CXX11_TENSOR_TENSOR_MAP_H + +namespace Eigen { + +template class Stride; + + +/** \class TensorMap + * \ingroup CXX11_Tensor_Module + * + * \brief A tensor expression mapping an existing array of data. + * + */ + +template class TensorMap : public TensorBase > +{ + public: + typedef TensorMap Self; + typedef typename PlainObjectType::Base Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + /* typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar *, + const Scalar *>::type + PointerType;*/ + typedef Scalar* PointerType; + typedef PointerType PointerArgType; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions({{firstDimension}}) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(1 == PlainObjectType::NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(PointerArgType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions({{firstDimension, otherDimensions...}}) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == PlainObjectType::NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_dimensions[n]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(m_dimensions); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar* data() { return m_data; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar* data() const { return m_data; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_data[index]; + } + +#ifdef EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + { + static_assert(sizeof...(otherIndices) + 1 == PlainObjectType::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); + const Index index = internal::tensor_index_linearization_helper::run(array{{firstIndex, otherIndices...}}, m_dimensions); + return m_data[index]; + } +#endif + + template + EIGEN_DEVICE_FUNC + Self& operator=(const OtherDerived& other) + { + internal::TensorAssign::run(*this, other); + return *this; + } + + private: + typename PlainObjectType::Scalar* m_data; + array m_dimensions; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_MAP_H diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h index a34600ee6..503d7cfd6 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h @@ -37,14 +37,19 @@ template class TensorStorage : public TensorStorage::type> { - typedef TensorStorage::type> Base_; + typedef TensorStorage::type> Base_; + public: - TensorStorage() = default; - TensorStorage(const TensorStorage&) = default; - TensorStorage(TensorStorage&&) = default; + TensorStorage() { } + TensorStorage(const TensorStorage& other) : Base_(other) { } + +#ifdef EIGEN_HAVE_RVALUE_REFERENCES +// TensorStorage(TensorStorage&&) = default; +#endif TensorStorage(internal::constructor_without_unaligned_array_assert) : Base_(internal::constructor_without_unaligned_array_assert()) {} - TensorStorage(DenseIndex size, const std::array& dimensions) : Base_(size, dimensions) {} - TensorStorage& operator=(const TensorStorage&) = default; + TensorStorage(DenseIndex size, const array& dimensions) : Base_(size, dimensions) {} + + // TensorStorage& operator=(const TensorStorage&) = default; }; // pure dynamic @@ -52,17 +57,17 @@ template class TensorStorage::type> { T *m_data; - std::array m_dimensions; + array m_dimensions; typedef TensorStorage::type> Self_; public: - TensorStorage() : m_data(0), m_dimensions(internal::template repeat(0)) {} + TensorStorage() : m_data(0), m_dimensions() {} TensorStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_dimensions(internal::template repeat(0)) {} - TensorStorage(DenseIndex size, const std::array& dimensions) - : m_data(internal::conditional_aligned_new_auto(size)), m_dimensions(dimensions) - { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN } - TensorStorage(const Self_& other) + TensorStorage(DenseIndex size, const array& dimensions) + : m_data(internal::conditional_aligned_new_auto(size)), m_dimensions(dimensions) + { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN } + TensorStorage(const Self_& other) : 
m_data(internal::conditional_aligned_new_auto(internal::array_prod(other.m_dimensions))) , m_dimensions(other.m_dimensions) { @@ -76,28 +81,34 @@ class TensorStorage(m_data, internal::array_prod(m_dimensions)); } void swap(Self_& other) { std::swap(m_data,other.m_data); std::swap(m_dimensions,other.m_dimensions); } - std::array dimensions(void) const {return m_dimensions;} - void conservativeResize(DenseIndex size, const std::array& nbDimensions) + const array& dimensions() const {return m_dimensions;} + + void conservativeResize(DenseIndex size, const array& nbDimensions) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, internal::array_prod(m_dimensions)); m_dimensions = nbDimensions; } - void resize(DenseIndex size, const std::array& nbDimensions) + void resize(DenseIndex size, const array& nbDimensions) { if(size != internal::array_prod(m_dimensions)) { @@ -110,8 +121,9 @@ class TensorStorage +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H +#define EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H + +namespace Eigen { +namespace internal { + + +template +class compute_tensor_flags +{ + enum { + is_dynamic_size_storage = 1, + + aligned_bit = + ( + ((Options&DontAlign)==0) && ( +#if EIGEN_ALIGN_STATICALLY + (!is_dynamic_size_storage) +#else + 0 +#endif + || +#if EIGEN_ALIGN + is_dynamic_size_storage +#else + 0 +#endif + ) + ) ? AlignedBit : 0, + packet_access_bit = packet_traits::Vectorizable && aligned_bit ? PacketAccessBit : 0 + }; + + public: + enum { ret = packet_access_bit | aligned_bit}; +}; + + +template +struct traits > +{ + typedef Scalar_ Scalar; + typedef Dense StorageKind; + typedef DenseIndex Index; + enum { + Options = Options_, + Flags = compute_tensor_flags::ret, + }; +}; + + +template +struct traits > + : public traits +{ + typedef traits BaseTraits; + typedef typename BaseTraits::Scalar Scalar; + typedef typename BaseTraits::StorageKind StorageKind; + typedef typename BaseTraits::Index Index; +}; + + +template +struct eval, Eigen::Dense> +{ + typedef const Tensor<_Scalar, NumIndices_, Options_>& type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const Tensor<_Scalar, NumIndices_, Options_>& type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorMap& type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorMap& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef const Tensor& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef const Tensor& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef const TensorMap& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef const TensorMap& type; +}; + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index 0a6c56c19..31583d3ca 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -93,7 +93,7 @@ ei_add_test(minres) ei_add_test(levenberg_marquardt) ei_add_test(bdcsvd) -option(EIGEN_TEST_CXX11 "Enable testing of C++11 features (e.g. Tensor module)." OFF) +option(EIGEN_TEST_CXX11 "Enable testing of C++11 features (e.g. Tensor module)." 
ON)
 if(EIGEN_TEST_CXX11)
   # FIXME: add C++11 compiler switch in some portable way
   # (MSVC doesn't need any for example, so this will
@@ -101,4 +101,7 @@
   ei_add_test(cxx11_meta "-std=c++0x")
   ei_add_test(cxx11_tensor_simple "-std=c++0x")
   ei_add_test(cxx11_tensor_symmetry "-std=c++0x")
+  ei_add_test(cxx11_tensor_assign "-std=c++0x")
+  ei_add_test(cxx11_tensor_expr "-std=c++0x")
+  ei_add_test(cxx11_tensor_map "-std=c++0x")
 endif()
diff --git a/unsupported/test/cxx11_tensor_simple.cpp b/unsupported/test/cxx11_tensor_simple.cpp
index ea512c9cc..1f76033ea 100644
--- a/unsupported/test/cxx11_tensor_simple.cpp
+++ b/unsupported/test/cxx11_tensor_simple.cpp
@@ -163,7 +163,7 @@ static void test_3d()
   VERIFY_IS_EQUAL((epsilon(0,2,1)), -1);
   VERIFY_IS_EQUAL((epsilon(1,0,2)), -1);
-  std::array dims{{2,3,4}};
+  array dims{{2,3,4}};
   Tensor t1(dims);
   Tensor t2(dims);
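
For reference, a minimal usage sketch of the API introduced by this patch (not part of the change itself): it exercises the advertised features — mapping a region of existing memory as a tensor, and unary/binary coefficient-wise expressions evaluated through internal::TensorAssign. It assumes a C++11 compiler, so the variadic constructors and index operators guarded by EIGEN_HAS_VARIADIC_TEMPLATES are available; the scalar type, rank, sizes, variable names and include path are illustrative assumptions, not taken from the patch.

    // Illustrative sketch only: names, sizes and the include path are assumptions.
    #include <unsupported/Eigen/CXX11/Tensor>  // adjust to the local include setup

    int main()
    {
      // Rank-2 float tensors that own their storage.
      Eigen::Tensor<float, 2> a(3, 4);
      Eigen::Tensor<float, 2> b(3, 4);
      a.setConstant(4.0f);   // TensorBase::setConstant / setZero / setRandom
      b.setRandom();

      // Coefficient-wise expressions: operator+ and cwiseSqrt only build
      // TensorCwiseBinaryOp / TensorCwiseUnaryOp nodes; the per-coefficient
      // copy loop runs inside internal::TensorAssign on assignment.
      Eigen::Tensor<float, 2> c(3, 4);   // operator= cannot resize yet (see the
      c = a + b;                         // FIXME in Tensor::operator=), so the
      Eigen::Tensor<float, 2> d(3, 4);   // destinations are sized up front.
      d = a.cwiseSqrt();

      // Mapping a region of existing memory: no copy is made, the result of
      // the expression is written straight into 'buffer'.
      float buffer[12];
      Eigen::TensorMap<Eigen::Tensor<float, 2> > m(buffer, 3, 4);
      m = a + b;

      // Coefficient access through operator().
      return (m(0, 0) == c(0, 0)) ? 0 : 1;
    }

As the TODOs in TensorAssign and TensorEvaluator note, evaluation at this stage is scalar-only and single-threaded; vectorization and multi-threaded or GPU execution are left for later patches.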