Drop support for c++03 in Eigen tensor. Get rid of some code used to emulate c++11 functionality with older compilers.

Rasmus Munk Larsen 2019-10-18 16:42:00 -07:00
parent df0e8b8137
commit 668ab3fc47
11 changed files with 139 additions and 711 deletions

View File

@@ -13,6 +13,8 @@
#include "../../../Eigen/Core"
#if EIGEN_HAS_CXX11
#if defined(EIGEN_USE_SYCL)
#undef min
#undef max
@@ -47,6 +49,7 @@
#include <cmath>
#include <cstddef>
#include <cstring>
#include <random>
#ifdef _WIN32
typedef __int16 int16_t;
@@ -61,10 +64,6 @@ typedef unsigned __int64 uint64_t;
#include <unistd.h>
#endif
#if __cplusplus > 199711 || EIGEN_COMP_MSVC >= 1900
#include <random>
#endif
#ifdef _WIN32
#include <windows.h>
#elif defined(__APPLE__)
@@ -164,4 +163,5 @@ typedef unsigned __int64 uint64_t;
#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_HAS_CXX11
//#endif // EIGEN_CXX11_TENSOR_MODULE

View File

@@ -51,13 +51,11 @@ EIGEN_ALWAYS_INLINE DSizes<IndexType, NumDims> strides(
return strides<Layout>(DSizes<IndexType, NumDims>(dimensions));
}
#if EIGEN_HAS_CXX11
template <int Layout, std::ptrdiff_t... Indices>
EIGEN_STRONG_INLINE DSizes<std::ptrdiff_t, sizeof...(Indices)> strides(
const Sizes<Indices...>& sizes) {
return strides<Layout>(DSizes<std::ptrdiff_t, sizeof...(Indices)>(sizes));
}
#endif
// -------------------------------------------------------------------------- //
// TensorBlockDescriptor specifies a block offset within a tensor and the block
@@ -185,7 +183,7 @@ class TensorBlockDescriptor {
Scalar* dst_base,
const DSizes<DstStridesIndexType, NumDims>& dst_strides) {
// DSizes constructor will do index type promotion if it's safe.
AddDestinationBuffer<Layout>(*this, dst_base, Dimensions(dst_strides));
AddDestinationBuffer<Layout>(dst_base, Dimensions(dst_strides));
}
TensorBlockDescriptor& DropDestinationBuffer() {
@@ -285,11 +283,6 @@ class TensorBlockScratchAllocator {
// -------------------------------------------------------------------------- //
// TensorBlockKind represents all possible block kinds, that can be produced by
// TensorEvaluator::evalBlock function.
#if !EIGEN_HAS_CXX11
// To be able to use `TensorBlockKind::kExpr` in C++03 we need a namespace.
// (Use of enumeration in a nested name specifier is a c++11 extension).
namespace TensorBlockKind {
#endif
enum TensorBlockKind {
// Tensor block that is a lazy expression that must be assigned to a
// destination using TensorBlockAssign.
@@ -313,9 +306,6 @@ enum TensorBlockKind {
// TensorBlockAssign or for constructing another block expression.
kMaterializedInOutput
};
#if !EIGEN_HAS_CXX11
} // namespace TensorBlockKind
#endif
// -------------------------------------------------------------------------- //
// TensorBlockNotImplemented should be used to defined TensorBlock typedef in
@@ -361,9 +351,6 @@ struct XprScalar<void> {
template <typename Scalar, int NumDims, int Layout,
typename IndexType = Eigen::Index>
class TensorMaterializedBlock {
#if !EIGEN_HAS_CXX11
typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
#endif
public:
typedef DSizes<IndexType, NumDims> Dimensions;
typedef TensorMap<const Tensor<Scalar, NumDims, Layout> > XprType;
@@ -543,9 +530,6 @@ class TensorMaterializedBlock {
template <typename UnaryOp, typename ArgTensorBlock>
class TensorCwiseUnaryBlock {
#if !EIGEN_HAS_CXX11
typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
#endif
static const bool NoArgBlockAccess =
internal::is_void<typename ArgTensorBlock::XprType>::value;
@@ -578,9 +562,6 @@ class TensorCwiseUnaryBlock {
template <typename BinaryOp, typename LhsTensorBlock, typename RhsTensorBlock>
class TensorCwiseBinaryBlock {
#if !EIGEN_HAS_CXX11
typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
#endif
static const bool NoArgBlockAccess =
internal::is_void<typename LhsTensorBlock::XprType>::value ||
@@ -628,9 +609,6 @@ class TensorCwiseBinaryBlock {
template <typename BlockFactory, typename ArgTensorBlock>
class TensorUnaryExprBlock {
#if !EIGEN_HAS_CXX11
typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
#endif
typedef typename ArgTensorBlock::XprType ArgXprType;
static const bool NoArgBlockAccess = internal::is_void<ArgXprType>::value;
@@ -663,9 +641,6 @@ class TensorUnaryExprBlock {
template <typename BlockFactory, typename Arg1TensorBlock,
typename Arg2TensorBlock, typename Arg3TensorBlock>
class TensorTernaryExprBlock {
#if !EIGEN_HAS_CXX11
typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
#endif
typedef typename Arg1TensorBlock::XprType Arg1XprType;
typedef typename Arg2TensorBlock::XprType Arg2XprType;
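The hunks above drop the C++03 namespace wrapper around TensorBlockKind together with the per-class typedef internal::TensorBlockKind::TensorBlockKind lines it required. A minimal sketch (not Eigen code) of the language point involved: C++03 does not allow an unscoped enumeration name in a nested name specifier, so the enumerators had to be reached through an enclosing namespace of the same name, while C++11 lets the enumeration itself qualify them:

enum TensorBlockKind { kExpr, kView, kMaterializedInScratch, kMaterializedInOutput };

// Fine in C++11; in C++03 this is exactly the "use of enumeration in a nested
// name specifier" extension mentioned in the removed comment, hence the old
// namespace TensorBlockKind { enum TensorBlockKind { ... }; } wrapper.
static const TensorBlockKind kind = TensorBlockKind::kExpr;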

View File

@@ -53,14 +53,8 @@ static void initializeDeviceProp() {
// compile with nvcc, so we resort to atomics and thread fences instead.
// Note that if the caller uses a compiler that doesn't support c++11 we
// can't ensure that the initialization is thread safe.
#if __cplusplus >= 201103L
static std::atomic<bool> first(true);
if (first.exchange(false)) {
#else
static bool first = true;
if (first) {
first = false;
#endif
// We're the first thread to reach this point.
int num_devices;
gpuError_t status = gpuGetDeviceCount(&num_devices);
@@ -83,16 +77,12 @@
}
}
#if __cplusplus >= 201103L
std::atomic_thread_fence(std::memory_order_release);
#endif
m_devicePropInitialized = true;
} else {
// Wait for the other thread to inititialize the properties.
while (!m_devicePropInitialized) {
#if __cplusplus >= 201103L
std::atomic_thread_fence(std::memory_order_acquire);
#endif
EIGEN_SLEEP(1000);
}
}
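The hunk above keeps only the C++11 branch of the one-time device-property initialization. A minimal sketch of the retained pattern (not the Eigen implementation, which also queries and caches the device properties): the first thread to flip an atomic flag performs the work and publishes it with a release fence, while the remaining threads poll behind an acquire fence until the result is visible:

#include <atomic>

static std::atomic<bool> first(true);
static bool initialized = false;  // mirrors m_devicePropInitialized above

void initializeOnce() {
  if (first.exchange(false)) {
    // ... perform the one-time initialization here ...
    std::atomic_thread_fence(std::memory_order_release);
    initialized = true;
  } else {
    // Wait for the initializing thread (the real code sleeps between polls).
    while (!initialized) {
      std::atomic_thread_fence(std::memory_order_acquire);
    }
  }
}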

View File

@@ -76,13 +76,8 @@ struct ExpressionHasTensorBroadcastingOp<
* Default strategy: the expression is evaluated sequentially with a single cpu
* thread, without vectorization and block evaluation.
*/
#if EIGEN_HAS_CXX11
template <typename Expression, typename Device, bool Vectorizable,
TiledEvaluation Tiling>
#else
template <typename Expression, typename Device, bool Vectorizable,
TiledEvaluation::TiledEvaluation Tiling>
#endif
class TensorExecutor {
public:
typedef typename Expression::Index StorageIndex;
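For context on how this default executor is reached (a rough sketch of the surrounding machinery, simplified rather than quoted from Eigen): tensor assignment wraps the destination and source in an assignment expression and hands it to the executor's static run() for the chosen device, with the Vectorizable and Tiling parameters filled in from the IsVectorizable and IsTileable traits:

// Sketch only; the exact expression types and signatures may differ.
// typedef TensorAssignOp<Dst, const Src> Assign;
// Assign assign(dst, src);
// internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());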

View File

@@ -10,10 +10,6 @@
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H
#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H
// This code requires the ability to initialize arrays of constant
// values directly inside a class.
#if __cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900
namespace Eigen {
/** \class TensorFFT
@@ -671,7 +667,4 @@ struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, D
} // end namespace Eigen
#endif // EIGEN_HAS_CONSTEXPR
#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H
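The guard removed above existed because TensorFFT initializes arrays of constant values directly inside a class, which requires C++11. A minimal sketch of one such in-class initialization (illustrative only, not the actual TensorFFT members):

struct TwiddleSigns {
  // Brace-or-equal initializer on a non-static array member: ill-formed in
  // C++03, valid from C++11 onwards.
  const double sign[2] = {1.0, -1.0};
};

Note that the removed closing directive was commented "#endif // EIGEN_HAS_CONSTEXPR", which never matched the "__cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900" condition it closed; both disappear with this change.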

View File

@@ -155,19 +155,11 @@ struct IsVectorizable<GpuDevice, Expression> {
};
// Tiled evaluation strategy.
#if !EIGEN_HAS_CXX11
// To be able to use `TiledEvaluation::Off` in C++03 we need a namespace.
// (Use of enumeration in a nested name specifier is a c++11 extension).
namespace TiledEvaluation {
#endif
enum TiledEvaluation {
Off = 0, // tiled evaluation is not supported
On = 1, // still work in progress (see TensorBlockV2.h)
Legacy = 2 // soon to be deprecated (see TensorBock.h)
};
#if !EIGEN_HAS_CXX11
} // namespace TiledEvaluation
#endif
template <typename Device, typename Expression>
struct IsTileable {
@@ -182,30 +174,16 @@ struct IsTileable {
TensorEvaluator<Expression, Device>::BlockAccessV2 &&
TensorEvaluator<Expression, Device>::PreferBlockAccess;
#if EIGEN_HAS_CXX11
static const TiledEvaluation value =
BlockAccessV2
? TiledEvaluation::On
: (BlockAccess ? TiledEvaluation::Legacy : TiledEvaluation::Off);
#else
static const TiledEvaluation::TiledEvaluation value =
BlockAccessV2
? TiledEvaluation::On
: (BlockAccess ? TiledEvaluation::Legacy : TiledEvaluation::Off);
#endif
};
#if EIGEN_HAS_CXX11
template <typename Expression, typename Device,
bool Vectorizable = IsVectorizable<Device, Expression>::value,
TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
class TensorExecutor;
#else
template <typename Expression, typename Device,
bool Vectorizable = IsVectorizable<Device, Expression>::value,
TiledEvaluation::TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
class TensorExecutor;
#endif
// TODO(ezhulenev): Add TiledEvaluation support to async executor.
template <typename Expression, typename Device, typename DoneCallback,
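With the C++03 branches gone, the tiling strategy is selected once, using the enum directly as the type of the default template argument. A simplified sketch of the selection logic (not the exact Eigen trait, which reads the evaluator's BlockAccess flags):

enum TiledEvaluation { Off = 0, On = 1, Legacy = 2 };

template <bool BlockAccess, bool BlockAccessV2>
struct SelectTiling {
  static const TiledEvaluation value =
      BlockAccessV2 ? TiledEvaluation::On
                    : (BlockAccess ? TiledEvaluation::Legacy : TiledEvaluation::Off);
};

// e.g. SelectTiling<true, false>::value == TiledEvaluation::Legacy; the result
// feeds the "TiledEvaluation Tiling" default parameter of TensorExecutor above.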

View File

@@ -445,7 +445,7 @@ __global__ void OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturn
*/
template <typename Op, typename CoeffReturnType>
struct ReductionReturnType {
#if EIGEN_HAS_CXX11 && defined(EIGEN_USE_SYCL)
#if defined(EIGEN_USE_SYCL)
typedef typename remove_const<decltype(std::declval<Op>().initialize())>::type type;
#else
typedef typename remove_const<CoeffReturnType>::type type;
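The surviving SYCL branch relies on a C++11 idiom worth spelling out: decltype applied to std::declval<Op>() deduces the reducer's accumulator type from its initialize() method without constructing an Op. A minimal standalone sketch with a hypothetical reducer (not Eigen's):

#include <type_traits>
#include <utility>

struct MeanReducer {
  double initialize() const { return 0.0; }
};

typedef std::remove_const<decltype(std::declval<MeanReducer>().initialize())>::type AccumType;
static_assert(std::is_same<AccumType, double>::value, "accumulator type deduced from initialize()");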

View File

@@ -11,19 +11,102 @@
#define EIGEN_CXX11META_H
#include <vector>
#include "EmulateArray.h"
// Emulate the cxx11 functionality that we need if the compiler doesn't support it.
// Visual studio 2015 doesn't advertise itself as cxx11 compliant, although it
// supports enough of the standard for our needs
#if __cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900
#include <array>
#include "CXX11Workarounds.h"
namespace Eigen {
// Workaround for constructors used by legacy code calling Eigen::array.
template <typename T, size_t N>
class array : public std::array<T, N> {
public:
typedef std::array<T, N> Base;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array() : Base() {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v) : Base{v} {
EIGEN_STATIC_ASSERT(N == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v1, const T& v2) : Base{v1, v2} {
EIGEN_STATIC_ASSERT(N == 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v1, const T& v2, const T& v3) : Base{v1, v2, v3} {
EIGEN_STATIC_ASSERT(N == 3, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v1, const T& v2, const T& v3, const T& v4)
: Base{v1, v2, v3, v4} {
EIGEN_STATIC_ASSERT(N == 4, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v1, const T& v2, const T& v3, const T& v4, const T& v5)
: Base{v1, v2, v3, v4, v5} {
EIGEN_STATIC_ASSERT(N == 5, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v1, const T& v2, const T& v3, const T& v4, const T& v5,
const T& v6) : Base{v1, v2, v3, v4, v5, v6} {
EIGEN_STATIC_ASSERT(N == 6, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v1, const T& v2, const T& v3, const T& v4, const T& v5,
const T& v6, const T& v7)
: Base{v1, v2, v3, v4, v5, v6, v7} {
EIGEN_STATIC_ASSERT(N == 7, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(const T& v1, const T& v2, const T& v3, const T& v4, const T& v5,
const T& v6, const T& v7, const T& v8)
: Base{v1, v2, v3, v4, v5, v6, v7, v8} {
EIGEN_STATIC_ASSERT(N == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
array(std::initializer_list<T> l) {
eigen_assert(l.size() == N);
internal::smart_copy(l.begin(), l.end(), this->begin());
}
#endif
};
template<typename T, std::size_t N> struct internal::array_size<const array<T,N> > {
enum { value = N };
};
template<typename T, std::size_t N> struct internal::array_size<array<T,N> > {
enum { value = N };
};
namespace internal {
/* std::get is only constexpr in C++14, not yet in C++11
* - libstdc++ from version 4.7 onwards has it nevertheless,
* so use that
* - libstdc++ older versions: use _M_instance directly
* - libc++ all versions so far: use __elems_ directly
* - all other libs: use std::get to be portable, but
* this may not be constexpr
*/
#if defined(__GLIBCXX__) && __GLIBCXX__ < 20120322
#define STD_GET_ARR_HACK a._M_instance[I_]
#elif defined(_LIBCPP_VERSION)
#define STD_GET_ARR_HACK a.__elems_[I_]
#else
#define STD_GET_ARR_HACK std::template get<I_, T, N>(a)
#endif
template<std::size_t I_, class T, std::size_t N> constexpr inline T& array_get(std::array<T,N>& a) { return (T&) STD_GET_ARR_HACK; }
template<std::size_t I_, class T, std::size_t N> constexpr inline T&& array_get(std::array<T,N>&& a) { return (T&&) STD_GET_ARR_HACK; }
template<std::size_t I_, class T, std::size_t N> constexpr inline T const& array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
#undef STD_GET_ARR_HACK
/** \internal
* \file CXX11/util/CXX11Meta.h
* This file contains generic metaprogramming classes which are not specifically related to Eigen.
@@ -537,10 +620,4 @@ InstType instantiate_by_c_array(ArrType* arr)
} // end namespace Eigen
#else // Non C++11, fallback to emulation mode
#include "EmulateCXX11Meta.h"
#endif
#endif // EIGEN_CXX11META_H
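A short usage sketch for the array wrapper added above (assuming the class compiles as shown and the Tensor headers are included): Eigen::array keeps the fixed-arity constructors that legacy Tensor code relies on while remaining a std::array underneath, so both interfaces are available:

void example() {
  Eigen::array<int, 3> dims(4, 5, 6);       // legacy constructor; N must match the arity
  Eigen::array<int, 3>::Base& base = dims;  // usable wherever std::array<int, 3> is expected
  int first = dims[0];                      // plus the usual std::array interface
  (void)base; (void)first;
}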

View File

@@ -1,261 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_EMULATE_ARRAY_H
#define EIGEN_EMULATE_ARRAY_H
// The array class is only available starting with cxx11. Emulate our own here
// if needed. Beware, msvc still doesn't advertise itself as a c++11 compiler!
// Moreover, CUDA doesn't support the STL containers, so we use our own instead.
#if (__cplusplus <= 199711L && EIGEN_COMP_MSVC < 1900) || defined(EIGEN_GPUCC) || defined(EIGEN_AVOID_STL_ARRAY)
namespace Eigen {
template <typename T, size_t n> class array {
public:
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& operator[] (size_t index) { eigen_internal_assert(index < size()); return values[index]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& operator[] (size_t index) const { eigen_internal_assert(index < size()); return values[index]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& at(size_t index) { eigen_assert(index < size()); return values[index]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& at(size_t index) const { eigen_assert(index < size()); return values[index]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& front() { return values[0]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& front() const { return values[0]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& back() { return values[n-1]; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& back() const { return values[n-1]; }
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
static std::size_t size() { return n; }
T values[n];
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array() { }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v) {
EIGEN_STATIC_ASSERT(n==1, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2) {
EIGEN_STATIC_ASSERT(n==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3) {
EIGEN_STATIC_ASSERT(n==3, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3,
const T& v4) {
EIGEN_STATIC_ASSERT(n==4, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
values[3] = v4;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4,
const T& v5) {
EIGEN_STATIC_ASSERT(n==5, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
values[3] = v4;
values[4] = v5;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4,
const T& v5, const T& v6) {
EIGEN_STATIC_ASSERT(n==6, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
values[3] = v4;
values[4] = v5;
values[5] = v6;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4,
const T& v5, const T& v6, const T& v7) {
EIGEN_STATIC_ASSERT(n==7, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
values[3] = v4;
values[4] = v5;
values[5] = v6;
values[6] = v7;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(
const T& v1, const T& v2, const T& v3, const T& v4,
const T& v5, const T& v6, const T& v7, const T& v8) {
EIGEN_STATIC_ASSERT(n==8, YOU_MADE_A_PROGRAMMING_MISTAKE)
values[0] = v1;
values[1] = v2;
values[2] = v3;
values[3] = v4;
values[4] = v5;
values[5] = v6;
values[6] = v7;
values[7] = v8;
}
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array(std::initializer_list<T> l) {
eigen_assert(l.size() == n);
internal::smart_copy(l.begin(), l.end(), values);
}
#endif
};
// Specialize array for zero size
template <typename T> class array<T, 0> {
public:
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& operator[] (size_t) {
eigen_assert(false && "Can't index a zero size array");
return dummy;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& operator[] (size_t) const {
eigen_assert(false && "Can't index a zero size array");
return dummy;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& front() {
eigen_assert(false && "Can't index a zero size array");
return dummy;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& front() const {
eigen_assert(false && "Can't index a zero size array");
return dummy;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE T& back() {
eigen_assert(false && "Can't index a zero size array");
return dummy;
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const T& back() const {
eigen_assert(false && "Can't index a zero size array");
return dummy;
}
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::size_t size() { return 0; }
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE array() : dummy() { }
#if EIGEN_HAS_VARIADIC_TEMPLATES
EIGEN_DEVICE_FUNC array(std::initializer_list<T> l) : dummy() {
EIGEN_UNUSED_VARIABLE(l);
eigen_assert(l.size() == 0);
}
#endif
private:
T dummy;
};
// Comparison operator
// Todo: implement !=, <, <=, >, and >=
template<class T, std::size_t N>
EIGEN_DEVICE_FUNC bool operator==(const array<T,N>& lhs, const array<T,N>& rhs) {
for (std::size_t i = 0; i < N; ++i) {
if (lhs[i] != rhs[i]) {
return false;
}
}
return true;
}
namespace internal {
template<std::size_t I_, class T, std::size_t N>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(array<T,N>& a) {
return a[I_];
}
template<std::size_t I_, class T, std::size_t N>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const array<T,N>& a) {
return a[I_];
}
template<class T, std::size_t N> struct array_size<array<T,N> > {
enum { value = N };
};
template<class T, std::size_t N> struct array_size<array<T,N>& > {
enum { value = N };
};
template<class T, std::size_t N> struct array_size<const array<T,N> > {
enum { value = N };
};
template<class T, std::size_t N> struct array_size<const array<T,N>& > {
enum { value = N };
};
} // end namespace internal
} // end namespace Eigen
#else
// The compiler supports c++11, and we're not targeting cuda: use std::array as Eigen::array
#include <array>
namespace Eigen {
template <typename T, std::size_t N> using array = std::array<T, N>;
namespace internal {
/* std::get is only constexpr in C++14, not yet in C++11
* - libstdc++ from version 4.7 onwards has it nevertheless,
* so use that
* - libstdc++ older versions: use _M_instance directly
* - libc++ all versions so far: use __elems_ directly
* - all other libs: use std::get to be portable, but
* this may not be constexpr
*/
#if defined(__GLIBCXX__) && __GLIBCXX__ < 20120322
#define STD_GET_ARR_HACK a._M_instance[I_]
#elif defined(_LIBCPP_VERSION)
#define STD_GET_ARR_HACK a.__elems_[I_]
#else
#define STD_GET_ARR_HACK std::template get<I_, T, N>(a)
#endif
template<std::size_t I_, class T, std::size_t N> constexpr inline T& array_get(std::array<T,N>& a) { return (T&) STD_GET_ARR_HACK; }
template<std::size_t I_, class T, std::size_t N> constexpr inline T&& array_get(std::array<T,N>&& a) { return (T&&) STD_GET_ARR_HACK; }
template<std::size_t I_, class T, std::size_t N> constexpr inline T const& array_get(std::array<T,N> const& a) { return (T const&) STD_GET_ARR_HACK; }
#undef STD_GET_ARR_HACK
} // end namespace internal
} // end namespace Eigen
#endif
#endif // EIGEN_EMULATE_ARRAY_H

View File

@@ -1,311 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_EMULATE_CXX11_META_H
#define EIGEN_EMULATE_CXX11_META_H
namespace Eigen {
namespace internal {
/** \internal
* \file CXX11/util/EmulateCXX11Meta.h
* This file emulates a subset of the functionality provided by CXXMeta.h for
* compilers that don't yet support cxx11 such as nvcc.
*/
struct empty_list { static const std::size_t count = 0; };
template<typename T, typename Tail=empty_list> struct type_list {
typedef T HeadType;
typedef Tail TailType;
static const T head;
static const Tail tail;
static const std::size_t count = 1 + Tail::count;
};
struct null_type { };
template<typename T1 = null_type, typename T2 = null_type, typename T3 = null_type,
typename T4 = null_type, typename T5 = null_type, typename T6 = null_type,
typename T7 = null_type, typename T8 = null_type>
struct make_type_list {
typedef typename make_type_list<T2, T3, T4, T5, T6, T7, T8>::type tailresult;
typedef type_list<T1, tailresult> type;
};
template<> struct make_type_list<> {
typedef empty_list type;
};
template <std::size_t index, class TList> struct get_type;
template <class Head, class Tail>
struct get_type<0, type_list<Head, Tail> >
{
typedef Head type;
};
template <std::size_t i, class Head, class Tail>
struct get_type<i, type_list<Head, Tail> >
{
typedef typename get_type<i-1, Tail>::type type;
};
/* numeric list */
template <typename T, T n>
struct type2val {
typedef T type;
static const T value = n;
};
template<typename T, size_t n, T V> struct gen_numeric_list_repeated;
template<typename T, T V> struct gen_numeric_list_repeated<T, 1, V> {
typedef typename make_type_list<type2val<T, V> >::type type;
};
template<typename T, T V> struct gen_numeric_list_repeated<T, 2, V> {
typedef typename make_type_list<type2val<T, V>, type2val<T, V> >::type type;
};
template<typename T, T V> struct gen_numeric_list_repeated<T, 3, V> {
typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
};
template<typename T, T V> struct gen_numeric_list_repeated<T, 4, V> {
typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
};
template<typename T, T V> struct gen_numeric_list_repeated<T, 5, V> {
typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
};
template<typename T, T V> struct gen_numeric_list_repeated<T, 6, V> {
typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>,
type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type;
};
template<typename T, T V> struct gen_numeric_list_repeated<T, 7, V> {
typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>,
type2val<T, V>, type2val<T, V>, type2val<T, V>,
type2val<T, V> >::type type;
};
template<typename T, T V> struct gen_numeric_list_repeated<T, 8, V> {
typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>,
type2val<T, V>, type2val<T, V>, type2val<T, V>,
type2val<T, V>, type2val<T, V> >::type type;
};
template <std::size_t index, class NList> struct get;
template <std::size_t i>
struct get<i, empty_list>
{
get() { eigen_assert(false && "index overflow"); }
typedef void type;
static const char value = '\0';
};
template <std::size_t i, class Head>
struct get<i, type_list<Head, empty_list> >
{
get() { eigen_assert(false && "index overflow"); }
typedef void type;
static const char value = '\0';
};
template <class Head>
struct get<0, type_list<Head, empty_list> >
{
typedef typename Head::type type;
static const type value = Head::value;
};
template <class Head, class Tail>
struct get<0, type_list<Head, Tail> >
{
typedef typename Head::type type;
static const type value = Head::value;
};
template <std::size_t i, class Head, class Tail>
struct get<i, type_list<Head, Tail> >
{
typedef typename Tail::HeadType::type type;
static const type value = get<i-1, Tail>::value;
};
template <class NList> struct arg_prod {
static const typename NList::HeadType::type value = get<0, NList>::value * arg_prod<typename NList::TailType>::value;
};
template <> struct arg_prod<empty_list> {
static const int value = 1;
};
template<int n, typename t>
array<t, n> repeat(t v) {
array<t, n> array;
array.fill(v);
return array;
}
template<std::size_t I_, class Head, class Tail>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(type_list<Head, Tail>&) {
return get<I_, type_list<Head, Tail> >::value;
}
template<std::size_t I_, class Head, class Tail>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(const type_list<Head, Tail>&) {
return get<I_, type_list<Head, Tail> >::value;
}
template <class NList>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NList::HeadType::type array_prod(const NList&) {
return arg_prod<NList>::value;
}
template<typename t, std::size_t n>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, n>& a) {
t prod = 1;
for (size_t i = 0; i < n; ++i) { prod *= a[i]; }
return prod;
}
template<typename t>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, 0>& /*a*/) {
return 1;
}
template<typename t>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const std::vector<t>& a) {
eigen_assert(a.size() > 0);
t prod = 1;
for (size_t i = 0; i < a.size(); ++i) { prod *= a[i]; }
return prod;
}
template<std::size_t I_, class T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(std::vector<T>& a) {
return a[I_];
}
template<std::size_t I_, class T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const std::vector<T>& a) {
return a[I_];
}
struct sum_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a + b; }
};
struct product_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a * b; }
};
struct logical_and_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a && b; }
};
struct logical_or_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a || b; }
};
struct equal_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a == b; }
};
struct not_equal_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a != b; }
};
struct lesser_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a < b; }
};
struct lesser_equal_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a <= b; }
};
struct greater_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a > b; }
};
struct greater_equal_op {
template<typename A, typename B> static inline bool run(A a, B b) { return a >= b; }
};
struct not_op {
template<typename A> static inline bool run(A a) { return !a; }
};
struct negation_op {
template<typename A> static inline bool run(A a) { return -a; }
};
struct greater_equal_zero_op {
template<typename A> static inline bool run(A a) { return a >= 0; }
};
template<typename Reducer, typename Op, typename A, std::size_t N>
struct ArrayApplyAndReduce {
static inline bool run(const array<A, N>& a) {
EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
bool result = Reducer::run(Op::run(a[0]), Op::run(a[1]));
for (size_t i = 2; i < N; ++i) {
result = Reducer::run(result, Op::run(a[i]));
}
return result;
}
};
template<typename Reducer, typename Op, typename A>
struct ArrayApplyAndReduce<Reducer, Op, A, 1> {
static inline bool run(const array<A, 1>& a) {
return Op::run(a[0]);
}
};
template<typename Reducer, typename Op, typename A, std::size_t N>
inline bool array_apply_and_reduce(const array<A, N>& a) {
return ArrayApplyAndReduce<Reducer, Op, A, N>::run(a);
}
template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
struct ArrayZipAndReduce {
static inline bool run(const array<A, N>& a, const array<B, N>& b) {
EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
bool result = Reducer::run(Op::run(a[0], b[0]), Op::run(a[1], b[1]));
for (size_t i = 2; i < N; ++i) {
result = Reducer::run(result, Op::run(a[i], b[i]));
}
return result;
}
};
template<typename Reducer, typename Op, typename A, typename B>
struct ArrayZipAndReduce<Reducer, Op, A, B, 1> {
static inline bool run(const array<A, 1>& a, const array<B, 1>& b) {
return Op::run(a[0], b[0]);
}
};
template<typename Reducer, typename Op, typename A, typename B, std::size_t N>
inline bool array_zip_and_reduce(const array<A, N>& a, const array<B, N>& b) {
return ArrayZipAndReduce<Reducer, Op, A, B, N>::run(a, b);
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_EMULATE_CXX11_META_H
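For readers unfamiliar with the emulation deleted here, an illustrative sketch of what these helpers computed (they live in Eigen::internal and are removed by this commit, so this is explanatory only):

//   typedef make_type_list<type2val<int, 2>,
//                          type2val<int, 3>,
//                          type2val<int, 4> >::type Dims;  // compile-time list (2, 3, 4)
//   get<1, Dims>::value    // == 3, indexed access into the list
//   arg_prod<Dims>::value  // == 24, product of all entries

Under C++11 the same information is expressed with variadic templates (for example Eigen::Sizes<2, 3, 4> in the Tensor module), so the hand-rolled type_list machinery is no longer needed.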

View File

@@ -109,31 +109,6 @@ ei_add_test(kronecker_product)
ei_add_test(bessel_functions)
ei_add_test(special_functions)
# TODO: The following test names are prefixed with the cxx11 string, since historically
# the tests depended on c++11. This isn't the case anymore so we ought to rename them.
# FIXME: Old versions of MSVC fail to compile this code, so we just disable these tests
# when using visual studio. We should make the check more strict to enable the tests for
# newer versions of MSVC.
if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
ei_add_test(cxx11_tensor_dimension)
ei_add_test(cxx11_tensor_map)
ei_add_test(cxx11_tensor_assign)
ei_add_test(cxx11_tensor_block_access)
ei_add_test(cxx11_tensor_broadcasting)
ei_add_test(cxx11_tensor_comparisons)
ei_add_test(cxx11_tensor_forced_eval)
ei_add_test(cxx11_tensor_math)
ei_add_test(cxx11_tensor_const)
ei_add_test(cxx11_tensor_intdiv)
ei_add_test(cxx11_tensor_casts)
ei_add_test(cxx11_tensor_empty)
ei_add_test(cxx11_tensor_sugar)
ei_add_test(cxx11_tensor_roundings)
ei_add_test(cxx11_tensor_layout_swap)
ei_add_test(cxx11_tensor_io)
ei_add_test(cxx11_maxsizevector)
endif()
if(EIGEN_TEST_CXX11)
if(EIGEN_TEST_SYCL)
if(EIGEN_SYCL_TRISYCL)
@ -177,46 +152,63 @@ if(EIGEN_TEST_CXX11)
ei_add_test(cxx11_non_blocking_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_meta)
ei_add_test(cxx11_tensor_simple)
# ei_add_test(cxx11_tensor_symmetry)
ei_add_test(cxx11_tensor_index_list)
ei_add_test(cxx11_tensor_mixed_indices)
ei_add_test(cxx11_tensor_contraction)
ei_add_test(cxx11_tensor_convolution)
ei_add_test(cxx11_tensor_expr)
ei_add_test(cxx11_tensor_fixed_size)
ei_add_test(cxx11_tensor_of_const_values)
ei_add_test(cxx11_tensor_of_complex)
ei_add_test(cxx11_tensor_of_strings)
ei_add_test(cxx11_tensor_lvalue)
ei_add_test(cxx11_tensor_chipping)
ei_add_test(cxx11_maxsizevector)
ei_add_test(cxx11_tensor_argmax)
ei_add_test(cxx11_tensor_assign)
ei_add_test(cxx11_tensor_block_access)
ei_add_test(cxx11_tensor_block_eval)
ei_add_test(cxx11_tensor_block_io)
ei_add_test(cxx11_tensor_broadcasting)
ei_add_test(cxx11_tensor_casts)
ei_add_test(cxx11_tensor_chipping)
ei_add_test(cxx11_tensor_comparisons)
ei_add_test(cxx11_tensor_concatenation)
ei_add_test(cxx11_tensor_const)
ei_add_test(cxx11_tensor_contraction)
ei_add_test(cxx11_tensor_convolution)
ei_add_test(cxx11_tensor_custom_index)
ei_add_test(cxx11_tensor_custom_op)
ei_add_test(cxx11_tensor_dimension)
ei_add_test(cxx11_tensor_empty)
ei_add_test(cxx11_tensor_executor "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_expr)
ei_add_test(cxx11_tensor_fft)
ei_add_test(cxx11_tensor_fixed_size)
ei_add_test(cxx11_tensor_forced_eval)
ei_add_test(cxx11_tensor_generator)
ei_add_test(cxx11_tensor_ifft)
ei_add_test(cxx11_tensor_image_patch)
ei_add_test(cxx11_tensor_index_list)
ei_add_test(cxx11_tensor_inflation)
ei_add_test(cxx11_tensor_intdiv)
ei_add_test(cxx11_tensor_io)
ei_add_test(cxx11_tensor_layout_swap)
ei_add_test(cxx11_tensor_lvalue)
ei_add_test(cxx11_tensor_map)
ei_add_test(cxx11_tensor_math)
ei_add_test(cxx11_tensor_mixed_indices)
ei_add_test(cxx11_tensor_morphing)
ei_add_test(cxx11_tensor_move)
ei_add_test(cxx11_tensor_notification "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_of_complex)
ei_add_test(cxx11_tensor_of_const_values)
ei_add_test(cxx11_tensor_of_strings)
ei_add_test(cxx11_tensor_padding)
ei_add_test(cxx11_tensor_patch)
ei_add_test(cxx11_tensor_image_patch)
ei_add_test(cxx11_tensor_volume_patch)
ei_add_test(cxx11_tensor_random)
ei_add_test(cxx11_tensor_reduction)
ei_add_test(cxx11_tensor_argmax)
ei_add_test(cxx11_tensor_ref)
ei_add_test(cxx11_tensor_roundings)
ei_add_test(cxx11_tensor_scan)
ei_add_test(cxx11_tensor_shuffling)
ei_add_test(cxx11_tensor_simple)
ei_add_test(cxx11_tensor_striding)
ei_add_test(cxx11_tensor_notification "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_sugar)
ei_add_test(cxx11_tensor_thread_local "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_executor "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_ref)
ei_add_test(cxx11_tensor_random)
ei_add_test(cxx11_tensor_generator)
ei_add_test(cxx11_tensor_custom_op)
ei_add_test(cxx11_tensor_custom_index)
ei_add_test(cxx11_tensor_fft)
ei_add_test(cxx11_tensor_ifft)
ei_add_test(cxx11_tensor_scan)
ei_add_test(cxx11_tensor_trace)
ei_add_test(cxx11_tensor_move)
ei_add_test(cxx11_tensor_volume_patch)
# ei_add_test(cxx11_tensor_symmetry)
if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8" AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# This test requires __uint128_t which is only available on 64bit systems
ei_add_test(cxx11_tensor_uint128)