Use NumTraits<T>::RequireInitialization instead of internal::is_arithmetic<T>::value to check whether it's possible to bypass the type constructor in the tensor code.

Benoit Steiner 2015-07-07 15:23:56 -07:00
parent 7b7df7b6b8
commit 6de6fa9483
4 changed files with 3 additions and 4 deletions
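
For context (not part of the commit): NumTraits<T>::RequireInitialization is non-zero exactly when a scalar type's constructor has to run, so it is the natural trait for deciding whether raw-memory shortcuts (bulk memcpy, leaving freshly allocated buffers unconstructed) are safe. internal::is_arithmetic<T> only says whether T is a built-in arithmetic type, which is why the std::complex specialization removed in the first hunk below had to exist at all. A minimal sketch of the check, using a hypothetical helper name (copy_coeffs is not an Eigen function):

#include <cstring>
#include <complex>
#include <Eigen/Core>

// Hypothetical helper, not Eigen code: copy n coefficients either with a raw
// memcpy or element by element, depending on whether Scalar needs its
// constructor/assignment to run.
template <typename Scalar>
void copy_coeffs(Scalar* dst, const Scalar* src, std::size_t n) {
  if (!Eigen::NumTraits<Scalar>::RequireInitialization) {
    // Trivial scalars (float, int, std::complex<float>, ...) can be block-copied.
    std::memcpy(dst, src, n * sizeof(Scalar));
  } else {
    // Anything with non-trivial semantics is copied one element at a time.
    for (std::size_t i = 0; i < n; ++i)
      dst[i] = src[i];
  }
}

The practical difference is that the decision now comes from the scalar's NumTraits, so a custom scalar type can opt into or out of the fast path by declaring RequireInitialization, instead of relying on Eigen's internal is_arithmetic trait.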


@@ -67,7 +67,6 @@ template<> struct is_arithmetic<signed int> { enum { value = true }; };
 template<> struct is_arithmetic<unsigned int> { enum { value = true }; };
 template<> struct is_arithmetic<signed long> { enum { value = true }; };
 template<> struct is_arithmetic<unsigned long> { enum { value = true }; };
-template<typename T> struct is_arithmetic<std::complex<T> > { enum { value = true }; };
 template <typename T> struct add_const { typedef const T type; };
 template <typename T> struct add_const<T&> { typedef T& type; };


@@ -157,7 +157,7 @@ struct TensorEvaluator<const Derived, Device>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
-    if (internal::is_arithmetic<typename internal::remove_const<Scalar>::type>::value && data) {
+    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && data) {
       m_device.memcpy((void*)data, m_data, m_dims.TotalSize() * sizeof(Scalar));
       return false;
     }


@@ -109,7 +109,7 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
     const Index numValues = m_impl.dimensions().TotalSize();
     m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType));
     // Should initialize the memory in case we're dealing with non POD types.
-    if (!internal::is_arithmetic<CoeffReturnType>::value) {
+    if (NumTraits<CoeffReturnType>::RequireInitialization) {
       for (Index i = 0; i < numValues; ++i) {
         new(m_buffer+i) CoeffReturnType();
       }
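
As an aside, the hunk above follows the usual allocate-then-conditionally-construct pattern. A self-contained sketch of that pattern (allocate_buffer is a hypothetical helper, not Eigen code, and uses plain malloc instead of the device allocator):

#include <cstdlib>
#include <new>
#include <Eigen/Core>

// Hypothetical helper, not Eigen code: allocate raw storage for numValues
// coefficients and run placement new only when the type requires initialization.
template <typename CoeffReturnType>
CoeffReturnType* allocate_buffer(std::size_t numValues) {
  CoeffReturnType* buffer =
      static_cast<CoeffReturnType*>(std::malloc(numValues * sizeof(CoeffReturnType)));
  if (Eigen::NumTraits<CoeffReturnType>::RequireInitialization) {
    for (std::size_t i = 0; i < numValues; ++i)
      new (buffer + i) CoeffReturnType();  // default-construct each element
  }
  return buffer;  // caller must destroy any constructed elements and free the storage
}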


@@ -366,7 +366,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
     m_impl.evalSubExprsIfNeeded(NULL);
-    if (internal::is_arithmetic<typename internal::remove_const<Scalar>::type>::value && data && m_impl.data()) {
+    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && data && m_impl.data()) {
       Index contiguous_values = 1;
       if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
         for (int i = 0; i < NumDims; ++i) {