Replaced all instances of internal::(U)IntPtr with std::(u)intptr_t and removed the ICC workaround.

Colin Broderick 2023-03-21 16:50:23 +00:00 committed by Rasmus Munk Larsen
parent 2c8011c2dd
commit 8f9b8e3630
19 changed files with 52 additions and 68 deletions
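The change is mechanical: every use of the pointer-sized integer aliases that Eigen previously defined in namespace internal now uses the standard <cstdint> types directly. A minimal sketch of the idiom being migrated (the helper below is illustrative, not part of the patch):

    #include <cstdint>   // std::intptr_t, std::uintptr_t
    #include <cstddef>

    // Before: Eigen aliased pointer-sized integers itself, e.g.
    //   typedef std::ptrdiff_t IntPtr;   typedef std::size_t UIntPtr;
    // (with an ICC-specific branch using the <cstdint> types).
    // After: the standard types are used everywhere, so the aliases and the
    // ICC workaround can be dropped.

    // Hypothetical helper showing the alignment-check pattern used throughout:
    inline bool is_aligned(const void* p, std::size_t alignment) {
      // Cast the address to an unsigned pointer-sized integer and test the remainder.
      return (reinterpret_cast<std::uintptr_t>(p) % alignment) == 0;
    }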

View File

@@ -550,7 +550,7 @@ struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling>
               : int(Kernel::AssignmentTraits::DstAlignment)
   };
   const Scalar *dst_ptr = kernel.dstDataPtr();
-  if((!bool(dstIsAligned)) && (UIntPtr(dst_ptr) % sizeof(Scalar))>0)
+  if((!bool(dstIsAligned)) && (std::uintptr_t(dst_ptr) % sizeof(Scalar))>0)
   {
     // the pointer is not aligned-on scalar, so alignment is not possible
     return dense_assignment_loop<Kernel,DefaultTraversal,NoUnrolling>::run(kernel);

View File

@@ -1225,7 +1225,7 @@ struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAc
   explicit block_evaluator(const XprType& block)
     : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
   {
-    eigen_internal_assert((internal::is_constant_evaluated() || (internal::UIntPtr(block.data()) % plain_enum_max(1,evaluator<XprType>::Alignment)) == 0) \
+    eigen_internal_assert((internal::is_constant_evaluated() || (std::uintptr_t(block.data()) % plain_enum_max(1,evaluator<XprType>::Alignment)) == 0) \
                           && "data is not aligned");
   }
 };

View File

@@ -56,7 +56,7 @@ struct plain_array
 #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
 #else
 #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
-  eigen_assert((internal::is_constant_evaluated() || (internal::UIntPtr(array) & (sizemask)) == 0) \
+  eigen_assert((internal::is_constant_evaluated() || (std::uintptr_t(array) & (sizemask)) == 0) \
               && "this assertion is explained here: " \
               "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
               " **** READ THIS WEB PAGE !!! ****");

View File

@@ -191,7 +191,7 @@ struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
   internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize)+(ForceAlignment?EIGEN_MAX_ALIGN_BYTES:0),0> m_data;
   EIGEN_STRONG_INLINE Scalar* data() {
     return ForceAlignment
-            ? reinterpret_cast<Scalar*>((internal::UIntPtr(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES-1))) + EIGEN_MAX_ALIGN_BYTES)
+            ? reinterpret_cast<Scalar*>((std::uintptr_t(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES-1))) + EIGEN_MAX_ALIGN_BYTES)
            : m_data.array;
   }
 #endif
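For context, the ForceAlignment branch above relies on m_data.array being over-allocated by EIGEN_MAX_ALIGN_BYTES: masking the address down to the previous aligned boundary and then adding one full alignment step always yields an aligned pointer that still lies inside the buffer. A small illustrative sketch, assuming a hypothetical 64-byte alignment and a float buffer with 64 spare bytes:

    #include <cstdint>
    #include <cstddef>

    constexpr std::size_t kAlign = 64;   // stand-in for EIGEN_MAX_ALIGN_BYTES

    // `raw` must point to a buffer allocated with at least kAlign extra bytes.
    float* aligned_view(float* raw) {
      std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(raw);
      // Round down to the previous 64-byte boundary, then step one boundary up:
      // the result is 64-byte aligned and at most kAlign bytes past `raw`.
      std::uintptr_t bumped = (addr & ~(kAlign - 1)) + kAlign;
      return reinterpret_cast<float*>(bumped);
    }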

View File

@@ -197,7 +197,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
     // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible value:
     const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 1 : Index(InnerStrideAtCompileTime);
     EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride);
-    eigen_assert(( ((internal::UIntPtr(m_data) % internal::traits<Derived>::Alignment) == 0)
+    eigen_assert(( ((std::uintptr_t(m_data) % internal::traits<Derived>::Alignment) == 0)
                    || (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits<Derived>::Alignment ) && "data is not aligned");
 #endif
   }

View File

@@ -444,7 +444,7 @@ struct reshaped_evaluator<ArgType, Rows, Cols, Order, /* HasDirectAccess */ true
     : mapbase_evaluator<XprType, typename XprType::PlainObject>(xpr)
   {
     // TODO: for the 3.4 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
-    eigen_assert(((internal::UIntPtr(xpr.data()) % plain_enum_max(1, evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
+    eigen_assert(((std::uintptr_t(xpr.data()) % plain_enum_max(1, evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
   }
 };

View File

@@ -311,8 +311,8 @@ class gemm_blocking_space<StorageOrder,LhsScalar_,RhsScalar_,MaxRows, MaxCols, M
       this->m_blockA = m_staticA;
       this->m_blockB = m_staticB;
 #else
-      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
-      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
+      this->m_blockA = reinterpret_cast<LhsScalar*>((std::uintptr_t(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
+      this->m_blockB = reinterpret_cast<RhsScalar*>((std::uintptr_t(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
 #endif
     }

View File

@@ -66,7 +66,7 @@ class BlasVectorMapper {
   template <typename Packet>
   EIGEN_DEVICE_FUNC bool aligned(Index i) const {
-    return (UIntPtr(m_data+i)%sizeof(Packet))==0;
+    return (std::uintptr_t(m_data+i)%sizeof(Packet))==0;
   }
 protected:
@@ -253,7 +253,7 @@ public:
   EIGEN_DEVICE_FUNC const Scalar* data() const { return m_data; }
   EIGEN_DEVICE_FUNC Index firstAligned(Index size) const {
-    if (UIntPtr(m_data)%sizeof(Scalar)) {
+    if (std::uintptr_t(m_data)%sizeof(Scalar)) {
       return -1;
     }
     return internal::first_default_aligned(m_data, size);

View File

@@ -541,7 +541,7 @@ EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size)
     // so that all elements of the array have the same alignment.
     return 0;
   }
-  else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
+  else if( (std::uintptr_t(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
   {
     // The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size.
     // Consequently, no element of the array is well aligned.
@@ -549,7 +549,7 @@ EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size)
   }
   else
   {
-    Index first = (AlignmentSize - (Index((UIntPtr(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;
+    Index first = (AlignmentSize - (Index((std::uintptr_t(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;
     return (first < size) ? first : size;
   }
 }
@@ -583,7 +583,7 @@ template<typename T> EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T*
 template<typename T> struct smart_copy_helper<T,true> {
   EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
   {
-    IntPtr size = IntPtr(end)-IntPtr(start);
+    std::intptr_t size = std::intptr_t(end)-std::intptr_t(start);
     if(size==0) return;
     eigen_internal_assert(start!=0 && end!=0 && target!=0);
     EIGEN_USING_STD(memcpy)
@@ -607,7 +607,7 @@ template<typename T> void smart_memmove(const T* start, const T* end, T* target)
 template<typename T> struct smart_memmove_helper<T,true> {
   static inline void run(const T* start, const T* end, T* target)
   {
-    IntPtr size = IntPtr(end)-IntPtr(start);
+    std::intptr_t size = std::intptr_t(end)-std::intptr_t(start);
     if(size==0) return;
     eigen_internal_assert(start!=0 && end!=0 && target!=0);
     std::memmove(target, start, size);
@@ -617,7 +617,7 @@ template<typename T> struct smart_memmove_helper<T,true> {
 template<typename T> struct smart_memmove_helper<T,false> {
   static inline void run(const T* start, const T* end, T* target)
   {
-    if (UIntPtr(target) < UIntPtr(start))
+    if (std::uintptr_t(target) < std::uintptr_t(start))
     {
       std::copy(start, end, target);
     }
@@ -799,7 +799,7 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
 #if EIGEN_DEFAULT_ALIGN_BYTES>0
   // We always manually re-align the result of EIGEN_ALLOCA.
   // If alloca is already aligned, the compiler should be smart enough to optimize away the re-alignment.
-  #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((internal::UIntPtr(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)))
+  #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((std::uintptr_t(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)))
 #else
   #define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE)
 #endif
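The index arithmetic in first_aligned above reads: convert the address to a count of elements, reduce it modulo the packet size, and return how many elements remain until the next packet boundary, capped at the array size. A standalone sketch of that computation with illustrative types (not the Eigen implementation itself), assuming a 16-byte packet and a float array that is at least scalar-aligned:

    #include <cstdint>
    #include <cstddef>

    // How many leading floats must be skipped before the data is 16-byte aligned?
    std::ptrdiff_t first_aligned_index(const float* array, std::ptrdiff_t size) {
      constexpr std::ptrdiff_t kAlignmentSize = 16 / sizeof(float);   // elements per aligned block
      constexpr std::ptrdiff_t kAlignmentMask = kAlignmentSize - 1;
      std::ptrdiff_t elem = static_cast<std::ptrdiff_t>(
          reinterpret_cast<std::uintptr_t>(array) / sizeof(float));   // address measured in elements
      std::ptrdiff_t first = (kAlignmentSize - (elem & kAlignmentMask)) & kAlignmentMask;
      return first < size ? first : size;                             // never report past the end
    }

The EIGEN_ALIGNED_ALLOCA hunk uses the complementary byte-level trick: over-allocate by EIGEN_DEFAULT_ALIGN_BYTES-1, add align-1 to the address, and mask the low bits off to round up to the next aligned address.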

View File

@@ -27,9 +27,6 @@
 #endif
-// Recent versions of ICC require <cstdint> for pointer types below.
-#define EIGEN_ICC_NEEDS_CSTDINT (EIGEN_COMP_ICC>=1600)
 // Define portable (u)int{32,64} types
 #include <cstdint>
@@ -93,17 +90,6 @@ namespace internal {
   * we however don't want to add a dependency to Boost.
   */
-// Only recent versions of ICC complain about using ptrdiff_t to hold pointers,
-// and older versions do not provide *intptr_t types.
-#if EIGEN_ICC_NEEDS_CSTDINT
-typedef std::intptr_t IntPtr;
-typedef std::uintptr_t UIntPtr;
-#else
-typedef std::ptrdiff_t IntPtr;
-typedef std::size_t UIntPtr;
-#endif
-#undef EIGEN_ICC_NEEDS_CSTDINT
 struct true_type  { enum { value = 1 }; };
 struct false_type { enum { value = 0 }; };

View File

@@ -26,7 +26,7 @@ void check_handmade_aligned_malloc()
   for(int i = 1; i < 1000; i++)
   {
     char *p = (char*)internal::handmade_aligned_malloc(i, alignment);
-    VERIFY(internal::UIntPtr(p)%ALIGNMENT==0);
+    VERIFY(std::uintptr_t(p)%ALIGNMENT==0);
     // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
     for(int j = 0; j < i; j++) p[j]=0;
     internal::handmade_aligned_free(p);
@@ -38,7 +38,7 @@ void check_aligned_malloc()
   for(int i = ALIGNMENT; i < 1000; i++)
   {
     char *p = (char*)internal::aligned_malloc(i);
-    VERIFY(internal::UIntPtr(p)%ALIGNMENT==0);
+    VERIFY(std::uintptr_t(p)%ALIGNMENT==0);
     // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
     for(int j = 0; j < i; j++) p[j]=0;
     internal::aligned_free(p);
@@ -50,7 +50,7 @@ void check_aligned_new()
   for(int i = ALIGNMENT; i < 1000; i++)
   {
     float *p = internal::aligned_new<float>(i);
-    VERIFY(internal::UIntPtr(p)%ALIGNMENT==0);
+    VERIFY(std::uintptr_t(p)%ALIGNMENT==0);
     // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
     for(int j = 0; j < i; j++) p[j]=0;
     internal::aligned_delete(p,i);
@@ -62,7 +62,7 @@ void check_aligned_stack_alloc()
   for(int i = ALIGNMENT; i < 400; i++)
   {
     ei_declare_aligned_stack_constructed_variable(float,p,i,0);
-    VERIFY(internal::UIntPtr(p)%ALIGNMENT==0);
+    VERIFY(std::uintptr_t(p)%ALIGNMENT==0);
     // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
     for(int j = 0; j < i; j++) p[j]=0;
   }
@@ -92,7 +92,7 @@ template<typename T> void check_dynaligned()
   {
     T* obj = new T;
     VERIFY(T::NeedsToAlign==1);
-    VERIFY(internal::UIntPtr(obj)%ALIGNMENT==0);
+    VERIFY(std::uintptr_t(obj)%ALIGNMENT==0);
     delete obj;
   }
 }
@@ -153,15 +153,15 @@ EIGEN_DECLARE_TEST(dynalloc)
   }
   {
-    MyStruct foo0; VERIFY(internal::UIntPtr(foo0.avec.data())%ALIGNMENT==0);
-    MyClassA fooA; VERIFY(internal::UIntPtr(fooA.avec.data())%ALIGNMENT==0);
+    MyStruct foo0; VERIFY(std::uintptr_t(foo0.avec.data())%ALIGNMENT==0);
+    MyClassA fooA; VERIFY(std::uintptr_t(fooA.avec.data())%ALIGNMENT==0);
   }
   // dynamic allocation, single object
   for (int i=0; i<g_repeat*100; ++i)
   {
-    MyStruct *foo0 = new MyStruct(); VERIFY(internal::UIntPtr(foo0->avec.data())%ALIGNMENT==0);
-    MyClassA *fooA = new MyClassA(); VERIFY(internal::UIntPtr(fooA->avec.data())%ALIGNMENT==0);
+    MyStruct *foo0 = new MyStruct(); VERIFY(std::uintptr_t(foo0->avec.data())%ALIGNMENT==0);
+    MyClassA *fooA = new MyClassA(); VERIFY(std::uintptr_t(fooA->avec.data())%ALIGNMENT==0);
     delete foo0;
     delete fooA;
   }
@@ -170,8 +170,8 @@ EIGEN_DECLARE_TEST(dynalloc)
   const int N = 10;
   for (int i=0; i<g_repeat*100; ++i)
   {
-    MyStruct *foo0 = new MyStruct[N]; VERIFY(internal::UIntPtr(foo0->avec.data())%ALIGNMENT==0);
-    MyClassA *fooA = new MyClassA[N]; VERIFY(internal::UIntPtr(fooA->avec.data())%ALIGNMENT==0);
+    MyStruct *foo0 = new MyStruct[N]; VERIFY(std::uintptr_t(foo0->avec.data())%ALIGNMENT==0);
+    MyClassA *fooA = new MyClassA[N]; VERIFY(std::uintptr_t(fooA->avec.data())%ALIGNMENT==0);
     delete[] foo0;
     delete[] fooA;
   }

View File

@@ -512,7 +512,7 @@ EIGEN_DECLARE_TEST(evaluators)
     float *destMem = new float[(M*N) + 1];
     // In case of no alignment, avoid division by zero.
     constexpr int alignment = (std::max<int>)(EIGEN_MAX_ALIGN_BYTES, 1);
-    float *dest = (internal::UIntPtr(destMem)%alignment) == 0 ? destMem+1 : destMem;
+    float *dest = (std::uintptr_t(destMem)%alignment) == 0 ? destMem+1 : destMem;
     const Matrix<float, Dynamic, Dynamic, RowMajor> a = Matrix<float, Dynamic, Dynamic, RowMajor>::Random(M, K);
     const Matrix<float, Dynamic, Dynamic, RowMajor> b = Matrix<float, Dynamic, Dynamic, RowMajor>::Random(K, N);

View File

@@ -41,7 +41,7 @@ EIGEN_DECLARE_TEST(first_aligned)
   test_first_aligned_helper(array_double+1, 50);
   test_first_aligned_helper(array_double+2, 50);
-  double *array_double_plus_4_bytes = (double*)(internal::UIntPtr(array_double)+4);
+  double *array_double_plus_4_bytes = (double*)(std::uintptr_t(array_double)+4);
   test_none_aligned_helper(array_double_plus_4_bytes, 50);
   test_none_aligned_helper(array_double_plus_4_bytes+1, 50);

View File

@@ -22,7 +22,7 @@ template<typename VectorType> void map_class_vector(const VectorType& m)
   Scalar* array3 = new Scalar[size+1];
   // In case of no alignment, avoid division by zero.
   constexpr int alignment = (std::max<int>)(EIGEN_MAX_ALIGN_BYTES, 1);
-  Scalar* array3unaligned = (internal::UIntPtr(array3)%alignment) == 0 ? array3+1 : array3;
+  Scalar* array3unaligned = (std::uintptr_t(array3)%alignment) == 0 ? array3+1 : array3;
   Scalar array4[EIGEN_TESTMAP_MAX_SIZE];
   Map<VectorType, AlignedMax>(array1, size) = VectorType::Random(size);
@@ -64,7 +64,7 @@ template<typename MatrixType> void map_class_matrix(const MatrixType& m)
   for(Index i = 0; i < sizep1; i++) array3[i] = Scalar(1);
   // In case of no alignment, avoid division by zero.
   constexpr int alignment = (std::max<int>)(EIGEN_MAX_ALIGN_BYTES, 1);
-  Scalar* array3unaligned = (internal::UIntPtr(array3)%alignment) == 0 ? array3+1 : array3;
+  Scalar* array3unaligned = (std::uintptr_t(array3)%alignment) == 0 ? array3+1 : array3;
   Scalar array4[256];
   if(size<=256)
     for(int i = 0; i < size; i++) array4[i] = Scalar(1);
@@ -129,7 +129,7 @@ template<typename VectorType> void map_static_methods(const VectorType& m)
   Scalar* array3 = new Scalar[size+1];
   // In case of no alignment, avoid division by zero.
   constexpr int alignment = (std::max<int>)(EIGEN_MAX_ALIGN_BYTES, 1);
-  Scalar* array3unaligned = (internal::UIntPtr(array3)%alignment) == 0 ? array3+1 : array3;
+  Scalar* array3unaligned = (std::uintptr_t(array3)%alignment) == 0 ? array3+1 : array3;
   VectorType::MapAligned(array1, size) = VectorType::Random(size);
   VectorType::Map(array2, size) = VectorType::Map(array1, size);

View File

@@ -22,7 +22,7 @@ template<int Alignment,typename VectorType> void map_class_vector(const VectorTy
   Scalar* a_array = internal::aligned_new<Scalar>(arraysize+1);
   Scalar* array = a_array;
   if(Alignment!=Aligned)
-    array = (Scalar*)(internal::IntPtr(a_array) + (internal::packet_traits<Scalar>::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits<Scalar>::Real)));
+    array = (Scalar*)(std::intptr_t(a_array) + (internal::packet_traits<Scalar>::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits<Scalar>::Real)));
   {
     Map<VectorType, Alignment, InnerStride<3> > map(array, size);
@@ -61,16 +61,16 @@ template<int Alignment,typename MatrixType> void map_class_matrix(const MatrixTy
   Scalar* a_array1 = internal::aligned_new<Scalar>(arraysize+1);
   Scalar* array1 = a_array1;
   if(Alignment!=Aligned)
-    array1 = (Scalar*)(internal::IntPtr(a_array1) + (internal::packet_traits<Scalar>::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits<Scalar>::Real)));
+    array1 = (Scalar*)(std::intptr_t(a_array1) + (internal::packet_traits<Scalar>::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits<Scalar>::Real)));
   Scalar a_array2[256];
   Scalar* array2 = a_array2;
   if(Alignment!=Aligned) {
-    array2 = (Scalar*)(internal::IntPtr(a_array2) + (internal::packet_traits<Scalar>::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits<Scalar>::Real)));
+    array2 = (Scalar*)(std::intptr_t(a_array2) + (internal::packet_traits<Scalar>::AlignedOnScalar?sizeof(Scalar):sizeof(typename NumTraits<Scalar>::Real)));
   } else {
     // In case there is no alignment, default to pointing to the start.
     constexpr int alignment = (std::max<int>)(EIGEN_MAX_ALIGN_BYTES, 1);
-    array2 = (Scalar*)(((internal::UIntPtr(a_array2)+alignment-1)/alignment)*alignment);
+    array2 = (Scalar*)(((std::uintptr_t(a_array2)+alignment-1)/alignment)*alignment);
   }
   Index maxsize2 = a_array2 - array2 + 256;
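These tests build deliberately misaligned pointers by offsetting an aligned allocation by one scalar (or one real component), while the else branch rounds a stack array up to the next aligned address. A condensed sketch of both moves, with hypothetical types and a 16-byte stand-in for the Eigen alignment macros:

    #include <cstdint>
    #include <cstddef>

    constexpr std::size_t kAlign = 16;   // stand-in for the alignment used by the tests

    // Offset a kAlign-aligned buffer by one element so the result is no longer
    // kAlign-aligned (valid only if the buffer has at least one spare element).
    double* misalign(double* aligned_buffer) {
      return reinterpret_cast<double*>(
          reinterpret_cast<std::intptr_t>(aligned_buffer) + sizeof(double));
    }

    // Round a possibly unaligned stack address up to the next kAlign boundary,
    // mirroring the ((addr + align - 1) / align) * align form used above.
    double* align_up(double* p) {
      std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(p);
      return reinterpret_cast<double*>(((addr + kAlign - 1) / kAlign) * kAlign);
    }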

View File

@@ -15,8 +15,6 @@
 #include <Eigen/Core>
-using internal::UIntPtr;
 template <typename MatrixType>
 void rvalue_copyassign(const MatrixType& m)
 {
@@ -25,18 +23,18 @@ void rvalue_copyassign(const MatrixType& m)
   // create a temporary which we are about to destroy by moving
   MatrixType tmp = m;
-  UIntPtr src_address = reinterpret_cast<UIntPtr>(tmp.data());
+  std::uintptr_t src_address = reinterpret_cast<std::uintptr_t>(tmp.data());
   Eigen::internal::set_is_malloc_allowed(false); // moving from an rvalue reference shall never allocate
   // move the temporary to n
   MatrixType n = std::move(tmp);
-  UIntPtr dst_address = reinterpret_cast<UIntPtr>(n.data());
+  std::uintptr_t dst_address = reinterpret_cast<std::uintptr_t>(n.data());
   if (MatrixType::RowsAtCompileTime==Dynamic|| MatrixType::ColsAtCompileTime==Dynamic)
   {
     // verify that we actually moved the guts
     VERIFY_IS_EQUAL(src_address, dst_address);
     VERIFY_IS_EQUAL(tmp.size(), 0);
-    VERIFY_IS_EQUAL(reinterpret_cast<UIntPtr>(tmp.data()), UIntPtr(0));
+    VERIFY_IS_EQUAL(reinterpret_cast<std::uintptr_t>(tmp.data()), std::uintptr_t(0));
   }
   // verify that the content did not change
@@ -55,24 +53,24 @@ void rvalue_transpositions(Index rows)
   Eigen::internal::set_is_malloc_allowed(false); // moving from an rvalue reference shall never allocate
-  UIntPtr t0_address = reinterpret_cast<UIntPtr>(t0.indices().data());
+  std::uintptr_t t0_address = reinterpret_cast<std::uintptr_t>(t0.indices().data());
   // Move constructors:
   TranspositionsType t1 = std::move(t0);
-  UIntPtr t1_address = reinterpret_cast<UIntPtr>(t1.indices().data());
+  std::uintptr_t t1_address = reinterpret_cast<std::uintptr_t>(t1.indices().data());
   VERIFY_IS_EQUAL(t0_address, t1_address);
   // t0 must be de-allocated:
   VERIFY_IS_EQUAL(t0.size(), 0);
-  VERIFY_IS_EQUAL(reinterpret_cast<UIntPtr>(t0.indices().data()), UIntPtr(0));
+  VERIFY_IS_EQUAL(reinterpret_cast<std::uintptr_t>(t0.indices().data()), std::uintptr_t(0));
   // Move assignment:
   t0 = std::move(t1);
-  t0_address = reinterpret_cast<UIntPtr>(t0.indices().data());
+  t0_address = reinterpret_cast<std::uintptr_t>(t0.indices().data());
   VERIFY_IS_EQUAL(t0_address, t1_address);
   // t1 must be de-allocated:
   VERIFY_IS_EQUAL(t1.size(), 0);
-  VERIFY_IS_EQUAL(reinterpret_cast<UIntPtr>(t1.indices().data()), UIntPtr(0));
+  VERIFY_IS_EQUAL(reinterpret_cast<std::uintptr_t>(t1.indices().data()), std::uintptr_t(0));
   Eigen::internal::set_is_malloc_allowed(true);
 }
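The pattern in this test is to record the buffer address as a std::uintptr_t before and after std::move and require that it does not change, which proves the move stole the allocation instead of copying it. A minimal sketch of the same check against a plain std::vector (not the Eigen test itself):

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<double> tmp(1000, 1.0);
      auto src_address = reinterpret_cast<std::uintptr_t>(tmp.data());

      std::vector<double> n = std::move(tmp);   // should steal the heap buffer, not copy it
      auto dst_address = reinterpret_cast<std::uintptr_t>(n.data());

      assert(src_address == dst_address);       // same buffer: the guts really moved
      assert(tmp.empty());                      // moved-from vector is typically left empty
      return 0;
    }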

View File

@@ -34,7 +34,7 @@ void check_stdvector_matrix(const MatrixType& m)
   VERIFY_IS_APPROX(v[21], y);
   v.push_back(x);
   VERIFY_IS_APPROX(v[22], x);
-  VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(MatrixType));
+  VERIFY((std::uintptr_t)&(v[22]) == (std::uintptr_t)&(v[21]) + sizeof(MatrixType));
   // do a lot of push_back such that the vector gets internally resized
   // (with memory reallocation)
@@ -69,7 +69,7 @@ void check_stdvector_transform(const TransformType&)
   VERIFY_IS_APPROX(v[21], y);
   v.push_back(x);
   VERIFY_IS_APPROX(v[22], x);
-  VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(TransformType));
+  VERIFY((std::uintptr_t)&(v[22]) == (std::uintptr_t)&(v[21]) + sizeof(TransformType));
   // do a lot of push_back such that the vector gets internally resized
   // (with memory reallocation)
@@ -104,7 +104,7 @@ void check_stdvector_quaternion(const QuaternionType&)
   VERIFY_IS_APPROX(v[21], y);
   v.push_back(x);
   VERIFY_IS_APPROX(v[22], x);
-  VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(QuaternionType));
+  VERIFY((std::uintptr_t)&(v[22]) == (std::uintptr_t)&(v[21]) + sizeof(QuaternionType));
   // do a lot of push_back such that the vector gets internally resized
   // (with memory reallocation)

View File

@@ -48,7 +48,7 @@ void check_stdvector_matrix(const MatrixType& m)
   VERIFY_IS_APPROX(v[21], y);
   v.push_back(x);
   VERIFY_IS_APPROX(v[22], x);
-  VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(MatrixType));
+  VERIFY((std::uintptr_t)&(v[22]) == (std::uintptr_t)&(v[21]) + sizeof(MatrixType));
   // do a lot of push_back such that the vector gets internally resized
   // (with memory reallocation)
@@ -83,7 +83,7 @@ void check_stdvector_transform(const TransformType&)
   VERIFY_IS_APPROX(v[21], y);
   v.push_back(x);
   VERIFY_IS_APPROX(v[22], x);
-  VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(TransformType));
+  VERIFY((std::uintptr_t)&(v[22]) == (std::uintptr_t)&(v[21]) + sizeof(TransformType));
   // do a lot of push_back such that the vector gets internally resized
   // (with memory reallocation)
@@ -118,7 +118,7 @@ void check_stdvector_quaternion(const QuaternionType&)
   VERIFY_IS_APPROX(v[21], y);
   v.push_back(x);
   VERIFY_IS_APPROX(v[22], x);
-  VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(QuaternionType));
+  VERIFY((std::uintptr_t)&(v[22]) == (std::uintptr_t)&(v[21]) + sizeof(QuaternionType));
   // do a lot of push_back such that the vector gets internally resized
   // (with memory reallocation)

View File

@@ -18,13 +18,13 @@ struct Foo
 #endif
     std::cout << '+';
     ++Foo::object_count;
-    eigen_assert((internal::UIntPtr(this) & (127)) == 0);
+    eigen_assert((std::uintptr_t(this) & (127)) == 0);
   }
   Foo(const Foo&)
   {
     std::cout << 'c';
     ++Foo::object_count;
-    eigen_assert((internal::UIntPtr(this) & (127)) == 0);
+    eigen_assert((std::uintptr_t(this) & (127)) == 0);
   }
   ~Foo()