fixups for clean QCC compilation (add std:: in front of size_t, memcpy, etc.)
parent 47a61bbd80
commit 1625a5e3f8
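The change is mechanical throughout: C library names such as size_t, ptrdiff_t, memcpy, memset, exit, atoi, strtoul, time and srand gain an explicit std:: qualifier. A minimal sketch of why this is needed (the function below is illustrative, not from the commit): the standard only guarantees that the <c...> headers declare these names in namespace std, and QCC's library is strict about not also providing them in the global namespace.

#include <cstddef> // declares std::size_t; a global ::size_t is not guaranteed
#include <cstring> // declares std::memcpy; a global ::memcpy is not guaranteed

void copy_bytes(char* dst, const char* src, std::size_t n)
{
  std::memcpy(dst, src, n); // portable
  // memcpy(dst, src, n);   // unqualified: may fail to compile under QCC
}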
@@ -15,7 +15,9 @@
 #endif
 #endif

-#ifdef __GNUC__
+// FIXME: this check should not be against __QNXNTO__, which is also defined
+// while compiling with GCC for QNX target. Better solution is welcome!
+#if defined(__GNUC__) && !defined(__QNXNTO__)
 #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__>=x && __GNUC_MINOR__>=y) || __GNUC__>x)
 #else
 #define EIGEN_GNUC_AT_LEAST(x,y) 0
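A hedged usage sketch for the macro defined above (the guarded feature is hypothetical): version-gated code keeps compiling under QCC because the macro simply expands to 0 there.

#if EIGEN_GNUC_AT_LEAST(4,2)
  // take a code path that needs GCC 4.2 or newer
#else
  // portable fallback, also taken under QCC where the macro is 0
#endif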
@@ -36,8 +36,8 @@ template <class T>
 class aligned_allocator_indirection : public aligned_allocator<T>
 {
 public:
-  typedef size_t size_type;
-  typedef ptrdiff_t difference_type;
+  typedef std::size_t size_type;
+  typedef std::ptrdiff_t difference_type;
   typedef T* pointer;
   typedef const T* const_pointer;
   typedef T& reference;
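These typedefs are not cosmetic: under the C++98 allocator requirements, containers obtain size_type and difference_type through the allocator, so both names must resolve even when the library does not inject size_t and ptrdiff_t into the global namespace. A usage sketch, assuming the internal aligned_allocator_indirection shown above is in scope:

#include <vector>

// a std::vector whose element storage goes through Eigen's aligned allocator
typedef std::vector<float, aligned_allocator_indirection<float> > aligned_float_vector;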
@@ -84,7 +84,7 @@ static void ei_cache_friendly_product(
     MaxL2BlockSize = ei_L2_block_traits<EIGEN_TUNE_FOR_CPU_CACHE_SIZE,Scalar>::width
   };

-  const bool resIsAligned = (PacketSize==1) || (((resStride%PacketSize) == 0) && (size_t(res)%16==0));
+  const bool resIsAligned = (PacketSize==1) || (((resStride%PacketSize) == 0) && (std::size_t(res)%16==0));

  const int remainingSize = depth % PacketSize;
  const int size = depth - remainingSize; // third dimension of the product clamped to packet boundaries
@@ -92,7 +92,7 @@ static void ei_cache_friendly_product(
  const int l2BlockCols = MaxL2BlockSize > cols ? cols : MaxL2BlockSize;
  const int l2BlockSize = MaxL2BlockSize > size ? size : MaxL2BlockSize;
  const int l2BlockSizeAligned = (1 + std::max(l2BlockSize,l2BlockCols)/PacketSize)*PacketSize;
-  const bool needRhsCopy = (PacketSize>1) && ((rhsStride%PacketSize!=0) || (size_t(rhs)%16!=0));
+  const bool needRhsCopy = (PacketSize>1) && ((rhsStride%PacketSize!=0) || (std::size_t(rhs)%16!=0));
  Scalar* EIGEN_RESTRICT block = 0;
  const int allocBlockSize = l2BlockRows*size;
  block = ei_aligned_stack_new(Scalar, allocBlockSize);
@@ -172,7 +172,7 @@ static void ei_cache_friendly_product(
        for(int l1j=l2j; l1j<l2blockColEnd; l1j+=1)
        {
          ei_internal_assert(l2BlockSizeAligned*(l1j-l2j)+(l2blockSizeEnd-l2k) < l2BlockSizeAligned*l2BlockSizeAligned);
-          memcpy(rhsCopy+l2BlockSizeAligned*(l1j-l2j),&(rhs[l1j*rhsStride+l2k]),(l2blockSizeEnd-l2k)*sizeof(Scalar));
+          std::memcpy(rhsCopy+l2BlockSizeAligned*(l1j-l2j),&(rhs[l1j*rhsStride+l2k]),(l2blockSizeEnd-l2k)*sizeof(Scalar));
        }

      // for each bw x 1 result's block
@@ -397,7 +397,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_colmajor_times_vector(
  int skipColumns = 0;
  if (PacketSize>1)
  {
-    ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size<PacketSize);
+    ei_internal_assert(std::size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size<PacketSize);

    while (skipColumns<PacketSize &&
          alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%PacketSize))
@@ -414,7 +414,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_colmajor_times_vector(
      // note that the skiped columns are processed later.
    }

-    ei_internal_assert((alignmentPattern==NoneAligned) || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0);
+    ei_internal_assert((alignmentPattern==NoneAligned) || (std::size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0);
  }

  int offset1 = (FirstAligned && alignmentStep==1?3:1);
@@ -516,7 +516,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_colmajor_times_vector(
      res[j] += ei_pfirst(ptmp0) * lhs0[j];

    // process aligned result's coeffs
-    if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
+    if ((std::size_t(lhs0+alignedStart)%sizeof(Packet))==0)
      for (int j = alignedStart;j<alignedSize;j+=PacketSize)
        ei_pstore(&res[j], ei_pmadd(ptmp0,ei_pload(&lhs0[j]),ei_pload(&res[j])));
    else
@@ -586,7 +586,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
  int skipRows = 0;
  if (PacketSize>1)
  {
-    ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size<PacketSize);
+    ei_internal_assert(std::size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size<PacketSize);

    while (skipRows<PacketSize &&
          alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%PacketSize))
@@ -603,7 +603,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
      // note that the skiped columns are processed later.
    }
    ei_internal_assert((alignmentPattern==NoneAligned) || PacketSize==1
-                       || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0);
+                       || (std::size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0);
  }

  int offset1 = (FirstAligned && alignmentStep==1?3:1);
@@ -722,7 +722,7 @@ static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
    if (alignedSize>alignedStart)
    {
      // process aligned rhs coeffs
-      if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
+      if ((std::size_t(lhs0+alignedStart)%sizeof(Packet))==0)
        for (int j = alignedStart;j<alignedSize;j+=PacketSize)
          ptmp0 = ei_pmadd(ei_pload(&rhs[j]), ei_pload(&lhs0[j]), ptmp0);
      else
@@ -239,7 +239,7 @@ inline static Integer ei_alignmentOffset(const Scalar* array, Integer size)
    // of the array have the same aligment.
    return 0;
  }
-  else if(size_t(array) & (sizeof(Scalar)-1))
+  else if(std::size_t(array) & (sizeof(Scalar)-1))
  {
    // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
    // Consequently, no element of the array is well aligned.
@@ -247,7 +247,7 @@ inline static Integer ei_alignmentOffset(const Scalar* array, Integer size)
  }
  else
  {
-    return std::min<Integer>( (PacketSize - (Integer((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
+    return std::min<Integer>( (PacketSize - (Integer((std::size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
                              & PacketAlignedMask, size);
  }
}
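A standalone sketch of the computation above, specialized to hypothetical values (4 floats per packet, so PacketSize is 4 and PacketAlignedMask is 3): it returns how many leading elements to skip before the first packet-aligned one.

#include <cstddef>
#include <algorithm>

int first_aligned_offset(const float* array, int size)
{
  const int PacketSize = 4, PacketAlignedMask = PacketSize - 1;
  // element index of the pointer, modulo the packet size, as in the code above
  int offset = (PacketSize - int((std::size_t(array) / sizeof(float)) & PacketAlignedMask))
               & PacketAlignedMask;
  return std::min(offset, size); // never report more elements than the array holds
}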
@@ -98,7 +98,7 @@ template<typename _Scalar> class AmbiVector
      int allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
      Scalar* newBuffer = new Scalar[allocSize];
-      memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+      std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }
@@ -37,7 +37,7 @@ class CompressedStorage
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {}

-    CompressedStorage(size_t size)
+    CompressedStorage(std::size_t size)
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {
      resize(size);
@@ -52,8 +52,8 @@ class CompressedStorage
    CompressedStorage& operator=(const CompressedStorage& other)
    {
      resize(other.size());
-      memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
-      memcpy(m_indices, other.m_indices, m_size * sizeof(int));
+      std::memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
+      std::memcpy(m_indices, other.m_indices, m_size * sizeof(int));
      return *this;
    }

@@ -71,9 +71,9 @@ class CompressedStorage
      delete[] m_indices;
    }

-    void reserve(size_t size)
+    void reserve(std::size_t size)
    {
-      size_t newAllocatedSize = m_size + size;
+      std::size_t newAllocatedSize = m_size + size;
      if (newAllocatedSize > m_allocatedSize)
        reallocate(newAllocatedSize);
    }
@@ -84,10 +84,10 @@ class CompressedStorage
        reallocate(m_size);
    }

-    void resize(size_t size, float reserveSizeFactor = 0)
+    void resize(std::size_t size, float reserveSizeFactor = 0)
    {
      if (m_allocatedSize<size)
-        reallocate(size + size_t(reserveSizeFactor*size));
+        reallocate(size + std::size_t(reserveSizeFactor*size));
      m_size = size;
    }

@@ -99,17 +99,17 @@ class CompressedStorage
      m_indices[id] = i;
    }

-    inline size_t size() const { return m_size; }
-    inline size_t allocatedSize() const { return m_allocatedSize; }
+    inline std::size_t size() const { return m_size; }
+    inline std::size_t allocatedSize() const { return m_allocatedSize; }
    inline void clear() { m_size = 0; }

-    inline Scalar& value(size_t i) { return m_values[i]; }
-    inline const Scalar& value(size_t i) const { return m_values[i]; }
+    inline Scalar& value(std::size_t i) { return m_values[i]; }
+    inline const Scalar& value(std::size_t i) const { return m_values[i]; }

-    inline int& index(size_t i) { return m_indices[i]; }
-    inline const int& index(size_t i) const { return m_indices[i]; }
+    inline int& index(std::size_t i) { return m_indices[i]; }
+    inline const int& index(std::size_t i) const { return m_indices[i]; }

-    static CompressedStorage Map(int* indices, Scalar* values, size_t size)
+    static CompressedStorage Map(int* indices, Scalar* values, std::size_t size)
    {
      CompressedStorage res;
      res.m_indices = indices;
@@ -125,11 +125,11 @@ class CompressedStorage
    }

    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
-    inline int searchLowerIndex(size_t start, size_t end, int key) const
+    inline int searchLowerIndex(std::size_t start, std::size_t end, int key) const
    {
      while(end>start)
      {
-        size_t mid = (end+start)>>1;
+        std::size_t mid = (end+start)>>1;
        if (m_indices[mid]<key)
          start = mid+1;
        else
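The loop above is a classic lower-bound binary search over the sorted index array. A self-contained sketch with illustrative names, matching the doc comment's contract (first position in [start,end) whose index is not less than key):

#include <cstddef>

int search_lower_index(const int* indices, std::size_t start, std::size_t end, int key)
{
  while (end > start)
  {
    std::size_t mid = (end + start) >> 1; // midpoint, as in searchLowerIndex
    if (indices[mid] < key)
      start = mid + 1; // key lies strictly to the right of mid
    else
      end = mid;       // mid may be the answer; keep it in range
  }
  return int(start);
}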
@@ -148,12 +148,12 @@ class CompressedStorage
        return m_values[m_size-1];
      // ^^ optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
-      const size_t id = searchLowerIndex(0,m_size-1,key);
+      const std::size_t id = searchLowerIndex(0,m_size-1,key);
      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
    }

    /** Like at(), but the search is performed in the range [start,end) */
-    inline Scalar atInRange(size_t start, size_t end, int key, Scalar defaultValue = Scalar(0)) const
+    inline Scalar atInRange(std::size_t start, std::size_t end, int key, Scalar defaultValue = Scalar(0)) const
    {
      if (start==end)
        return Scalar(0);
@@ -161,7 +161,7 @@ class CompressedStorage
        return m_values[end-1];
      // ^^ optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
-      const size_t id = searchLowerIndex(start,end-1,key);
+      const std::size_t id = searchLowerIndex(start,end-1,key);
      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
    }

@@ -170,11 +170,11 @@ class CompressedStorage
      * such that the keys are sorted. */
    inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
    {
-      size_t id = searchLowerIndex(0,m_size,key);
+      std::size_t id = searchLowerIndex(0,m_size,key);
      if (id>=m_size || m_indices[id]!=key)
      {
        resize(m_size+1,1);
-        for (size_t j=m_size-1; j>id; --j)
+        for (std::size_t j=m_size-1; j>id; --j)
        {
          m_indices[j] = m_indices[j-1];
          m_values[j] = m_values[j-1];
@@ -187,9 +187,9 @@ class CompressedStorage

    void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
    {
-      size_t k = 0;
-      size_t n = size();
-      for (size_t i=0; i<n; ++i)
+      std::size_t k = 0;
+      std::size_t n = size();
+      for (std::size_t i=0; i<n; ++i)
      {
        if (!ei_isMuchSmallerThan(value(i), reference, epsilon))
        {
@@ -203,14 +203,14 @@ class CompressedStorage

  protected:

-    inline void reallocate(size_t size)
+    inline void reallocate(std::size_t size)
    {
      Scalar* newValues = new Scalar[size];
      int* newIndices = new int[size];
-      size_t copySize = std::min(size, m_size);
+      std::size_t copySize = std::min(size, m_size);
      // copy
-      memcpy(newValues, m_values, copySize * sizeof(Scalar));
-      memcpy(newIndices, m_indices, copySize * sizeof(int));
+      std::memcpy(newValues, m_values, copySize * sizeof(Scalar));
+      std::memcpy(newIndices, m_indices, copySize * sizeof(int));
      // delete old stuff
      delete[] m_values;
      delete[] m_indices;
@@ -222,8 +222,8 @@ class CompressedStorage
  protected:
    Scalar* m_values;
    int* m_indices;
-    size_t m_size;
-    size_t m_allocatedSize;
+    std::size_t m_size;
+    std::size_t m_allocatedSize;

  };
@@ -212,7 +212,7 @@ class DynamicSparseMatrix
      // remove all coefficients with innerCoord>=innerSize
      // TODO
      std::cerr << "not implemented yet\n";
-      exit(2);
+      std::exit(2);
    }
    if (m_data.size() != outerSize)
    {
@@ -122,7 +122,7 @@ class SparseMatrix
    {
      m_data.clear();
      //if (m_outerSize)
-      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
+      std::memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
//       for (int i=0; i<m_outerSize; ++i)
//         m_outerIndex[i] = 0;
//       if (m_outerSize)
@@ -164,7 +164,7 @@ class SparseMatrix
      {
        ei_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
      }
-      assert(size_t(m_outerIndex[outer+1]) == m_data.size());
+      assert(std::size_t(m_outerIndex[outer+1]) == m_data.size());
      int id = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

@@ -190,10 +190,10 @@ class SparseMatrix
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }
-      assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "invalid outer index");
-      size_t startId = m_outerIndex[outer];
-      // FIXME let's make sure sizeof(long int) == sizeof(size_t)
-      size_t id = m_outerIndex[outer+1];
+      assert(std::size_t(m_outerIndex[outer+1]) == m_data.size() && "invalid outer index");
+      std::size_t startId = m_outerIndex[outer];
+      // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
+      std::size_t id = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
@@ -273,7 +273,7 @@ class SparseMatrix
        m_outerIndex = new int [outerSize+1];
        m_outerSize = outerSize;
      }
-      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
+      std::memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
    }
    void resizeNonZeros(int size)
    {
@@ -324,7 +324,7 @@ class SparseMatrix
      else
      {
        resize(other.rows(), other.cols());
-        memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(int));
+        std::memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(int));
        m_data = other.m_data;
      }
      return *this;
@@ -35,7 +35,7 @@ void check_handmade_aligned_malloc()
  for(int i = 1; i < 1000; i++)
  {
    char *p = (char*)ei_handmade_aligned_malloc(i);
-    VERIFY(size_t(p)%ALIGNMENT==0);
+    VERIFY(std::size_t(p)%ALIGNMENT==0);
    // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
    for(int j = 0; j < i; j++) p[j]=0;
    ei_handmade_aligned_free(p);
@@ -47,7 +47,7 @@ void check_aligned_malloc()
  for(int i = 1; i < 1000; i++)
  {
    char *p = (char*)ei_aligned_malloc(i);
-    VERIFY(size_t(p)%ALIGNMENT==0);
+    VERIFY(std::size_t(p)%ALIGNMENT==0);
    // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
    for(int j = 0; j < i; j++) p[j]=0;
    ei_aligned_free(p);
@@ -59,7 +59,7 @@ void check_aligned_new()
  for(int i = 1; i < 1000; i++)
  {
    float *p = ei_aligned_new<float>(i);
-    VERIFY(size_t(p)%ALIGNMENT==0);
+    VERIFY(std::size_t(p)%ALIGNMENT==0);
    // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
    for(int j = 0; j < i; j++) p[j]=0;
    ei_aligned_delete(p,i);
@@ -71,7 +71,7 @@ void check_aligned_stack_alloc()
  for(int i = 1; i < 1000; i++)
  {
    float *p = ei_aligned_stack_new(float,i);
-    VERIFY(size_t(p)%ALIGNMENT==0);
+    VERIFY(std::size_t(p)%ALIGNMENT==0);
    // if the buffer is wrongly allocated this will give a bad write --> check with valgrind
    for(int j = 0; j < i; j++) p[j]=0;
    ei_aligned_stack_delete(float,p,i);
@@ -98,7 +98,7 @@ class MyClassA
template<typename T> void check_dynaligned()
{
  T* obj = new T;
-  VERIFY(size_t(obj)%ALIGNMENT==0);
+  VERIFY(std::size_t(obj)%ALIGNMENT==0);
  delete obj;
}

@@ -121,15 +121,15 @@ void test_dynalloc()

  // check static allocation, who knows ?
  {
-    MyStruct foo0; VERIFY(size_t(foo0.avec.data())%ALIGNMENT==0);
-    MyClassA fooA; VERIFY(size_t(fooA.avec.data())%ALIGNMENT==0);
+    MyStruct foo0; VERIFY(std::size_t(foo0.avec.data())%ALIGNMENT==0);
+    MyClassA fooA; VERIFY(std::size_t(fooA.avec.data())%ALIGNMENT==0);
  }

  // dynamic allocation, single object
  for (int i=0; i<g_repeat*100; ++i)
  {
-    MyStruct *foo0 = new MyStruct(); VERIFY(size_t(foo0->avec.data())%ALIGNMENT==0);
-    MyClassA *fooA = new MyClassA(); VERIFY(size_t(fooA->avec.data())%ALIGNMENT==0);
+    MyStruct *foo0 = new MyStruct(); VERIFY(std::size_t(foo0->avec.data())%ALIGNMENT==0);
+    MyClassA *fooA = new MyClassA(); VERIFY(std::size_t(fooA->avec.data())%ALIGNMENT==0);
    delete foo0;
    delete fooA;
  }
@@ -138,8 +138,8 @@ void test_dynalloc()
  const int N = 10;
  for (int i=0; i<g_repeat*100; ++i)
  {
-    MyStruct *foo0 = new MyStruct[N]; VERIFY(size_t(foo0->avec.data())%ALIGNMENT==0);
-    MyClassA *fooA = new MyClassA[N]; VERIFY(size_t(fooA->avec.data())%ALIGNMENT==0);
+    MyStruct *foo0 = new MyStruct[N]; VERIFY(std::size_t(foo0->avec.data())%ALIGNMENT==0);
+    MyClassA *fooA = new MyClassA[N]; VERIFY(std::size_t(fooA->avec.data())%ALIGNMENT==0);
    delete[] foo0;
    delete[] fooA;
  }
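Every VERIFY above is the same idiom: a pointer is N-byte aligned exactly when its address is divisible by N. A hedged helper sketch (is_aligned is illustrative, not a helper from this commit):

#include <cstddef>

inline bool is_aligned(const void* p, std::size_t alignment)
{
  // alignment is assumed to be a power of two, e.g. the tests' ALIGNMENT of 16
  return std::size_t(p) % alignment == 0;
}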
@@ -28,7 +28,7 @@ template<typename Scalar>
void test_first_aligned_helper(Scalar *array, int size)
{
  const int packet_size = sizeof(Scalar) * ei_packet_traits<Scalar>::size;
-  VERIFY(((size_t(array) + sizeof(Scalar) * ei_alignmentOffset(array, size)) % packet_size) == 0);
+  VERIFY(((std::size_t(array) + sizeof(Scalar) * ei_alignmentOffset(array, size)) % packet_size) == 0);
}

template<typename Scalar>
@@ -54,7 +54,7 @@ void test_first_aligned()
  test_first_aligned_helper(array_double+1, 50);
  test_first_aligned_helper(array_double+2, 50);

-  double *array_double_plus_4_bytes = (double*)(size_t(array_double)+4);
+  double *array_double_plus_4_bytes = (double*)(std::size_t(array_double)+4);
  test_none_aligned_helper(array_double_plus_4_bytes, 50);
  test_none_aligned_helper(array_double_plus_4_bytes+1, 50);

test/main.h
@@ -142,7 +142,7 @@ namespace Eigen
#define VERIFY(a) do { if (!(a)) { \
    std::cerr << "Test " << g_test_stack.back() << " failed in "EI_PP_MAKE_STRING(__FILE__) << " (" << EI_PP_MAKE_STRING(__LINE__) << ")" \
      << std::endl << " " << EI_PP_MAKE_STRING(a) << std::endl << std::endl; \
-    exit(2); \
+    std::exit(2); \
  } } while (0)

#define VERIFY_IS_APPROX(a, b) VERIFY(test_ei_isApprox(a, b))
@@ -257,7 +257,7 @@ int main(int argc, char *argv[])
        std::cout << "Argument " << argv[i] << " conflicting with a former argument" << std::endl;
        return 1;
      }
-      repeat = atoi(argv[i]+1);
+      repeat = std::atoi(argv[i]+1);
      has_set_repeat = true;
      if(repeat <= 0)
      {
@@ -272,7 +272,7 @@ int main(int argc, char *argv[])
        std::cout << "Argument " << argv[i] << " conflicting with a former argument" << std::endl;
        return 1;
      }
-      seed = int(strtoul(argv[i]+1, 0, 10));
+      seed = int(std::strtoul(argv[i]+1, 0, 10));
      has_set_seed = true;
      bool ok = seed!=0;
      if(!ok)
@@ -295,11 +295,11 @@ int main(int argc, char *argv[])
    return 1;
  }

-  if(!has_set_seed) seed = (unsigned int) time(NULL);
+  if(!has_set_seed) seed = (unsigned int) std::time(NULL);
  if(!has_set_repeat) repeat = DEFAULT_REPEAT;

  std::cout << "Initializing random number generator with seed " << seed << std::endl;
-  srand(seed);
+  std::srand(seed);
  std::cout << "Repeating each test " << repeat << " times" << std::endl;

  Eigen::g_repeat = repeat;
@@ -34,7 +34,7 @@ template<typename VectorType> void map_class_vector(const VectorType& m)
  Scalar* array1 = ei_aligned_new<Scalar>(size);
  Scalar* array2 = ei_aligned_new<Scalar>(size);
  Scalar* array3 = new Scalar[size+1];
-  Scalar* array3unaligned = size_t(array3)%16 == 0 ? array3+1 : array3;
+  Scalar* array3unaligned = std::size_t(array3)%16 == 0 ? array3+1 : array3;

  Map<VectorType, Aligned>(array1, size) = VectorType::Random(size);
  Map<VectorType>(array2, size) = Map<VectorType>(array1, size);
@@ -63,7 +63,7 @@ template<typename MatrixType> void map_class_matrix(const MatrixType& m)
  for(int i = 0; i < size; i++) array2[i] = Scalar(1);
  Scalar* array3 = new Scalar[size+1];
  for(int i = 0; i < size+1; i++) array3[i] = Scalar(1);
-  Scalar* array3unaligned = size_t(array3)%16 == 0 ? array3+1 : array3;
+  Scalar* array3unaligned = std::size_t(array3)%16 == 0 ? array3+1 : array3;
  Map<MatrixType, Aligned>(array1, rows, cols) = MatrixType::Ones(rows,cols);
  Map<MatrixType>(array2, rows, cols) = Map<MatrixType>(array1, rows, cols);
  Map<MatrixType>(array3unaligned, rows, cols) = Map<MatrixType>(array1, rows, cols);
@@ -88,7 +88,7 @@ template<typename VectorType> void map_static_methods(const VectorType& m)
  Scalar* array1 = ei_aligned_new<Scalar>(size);
  Scalar* array2 = ei_aligned_new<Scalar>(size);
  Scalar* array3 = new Scalar[size+1];
-  Scalar* array3unaligned = size_t(array3)%16 == 0 ? array3+1 : array3;
+  Scalar* array3unaligned = std::size_t(array3)%16 == 0 ? array3+1 : array3;

  VectorType::MapAligned(array1, size) = VectorType::Random(size);
  VectorType::Map(array2, size) = VectorType::Map(array1, size);
@@ -50,7 +50,7 @@ void check_stdvector_matrix(const MatrixType& m)
  VERIFY_IS_APPROX(v[21], y);
  v.push_back(x);
  VERIFY_IS_APPROX(v[22], x);
-  VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(MatrixType));
+  VERIFY((std::size_t)&(v[22]) == (std::size_t)&(v[21]) + sizeof(MatrixType));

  // do a lot of push_back such that the vector gets internally resized
  // (with memory reallocation)
@@ -85,7 +85,7 @@ void check_stdvector_transform(const TransformType&)
  VERIFY_IS_APPROX(v[21], y);
  v.push_back(x);
  VERIFY_IS_APPROX(v[22], x);
-  VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(TransformType));
+  VERIFY((std::size_t)&(v[22]) == (std::size_t)&(v[21]) + sizeof(TransformType));

  // do a lot of push_back such that the vector gets internally resized
  // (with memory reallocation)
@@ -120,7 +120,7 @@ void check_stdvector_quaternion(const QuaternionType&)
  VERIFY_IS_APPROX(v[21], y);
  v.push_back(x);
  VERIFY_IS_APPROX(v[22], x);
-  VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(QuaternionType));
+  VERIFY((std::size_t)&(v[22]) == (std::size_t)&(v[21]) + sizeof(QuaternionType));

  // do a lot of push_back such that the vector gets internally resized
  // (with memory reallocation)
@@ -49,7 +49,7 @@ void check_stdvector_matrix(const MatrixType& m)
  VERIFY_IS_APPROX(v[21], y);
  v.push_back(x);
  VERIFY_IS_APPROX(v[22], x);
-  VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(MatrixType));
+  VERIFY((std::size_t)&(v[22]) == (std::size_t)&(v[21]) + sizeof(MatrixType));

  // do a lot of push_back such that the vector gets internally resized
  // (with memory reallocation)
@@ -84,7 +84,7 @@ void check_stdvector_transform(const TransformType&)
  VERIFY_IS_APPROX(v[21], y);
  v.push_back(x);
  VERIFY_IS_APPROX(v[22], x);
-  VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(TransformType));
+  VERIFY((std::size_t)&(v[22]) == (std::size_t)&(v[21]) + sizeof(TransformType));

  // do a lot of push_back such that the vector gets internally resized
  // (with memory reallocation)
@@ -119,7 +119,7 @@ void check_stdvector_quaternion(const QuaternionType&)
  VERIFY_IS_APPROX(v[21], y);
  v.push_back(x);
  VERIFY_IS_APPROX(v[22], x);
-  VERIFY((size_t)&(v[22]) == (size_t)&(v[21]) + sizeof(QuaternionType));
+  VERIFY((std::size_t)&(v[22]) == (std::size_t)&(v[21]) + sizeof(QuaternionType));

  // do a lot of push_back such that the vector gets internally resized
  // (with memory reallocation)
@@ -98,7 +98,7 @@ void check_unalignedassert_bad()
{
  float buf[sizeof(T)+16];
  float *unaligned = buf;
-  while((reinterpret_cast<size_t>(unaligned)&0xf)==0) ++unaligned; // make sure unaligned is really unaligned
+  while((reinterpret_cast<std::size_t>(unaligned)&0xf)==0) ++unaligned; // make sure unaligned is really unaligned
  T *x = ::new(static_cast<void*>(unaligned)) T;
  x->~T();
}
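Both this test and map.cpp rely on the same trick to reach the unaligned code path: if a pointer happens to be 16-byte aligned, step one element in so it no longer is. A hedged sketch (make_unaligned is illustrative, not from the commit):

#include <cstddef>

inline float* make_unaligned(float* buf)
{
  // one float is 4 bytes, so buf+1 sits 4 bytes past any 16-byte boundary
  return (std::size_t(buf) % 16 == 0) ? buf + 1 : buf;
}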