Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-04-19 08:09:36 +08:00)
Fix many long to int implicit conversions
This commit is contained in: parent e25e674852, commit b47ef1431f
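The hunks below all follow one pattern: a value of Eigen's wide index type (Index/DenseIndex, by default std::ptrdiff_t) was being stored in or passed as an int, which silently narrows on 64-bit platforms. A minimal standalone sketch of the warning and of the fix applied throughout this commit (the typedef and the -Wconversion flag are illustration-only assumptions, not Eigen code):

// Minimal sketch (not Eigen code): why "long to int" conversions appear and
// how widening the declaration removes them. Assumes DenseIndex is ptrdiff_t,
// which is Eigen's default index type on 64-bit platforms.
#include <cstddef>

typedef std::ptrdiff_t DenseIndex;          // stands in for Eigen::DenseIndex

DenseIndex vectorSize() { return 1 << 20; } // e.g. what dest.size() returns

int main()
{
  int narrow = vectorSize();          // implicit long-to-int conversion: warns under -Wconversion
  DenseIndex wide = vectorSize();     // the fix applied throughout this commit
  return (narrow == wide) ? 0 : 1;
}

Most of the hunks apply exactly this change to local variables, loop counters, and stride/index parameters.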
@@ -445,7 +445,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
 if(!evalToDest)
 {
 #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-int size = dest.size();
+Index size = dest.size();
 EIGEN_DENSE_STORAGE_CTOR_PLUGIN
 #endif
 if(!alphaIsCompatible)
@@ -510,7 +510,7 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
 if(!DirectlyUseRhs)
 {
 #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-int size = actualRhs.size();
+Index size = actualRhs.size();
 EIGEN_DENSE_STORAGE_CTOR_PLUGIN
 #endif
 Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
@@ -233,10 +233,10 @@ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(
 template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from)
 { (*to) = from; }

-template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, int /*stride*/)
+template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, DenseIndex /*stride*/)
 { return ploadu<Packet>(from); }

-template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, int /*stride*/)
+template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, DenseIndex /*stride*/)
 { pstore(to, from); }

 /** \internal tries to do cache prefetching of \a addr */
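In the generic fallbacks above, widening the stride parameter to DenseIndex keeps expressions such as from[3*stride] in 64-bit arithmetic. A rough standalone sketch of the same strided gather/scatter idea over plain scalars; the helper names gather4/scatter4 are hypothetical and are not Eigen's pgather/pscatter, which operate on SIMD packets:

// Sketch of strided gather/scatter with a wide stride type.
#include <cstddef>

typedef std::ptrdiff_t DenseIndex;

template<typename Scalar>
inline void gather4(Scalar out[4], const Scalar* from, DenseIndex stride)
{
  // stride stays 64-bit, so from[k*stride] never goes through an int index
  for (int k = 0; k < 4; ++k)
    out[k] = from[k * stride];
}

template<typename Scalar>
inline void scatter4(Scalar* to, const Scalar in[4], DenseIndex stride)
{
  for (int k = 0; k < 4; ++k)
    to[k * stride] = in[k];
}

int main()
{
  double src[12] = {0,1,2,3,4,5,6,7,8,9,10,11};
  double dst[4];
  gather4(dst, src, DenseIndex(3));      // picks src[0], src[3], src[6], src[9]
  double back[12] = {0};
  scatter4(back, dst, DenseIndex(3));    // writes them back with the same stride
  return (dst[2] == 6.0 && back[9] == 9.0) ? 0 : 1;
}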
@@ -194,7 +194,7 @@ DenseBase<Derived>::minCoeff(IndexType* index) const
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 internal::min_coeff_visitor<Derived> minVisitor;
 this->visit(minVisitor);
-*index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
+*index = IndexType((RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row);
 return minVisitor.res;
 }

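The minCoeff hunk wraps the assignment in an explicit IndexType(...) conversion: the visitor stores positions in Eigen's wide Index, while the caller-supplied IndexType may be narrower (for example int), so the narrowing should be spelled out rather than left implicit. A toy model of that fix, with hypothetical names standing in for the visitor result and the user's index type:

// Toy model of the explicit-conversion fix (types and names are hypothetical).
#include <cstddef>

typedef std::ptrdiff_t Index;   // what the visitor stores internally

template<typename IndexType>
void report_position(IndexType* out, Index found)
{
  // *out = found;             // would be an implicit long-to-int conversion
  *out = IndexType(found);     // the narrowing is now explicit and warning-free
}

int main()
{
  int pos = -1;
  report_position(&pos, Index(3));
  return pos == 3 ? 0 : 1;
}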
@@ -92,7 +92,7 @@ template<> EIGEN_STRONG_INLINE Packet4cf ploaddup<Packet4cf>(const std::complex<
 template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }

-template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, DenseIndex stride)
 {
 return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),
 std::imag(from[2*stride]), std::real(from[2*stride]),
@@ -100,7 +100,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packe
 std::imag(from[0*stride]), std::real(from[0*stride])));
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, DenseIndex stride)
 {
 __m128 low = _mm256_extractf128_ps(from.v, 0);
 to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),
@@ -224,17 +224,17 @@ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i&

 // NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
 // NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
-template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, DenseIndex stride)
 {
 return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
 from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, DenseIndex stride)
 {
 return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, DenseIndex stride)
 {
 __m128 low = _mm256_extractf128_ps(from, 0);
 to[stride*0] = _mm_cvtss_f32(low);
@@ -248,7 +248,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, co
 to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
 to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, DenseIndex stride)
 {
 __m128d low = _mm256_extractf128_pd(from, 0);
 to[stride*0] = _mm_cvtsd_f64(low);
@@ -68,14 +68,14 @@ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<flo
 return res;
 }

-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
 {
 std::complex<float> EIGEN_ALIGN16 af[2];
 af[0] = from[0*stride];
 af[1] = from[1*stride];
 return Packet2cf(vec_ld(0, (const float*)af));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
 {
 std::complex<float> EIGEN_ALIGN16 af[2];
 vec_st(from.v, 0, (float*)af);
@@ -190,7 +190,7 @@ pbroadcast4<Packet4i>(const int *a,
 a3 = vec_splat(a3, 3);
 }

-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
 {
 float EIGEN_ALIGN16 af[4];
 af[0] = from[0*stride];
@@ -199,7 +199,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
 af[3] = from[3*stride];
 return vec_ld(0, af);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
 {
 int EIGEN_ALIGN16 ai[4];
 ai[0] = from[0*stride];
@@ -208,7 +208,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
 ai[3] = from[3*stride];
 return vec_ld(0, ai);
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
 {
 float EIGEN_ALIGN16 af[4];
 vec_st(from, 0, af);
@@ -217,7 +217,7 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, co
 to[2*stride] = af[2];
 to[3*stride] = af[3];
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
 {
 int EIGEN_ALIGN16 ai[4];
 vec_st(from, 0, ai);
@@ -111,7 +111,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<
 template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }

-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
 {
 Packet4f res;
 res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);
@@ -121,7 +121,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packe
 return Packet2cf(res);
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
 {
 to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
 to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
@@ -222,7 +222,7 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
 {
 Packet4f res;
 res = vsetq_lane_f32(from[0*stride], res, 0);
@@ -231,7 +231,7 @@ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const floa
 res = vsetq_lane_f32(from[3*stride], res, 3);
 return res;
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
 {
 Packet4i res;
 res = vsetq_lane_s32(from[0*stride], res, 0);
@@ -241,14 +241,14 @@ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* f
 return res;
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
 {
 to[stride*0] = vgetq_lane_f32(from, 0);
 to[stride*1] = vgetq_lane_f32(from, 1);
 to[stride*2] = vgetq_lane_f32(from, 2);
 to[stride*3] = vgetq_lane_f32(from, 3);
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
 {
 to[stride*0] = vgetq_lane_s32(from, 0);
 to[stride*1] = vgetq_lane_s32(from, 1);
@@ -114,13 +114,13 @@ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<f
 template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); }


-template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, DenseIndex stride)
 {
 return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]),
 std::imag(from[0*stride]), std::real(from[0*stride])));
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, DenseIndex stride)
 {
 to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)),
 _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1)));
@@ -383,32 +383,32 @@ template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d&
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castps_pd(from))); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), Packet2d(_mm_castsi128_pd(from))); }

-template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, DenseIndex stride)
 {
 return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, DenseIndex stride)
 {
 return _mm_set_pd(from[1*stride], from[0*stride]);
 }
-template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, int stride)
+template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, DenseIndex stride)
 {
 return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
 }

-template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, DenseIndex stride)
 {
 to[stride*0] = _mm_cvtss_f32(from);
 to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
 to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
 to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, DenseIndex stride)
 {
 to[stride*0] = _mm_cvtsd_f64(from);
 to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
 }
-template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, int stride)
+template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, DenseIndex stride)
 {
 to[stride*0] = _mm_cvtsi128_si32(from);
 to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
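In the SSE, AVX, AltiVec and NEON specializations above, the stride only feeds pointer arithmetic, so switching the parameter from int to DenseIndex changes no intrinsic calls; it just keeps the indexing 64-bit. A compilable sketch in the same spirit, using raw SSE intrinsics rather than Eigen's packet types (assumes an SSE-capable x86 target):

// Standalone strided gather of 4 floats with a wide stride, mirroring the
// shape of the pgather<float, Packet4f> specialization shown above.
#include <cstddef>
#include <xmmintrin.h>   // SSE: __m128, _mm_set_ps, _mm_storeu_ps

typedef std::ptrdiff_t DenseIndex;

inline __m128 gather_f32(const float* from, DenseIndex stride)
{
  // _mm_set_ps takes its arguments from the highest lane down to lane 0
  return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

int main()
{
  float data[16];
  for (int i = 0; i < 16; ++i) data[i] = float(i);

  float out[4];
  _mm_storeu_ps(out, gather_f32(data, 4));   // picks up data[0], data[4], data[8], data[12]
  return (out[3] == 12.0f) ? 0 : 1;
}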
@@ -218,7 +218,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
 if(!EvalToDest)
 {
 #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-int size = dest.size();
+Index size = dest.size();
 EIGEN_DENSE_STORAGE_CTOR_PLUGIN
 #endif
 MappedDest(actualDestPtr, dest.size()) = dest;
@@ -227,7 +227,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
 if(!UseRhs)
 {
 #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-int size = rhs.size();
+Index size = rhs.size();
 EIGEN_DENSE_STORAGE_CTOR_PLUGIN
 #endif
 Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
@@ -322,7 +322,7 @@ template<> struct trmv_selector<RowMajor>
 if(!DirectlyUseRhs)
 {
 #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
-int size = actualRhs.size();
+Index size = actualRhs.size();
 EIGEN_DENSE_STORAGE_CTOR_PLUGIN
 #endif
 Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
@@ -50,11 +50,11 @@ public:
 Index m_outer;
 };

-inline BlockImpl(const XprType& xpr, int i)
+inline BlockImpl(const XprType& xpr, Index i)
 : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
 {}

-inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
 : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
 {}

@@ -65,7 +65,7 @@ public:
 {
 Index nnz = 0;
 Index end = m_outerStart + m_outerSize.value();
-for(int j=m_outerStart; j<end; ++j)
+for(Index j=m_outerStart; j<end; ++j)
 for(typename XprType::InnerIterator it(m_matrix, j); it; ++it)
 ++nnz;
 return nnz;
@@ -124,11 +124,11 @@ public:
 Index m_outer;
 };

-inline sparse_matrix_block_impl(const SparseMatrixType& xpr, int i)
+inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index i)
 : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
 {}

-inline sparse_matrix_block_impl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
 : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
 {}

@@ -228,8 +228,8 @@ public:
 Index nonZeros() const
 {
 if(m_matrix.isCompressed())
-return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
-- std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
+return Index( std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
+- std::size_t(m_matrix.outerIndexPtr()[m_outerStart]));
 else if(m_outerSize.value()==0)
 return 0;
 else
@@ -264,13 +264,14 @@ class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true
 : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols>
 {
 public:
+typedef _Index Index;
 typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
 typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
-inline BlockImpl(SparseMatrixType& xpr, int i)
+inline BlockImpl(SparseMatrixType& xpr, Index i)
 : Base(xpr, i)
 {}

-inline BlockImpl(SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
 : Base(xpr, startRow, startCol, blockRows, blockCols)
 {}

@@ -282,13 +283,14 @@ class BlockImpl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCol
 : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols>
 {
 public:
+typedef _Index Index;
 typedef const SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
 typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
-inline BlockImpl(SparseMatrixType& xpr, int i)
+inline BlockImpl(SparseMatrixType& xpr, Index i)
 : Base(xpr, i)
 {}

-inline BlockImpl(SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
 : Base(xpr, startRow, startCol, blockRows, blockCols)
 {}

@@ -362,7 +364,7 @@ public:

 /** Column or Row constructor
 */
-inline BlockImpl(const XprType& xpr, int i)
+inline BlockImpl(const XprType& xpr, Index i)
 : m_matrix(xpr),
 m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
 m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
@@ -372,32 +374,32 @@ public:

 /** Dynamic-size constructor
 */
-inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
 : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols)
 {}

-inline int rows() const { return m_blockRows.value(); }
-inline int cols() const { return m_blockCols.value(); }
+inline Index rows() const { return m_blockRows.value(); }
+inline Index cols() const { return m_blockCols.value(); }

-inline Scalar& coeffRef(int row, int col)
+inline Scalar& coeffRef(Index row, Index col)
 {
 return m_matrix.const_cast_derived()
 .coeffRef(row + m_startRow.value(), col + m_startCol.value());
 }

-inline const Scalar coeff(int row, int col) const
+inline const Scalar coeff(Index row, Index col) const
 {
 return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
 }

-inline Scalar& coeffRef(int index)
+inline Scalar& coeffRef(Index index)
 {
 return m_matrix.const_cast_derived()
 .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
 m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
 }

-inline const Scalar coeff(int index) const
+inline const Scalar coeff(Index index) const
 {
 return m_matrix
 .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -28,15 +28,16 @@ template<typename Lhs, typename Rhs, int Mode>
 struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
 {
 typedef typename Rhs::Scalar Scalar;
+typedef typename Lhs::Index Index;
 static void run(const Lhs& lhs, Rhs& other)
 {
-for(int col=0 ; col<other.cols() ; ++col)
+for(Index col=0 ; col<other.cols() ; ++col)
 {
-for(int i=0; i<lhs.rows(); ++i)
+for(Index i=0; i<lhs.rows(); ++i)
 {
 Scalar tmp = other.coeff(i,col);
 Scalar lastVal(0);
-int lastIndex = 0;
+Index lastIndex = 0;
 for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
 {
 lastVal = it.value();
@@ -62,11 +63,12 @@ template<typename Lhs, typename Rhs, int Mode>
 struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
 {
 typedef typename Rhs::Scalar Scalar;
+typedef typename Lhs::Index Index;
 static void run(const Lhs& lhs, Rhs& other)
 {
-for(int col=0 ; col<other.cols() ; ++col)
+for(Index col=0 ; col<other.cols() ; ++col)
 {
-for(int i=lhs.rows()-1 ; i>=0 ; --i)
+for(Index i=lhs.rows()-1 ; i>=0 ; --i)
 {
 Scalar tmp = other.coeff(i,col);
 Scalar l_ii = 0;
@@ -100,11 +102,12 @@ template<typename Lhs, typename Rhs, int Mode>
 struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
 {
 typedef typename Rhs::Scalar Scalar;
+typedef typename Lhs::Index Index;
 static void run(const Lhs& lhs, Rhs& other)
 {
-for(int col=0 ; col<other.cols() ; ++col)
+for(Index col=0 ; col<other.cols() ; ++col)
 {
-for(int i=0; i<lhs.cols(); ++i)
+for(Index i=0; i<lhs.cols(); ++i)
 {
 Scalar& tmp = other.coeffRef(i,col);
 if (tmp!=Scalar(0)) // optimization when other is actually sparse
@@ -132,11 +135,12 @@ template<typename Lhs, typename Rhs, int Mode>
 struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
 {
 typedef typename Rhs::Scalar Scalar;
+typedef typename Lhs::Index Index;
 static void run(const Lhs& lhs, Rhs& other)
 {
-for(int col=0 ; col<other.cols() ; ++col)
+for(Index col=0 ; col<other.cols() ; ++col)
 {
-for(int i=lhs.cols()-1; i>=0; --i)
+for(Index i=lhs.cols()-1; i>=0; --i)
 {
 Scalar& tmp = other.coeffRef(i,col);
 if (tmp!=Scalar(0)) // optimization when other is actually sparse
@@ -219,7 +223,7 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
 Rhs res(other.rows(), other.cols());
 res.reserve(other.nonZeros());

-for(int col=0 ; col<other.cols() ; ++col)
+for(Index col=0 ; col<other.cols() ; ++col)
 {
 // FIXME estimate number of non zeros
 tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
@@ -230,7 +234,7 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
 tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
 }

-for(int i=IsLower?0:lhs.cols()-1;
+for(Index i=IsLower?0:lhs.cols()-1;
 IsLower?i<lhs.cols():i>=0;
 i+=IsLower?1:-1)
 {
@@ -267,7 +271,7 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
 }


-int count = 0;
+Index count = 0;
 // FIXME compute a reference value to filter zeros
 for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
 {
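Each solver specialization above also gains a local typedef typename Lhs::Index Index;, so loop counters and the count variable take whatever index type the sparse matrix was instantiated with instead of hard-coding int. A small sketch of that pattern in a generic helper; DemoSparse and total_nonzeros are made-up illustrations, not Eigen's solver:

// Sketch: taking the index type from the matrix class rather than using int.
#include <cstddef>
#include <vector>

struct DemoSparse
{
  typedef std::ptrdiff_t Index;                   // plays the role of Lhs::Index
  Index rows() const { return Index(nnz_per_row.size()); }
  Index nonZerosInRow(Index i) const { return nnz_per_row[std::size_t(i)]; }
  std::vector<Index> nnz_per_row;
};

template<typename Lhs>
typename Lhs::Index total_nonzeros(const Lhs& lhs)
{
  typedef typename Lhs::Index Index;              // the pattern added by this commit
  Index count = 0;                                // was 'int count = 0;' before the fix
  for (Index i = 0; i < lhs.rows(); ++i)
    count += lhs.nonZerosInRow(i);
  return count;
}

int main()
{
  DemoSparse m;
  m.nnz_per_row = std::vector<DemoSparse::Index>(3, 2);
  return total_nonzeros(m) == 6 ? 0 : 1;
}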
@@ -181,7 +181,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
 if(rows>=3)
 {
 SquareMatrixType A = symm;
-int c = internal::random<int>(0,rows-2);
+Index c = internal::random<Index>(0,rows-2);
 A.bottomRightCorner(c,c).setZero();
 // Make sure a solution exists:
 vecX.setRandom();
@@ -196,7 +196,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
 // check non-full rank matrices
 if(rows>=3)
 {
-int r = internal::random<int>(1,rows-1);
+Index r = internal::random<Index>(1,rows-1);
 Matrix<Scalar,Dynamic,Dynamic> a = Matrix<Scalar,Dynamic,Dynamic>::Random(rows,r);
 SquareMatrixType A = a * a.adjoint();
 // Make sure a solution exists:
@@ -215,7 +215,7 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
 RealScalar s = (std::min)(16,std::numeric_limits<RealScalar>::max_exponent10/8);
 Matrix<Scalar,Dynamic,Dynamic> a = Matrix<Scalar,Dynamic,Dynamic>::Random(rows,rows);
 Matrix<RealScalar,Dynamic,1> d = Matrix<RealScalar,Dynamic,1>::Random(rows);
-for(int k=0; k<rows; ++k)
+for(Index k=0; k<rows; ++k)
 d(k) = d(k)*std::pow(RealScalar(10),internal::random<RealScalar>(-s,s));
 SquareMatrixType A = a * d.asDiagonal() * a.adjoint();
 // Make sure a solution exists:
@@ -69,7 +69,8 @@ struct mapstaticmethods_impl<PlainObjectType, true, false>
 {
 static void run(const PlainObjectType& m)
 {
-int rows = m.rows(), cols = m.cols();
+typedef typename PlainObjectType::Index Index;
+Index rows = m.rows(), cols = m.cols();

 int i = internal::random<int>(2,5), j = internal::random<int>(2,5);

@@ -115,7 +116,8 @@ struct mapstaticmethods_impl<PlainObjectType, true, true>
 {
 static void run(const PlainObjectType& v)
 {
-int size = v.size();
+typedef typename PlainObjectType::Index Index;
+Index size = v.size();

 int i = internal::random<int>(2,5);

@@ -7,14 +7,13 @@
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

-static int nb_temporaries;
+static long int nb_temporaries;

-inline void on_temporary_creation(int size) {
+inline void on_temporary_creation(long int size) {
 // here's a great place to set a breakpoint when debugging failures in this test!
 if(size!=0) nb_temporaries++;
 }

-
 #define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { on_temporary_creation(size); }

 #include "main.h"
@@ -12,14 +12,13 @@
 #undef EIGEN_DEFAULT_TO_ROW_MAJOR
 #endif

-static int nb_temporaries;
+static long int nb_temporaries;

-inline void on_temporary_creation(int) {
+inline void on_temporary_creation(long int) {
 // here's a great place to set a breakpoint when debugging failures in this test!
 nb_temporaries++;
 }

-
 #define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { on_temporary_creation(size); }

 #include "main.h"
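The two test headers above count temporaries through EIGEN_DENSE_STORAGE_CTOR_PLUGIN. The macro body is pasted into library code right after a local size declaration (see the gemv_selector and SelfadjointProductMatrix hunks earlier in this commit), so once that local becomes a wide Index, the hook has to accept a long value. A schematic of the mechanism; the "library side" below is a stub, not Eigen's DenseStorage constructor:

// Schematic only: shows why on_temporary_creation() must accept a wide
// integer once the injected 'size' local is an Index.
#include <cstddef>

typedef std::ptrdiff_t Index;

static long int nb_temporaries;
inline void on_temporary_creation(long int size)
{
  if (size != 0) nb_temporaries++;     // breakpoint-friendly counter, as in the test
}
#define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { on_temporary_creation(size); }

// Stub standing in for library code that expands the plugin:
void library_allocates_temporary(Index n)
{
  Index size = n;                      // the local the macro refers to
  EIGEN_DENSE_STORAGE_CTOR_PLUGIN      // expands to { on_temporary_creation(size); }
}

int main()
{
  library_allocates_temporary(8);
  return nb_temporaries == 1 ? 0 : 1;
}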
@@ -71,7 +71,7 @@ initSparse(double density,
 //sparseMat.startVec(j);
 for(Index i=0; i<sparseMat.innerSize(); i++)
 {
-int ai(i), aj(j);
+Index ai(i), aj(j);
 if(IsRowMajor)
 std::swap(ai,aj);
 Scalar v = (internal::random<double>(0,1) < density) ? internal::random<Scalar>() : Scalar(0);
@@ -163,7 +163,7 @@ initSparse(double density,
 {
 sparseVec.reserve(int(refVec.size()*density));
 sparseVec.setZero();
-for(Index i=0; i<refVec.size(); i++)
+for(int i=0; i<refVec.size(); i++)
 {
 Scalar v = (internal::random<double>(0,1) < density) ? internal::random<Scalar>() : Scalar(0);
 if (v!=Scalar(0))
@@ -147,7 +147,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
 DenseMatrix m1(rows,cols);
 m1.setZero();
 SparseMatrixType m2(rows,cols);
-VectorXi r(VectorXi::Constant(m2.outerSize(), ((mode%2)==0) ? m2.innerSize() : std::max<int>(1,m2.innerSize()/8)));
+VectorXi r(VectorXi::Constant(m2.outerSize(), ((mode%2)==0) ? int(m2.innerSize()) : std::max<int>(1,int(m2.innerSize())/8)));
 m2.reserve(r);
 for (int k=0; k<rows*cols; ++k)
 {
@@ -181,7 +181,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
 VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), refMat2.col(j0)+refMat2.col(j1));

 SparseMatrixType m3(rows,rows);
-m3.reserve(VectorXi::Constant(rows,rows/2));
+m3.reserve(VectorXi::Constant(rows,int(rows/2)));
 for(Index j=0; j<rows; ++j)
 for(Index k=0; k<j; ++k)
 m3.insertByOuterInner(j,k) = k+1;
@@ -384,11 +384,11 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
 {
 typedef Triplet<Scalar,Index> TripletType;
 std::vector<TripletType> triplets;
-int ntriplets = rows*cols;
+Index ntriplets = rows*cols;
 triplets.reserve(ntriplets);
 DenseMatrix refMat(rows,cols);
 refMat.setZero();
-for(int i=0;i<ntriplets;++i)
+for(Index i=0;i<ntriplets;++i)
 {
 Index r = internal::random<Index>(0,rows-1);
 Index c = internal::random<Index>(0,cols-1);
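In the sparse_basic hunks the reservation sizes end up in a VectorXi, whose scalar is plain int, so values derived from innerSize() and rows() (of type Index) are narrowed with an explicit int(...) cast instead of an implicit conversion. A hedged usage sketch of the same idea with Eigen's public API (assumes Eigen 3 headers are available; the sizes chosen are arbitrary):

// Usage sketch: reserving per-column space in a SparseMatrix from Index-typed
// sizes, casting explicitly to int because VectorXi holds ints.
#include <Eigen/Sparse>
#include <algorithm>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;
  typedef SpMat::Index Index;

  SpMat m(100, 100);
  Index guess = m.innerSize() / 8;                             // Index (typically 64-bit)
  Eigen::VectorXi r = Eigen::VectorXi::Constant(m.outerSize(),
                          std::max<int>(1, int(guess)));       // explicit narrowing, as in the test
  m.reserve(r);
  return 0;
}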