Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-04-16 14:49:39 +08:00)
Big renaming:
  start ---> head
  end   ---> tail

Much frustration with sed syntax. Need to learn perl some day.
This commit is contained in:
parent 78ba523d30
commit 39ac57fa6d
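For reference, a minimal usage sketch of what this rename means at call sites. It is not part of the commit's diff and assumes an Eigen version in which the new names exist (any version from this change onward); the old names are noted in comments, and segment() keeps its name.

// Minimal sketch (illustrative only, not from this commit).
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::VectorXd v(6);
  v << 0, 1, 2, 3, 4, 5;

  std::cout << v.head(2)       << "\n"; // first 2 coeffs     (was v.start(2))
  std::cout << v.tail(2)       << "\n"; // last 2 coeffs      (was v.end(2))
  std::cout << v.head<3>()     << "\n"; // fixed-size first 3 (was v.start<3>())
  std::cout << v.tail<3>()     << "\n"; // fixed-size last 3  (was v.end<3>())
  std::cout << v.segment(1, 3) << "\n"; // middle 3 coeffs, API unchanged
  return 0;
}

The hunks below apply the same rename throughout the library headers, tests, benchmarks, and documentation.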
@@ -194,7 +194,7 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)
 {
 // Find largest diagonal element
 int index_of_biggest_in_corner;
-biggest_in_corner = m_matrix.diagonal().end(size-j).cwise().abs()
+biggest_in_corner = m_matrix.diagonal().tail(size-j).cwise().abs()
 .maxCoeff(&index_of_biggest_in_corner);
 index_of_biggest_in_corner += j;

@@ -227,12 +227,12 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)

 if (j == 0) {
 m_matrix.row(0) = m_matrix.row(0).conjugate();
-m_matrix.col(0).end(size-1) = m_matrix.row(0).end(size-1) / m_matrix.coeff(0,0);
+m_matrix.col(0).tail(size-1) = m_matrix.row(0).tail(size-1) / m_matrix.coeff(0,0);
 continue;
 }

-RealScalar Djj = ei_real(m_matrix.coeff(j,j) - m_matrix.row(j).start(j)
-.dot(m_matrix.col(j).start(j)));
+RealScalar Djj = ei_real(m_matrix.coeff(j,j) - m_matrix.row(j).head(j)
+.dot(m_matrix.col(j).head(j)));
 m_matrix.coeffRef(j,j) = Djj;

 // Finish early if the matrix is not full rank.

@@ -244,13 +244,13 @@ LDLT<MatrixType>& LDLT<MatrixType>::compute(const MatrixType& a)

 int endSize = size - j - 1;
 if (endSize > 0) {
-_temporary.end(endSize).noalias() = m_matrix.block(j+1,0, endSize, j)
-* m_matrix.col(j).start(j).conjugate();
+_temporary.tail(endSize).noalias() = m_matrix.block(j+1,0, endSize, j)
+* m_matrix.col(j).head(j).conjugate();

-m_matrix.row(j).end(endSize) = m_matrix.row(j).end(endSize).conjugate()
-- _temporary.end(endSize).transpose();
+m_matrix.row(j).tail(endSize) = m_matrix.row(j).tail(endSize).conjugate()
+- _temporary.tail(endSize).transpose();

-m_matrix.col(j).end(endSize) = m_matrix.row(j).end(endSize) / Djj;
+m_matrix.col(j).tail(endSize) = m_matrix.row(j).tail(endSize) / Djj;
 }
 }
@@ -166,7 +166,7 @@ template<> struct ei_llt_inplace<LowerTriangular>
 Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);

 RealScalar x = ei_real(mat.coeff(k,k));
-if (k>0) x -= mat.row(k).start(k).squaredNorm();
+if (k>0) x -= mat.row(k).head(k).squaredNorm();
 if (x<=RealScalar(0))
 return false;
 mat.coeffRef(k,k) = x = ei_sqrt(x);
@@ -459,11 +459,11 @@ template<typename Derived> class MatrixBase
 VectorBlock<Derived> segment(int start, int size);
 const VectorBlock<Derived> segment(int start, int size) const;

-VectorBlock<Derived> start(int size);
-const VectorBlock<Derived> start(int size) const;
+VectorBlock<Derived> head(int size);
+const VectorBlock<Derived> head(int size) const;

-VectorBlock<Derived> end(int size);
-const VectorBlock<Derived> end(int size) const;
+VectorBlock<Derived> tail(int size);
+const VectorBlock<Derived> tail(int size) const;

 typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols);
 const typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols) const;
@@ -478,11 +478,11 @@ template<typename Derived> class MatrixBase
 template<int CRows, int CCols>
 const typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type) const;

-template<int Size> VectorBlock<Derived,Size> start(void);
-template<int Size> const VectorBlock<Derived,Size> start() const;
+template<int Size> VectorBlock<Derived,Size> head(void);
+template<int Size> const VectorBlock<Derived,Size> head() const;

-template<int Size> VectorBlock<Derived,Size> end();
-template<int Size> const VectorBlock<Derived,Size> end() const;
+template<int Size> VectorBlock<Derived,Size> tail();
+template<int Size> const VectorBlock<Derived,Size> tail() const;

 template<int Size> VectorBlock<Derived,Size> segment(int start);
 template<int Size> const VectorBlock<Derived,Size> segment(int start) const;
@@ -67,7 +67,7 @@ MatrixBase<Derived>::stableNorm() const
 {
 bi = ei_first_aligned(&const_cast_derived().coeffRef(0), n);
 if (bi>0)
-ei_stable_norm_kernel(start(bi), ssq, scale, invScale);
+ei_stable_norm_kernel(head(bi), ssq, scale, invScale);
 }
 for (; bi<n; bi+=blockSize)
 ei_stable_norm_kernel(VectorBlock<Derived,Dynamic,Alignment>(derived(),bi,std::min(blockSize, n - bi)), ssq, scale, invScale);
@@ -161,16 +161,16 @@ MatrixBase<Derived>::segment(int start, int size) const
 */
 template<typename Derived>
 inline VectorBlock<Derived>
-MatrixBase<Derived>::start(int size)
+MatrixBase<Derived>::head(int size)
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived>(derived(), 0, size);
 }

-/** This is the const version of start(int).*/
+/** This is the const version of head(int).*/
 template<typename Derived>
 inline const VectorBlock<Derived>
-MatrixBase<Derived>::start(int size) const
+MatrixBase<Derived>::head(int size) const
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived>(derived(), 0, size);
@@ -193,16 +193,16 @@ MatrixBase<Derived>::start(int size) const
 */
 template<typename Derived>
 inline VectorBlock<Derived>
-MatrixBase<Derived>::end(int size)
+MatrixBase<Derived>::tail(int size)
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived>(derived(), this->size() - size, size);
 }

-/** This is the const version of end(int).*/
+/** This is the const version of tail(int).*/
 template<typename Derived>
 inline const VectorBlock<Derived>
-MatrixBase<Derived>::end(int size) const
+MatrixBase<Derived>::tail(int size) const
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived>(derived(), this->size() - size, size);
@@ -254,17 +254,17 @@ MatrixBase<Derived>::segment(int start) const
 template<typename Derived>
 template<int Size>
 inline VectorBlock<Derived,Size>
-MatrixBase<Derived>::start()
+MatrixBase<Derived>::head()
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived,Size>(derived(), 0);
 }

-/** This is the const version of start<int>().*/
+/** This is the const version of head<int>().*/
 template<typename Derived>
 template<int Size>
 inline const VectorBlock<Derived,Size>
-MatrixBase<Derived>::start() const
+MatrixBase<Derived>::head() const
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived,Size>(derived(), 0);
@@ -284,17 +284,17 @@ MatrixBase<Derived>::start() const
 template<typename Derived>
 template<int Size>
 inline VectorBlock<Derived,Size>
-MatrixBase<Derived>::end()
+MatrixBase<Derived>::tail()
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived, Size>(derived(), size() - Size);
 }

-/** This is the const version of end<int>.*/
+/** This is the const version of tail<int>.*/
 template<typename Derived>
 template<int Size>
 inline const VectorBlock<Derived,Size>
-MatrixBase<Derived>::end() const
+MatrixBase<Derived>::tail() const
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
 return VectorBlock<Derived, Size>(derived(), size() - Size);
@@ -41,8 +41,8 @@ struct ei_selfadjoint_rank2_update_selector<Scalar,UType,VType,LowerTriangular>
 for (int i=0; i<size; ++i)
 {
 Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
-(alpha * ei_conj(u.coeff(i))) * v.end(size-i)
-+ (alpha * ei_conj(v.coeff(i))) * u.end(size-i);
+(alpha * ei_conj(u.coeff(i))) * v.tail(size-i)
++ (alpha * ei_conj(v.coeff(i))) * u.tail(size-i);
 }
 }
 };
@@ -55,8 +55,8 @@ struct ei_selfadjoint_rank2_update_selector<Scalar,UType,VType,UpperTriangular>
 const int size = u.size();
 for (int i=0; i<size; ++i)
 Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
-(alpha * ei_conj(u.coeff(i))) * v.start(i+1)
-+ (alpha * ei_conj(v.coeff(i))) * u.start(i+1);
+(alpha * ei_conj(u.coeff(i))) * v.head(i+1)
++ (alpha * ei_conj(v.coeff(i))) * u.head(i+1);
 }
 };
@@ -133,7 +133,7 @@ void ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix)
 for (int i=0; i<n; i++)
 {
 int k;
-m_eivalues.cwise().abs().end(n-i).minCoeff(&k);
+m_eivalues.cwise().abs().tail(n-i).minCoeff(&k);
 if (k != 0)
 {
 k += i;
@@ -620,7 +620,7 @@ void EigenSolver<MatrixType>::hqr2(MatrixType& matH)
 // Overflow control
 t = ei_abs(matH.coeff(i,n));
 if ((eps * t) * t > 1)
-matH.col(n).end(nn-i) /= t;
+matH.col(n).tail(nn-i) /= t;
 }
 }
 }
@@ -708,7 +708,7 @@ void EigenSolver<MatrixType>::hqr2(MatrixType& matH)
 // in this algo low==0 and high==nn-1 !!
 if (i < low || i > high)
 {
-m_eivec.row(i).end(nn-i) = matH.row(i).end(nn-i);
+m_eivec.row(i).tail(nn-i) = matH.row(i).tail(nn-i);
 }
 }
@@ -150,7 +150,7 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
 int remainingSize = n-i-1;
 RealScalar beta;
 Scalar h;
-matA.col(i).end(remainingSize).makeHouseholderInPlace(h, beta);
+matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
 matA.col(i).coeffRef(i+1) = beta;
 hCoeffs.coeffRef(i) = h;

@@ -159,11 +159,11 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector

 // A = H A
 matA.corner(BottomRight, remainingSize, remainingSize)
-.applyHouseholderOnTheLeft(matA.col(i).end(remainingSize-1), h, &temp.coeffRef(0));
+.applyHouseholderOnTheLeft(matA.col(i).tail(remainingSize-1), h, &temp.coeffRef(0));

 // A = A H'
 matA.corner(BottomRight, n, remainingSize)
-.applyHouseholderOnTheRight(matA.col(i).end(remainingSize-1).conjugate(), ei_conj(h), &temp.coeffRef(0));
+.applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), ei_conj(h), &temp.coeffRef(0));
 }
 }

@@ -178,7 +178,7 @@ HessenbergDecomposition<MatrixType>::matrixQ() const
 for (int i = n-2; i>=0; i--)
 {
 matQ.corner(BottomRight,n-i-1,n-i-1)
-.applyHouseholderOnTheLeft(m_matrix.col(i).end(n-i-2), ei_conj(m_hCoeffs.coeff(i)), &temp.coeffRef(0,0));
+.applyHouseholderOnTheLeft(m_matrix.col(i).tail(n-i-2), ei_conj(m_hCoeffs.coeff(i)), &temp.coeffRef(0,0));
 }
 return matQ;
 }
@@ -202,19 +202,19 @@ void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType&
 int remainingSize = n-i-1;
 RealScalar beta;
 Scalar h;
-matA.col(i).end(remainingSize).makeHouseholderInPlace(h, beta);
+matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);

 // Apply similarity transformation to remaining columns,
-// i.e., A = H A H' where H = I - h v v' and v = matA.col(i).end(n-i-1)
+// i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
 matA.col(i).coeffRef(i+1) = 1;

-hCoeffs.end(n-i-1) = (matA.corner(BottomRight,remainingSize,remainingSize).template selfadjointView<LowerTriangular>()
-* (ei_conj(h) * matA.col(i).end(remainingSize)));
+hCoeffs.tail(n-i-1) = (matA.corner(BottomRight,remainingSize,remainingSize).template selfadjointView<LowerTriangular>()
+* (ei_conj(h) * matA.col(i).tail(remainingSize)));

-hCoeffs.end(n-i-1) += (ei_conj(h)*Scalar(-0.5)*(hCoeffs.end(remainingSize).dot(matA.col(i).end(remainingSize)))) * matA.col(i).end(n-i-1);
+hCoeffs.tail(n-i-1) += (ei_conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);

 matA.corner(BottomRight, remainingSize, remainingSize).template selfadjointView<LowerTriangular>()
-.rankUpdate(matA.col(i).end(remainingSize), hCoeffs.end(remainingSize), -1);
+.rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1);

 matA.col(i).coeffRef(i+1) = beta;
 hCoeffs.coeffRef(i) = h;
@@ -242,7 +242,7 @@ void Tridiagonalization<MatrixType>::matrixQInPlace(MatrixBase<QDerived>* q) con
 for (int i = n-2; i>=0; i--)
 {
 matQ.corner(BottomRight,n-i-1,n-i-1)
-.applyHouseholderOnTheLeft(m_matrix.col(i).end(n-i-2), ei_conj(m_hCoeffs.coeff(i)), &aux.coeffRef(0,0));
+.applyHouseholderOnTheLeft(m_matrix.col(i).tail(n-i-2), ei_conj(m_hCoeffs.coeff(i)), &aux.coeffRef(0,0));
 }
 }
@@ -173,7 +173,7 @@ struct ei_unitOrthogonal_selector<Derived,3>
 if((!ei_isMuchSmallerThan(src.x(), src.z()))
 || (!ei_isMuchSmallerThan(src.y(), src.z())))
 {
-RealScalar invnm = RealScalar(1)/src.template start<2>().norm();
+RealScalar invnm = RealScalar(1)/src.template head<2>().norm();
 perp.coeffRef(0) = -ei_conj(src.y())*invnm;
 perp.coeffRef(1) = ei_conj(src.x())*invnm;
 perp.coeffRef(2) = 0;
@@ -184,7 +184,7 @@ struct ei_unitOrthogonal_selector<Derived,3>
 */
 else
 {
-RealScalar invnm = RealScalar(1)/src.template end<2>().norm();
+RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();
 perp.coeffRef(0) = 0;
 perp.coeffRef(1) = -ei_conj(src.z())*invnm;
 perp.coeffRef(2) = ei_conj(src.y())*invnm;
@@ -77,10 +77,10 @@ public:
 inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }

 /** \returns a read-only vector expression of the imaginary part (x,y,z) */
-inline const VectorBlock<Coefficients,3> vec() const { return coeffs().template start<3>(); }
+inline const VectorBlock<Coefficients,3> vec() const { return coeffs().template head<3>(); }

 /** \returns a vector expression of the imaginary part (x,y,z) */
-inline VectorBlock<Coefficients,3> vec() { return coeffs().template start<3>(); }
+inline VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); }

 /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
 inline const typename ei_traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); }
@@ -1102,7 +1102,7 @@ struct ei_transform_right_product_impl<Other,AffineCompact, Dim,HDim, HDim,1>
 static ResultType run(const TransformType& tr, const Other& other)
 {
 ResultType res;
-res.template start<HDim>() = tr.matrix() * other;
+res.template head<HDim>() = tr.matrix() * other;
 res.coeffRef(Dim) = other.coeff(Dim);
 }
 };
@@ -1120,7 +1120,7 @@ struct ei_transform_right_product_impl<Other,Mode, Dim,HDim, Dim,Dim>
 res.matrix().col(Dim) = tr.matrix().col(Dim);
 res.linearExt() = (tr.linearExt() * other).lazy();
 if(Mode==Affine)
-res.matrix().row(Dim).template start<Dim>() = tr.matrix().row(Dim).template start<Dim>();
+res.matrix().row(Dim).template head<Dim>() = tr.matrix().row(Dim).template head<Dim>();
 return res;
 }
 };
@@ -170,8 +170,8 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
 // Eq. (41)
 // Note that we first assign dst_mean to the destination so that there no need
 // for a temporary.
-Rt.col(m).start(m) = dst_mean;
-Rt.col(m).start(m).noalias() -= c*Rt.corner(TopLeft,m,m)*src_mean;
+Rt.col(m).head(m) = dst_mean;
+Rt.col(m).head(m).noalias() -= c*Rt.corner(TopLeft,m,m)*src_mean;

 if (with_scaling) Rt.block(0,0,m,m) *= c;
@@ -105,10 +105,10 @@ template<typename VectorsType, typename CoeffsType> class HouseholderSequence
 {
 if(m_trans)
 dst.corner(BottomRight, length-k, length-k)
-.applyHouseholderOnTheRight(m_vectors.col(k).end(length-k-1), m_coeffs.coeff(k), &temp.coeffRef(0));
+.applyHouseholderOnTheRight(m_vectors.col(k).tail(length-k-1), m_coeffs.coeff(k), &temp.coeffRef(0));
 else
 dst.corner(BottomRight, length-k, length-k)
-.applyHouseholderOnTheLeft(m_vectors.col(k).end(length-k-1), m_coeffs.coeff(k), &temp.coeffRef(k));
+.applyHouseholderOnTheLeft(m_vectors.col(k).tail(length-k-1), m_coeffs.coeff(k), &temp.coeffRef(k));
 }
 }

@@ -122,7 +122,7 @@ template<typename VectorsType, typename CoeffsType> class HouseholderSequence
 {
 int actual_k = m_trans ? vecs-k-1 : k;
 dst.corner(BottomRight, dst.rows(), length-actual_k)
-.applyHouseholderOnTheRight(m_vectors.col(actual_k).end(length-actual_k-1), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
+.applyHouseholderOnTheRight(m_vectors.col(actual_k).tail(length-actual_k-1), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
 }
 }

@@ -136,7 +136,7 @@ template<typename VectorsType, typename CoeffsType> class HouseholderSequence
 {
 int actual_k = m_trans ? k : vecs-k-1;
 dst.corner(BottomRight, length-actual_k, dst.cols())
-.applyHouseholderOnTheLeft(m_vectors.col(actual_k).end(length-actual_k-1), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
+.applyHouseholderOnTheLeft(m_vectors.col(actual_k).tail(length-actual_k-1), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
 }
 }
@@ -451,9 +451,9 @@ FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
 // bottom-right corner by Gaussian elimination.

 if(k<rows-1)
-m_lu.col(k).end(rows-k-1) /= m_lu.coeff(k,k);
+m_lu.col(k).tail(rows-k-1) /= m_lu.coeff(k,k);
 if(k<size-1)
-m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).end(rows-k-1) * m_lu.row(k).end(cols-k-1);
+m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).tail(rows-k-1) * m_lu.row(k).tail(cols-k-1);
 }

 // the main loop is over, we still have to accumulate the transpositions to find the
@@ -537,8 +537,8 @@ struct ei_kernel_retval<FullPivLU<_MatrixType> >
 m(dec().matrixLU().block(0, 0, rank(), cols));
 for(int i = 0; i < rank(); ++i)
 {
-if(i) m.row(i).start(i).setZero();
-m.row(i).end(cols-i) = dec().matrixLU().row(pivots.coeff(i)).end(cols-i);
+if(i) m.row(i).head(i).setZero();
+m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
 }
 m.block(0, 0, rank(), rank());
 m.block(0, 0, rank(), rank()).template triangularView<StrictlyLowerTriangular>().setZero();
@@ -558,7 +558,7 @@ struct ei_kernel_retval<FullPivLU<_MatrixType> >
 m.col(i).swap(m.col(pivots.coeff(i)));

 // see the negative sign in the next line, that's what we were talking about above.
-for(int i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).end(dimker);
+for(int i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
 for(int i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
 for(int k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
 }
@@ -229,7 +229,7 @@ struct ei_partial_lu_impl
 {
 int row_of_biggest_in_col;
 RealScalar biggest_in_corner
-= lu.col(k).end(rows-k).cwise().abs().maxCoeff(&row_of_biggest_in_col);
+= lu.col(k).tail(rows-k).cwise().abs().maxCoeff(&row_of_biggest_in_col);
 row_of_biggest_in_col += k;

 if(biggest_in_corner == 0) // the pivot is exactly zero: the matrix is singular
@@ -256,8 +256,8 @@ struct ei_partial_lu_impl
 {
 int rrows = rows-k-1;
 int rsize = size-k-1;
-lu.col(k).end(rrows) /= lu.coeff(k,k);
-lu.corner(BottomRight,rrows,rsize).noalias() -= lu.col(k).end(rrows) * lu.row(k).end(rsize);
+lu.col(k).tail(rrows) /= lu.coeff(k,k);
+lu.corner(BottomRight,rrows,rsize).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rsize);
 }
 }
 return true;
@@ -359,14 +359,14 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
 {
 // first, we look up in our table colSqNorms which column has the biggest squared norm
 int biggest_col_index;
-RealScalar biggest_col_sq_norm = colSqNorms.end(cols-k).maxCoeff(&biggest_col_index);
+RealScalar biggest_col_sq_norm = colSqNorms.tail(cols-k).maxCoeff(&biggest_col_index);
 biggest_col_index += k;

 // since our table colSqNorms accumulates imprecision at every step, we must now recompute
 // the actual squared norm of the selected column.
 // Note that not doing so does result in solve() sometimes returning inf/nan values
 // when running the unit test with 1000 repetitions.
-biggest_col_sq_norm = m_qr.col(biggest_col_index).end(rows-k).squaredNorm();
+biggest_col_sq_norm = m_qr.col(biggest_col_index).tail(rows-k).squaredNorm();

 // we store that back into our table: it can't hurt to correct our table.
 colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;
@@ -379,7 +379,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
 if(biggest_col_sq_norm < threshold_helper * (rows-k))
 {
 m_nonzero_pivots = k;
-m_hCoeffs.end(size-k).setZero();
+m_hCoeffs.tail(size-k).setZero();
 m_qr.corner(BottomRight,rows-k,cols-k)
 .template triangularView<StrictlyLowerTriangular>()
 .setZero();
@@ -396,7 +396,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const

 // generate the householder vector, store it below the diagonal
 RealScalar beta;
-m_qr.col(k).end(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
+m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);

 // apply the householder transformation to the diagonal coefficient
 m_qr.coeffRef(k,k) = beta;
@@ -406,10 +406,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const

 // apply the householder transformation
 m_qr.corner(BottomRight, rows-k, cols-k-1)
-.applyHouseholderOnTheLeft(m_qr.col(k).end(rows-k-1), m_hCoeffs.coeffRef(k), &temp.coeffRef(k+1));
+.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &temp.coeffRef(k+1));

 // update our table of squared norms of the columns
-colSqNorms.end(cols-k-1) -= m_qr.row(k).end(cols-k-1).cwise().abs2();
+colSqNorms.tail(cols-k-1) -= m_qr.row(k).tail(cols-k-1).cwise().abs2();
 }

 m_cols_permutation.setIdentity(cols);
@@ -306,7 +306,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
 m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
 cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
 if(k != row_of_biggest_in_corner) {
-m_qr.row(k).end(cols-k).swap(m_qr.row(row_of_biggest_in_corner).end(cols-k));
+m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k));
 ++number_of_transpositions;
 }
 if(k != col_of_biggest_in_corner) {
@@ -315,11 +315,11 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
 }

 RealScalar beta;
-m_qr.col(k).end(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
+m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
 m_qr.coeffRef(k,k) = beta;

 m_qr.corner(BottomRight, rows-k, cols-k-1)
-.applyHouseholderOnTheLeft(m_qr.col(k).end(rows-k-1), m_hCoeffs.coeffRef(k), &temp.coeffRef(k+1));
+.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &temp.coeffRef(k+1));
 }

 m_cols_permutation.setIdentity(cols);
@@ -360,7 +360,7 @@ struct ei_solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
 int remainingSize = rows-k;
 c.row(k).swap(c.row(dec().rowsTranspositions().coeff(k)));
 c.corner(BottomRight, remainingSize, rhs().cols())
-.applyHouseholderOnTheLeft(dec().matrixQR().col(k).end(remainingSize-1),
+.applyHouseholderOnTheLeft(dec().matrixQR().col(k).tail(remainingSize-1),
 dec().hCoeffs().coeff(k), &temp.coeffRef(0));
 }

@@ -400,7 +400,7 @@ typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<Matr
 for (int k = size-1; k >= 0; k--)
 {
 res.block(k, k, rows-k, rows-k)
-.applyHouseholderOnTheLeft(m_qr.col(k).end(rows-k-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k));
+.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), ei_conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k));
 res.row(k).swap(res.row(m_rows_transpositions.coeff(k)));
 }
 return res;
@@ -197,12 +197,12 @@ HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType&
 int remainingCols = cols - k - 1;

 RealScalar beta;
-m_qr.col(k).end(remainingRows).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
+m_qr.col(k).tail(remainingRows).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
 m_qr.coeffRef(k,k) = beta;

 // apply H to remaining part of m_qr from the left
 m_qr.corner(BottomRight, remainingRows, remainingCols)
-.applyHouseholderOnTheLeft(m_qr.col(k).end(remainingRows-1), m_hCoeffs.coeffRef(k), &temp.coeffRef(k+1));
+.applyHouseholderOnTheLeft(m_qr.col(k).tail(remainingRows-1), m_hCoeffs.coeffRef(k), &temp.coeffRef(k+1));
 }
 m_isInitialized = true;
 return *this;
@@ -226,7 +226,7 @@ struct ei_solve_retval<HouseholderQR<_MatrixType>, Rhs>
 // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
 c.applyOnTheLeft(householderSequence(
 dec().matrixQR().corner(TopLeft,rows,rank),
-dec().hCoeffs().start(rank)).transpose()
+dec().hCoeffs().head(rank)).transpose()
 );

 dec().matrixQR()
@@ -342,7 +342,7 @@ JacobiSVD<MatrixType, Options>& JacobiSVD<MatrixType, Options>::compute(const Ma
 for(int i = 0; i < diagSize; i++)
 {
 int pos;
-m_singularValues.end(diagSize-i).maxCoeff(&pos);
+m_singularValues.tail(diagSize-i).maxCoeff(&pos);
 if(pos)
 {
 pos += i;
@@ -205,7 +205,7 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
 g = s = scale = 0.0;
 if (i < m)
 {
-scale = A.col(i).end(m-i).cwise().abs().sum();
+scale = A.col(i).tail(m-i).cwise().abs().sum();
 if (scale != Scalar(0))
 {
 for (k=i; k<m; k++)
@@ -219,18 +219,18 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
 A(i, i)=f-g;
 for (j=l-1; j<n; j++)
 {
-s = A.col(j).end(m-i).dot(A.col(i).end(m-i));
+s = A.col(j).tail(m-i).dot(A.col(i).tail(m-i));
 f = s/h;
-A.col(j).end(m-i) += f*A.col(i).end(m-i);
+A.col(j).tail(m-i) += f*A.col(i).tail(m-i);
 }
-A.col(i).end(m-i) *= scale;
+A.col(i).tail(m-i) *= scale;
 }
 }
 W[i] = scale * g;
 g = s = scale = 0.0;
 if (i+1 <= m && i+1 != n)
 {
-scale = A.row(i).end(n-l+1).cwise().abs().sum();
+scale = A.row(i).tail(n-l+1).cwise().abs().sum();
 if (scale != Scalar(0))
 {
 for (k=l-1; k<n; k++)
@@ -242,13 +242,13 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
 g = -sign(ei_sqrt(s),f);
 h = f*g - s;
 A(i,l-1) = f-g;
-rv1.end(n-l+1) = A.row(i).end(n-l+1)/h;
+rv1.tail(n-l+1) = A.row(i).tail(n-l+1)/h;
 for (j=l-1; j<m; j++)
 {
-s = A.row(i).end(n-l+1).dot(A.row(j).end(n-l+1));
-A.row(j).end(n-l+1) += s*rv1.end(n-l+1).transpose();
+s = A.row(i).tail(n-l+1).dot(A.row(j).tail(n-l+1));
+A.row(j).tail(n-l+1) += s*rv1.tail(n-l+1).transpose();
 }
-A.row(i).end(n-l+1) *= scale;
+A.row(i).tail(n-l+1) *= scale;
 }
 }
 anorm = std::max( anorm, (ei_abs(W[i])+ei_abs(rv1[i])) );
@@ -265,12 +265,12 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
 V(j, i) = (A(i, j)/A(i, l))/g;
 for (j=l; j<n; j++)
 {
-s = V.col(j).end(n-l).dot(A.row(i).end(n-l));
-V.col(j).end(n-l) += s * V.col(i).end(n-l);
+s = V.col(j).tail(n-l).dot(A.row(i).tail(n-l));
+V.col(j).tail(n-l) += s * V.col(i).tail(n-l);
 }
 }
-V.row(i).end(n-l).setZero();
-V.col(i).end(n-l).setZero();
+V.row(i).tail(n-l).setZero();
+V.col(i).tail(n-l).setZero();
 }
 V(i, i) = 1.0;
 g = rv1[i];
@@ -282,7 +282,7 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
 l = i+1;
 g = W[i];
 if (n-l>0)
-A.row(i).end(n-l).setZero();
+A.row(i).tail(n-l).setZero();
 if (g != Scalar(0.0))
 {
 g = Scalar(1.0)/g;
@@ -290,15 +290,15 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
 {
 for (j=l; j<n; j++)
 {
-s = A.col(j).end(m-l).dot(A.col(i).end(m-l));
+s = A.col(j).tail(m-l).dot(A.col(i).tail(m-l));
 f = (s/A(i,i))*g;
-A.col(j).end(m-i) += f * A.col(i).end(m-i);
+A.col(j).tail(m-i) += f * A.col(i).tail(m-i);
 }
 }
-A.col(i).end(m-i) *= g;
+A.col(i).tail(m-i) *= g;
 }
 else
-A.col(i).end(m-i).setZero();
+A.col(i).tail(m-i).setZero();
 ++A(i,i);
 }
 // Diagonalization of the bidiagonal form: Loop over
@@ -408,7 +408,7 @@ SVD<MatrixType>& SVD<MatrixType>::compute(const MatrixType& matrix)
 for (int i=0; i<n; i++)
 {
 int k;
-W.end(n-i).maxCoeff(&k);
+W.tail(n-i).maxCoeff(&k);
 if (k != 0)
 {
 k += i;
@@ -451,8 +451,8 @@ struct ei_solve_retval<SVD<_MatrixType>, Rhs>
 aux.coeffRef(i) /= si;
 }
 const int minsize = std::min(dec().rows(),dec().cols());
-dst.col(j).start(minsize) = aux.start(minsize);
-if(dec().cols()>dec().rows()) dst.col(j).end(cols()-minsize).setZero();
+dst.col(j).head(minsize) = aux.head(minsize);
+if(dec().cols()>dec().rows()) dst.col(j).tail(cols()-minsize).setZero();
 dst.col(j) = dec().matrixV() * dst.col(j);
 }
 }
@@ -153,14 +153,14 @@ public :
 else
 dst.copyCoeff(index, j, src);
 }
-//dst.col(j).end(N-j) = src.col(j).end(N-j);
+//dst.col(j).tail(N-j) = src.col(j).tail(N-j);
 }
 }

 static EIGEN_DONT_INLINE void syr2(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){
 // ei_product_selfadjoint_rank2_update<real,0,LowerTriangularBit>(N,A.data(),N, X.data(), 1, Y.data(), 1, -1);
 for(int j=0; j<N; ++j)
-A.col(j).end(N-j) += X[j] * Y.end(N-j) + Y[j] * X.end(N-j);
+A.col(j).tail(N-j) += X[j] * Y.tail(N-j) + Y[j] * X.tail(N-j);
 }

 static EIGEN_DONT_INLINE void ger(gene_matrix & A, gene_vector & X, gene_vector & Y, int N){
@@ -16,8 +16,8 @@ void ei_compute_householder(const InputVector& x, OutputVector *v, typename Outp
 typedef typename OutputVector::RealScalar RealScalar;
 ei_assert(x.size() == v->size()+1);
 int n = x.size();
-RealScalar sigma = x.end(n-1).squaredNorm();
-*v = x.end(n-1);
+RealScalar sigma = x.tail(n-1).squaredNorm();
+*v = x.tail(n-1);
 // the big assumption in this code is that ei_abs2(x->coeff(0)) is not much smaller than sigma.
 if(ei_isMuchSmallerThan(sigma, ei_abs2(x.coeff(0))))
 {
@@ -41,8 +41,8 @@ A.setIdentity(); // Fill A with the identity.
 // Eigen // Matlab
 x.start(n) // x(1:n)
 x.start<n>() // x(1:n)
-x.end(n) // N = rows(x); x(N - n: N)
-x.end<n>() // N = rows(x); x(N - n: N)
+x.tail(n) // N = rows(x); x(N - n: N)
+x.tail<n>() // N = rows(x); x(N - n: N)
 x.segment(i, n) // x(i+1 : i+n)
 x.segment<n>(i) // x(i+1 : i+n)
 P.block(i, j, rows, cols) // P(i+1 : i+rows, j+1 : j+cols)
@@ -493,7 +493,7 @@ Read-write access to sub-vectors:
 <td></td>

 <tr><td>\code vec1.start(n)\endcode</td><td>\code vec1.start<n>()\endcode</td><td>the first \c n coeffs </td></tr>
-<tr><td>\code vec1.end(n)\endcode</td><td>\code vec1.end<n>()\endcode</td><td>the last \c n coeffs </td></tr>
+<tr><td>\code vec1.tail(n)\endcode</td><td>\code vec1.tail<n>()\endcode</td><td>the last \c n coeffs </td></tr>
 <tr><td>\code vec1.segment(pos,n)\endcode</td><td>\code vec1.segment<n>(pos)\endcode</td>
 <td>the \c size coeffs in \n the range [\c pos : \c pos + \c n [</td></tr>
 <tr style="border-style: dashed none dashed none;"><td>
@@ -27,8 +27,8 @@ struct unroll_echelon
 m.row(k).swap(m.row(k+rowOfBiggest));
 m.col(k).swap(m.col(k+colOfBiggest));
 m.template corner<CornerRows-1, CornerCols>(BottomRight)
--= m.col(k).template end<CornerRows-1>()
-* (m.row(k).template end<CornerCols>() / m(k,k));
+-= m.col(k).template tail<CornerRows-1>()
+* (m.row(k).template tail<CornerCols>() / m(k,k));
 }
 };

@@ -59,7 +59,7 @@ struct unroll_echelon<Derived, Dynamic>
 m.row(k).swap(m.row(k+rowOfBiggest));
 m.col(k).swap(m.col(k+colOfBiggest));
 m.corner(BottomRight, cornerRows-1, cornerCols)
--= m.col(k).end(cornerRows-1) * (m.row(k).end(cornerCols) / m(k,k));
+-= m.col(k).tail(cornerRows-1) * (m.row(k).tail(cornerCols) / m(k,k));
 }
 }
 };
@@ -1,5 +1,5 @@
 RowVector4i v = RowVector4i::Random();
 cout << "Here is the vector v:" << endl << v << endl;
-cout << "Here is v.end(2):" << endl << v.end(2) << endl;
-v.end(2).setZero();
+cout << "Here is v.tail(2):" << endl << v.tail(2) << endl;
+v.tail(2).setZero();
 cout << "Now the vector v is:" << endl << v << endl;
@@ -1,5 +1,5 @@
 RowVector4i v = RowVector4i::Random();
 cout << "Here is the vector v:" << endl << v << endl;
-cout << "Here is v.start(2):" << endl << v.start(2) << endl;
-v.start(2).setZero();
+cout << "Here is v.head(2):" << endl << v.head(2) << endl;
+v.head(2).setZero();
 cout << "Now the vector v is:" << endl << v << endl;
@@ -1,5 +1,5 @@
 RowVector4i v = RowVector4i::Random();
 cout << "Here is the vector v:" << endl << v << endl;
-cout << "Here is v.end(2):" << endl << v.end<2>() << endl;
-v.end<2>().setZero();
+cout << "Here is v.tail(2):" << endl << v.tail<2>() << endl;
+v.tail<2>().setZero();
 cout << "Now the vector v is:" << endl << v << endl;
@@ -1,5 +1,5 @@
 RowVector4i v = RowVector4i::Random();
 cout << "Here is the vector v:" << endl << v << endl;
-cout << "Here is v.start(2):" << endl << v.start<2>() << endl;
-v.start<2>().setZero();
+cout << "Here is v.head(2):" << endl << v.head<2>() << endl;
+v.head<2>().setZero();
 cout << "Now the vector v is:" << endl << v << endl;
@@ -67,7 +67,7 @@ template<typename Scalar,int Size> void homogeneous(void)
 VERIFY_IS_APPROX(m0, hm0.colwise().hnormalized());
 hm0.row(Size-1).setRandom();
 for(int j=0; j<Size; ++j)
-m0.col(j) = hm0.col(j).start(Size) / hm0(Size,j);
+m0.col(j) = hm0.col(j).head(Size) / hm0(Size,j);
 VERIFY_IS_APPROX(m0, hm0.colwise().hnormalized());

 T1MatrixType t1 = T1MatrixType::Random();
@@ -66,7 +66,7 @@ template<typename Scalar> void orthomethods_3()
 v41 = Vector4::Random(),
 v42 = Vector4::Random();
 v40.w() = v41.w() = v42.w() = 0;
-v42.template start<3>() = v40.template start<3>().cross(v41.template start<3>());
+v42.template head<3>() = v40.template head<3>().cross(v41.template head<3>());
 VERIFY_IS_APPROX(v40.cross3(v41), v42);
 }

@@ -88,8 +88,8 @@ template<typename Scalar, int Size> void orthomethods(int size=Size)

 if (size>=3)
 {
-v0.template start<2>().setZero();
-v0.end(size-2).setRandom();
+v0.template head<2>().setZero();
+v0.tail(size-2).setRandom();

 VERIFY_IS_MUCH_SMALLER_THAN(v0.unitOrthogonal().dot(v0), Scalar(1));
 VERIFY_IS_APPROX(v0.unitOrthogonal().norm(), RealScalar(1));
@@ -118,7 +118,7 @@ template<typename Scalar, int Mode> void transformations(void)
 t0.scale(v0);
 t1.prescale(v0);

-VERIFY_IS_APPROX( (t0 * Vector3(1,0,0)).template start<3>().norm(), v0.x());
+VERIFY_IS_APPROX( (t0 * Vector3(1,0,0)).template head<3>().norm(), v0.x());
 //VERIFY(!ei_isApprox((t1 * Vector3(1,0,0)).norm(), v0.x()));

 t0.setIdentity();
@@ -290,12 +290,12 @@ template<typename Scalar, int Mode> void transformations(void)
 // translation * vector
 t0.setIdentity();
 t0.translate(v0);
-VERIFY_IS_APPROX((t0 * v1).template start<3>(), Translation3(v0) * v1);
+VERIFY_IS_APPROX((t0 * v1).template head<3>(), Translation3(v0) * v1);

 // AlignedScaling * vector
 t0.setIdentity();
 t0.scale(v0);
-VERIFY_IS_APPROX((t0 * v1).template start<3>(), AlignedScaling3(v0) * v1);
+VERIFY_IS_APPROX((t0 * v1).template head<3>(), AlignedScaling3(v0) * v1);

 // test transform inversion
 t0.setIdentity();
@@ -53,7 +53,7 @@ template<typename MatrixType> void householder(const MatrixType& m)
 v1.makeHouseholder(essential, beta, alpha);
 v1.applyHouseholderOnTheLeft(essential,beta,tmp);
 VERIFY_IS_APPROX(v1.norm(), v2.norm());
-VERIFY_IS_MUCH_SMALLER_THAN(v1.end(rows-1).norm(), v1.norm());
+VERIFY_IS_MUCH_SMALLER_THAN(v1.tail(rows-1).norm(), v1.norm());
 v1 = VectorType::Random(rows);
 v2 = v1;
 v1.applyHouseholderOnTheLeft(essential,beta,tmp);
@@ -63,7 +63,7 @@ template<typename MatrixType> void householder(const MatrixType& m)
 m2(rows, cols);

 v1 = VectorType::Random(rows);
-if(even) v1.end(rows-1).setZero();
+if(even) v1.tail(rows-1).setZero();
 m1.colwise() = v1;
 m2 = m1;
 m1.col(0).makeHouseholder(essential, beta, alpha);
@@ -74,7 +74,7 @@ template<typename MatrixType> void householder(const MatrixType& m)
 VERIFY_IS_APPROX(ei_real(m1(0,0)), alpha);

 v1 = VectorType::Random(rows);
-if(even) v1.end(rows-1).setZero();
+if(even) v1.tail(rows-1).setZero();
 SquareMatrixType m3(rows,rows), m4(rows,rows);
 m3.rowwise() = v1.transpose();
 m4 = m3;
@@ -68,9 +68,9 @@ template<typename MatrixType> void product_selfadjoint(const MatrixType& m)
 if (rows>1)
 {
 m2 = m1.template triangularView<LowerTriangular>();
-m2.block(1,1,rows-1,cols-1).template selfadjointView<LowerTriangular>().rankUpdate(v1.end(rows-1),v2.start(cols-1));
+m2.block(1,1,rows-1,cols-1).template selfadjointView<LowerTriangular>().rankUpdate(v1.tail(rows-1),v2.head(cols-1));
 m3 = m1;
-m3.block(1,1,rows-1,cols-1) += v1.end(rows-1) * v2.start(cols-1).adjoint()+ v2.start(cols-1) * v1.end(rows-1).adjoint();
+m3.block(1,1,rows-1,cols-1) += v1.tail(rows-1) * v2.head(cols-1).adjoint()+ v2.head(cols-1) * v1.tail(rows-1).adjoint();
 VERIFY_IS_APPROX(m2, m3.template triangularView<LowerTriangular>().toDenseMatrix());
 }
 }
@@ -68,10 +68,10 @@ template<typename VectorType> void vectorRedux(const VectorType& w)
 minc = std::min(minc, ei_real(v[j]));
 maxc = std::max(maxc, ei_real(v[j]));
 }
-VERIFY_IS_APPROX(s, v.start(i).sum());
-VERIFY_IS_APPROX(p, v.start(i).prod());
-VERIFY_IS_APPROX(minc, v.real().start(i).minCoeff());
-VERIFY_IS_APPROX(maxc, v.real().start(i).maxCoeff());
+VERIFY_IS_APPROX(s, v.head(i).sum());
+VERIFY_IS_APPROX(p, v.head(i).prod());
+VERIFY_IS_APPROX(minc, v.real().head(i).minCoeff());
+VERIFY_IS_APPROX(maxc, v.real().head(i).maxCoeff());
 }

 for(int i = 0; i < size-1; i++)
@@ -85,10 +85,10 @@ template<typename VectorType> void vectorRedux(const VectorType& w)
 minc = std::min(minc, ei_real(v[j]));
 maxc = std::max(maxc, ei_real(v[j]));
 }
-VERIFY_IS_APPROX(s, v.end(size-i).sum());
-VERIFY_IS_APPROX(p, v.end(size-i).prod());
-VERIFY_IS_APPROX(minc, v.real().end(size-i).minCoeff());
-VERIFY_IS_APPROX(maxc, v.real().end(size-i).maxCoeff());
+VERIFY_IS_APPROX(s, v.tail(size-i).sum());
+VERIFY_IS_APPROX(p, v.tail(size-i).prod());
+VERIFY_IS_APPROX(minc, v.real().tail(size-i).minCoeff());
+VERIFY_IS_APPROX(maxc, v.real().tail(size-i).maxCoeff());
 }

 for(int i = 0; i < size/2; i++)
@@ -51,7 +51,7 @@ void makeNoisyCohyperplanarPoints(int numPoints,
 {
 cur_point = VectorType::Random(size)/*.normalized()*/;
 // project cur_point onto the hyperplane
-Scalar x = - (hyperplane->coeffs().start(size).cwise()*cur_point).sum();
+Scalar x = - (hyperplane->coeffs().head(size).cwise()*cur_point).sum();
 cur_point *= hyperplane->coeffs().coeff(size) / x;
 } while( cur_point.norm() < 0.5
 || cur_point.norm() > 2.0 );
@@ -127,15 +127,15 @@ template<typename MatrixType> void submatrices(const MatrixType& m)
 if (rows>2)
 {
 // test sub vectors
-VERIFY_IS_APPROX(v1.template start<2>(), v1.block(0,0,2,1));
-VERIFY_IS_APPROX(v1.template start<2>(), v1.start(2));
-VERIFY_IS_APPROX(v1.template start<2>(), v1.segment(0,2));
-VERIFY_IS_APPROX(v1.template start<2>(), v1.template segment<2>(0));
+VERIFY_IS_APPROX(v1.template head<2>(), v1.block(0,0,2,1));
+VERIFY_IS_APPROX(v1.template head<2>(), v1.head(2));
+VERIFY_IS_APPROX(v1.template head<2>(), v1.segment(0,2));
+VERIFY_IS_APPROX(v1.template head<2>(), v1.template segment<2>(0));
 int i = rows-2;
-VERIFY_IS_APPROX(v1.template end<2>(), v1.block(i,0,2,1));
-VERIFY_IS_APPROX(v1.template end<2>(), v1.end(2));
-VERIFY_IS_APPROX(v1.template end<2>(), v1.segment(i,2));
-VERIFY_IS_APPROX(v1.template end<2>(), v1.template segment<2>(i));
+VERIFY_IS_APPROX(v1.template tail<2>(), v1.block(i,0,2,1));
+VERIFY_IS_APPROX(v1.template tail<2>(), v1.tail(2));
+VERIFY_IS_APPROX(v1.template tail<2>(), v1.segment(i,2));
+VERIFY_IS_APPROX(v1.template tail<2>(), v1.template segment<2>(i));
 i = ei_random(0,rows-2);
 VERIFY_IS_APPROX(v1.segment(i,2), v1.template segment<2>(i));
@@ -106,7 +106,7 @@ template<typename _Scalar> class AlignedVector3
 {
 inline static void run(AlignedVector3& dest, const XprType& src)
 {
-dest.m_coeffs.template start<3>() = src;
+dest.m_coeffs.template head<3>() = src;
 dest.m_coeffs.w() = Scalar(0);
 }
 };
@@ -190,7 +190,7 @@ template<typename _Scalar> class AlignedVector3
 template<typename Derived>
 inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=dummy_precision<Scalar>()) const
 {
-return m_coeffs.template start<3>().isApprox(other,eps);
+return m_coeffs.template head<3>().isApprox(other,eps);
 }
 };
@@ -344,7 +344,7 @@ typename MatrixFunction<MatrixType,1>::DynMatrixType MatrixFunction<MatrixType,1
 if (i == m - 1) {
 AX = 0;
 } else {
-Matrix<Scalar,1,1> AXmatrix = A.row(i).end(m-1-i) * X.col(j).end(m-1-i);
+Matrix<Scalar,1,1> AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i);
 AX = AXmatrix(0,0);
 }

@@ -353,7 +353,7 @@ typename MatrixFunction<MatrixType,1>::DynMatrixType MatrixFunction<MatrixType,1
 if (j == 0) {
 XB = 0;
 } else {
-Matrix<Scalar,1,1> XBmatrix = X.row(i).start(j) * B.col(j).start(j);
+Matrix<Scalar,1,1> XBmatrix = X.row(i).head(j) * B.col(j).head(j);
 XB = XBmatrix(0,0);
 }
@@ -533,7 +533,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
 sing = true;
 }
 ipvt[j] = j;
-wa2[j] = fjac.col(j).start(j).stableNorm();
+wa2[j] = fjac.col(j).head(j).stableNorm();
 }
 if (sing) {
 ipvt.cwise()+=1;
@@ -87,7 +87,7 @@ void ei_lmpar(
 /* calculate an upper bound, paru, for the zero of the function. */

 for (j = 0; j < n; ++j)
-wa1[j] = r.col(j).start(j+1).dot(qtb.start(j+1)) / diag[ipvt[j]];
+wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]];

 gnorm = wa1.stableNorm();
 paru = gnorm / delta;