Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-07-06 21:25:15 +08:00)
protect calls to min and max with parentheses to make Eigen compatible with default windows.h

commit 49b6e9143e (parent f096553344)
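The change is mechanical: every call to min/max (std::min, std::max, std::numeric_limits<T>::min/max, the Cwise and AlignedBox members, and so on) gets an extra pair of parentheses around the function name. Below is a minimal sketch, not part of the commit, of why that works; the helper function smaller() and the commented-out windows.h lines are illustrative assumptions only.

// Minimal sketch (illustration only, not from the commit).
// Unless NOMINMAX is defined, windows.h provides function-like macros roughly like
//   #define min(a,b) (((a) < (b)) ? (a) : (b))
// so an ordinary call std::min(a, b) is rewritten by the preprocessor and fails to compile.
// A function-like macro is only expanded when its name is immediately followed by '(',
// so writing (std::min)(a, b) suppresses the macro and still calls std::min.

#include <algorithm>

// #define NOMINMAX   // the other common fix: stop windows.h from defining min/max
// #include <windows.h>

int smaller(int a, int b)
{
  // return std::min(a, b);   // breaks if a `min` macro is in effect
  return (std::min)(a, b);    // macro-proof call, identical behavior
}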
@@ -175,13 +175,6 @@
 #include <new>
 #endif
 
-// this needs to be done after all possible windows C header includes and before any Eigen source includes
-// (system C++ includes are supposed to be able to deal with this already):
-// windows.h defines min and max macros which would make Eigen fail to compile.
-#if defined(min) || defined(max)
-#error The preprocessor symbols 'min' or 'max' are defined. If you are compiling on Windows, do #define NOMINMAX to prevent windows.h from defining these symbols.
-#endif
-
 // defined in bits/termios.h
 #undef B0
 
@@ -233,7 +233,7 @@ template<> struct llt_inplace<Lower>

 Index blockSize = size/8;
 blockSize = (blockSize/16)*16;
-blockSize = std::min(std::max(blockSize,Index(8)), Index(128));
+blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128));

 for (Index k=0; k<size; k+=blockSize)
 {
@@ -241,7 +241,7 @@ template<> struct llt_inplace<Lower>
 // A00 | - | -
 // lu = A10 | A11 | -
 // A20 | A21 | A22
-Index bs = std::min(blockSize, size-k);
+Index bs = (std::min)(blockSize, size-k);
 Index rs = size - k - bs;
 Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs);
 Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs);
@@ -87,7 +87,7 @@ class BandMatrixBase : public EigenBase<Derived>
 if (i<=supers())
 {
 start = supers()-i;
-len = std::min(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
+len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
 }
 else if (i>=rows()-subs())
 len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs()));
@@ -96,11 +96,11 @@ class BandMatrixBase : public EigenBase<Derived>

 /** \returns a vector expression of the main diagonal */
 inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal()
-{ return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,std::min(rows(),cols())); }
+{ return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }

 /** \returns a vector expression of the main diagonal (const version) */
 inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const
-{ return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,std::min(rows(),cols())); }
+{ return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }

 template<int Index> struct DiagonalIntReturnType {
 enum {
@@ -122,13 +122,13 @@ class BandMatrixBase : public EigenBase<Derived>
 /** \returns a vector expression of the \a N -th sub or super diagonal */
 template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()
 {
-return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N));
+return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
 }

 /** \returns a vector expression of the \a N -th sub or super diagonal */
 template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const
 {
-return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, std::max(0,N), 1, diagonalLength(N));
+return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
 }

 /** \returns a vector expression of the \a i -th sub or super diagonal */
@@ -166,7 +166,7 @@ class BandMatrixBase : public EigenBase<Derived>
 protected:

 inline Index diagonalLength(Index i) const
-{ return i<0 ? std::min(cols(),rows()+i) : std::min(rows(),cols()-i); }
+{ return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); }
 };

 /**
@@ -742,7 +742,7 @@ struct setIdentity_impl<Derived, true>
 static EIGEN_STRONG_INLINE Derived& run(Derived& m)
 {
 m.setZero();
-const Index size = std::min(m.rows(), m.cols());
+const Index size = (std::min)(m.rows(), m.cols());
 for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
 return m;
 }
@@ -87,7 +87,7 @@ template<typename MatrixType, int DiagIndex> class Diagonal
 EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)

 inline Index rows() const
-{ return m_index.value()<0 ? std::min(m_matrix.cols(),m_matrix.rows()+m_index.value()) : std::min(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+{ return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }

 inline Index cols() const { return 1; }

@@ -116,7 +116,7 @@ struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
 */
 template<typename Scalar> struct scalar_min_op {
 EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
-EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return min(a, b); }
+EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); }
 template<typename Packet>
 EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
 { return internal::pmin(a,b); }
@@ -139,7 +139,7 @@ struct functor_traits<scalar_min_op<Scalar> > {
 */
 template<typename Scalar> struct scalar_max_op {
 EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
-EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return max(a, b); }
+EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); }
 template<typename Packet>
 EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
 { return internal::pmax(a,b); }
@@ -167,8 +167,8 @@ template<typename Scalar> struct scalar_hypot_op {
 {
 using std::max;
 using std::min;
-Scalar p = max(_x, _y);
-Scalar q = min(_x, _y);
+Scalar p = (max)(_x, _y);
+Scalar q = (min)(_x, _y);
 Scalar qp = q/p;
 return p * sqrt(Scalar(1) + qp*qp);
 }
@@ -37,7 +37,7 @@ struct isApprox_selector
 using std::min;
 const typename internal::nested<Derived,2>::type nested(x);
 const typename internal::nested<OtherDerived,2>::type otherNested(y);
-return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * min(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
+return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
 }
 };

@@ -94,7 +94,7 @@ struct isMuchSmallerThan_scalar_selector<Derived, true>
 *
 * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
 * are considered to be approximately equal within precision \f$ p \f$ if
-* \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
+* \f[ \Vert v - w \Vert \leqslant p\,\(min)(\Vert v\Vert, \Vert w\Vert). \f]
 * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm
 * L2 norm).
 *
@@ -134,12 +134,12 @@ pdiv(const Packet& a,
 /** \internal \returns the min of \a a and \a b (coeff-wise) */
 template<typename Packet> inline Packet
 pmin(const Packet& a,
-const Packet& b) { using std::min; return min(a, b); }
+const Packet& b) { using std::min; return (min)(a, b); }

 /** \internal \returns the max of \a a and \a b (coeff-wise) */
 template<typename Packet> inline Packet
 pmax(const Packet& a,
-const Packet& b) { using std::max; return max(a, b); }
+const Packet& b) { using std::max; return (max)(a, b); }

 /** \internal \returns the absolute value of \a a */
 template<typename Packet> inline Packet
@@ -378,8 +378,8 @@ struct hypot_impl
 using std::min;
 RealScalar _x = abs(x);
 RealScalar _y = abs(y);
-RealScalar p = max(_x, _y);
-RealScalar q = min(_x, _y);
+RealScalar p = (max)(_x, _y);
+RealScalar q = (min)(_x, _y);
 RealScalar qp = q/p;
 return p * sqrt(RealScalar(1) + qp*qp);
 }
@@ -737,7 +737,7 @@ struct scalar_fuzzy_default_impl<Scalar, false, false>
 static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
 {
 using std::min;
-return abs(x - y) <= min(abs(x), abs(y)) * prec;
+return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
 }
 static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
 {
@@ -776,7 +776,7 @@ struct scalar_fuzzy_default_impl<Scalar, true, false>
 static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
 {
 using std::min;
-return abs2(x - y) <= min(abs2(x), abs2(y)) * prec * prec;
+return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
 }
 };

@@ -111,7 +111,7 @@ template<typename Derived> class MatrixBase

 /** \returns the size of the main diagonal, which is min(rows(),cols()).
 * \sa rows(), cols(), SizeAtCompileTime. */
-inline Index diagonalSize() const { return std::min(rows(),cols()); }
+inline Index diagonalSize() const { return (std::min)(rows(),cols()); }

 /** \brief The plain matrix type corresponding to this expression.
 *
@@ -87,8 +87,8 @@ template<typename T> struct GenericNumTraits
 // make sure to override this for floating-point types
 return Real(0);
 }
-inline static T highest() { return std::numeric_limits<T>::max(); }
-inline static T lowest() { return IsInteger ? std::numeric_limits<T>::min() : (-std::numeric_limits<T>::max()); }
+inline static T highest() { return (std::numeric_limits<T>::max)(); }
+inline static T lowest() { return IsInteger ? (std::numeric_limits<T>::min)() : (-(std::numeric_limits<T>::max)()); }

 #ifdef EIGEN2_SUPPORT
 enum {
@@ -647,8 +647,8 @@ struct internal::conservative_resize_like_impl
 {
 // The storage order does not allow us to use reallocation.
 typename Derived::PlainObject tmp(rows,cols);
-const Index common_rows = std::min(rows, _this.rows());
-const Index common_cols = std::min(cols, _this.cols());
+const Index common_rows = (std::min)(rows, _this.rows());
+const Index common_cols = (std::min)(cols, _this.cols());
 tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
 _this.derived().swap(tmp);
 }
@@ -681,8 +681,8 @@ struct internal::conservative_resize_like_impl
 {
 // The storage order does not allow us to use reallocation.
 typename Derived::PlainObject tmp(other);
-const Index common_rows = std::min(tmp.rows(), _this.rows());
-const Index common_cols = std::min(tmp.cols(), _this.cols());
+const Index common_rows = (std::min)(tmp.rows(), _this.rows());
+const Index common_cols = (std::min)(tmp.cols(), _this.cols());
 tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
 _this.derived().swap(tmp);
 }
@@ -69,7 +69,7 @@ MatrixBase<Derived>::stableNorm() const
 if (bi>0)
 internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
 for (; bi<n; bi+=blockSize)
-internal::stable_norm_kernel(this->segment(bi,min(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
 return scale * internal::sqrt(ssq);
 }

@@ -103,12 +103,12 @@ MatrixBase<Derived>::blueNorm() const
 // For portability, the PORT subprograms "ilmaeh" and "rlmach"
 // are used. For any specific computer, each of the assignment
 // statements can be replaced
-nbig = std::numeric_limits<Index>::max(); // largest integer
+nbig = (std::numeric_limits<Index>::max)(); // largest integer
 ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
 it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
 iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
 iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
-rbig = std::numeric_limits<RealScalar>::max(); // largest floating-point number
+rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number

 iexp = -((1-iemin)/2);
 b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
@@ -167,8 +167,8 @@ MatrixBase<Derived>::blueNorm() const
 }
 else
 return internal::sqrt(amed);
-asml = min(abig, amed);
-abig = max(abig, amed);
+asml = (min)(abig, amed);
+abig = (max)(abig, amed);
 if(asml <= abig*relerr)
 return abig;
 else
@@ -492,7 +492,7 @@ struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearO
 {
 for(Index j = 0; j < dst.cols(); ++j)
 {
-Index maxi = std::min(j, dst.rows()-1);
+Index maxi = (std::min)(j, dst.rows()-1);
 for(Index i = 0; i <= maxi; ++i)
 dst.copyCoeff(i, j, src);
 if (ClearOpposite)
@@ -512,7 +512,7 @@ struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearO
 {
 for(Index i = j; i < dst.rows(); ++i)
 dst.copyCoeff(i, j, src);
-Index maxi = std::min(j, dst.rows());
+Index maxi = (std::min)(j, dst.rows());
 if (ClearOpposite)
 for(Index i = 0; i < maxi; ++i)
 dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
@@ -528,7 +528,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic
 {
 for(Index j = 0; j < dst.cols(); ++j)
 {
-Index maxi = std::min(j, dst.rows());
+Index maxi = (std::min)(j, dst.rows());
 for(Index i = 0; i < maxi; ++i)
 dst.copyCoeff(i, j, src);
 if (ClearOpposite)
@@ -548,7 +548,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic
 {
 for(Index i = j+1; i < dst.rows(); ++i)
 dst.copyCoeff(i, j, src);
-Index maxi = std::min(j, dst.rows()-1);
+Index maxi = (std::min)(j, dst.rows()-1);
 if (ClearOpposite)
 for(Index i = 0; i <= maxi; ++i)
 dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
@@ -564,7 +564,7 @@ struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, Cl
 {
 for(Index j = 0; j < dst.cols(); ++j)
 {
-Index maxi = std::min(j, dst.rows());
+Index maxi = (std::min)(j, dst.rows());
 for(Index i = 0; i < maxi; ++i)
 dst.copyCoeff(i, j, src);
 if (ClearOpposite)
@@ -584,7 +584,7 @@ struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, Cl
 {
 for(Index j = 0; j < dst.cols(); ++j)
 {
-Index maxi = std::min(j, dst.rows());
+Index maxi = (std::min)(j, dst.rows());
 for(Index i = maxi+1; i < dst.rows(); ++i)
 dst.copyCoeff(i, j, src);
 if (ClearOpposite)
@@ -796,7 +796,7 @@ bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
 RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
 for(Index j = 0; j < cols(); ++j)
 {
-Index maxi = std::min(j, rows()-1);
+Index maxi = (std::min)(j, rows()-1);
 for(Index i = 0; i <= maxi; ++i)
 {
 RealScalar absValue = internal::abs(coeff(i,j));
@@ -828,7 +828,7 @@ bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
 RealScalar threshold = maxAbsOnLowerPart * prec;
 for(Index j = 1; j < cols(); ++j)
 {
-Index maxi = std::min(j, rows()-1);
+Index maxi = (std::min)(j, rows()-1);
 for(Index i = 0; i < maxi; ++i)
 if(internal::abs(coeff(i, j)) > threshold) return false;
 }
@@ -78,7 +78,7 @@ static void run(Index rows, Index cols, Index depth,
 typedef gebp_traits<LhsScalar,RhsScalar> Traits;

 Index kc = blocking.kc(); // cache block size along the K direction
-Index mc = std::min(rows,blocking.mc()); // cache block size along the M direction
+Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
 //Index nc = blocking.nc(); // cache block size along the N direction

 gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
@@ -103,7 +103,7 @@ static void run(Index rows, Index cols, Index depth,
 // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
 for(Index k=0; k<depth; k+=kc)
 {
-const Index actual_kc = std::min(k+kc,depth)-k; // => rows of B', and cols of the A'
+const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

 // In order to reduce the chance that a thread has to wait for the other,
 // let's start by packing A'.
@@ -140,7 +140,7 @@ static void run(Index rows, Index cols, Index depth,
 // Then keep going as usual with the remaining A'
 for(Index i=mc; i<rows; i+=mc)
 {
-const Index actual_mc = std::min(i+mc,rows)-i;
+const Index actual_mc = (std::min)(i+mc,rows)-i;

 // pack A_i,k to A'
 pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
@@ -174,7 +174,7 @@ static void run(Index rows, Index cols, Index depth,
 // (==GEMM_VAR1)
 for(Index k2=0; k2<depth; k2+=kc)
 {
-const Index actual_kc = std::min(k2+kc,depth)-k2;
+const Index actual_kc = (std::min)(k2+kc,depth)-k2;

 // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
 // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
@@ -187,7 +187,7 @@ static void run(Index rows, Index cols, Index depth,
 // (==GEPP_VAR1)
 for(Index i2=0; i2<rows; i2+=mc)
 {
-const Index actual_mc = std::min(i2+mc,rows)-i2;
+const Index actual_mc = (std::min)(i2+mc,rows)-i2;

 // We pack the lhs's block into a sequential chunk of memory (L1 caching)
 // Note that this block will be read a very high number of times, which is equal to the number of
@@ -96,14 +96,14 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,

 for(Index k2=0; k2<depth; k2+=kc)
 {
-const Index actual_kc = std::min(k2+kc,depth)-k2;
+const Index actual_kc = (std::min)(k2+kc,depth)-k2;

 // note that the actual rhs is the transpose/adjoint of mat
 pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, size);

 for(Index i2=0; i2<size; i2+=mc)
 {
-const Index actual_mc = std::min(i2+mc,size)-i2;
+const Index actual_mc = (std::min)(i2+mc,size)-i2;

 pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);

@@ -112,7 +112,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
 // 2 - the actual_mc x actual_mc symmetric block => processed with a special kernel
 // 3 - after the diagonal => processed with gebp or skipped
 if (UpLo==Lower)
-gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, std::min(size,i2), alpha,
+gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, (std::min)(size,i2), alpha,
 -1, -1, 0, 0, allocatedBlockB);

 sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB);
@@ -120,7 +120,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
 if (UpLo==Upper)
 {
 Index j2 = i2+actual_mc;
-gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, std::max(Index(0), size-j2), alpha,
+gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, (std::max)(Index(0), size-j2), alpha,
 -1, -1, 0, 0, allocatedBlockB);
 }
 }
@@ -134,7 +134,7 @@ EIGEN_DONT_INLINE static void run(
 }
 else
 {
-skipColumns = std::min(skipColumns,cols);
+skipColumns = (std::min)(skipColumns,cols);
 // note that the skiped columns are processed later.
 }

@@ -386,7 +386,7 @@ EIGEN_DONT_INLINE static void run(
 }
 else
 {
-skipRows = std::min(skipRows,Index(rows));
+skipRows = (std::min)(skipRows,Index(rows));
 // note that the skiped columns are processed later.
 }
 eigen_internal_assert( alignmentPattern==NoneAligned
@@ -114,7 +114,7 @@ struct symm_pack_rhs
 }

 // second part: diagonal block
-for(Index j2=k2; j2<std::min(k2+rows,packet_cols); j2+=nr)
+for(Index j2=k2; j2<(std::min)(k2+rows,packet_cols); j2+=nr)
 {
 // again we can split vertically in three different parts (transpose, symmetric, normal)
 // transpose
@@ -179,7 +179,7 @@ struct symm_pack_rhs
 for(Index j2=packet_cols; j2<cols; ++j2)
 {
 // transpose
-Index half = std::min(end_k,j2);
+Index half = (std::min)(end_k,j2);
 for(Index k=k2; k<half; k++)
 {
 blockB[count] = conj(rhs(j2,k));
@@ -261,7 +261,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
 Index nc = cols; // cache block size along the N direction
 computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
 // kc must smaller than mc
-kc = std::min(kc,mc);
+kc = (std::min)(kc,mc);

 std::size_t sizeW = kc*Traits::WorkSpaceFactor;
 std::size_t sizeB = sizeW + kc*cols;
@@ -276,7 +276,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs

 for(Index k2=0; k2<size; k2+=kc)
 {
-const Index actual_kc = std::min(k2+kc,size)-k2;
+const Index actual_kc = (std::min)(k2+kc,size)-k2;

 // we have selected one row panel of rhs and one column panel of lhs
 // pack rhs's panel into a sequential chunk of memory
@@ -289,7 +289,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
 // 3 - the panel below the diagonal block => generic packed copy
 for(Index i2=0; i2<k2; i2+=mc)
 {
-const Index actual_mc = std::min(i2+mc,k2)-i2;
+const Index actual_mc = (std::min)(i2+mc,k2)-i2;
 // transposed packed copy
 pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc);

@@ -297,7 +297,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
 }
 // the block diagonal
 {
-const Index actual_mc = std::min(k2+kc,size)-k2;
+const Index actual_mc = (std::min)(k2+kc,size)-k2;
 // symmetric packed copy
 pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);

@@ -306,7 +306,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs

 for(Index i2=k2+kc; i2<size; i2+=mc)
 {
-const Index actual_mc = std::min(i2+mc,size)-i2;
+const Index actual_mc = (std::min)(i2+mc,size)-i2;
 gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
 (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);

@@ -352,14 +352,14 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh

 for(Index k2=0; k2<size; k2+=kc)
 {
-const Index actual_kc = std::min(k2+kc,size)-k2;
+const Index actual_kc = (std::min)(k2+kc,size)-k2;

 pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);

 // => GEPP
 for(Index i2=0; i2<rows; i2+=mc)
 {
-const Index actual_mc = std::min(i2+mc,rows)-i2;
+const Index actual_mc = (std::min)(i2+mc,rows)-i2;
 pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);

 gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
@@ -70,7 +70,7 @@ static EIGEN_DONT_INLINE void product_selfadjoint_vector(
 rhs[i] = *it;
 }

-Index bound = std::max(Index(0),size-8) & 0xfffffffe;
+Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
 if (FirstTriangular)
 bound = size - bound;

@@ -112,7 +112,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
 Scalar alpha)
 {
 // strip zeros
-Index diagSize = std::min(_rows,_depth);
+Index diagSize = (std::min)(_rows,_depth);
 Index rows = IsLower ? _rows : diagSize;
 Index depth = IsLower ? diagSize : _depth;
 Index cols = _cols;
@@ -145,7 +145,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
 IsLower ? k2>0 : k2<depth;
 IsLower ? k2-=kc : k2+=kc)
 {
-Index actual_kc = std::min(IsLower ? k2 : depth-k2, kc);
+Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
 Index actual_k2 = IsLower ? k2-actual_kc : k2;

 // align blocks with the end of the triangular part for trapezoidal lhs
@@ -203,10 +203,10 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
 // the part below (lower case) or above (upper case) the diagonal => GEPP
 {
 Index start = IsLower ? k2 : 0;
-Index end = IsLower ? rows : std::min(actual_k2,rows);
+Index end = IsLower ? rows : (std::min)(actual_k2,rows);
 for(Index i2=start; i2<end; i2+=mc)
 {
-const Index actual_mc = std::min(i2+mc,end)-i2;
+const Index actual_mc = (std::min)(i2+mc,end)-i2;
 gemm_pack_lhs<Scalar, Index, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
 (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);

@@ -240,7 +240,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
 Scalar alpha)
 {
 // strip zeros
-Index diagSize = std::min(_cols,_depth);
+Index diagSize = (std::min)(_cols,_depth);
 Index rows = _rows;
 Index depth = IsLower ? _depth : diagSize;
 Index cols = IsLower ? diagSize : _cols;
@@ -275,7 +275,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
 IsLower ? k2<depth : k2>0;
 IsLower ? k2+=kc : k2-=kc)
 {
-Index actual_kc = std::min(IsLower ? depth-k2 : k2, kc);
+Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
 Index actual_k2 = IsLower ? k2 : k2-actual_kc;

 // align blocks with the end of the triangular part for trapezoidal rhs
@@ -286,7 +286,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
 }

 // remaining size
-Index rs = IsLower ? std::min(cols,actual_k2) : cols - k2;
+Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
 // size of the triangular part
 Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;

@@ -327,7 +327,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,

 for (Index i2=0; i2<rows; i2+=mc)
 {
-const Index actual_mc = std::min(mc,rows-i2);
+const Index actual_mc = (std::min)(mc,rows-i2);
 pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);

 // triangular kernel
@@ -56,7 +56,7 @@ struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C

 for (Index pi=0; pi<cols; pi+=PanelWidth)
 {
-Index actualPanelWidth = std::min(PanelWidth, cols-pi);
+Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
 for (Index k=0; k<actualPanelWidth; ++k)
 {
 Index i = pi + k;
@@ -107,7 +107,7 @@ struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C

 for (Index pi=0; pi<cols; pi+=PanelWidth)
 {
-Index actualPanelWidth = std::min(PanelWidth, cols-pi);
+Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
 for (Index k=0; k<actualPanelWidth; ++k)
 {
 Index i = pi + k;
@@ -85,7 +85,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
 IsLower ? k2<size : k2>0;
 IsLower ? k2+=kc : k2-=kc)
 {
-const Index actual_kc = std::min(IsLower ? size-k2 : k2, kc);
+const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc);

 // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
 // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
@@ -164,7 +164,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
 Index end = IsLower ? size : k2-kc;
 for(Index i2=start; i2<end; i2+=mc)
 {
-const Index actual_mc = std::min(mc,end-i2);
+const Index actual_mc = (std::min)(mc,end-i2);
 if (actual_mc>0)
 {
 pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);
@@ -222,7 +222,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
 IsLower ? k2>0 : k2<size;
 IsLower ? k2-=kc : k2+=kc)
 {
-const Index actual_kc = std::min(IsLower ? k2 : size-k2, kc);
+const Index actual_kc = (std::min)(IsLower ? k2 : size-k2, kc);
 Index actual_k2 = IsLower ? k2-actual_kc : k2 ;

 Index startPanel = IsLower ? 0 : k2+actual_kc;
@@ -251,7 +251,7 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage

 for(Index i2=0; i2<rows; i2+=mc)
 {
-const Index actual_mc = std::min(mc,rows-i2);
+const Index actual_mc = (std::min)(mc,rows-i2);

 // triangular solver kernel
 {
@@ -60,7 +60,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
 IsLower ? pi<size : pi>0;
 IsLower ? pi+=PanelWidth : pi-=PanelWidth)
 {
-Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
+Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);

 Index r = IsLower ? pi : size - pi; // remaining size
 if (r > 0)
@@ -114,7 +114,7 @@ struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Con
 IsLower ? pi<size : pi>0;
 IsLower ? pi+=PanelWidth : pi-=PanelWidth)
 {
-Index actualPanelWidth = std::min(IsLower ? size - pi : pi, PanelWidth);
+Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
 Index startBlock = IsLower ? pi : pi-actualPanelWidth;
 Index endBlock = IsLower ? pi + actualPanelWidth : 0;

@@ -399,7 +399,7 @@
 #define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
 template<typename OtherDerived> \
 EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
-METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+(METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
 { \
 return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
 }
@@ -156,7 +156,7 @@ inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)

 if (ptr != 0)
 {
-std::memcpy(newptr, ptr, std::min(size,old_size));
+std::memcpy(newptr, ptr, (std::min)(size,old_size));
 aligned_free(ptr);
 }

@@ -663,7 +663,7 @@ public:

 size_type max_size() const throw()
 {
-return std::numeric_limits<size_type>::max();
+return (std::numeric_limits<size_type>::max)();
 }

 pointer allocate( size_type num, const void* hint = 0 )
@@ -903,7 +903,7 @@ inline int queryTopLevelCacheSize()
 {
 int l1, l2(-1), l3(-1);
 queryCacheSizes(l1,l2,l3);
-return std::max(l2,l3);
+return (std::max)(l2,l3);
 }

 } // end namespace internal
@@ -84,11 +84,11 @@ template<typename ExpressionType> class Cwise

 template<typename OtherDerived>
 const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
-min(const MatrixBase<OtherDerived> &other) const;
+(min)(const MatrixBase<OtherDerived> &other) const;

 template<typename OtherDerived>
 const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
-max(const MatrixBase<OtherDerived> &other) const;
+(max)(const MatrixBase<OtherDerived> &other) const;

 const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op) abs() const;
 const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op) abs2() const;
@@ -100,7 +100,7 @@ inline ExpressionType& Cwise<ExpressionType>::operator/=(const MatrixBase<OtherD
 template<typename ExpressionType>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
-Cwise<ExpressionType>::min(const MatrixBase<OtherDerived> &other) const
+(Cwise<ExpressionType>::min)(const MatrixBase<OtherDerived> &other) const
 {
 return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived());
 }
@@ -109,7 +109,7 @@ Cwise<ExpressionType>::min(const MatrixBase<OtherDerived> &other) const
 template<typename ExpressionType>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
-Cwise<ExpressionType>::max(const MatrixBase<OtherDerived> &other) const
+(Cwise<ExpressionType>::max)(const MatrixBase<OtherDerived> &other) const
 {
 return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived());
 }
@@ -51,14 +51,14 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
 { if (AmbientDimAtCompileTime!=Dynamic) setNull(); }

 /** Constructs a null box with \a _dim the dimension of the ambient space. */
-inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
+inline explicit AlignedBox(int _dim) : m_(min)(_dim), m_(max)(_dim)
 { setNull(); }

 /** Constructs a box with extremities \a _min and \a _max. */
-inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {}
+inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_(min)(_min), m_(max)(_max) {}

 /** Constructs a box containing a single point \a p. */
-inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {}
+inline explicit AlignedBox(const VectorType& p) : m_(min)(p), m_(max)(p) {}

 ~AlignedBox() {}

@@ -71,18 +71,18 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
 /** Makes \c *this a null/empty box. */
 inline void setNull()
 {
-m_min.setConstant( std::numeric_limits<Scalar>::max());
-m_max.setConstant(-std::numeric_limits<Scalar>::max());
+m_min.setConstant( std::numeric_limits<Scalar>::(max)());
+m_max.setConstant(-std::numeric_limits<Scalar>::(max)());
 }

 /** \returns the minimal corner */
-inline const VectorType& min() const { return m_min; }
+inline const VectorType& (min)() const { return m_min; }
 /** \returns a non const reference to the minimal corner */
-inline VectorType& min() { return m_min; }
+inline VectorType& (min)() { return m_min; }
 /** \returns the maximal corner */
-inline const VectorType& max() const { return m_max; }
+inline const VectorType& (max)() const { return m_max; }
 /** \returns a non const reference to the maximal corner */
-inline VectorType& max() { return m_max; }
+inline VectorType& (max)() { return m_max; }

 /** \returns true if the point \a p is inside the box \c *this. */
 inline bool contains(const VectorType& p) const
@@ -90,19 +90,19 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==

 /** \returns true if the box \a b is entirely inside the box \c *this. */
 inline bool contains(const AlignedBox& b) const
-{ return (m_min.cwise()<=b.min()).all() && (b.max().cwise()<=m_max).all(); }
+{ return (m_min.cwise()<=b.(min)()).all() && (b.(max)().cwise()<=m_max).all(); }

 /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
 inline AlignedBox& extend(const VectorType& p)
-{ m_min = m_min.cwise().min(p); m_max = m_max.cwise().max(p); return *this; }
+{ m_min = m_min.cwise().(min)(p); m_max = m_max.cwise().(max)(p); return *this; }

 /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
 inline AlignedBox& extend(const AlignedBox& b)
-{ m_min = m_min.cwise().min(b.m_min); m_max = m_max.cwise().max(b.m_max); return *this; }
+{ m_min = m_min.cwise().(min)(b.m_min); m_max = m_max.cwise().(max)(b.m_max); return *this; }

 /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
 inline AlignedBox& clamp(const AlignedBox& b)
-{ m_min = m_min.cwise().max(b.m_min); m_max = m_max.cwise().min(b.m_max); return *this; }
+{ m_min = m_min.cwise().(max)(b.m_min); m_max = m_max.cwise().(min)(b.m_max); return *this; }

 /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
 inline AlignedBox& translate(const VectorType& t)
@@ -138,8 +138,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
 template<typename OtherScalarType>
 inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
 {
-m_min = other.min().template cast<Scalar>();
-m_max = other.max().template cast<Scalar>();
+m_min = other.(min)().template cast<Scalar>();
+m_max = other.(max)().template cast<Scalar>();
 }

 /** \returns \c true if \c *this is approximately equal to \a other, within the precision
@ -64,9 +64,9 @@ template<typename MatrixType> class SVD
SVD() {} // a user who relied on compiler-generated default compiler reported problems with MSVC in 2.0.7

SVD(const MatrixType& matrix)
: m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())),
: m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())),
m_matV(matrix.cols(),matrix.cols()),
m_sigma(std::min(matrix.rows(),matrix.cols()))
m_sigma((std::min)(matrix.rows(),matrix.cols()))
{
compute(matrix);
}
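Most of the hunks that follow apply the same mechanical change: each call to std::min or std::max is wrapped in parentheses so that the function-like min/max macros from an unpatched windows.h can never expand at the call site. A minimal standalone sketch (not Eigen code) of why the extra parentheses are enough:

#include <algorithm>

// Stand-ins for what windows.h defines when NOMINMAX is not set:
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

int clampedBlockSize(int blockSize)
{
  // return std::min(std::max(blockSize, 8), 128);   // the macros expand here and the call breaks
  return (std::min)((std::max)(blockSize, 8), 128);  // 'min' and 'max' are not followed by '(',
                                                     // so the function-like macros stay inert
}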
@ -108,13 +108,13 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
{
const int m = matrix.rows();
const int n = matrix.cols();
const int nu = std::min(m,n);
const int nu = (std::min)(m,n);
ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!");
ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices");

m_matU.resize(m, nu);
m_matU.setZero();
m_sigma.resize(std::min(m,n));
m_sigma.resize((std::min)(m,n));
m_matV.resize(n,n);

RowVector e(n);

@ -126,9 +126,9 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)

// Reduce A to bidiagonal form, storing the diagonal elements
// in s and the super-diagonal elements in e.
int nct = std::min(m-1,n);
int nct = (std::min)(m-1,n);
int nrt = std::max(0,std::min(n-2,m));
int nrt = (std::max)(0,(std::min)(n-2,m));
for (k = 0; k < std::max(nct,nrt); ++k)
for (k = 0; k < (std::max)(nct,nrt); ++k)
{
if (k < nct)
{

@ -193,7 +193,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)


// Set up the final bidiagonal matrix or order p.
int p = std::min(n,m+1);
int p = (std::min)(n,m+1);
if (nct < n)
m_sigma[nct] = matA(nct,nct);
if (m < p)

@ -380,7 +380,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
case 3:
{
// Calculate the shift.
Scalar scale = std::max(std::max(std::max(std::max(
Scalar scale = (std::max)((std::max)((std::max)((std::max)(
ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
ei_abs(m_sigma[k])),ei_abs(e[k]));
Scalar sp = m_sigma[p-1]/scale;

@ -423,7 +423,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
JacobiRotation<ComplexScalar> rot;
rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
m_matT.topRows(std::min(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
if(computeU) m_matU.applyOnTheRight(il, il+1, rot);

for(Index i=il+1 ; i<iu ; i++)

@ -431,7 +431,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
m_matT.topRows(std::min(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
}
}

@ -435,7 +435,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
Scalar norm = 0.0;
for (Index j = 0; j < size; ++j)
{
norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
}

// Backsubstitute to find vectors of upper triangular form
@ -564,7 +564,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()

// Overflow control
using std::max;
Scalar t = max(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
if ((eps * t) * t > Scalar(1))
m_matT.block(i, n-1, size-i, 2) /= t;
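Where the code first brings std::max into scope with a using-declaration, the unqualified call gets the same treatment: (max)(...) cannot be mistaken for a macro invocation but still resolves to std::max through normal name lookup. A rough standalone sketch of the idiom, with made-up names:

#include <algorithm>
#include <cmath>

#define max(a, b) (((a) > (b)) ? (a) : (b))   // stand-in for the windows.h macro

double overflowGuard(double a, double b)
{
  using std::max;                              // unqualified 'max' now names std::max
  double t = (max)(std::abs(a), std::abs(b));  // the parentheses keep the macro from firing
  return t > 1.0 ? t : 1.0;
}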
@ -290,7 +290,7 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
Scalar norm = 0.0;
for (Index j = 0; j < size; ++j)
norm += m_matT.row(j).segment(std::max(j-1,Index(0)), size-std::max(j-1,Index(0))).cwiseAbs().sum();
norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
return norm;
}

@ -442,7 +442,7 @@ inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Inde

// These Householder transformations form the O(n^3) part of the algorithm
m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
m_matT.block(0, k, std::min(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
if (computeU)
m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
}
@ -111,13 +111,13 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
}

/** \returns the minimal corner */
inline const VectorType& min() const { return m_min; }
inline const VectorType& (min)() const { return m_min; }
/** \returns a non const reference to the minimal corner */
inline VectorType& min() { return m_min; }
inline VectorType& (min)() { return m_min; }
/** \returns the maximal corner */
inline const VectorType& max() const { return m_max; }
inline const VectorType& (max)() const { return m_max; }
/** \returns a non const reference to the maximal corner */
inline VectorType& max() { return m_max; }
inline VectorType& (max)() { return m_max; }

/** \returns the center of the box */
inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,
@ -196,7 +196,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)

/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=b.min().array()).all() && (b.max().array()<=m_max.array()).all(); }
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }

/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
template<typename Derived>

@ -287,8 +287,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
template<typename OtherScalarType>
inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
{
m_min = other.min().template cast<Scalar>();
m_min = (other.min)().template cast<Scalar>();
m_max = other.max().template cast<Scalar>();
m_max = (other.max)().template cast<Scalar>();
}

/** \returns \c true if \c *this is approximately equal to \a other, within the precision

@ -182,7 +182,7 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
}
else
{
m_angle = Scalar(2)*acos(min(max(Scalar(-1),q.w()),Scalar(1)));
m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
m_axis = q.vec() / internal::sqrt(n2);
}
return *this;

@ -533,7 +533,7 @@ template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
const Index smalldim = std::min(m_lu.rows(), m_lu.cols());
const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
// LU
MatrixType res(m_lu.rows(),m_lu.cols());
// FIXME the .toDenseMatrix() should not be needed...

@ -695,7 +695,7 @@ struct solve_retval<FullPivLU<_MatrixType>, Rhs>
const Index rows = dec().rows(), cols = dec().cols(),
nonzero_pivots = dec().nonzeroPivots();
eigen_assert(rhs().rows() == rows);
const Index smalldim = std::min(rows, cols);
const Index smalldim = (std::min)(rows, cols);

if(nonzero_pivots == 0)
{
@ -253,7 +253,7 @@ struct partial_lu_impl
{
const Index rows = lu.rows();
const Index cols = lu.cols();
const Index size = std::min(rows,cols);
const Index size = (std::min)(rows,cols);
nb_transpositions = 0;
int first_zero_pivot = -1;
for(Index k = 0; k < size; ++k)

@ -313,7 +313,7 @@ struct partial_lu_impl
MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
MatrixType lu(lu1,0,0,rows,cols);

const Index size = std::min(rows,cols);
const Index size = (std::min)(rows,cols);

// if the matrix is too small, no blocking:
if(size<=16)

@ -327,14 +327,14 @@ struct partial_lu_impl
{
blockSize = size/8;
blockSize = (blockSize/16)*16;
blockSize = std::min(std::max(blockSize,Index(8)), maxBlockSize);
blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);
}

nb_transpositions = 0;
int first_zero_pivot = -1;
for(Index k = 0; k < size; k+=blockSize)
{
Index bs = std::min(size-k,blockSize); // actual size of the block
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index trows = rows - k - bs; // trailing rows
Index tsize = size - k - bs; // trailing size
@ -93,7 +93,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
ColPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_colsPermutation(cols),
m_colsTranspositions(cols),
m_temp(cols),

@ -103,7 +103,7 @@ template<typename _MatrixType> class ColPivHouseholderQR

ColPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(),matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_colsPermutation(matrix.cols()),
m_colsTranspositions(matrix.cols()),
m_temp(matrix.cols()),

@ -93,21 +93,21 @@ template<typename _MatrixType> class FullPivHouseholderQR
*/
FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_rows_transpositions(rows),
m_cols_transpositions(cols),
m_cols_permutation(cols),
m_temp(std::min(rows,cols)),
m_temp((std::min)(rows,cols)),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}

FullPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(), matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
m_rows_transpositions(matrix.rows()),
m_cols_transpositions(matrix.cols()),
m_cols_permutation(matrix.cols()),
m_temp(std::min(matrix.rows(), matrix.cols())),
m_temp((std::min)(matrix.rows(), matrix.cols())),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
@ -379,7 +379,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
{
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

m_qr = matrix;
m_hCoeffs.resize(size);

@ -493,7 +493,7 @@ struct solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff();
RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff();
// FIXME brain dead
const RealScalar m_precision = NumTraits<Scalar>::epsilon() * std::min(rows,cols);
const RealScalar m_precision = NumTraits<Scalar>::epsilon() * (std::min)(rows,cols);
// this internal:: prefix is needed by at least gcc 3.4 and ICC
if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision))
return;

@ -520,7 +520,7 @@ typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<Matr
// and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
Index rows = m_qr.rows();
Index cols = m_qr.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);
MatrixQType res = MatrixQType::Identity(rows, rows);
Matrix<Scalar,1,MatrixType::RowsAtCompileTime> temp(rows);
for (Index k = size-1; k >= 0; k--)

@ -88,13 +88,13 @@ template<typename _MatrixType> class HouseholderQR
*/
HouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs(std::min(rows,cols)),
m_hCoeffs((std::min)(rows,cols)),
m_temp(cols),
m_isInitialized(false) {}

HouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs(std::min(matrix.rows(),matrix.cols())),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
m_temp(matrix.cols()),
m_isInitialized(false)
{
@ -210,7 +210,7 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename
typedef typename MatrixQR::RealScalar RealScalar;
Index rows = mat.rows();
Index cols = mat.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

eigen_assert(hCoeffs.size() == size);

@ -250,7 +250,7 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,

Index rows = mat.rows();
Index cols = mat.cols();
Index size = std::min(rows, cols);
Index size = (std::min)(rows, cols);

typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
TempType tempVector;

@ -260,12 +260,12 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
tempData = tempVector.data();
}

Index blockSize = std::min(maxBlockSize,size);
Index blockSize = (std::min)(maxBlockSize,size);

int k = 0;
for (k = 0; k < size; k += blockSize)
{
Index bs = std::min(size-k,blockSize); // actual size of the block
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index tcols = cols - k - bs; // trailing columns
Index brows = rows-k; // rows of the block

@ -299,7 +299,7 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
const Index rows = dec().rows(), cols = dec().cols();
const Index rank = std::min(rows, cols);
const Index rank = (std::min)(rows, cols);
eigen_assert(rhs().rows() == rows);

typename Rhs::PlainObject c(rhs());

@ -327,7 +327,7 @@ HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType&
{
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = std::min(rows,cols);
Index size = (std::min)(rows,cols);

m_qr = matrix;
m_hCoeffs.resize(size);
@ -569,7 +569,7 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.");
}
m_diagSize = std::min(m_rows, m_cols);
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
: m_computeThinU ? m_diagSize

@ -619,8 +619,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever.
using std::max;
if(max(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
> max(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
> (max)(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
{
finished = false;

@ -689,7 +689,7 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
// A = U S V^*
// So A^{-1} = V S^{-1} U^*

Index diagSize = std::min(dec().rows(), dec().cols());
Index diagSize = (std::min)(dec().rows(), dec().cols());
typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);

Index nonzeroSingVals = dec().nonzeroSingularValues();
@ -97,7 +97,7 @@ class AmbiVector
void reallocateSparse()
{
Index copyElements = m_allocatedElements;
m_allocatedElements = std::min(Index(m_allocatedElements*1.5),m_size);
m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
Scalar* newBuffer = new Scalar[allocSize];

@ -216,7 +216,7 @@ class CompressedStorage
{
Scalar* newValues = new Scalar[size];
Index* newIndices = new Index[size];
size_t copySize = std::min(size, m_size);
size_t copySize = (std::min)(size, m_size);
// copy
memcpy(newValues, m_values, copySize * sizeof(Scalar));
memcpy(newIndices, m_indices, copySize * sizeof(Index));

@ -141,7 +141,7 @@ class DynamicSparseMatrix
{
if (outerSize()>0)
{
Index reserveSizePerVector = std::max(reserveSize/outerSize(),Index(4));
Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
for (Index j=0; j<outerSize(); ++j)
{
m_data[j].reserve(reserveSizePerVector);

@ -35,7 +35,7 @@
// const typename internal::nested<Derived,2>::type nested(derived());
// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
// return (nested - otherNested).cwise().abs2().sum()
// <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
// }

#endif // EIGEN_SPARSE_FUZZY_H
@ -257,7 +257,7 @@ class SparseMatrix
// furthermore we bound the realloc ratio to:
// 1) reduce multiple minor realloc when the matrix is almost filled
// 2) avoid to allocate too much memory when the matrix is almost empty
reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f);
reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
}
}
m_data.resize(m_data.size()+1,reallocRatio);

@ -223,7 +223,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
// thanks to shallow copies, we always eval to a tempary
Derived temp(other.rows(), other.cols());

temp.reserve(std::max(this->rows(),this->cols())*2);
temp.reserve((std::max)(this->rows(),this->cols())*2);
for (Index j=0; j<outerSize; ++j)
{
temp.startVec(j);

@ -253,7 +253,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
// eval without temporary
derived().resize(other.rows(), other.cols());
derived().setZero();
derived().reserve(std::max(this->rows(),this->cols())*2);
derived().reserve((std::max)(this->rows(),this->cols())*2);
for (Index j=0; j<outerSize; ++j)
{
derived().startVec(j);
@ -383,7 +383,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
continue;

Index ip = perm ? perm[i] : i;
count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++;
count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest._outerIndexPtr()[0] = 0;

@ -403,8 +403,8 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
continue;

Index ip = perm? perm[i] : i;
Index k = count[DstUpLo==Lower ? std::min(ip,jp) : std::max(ip,jp)]++;
Index k = count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest._innerIndexPtr()[k] = DstUpLo==Lower ? std::max(ip,jp) : std::min(ip,jp);
dest._innerIndexPtr()[k] = DstUpLo==Lower ? (std::max)(ip,jp) : (std::min)(ip,jp);

if((DstUpLo==Lower && ip<jp) || (DstUpLo==Upper && ip>jp))
dest._valuePtr()[k] = conj(it.value());
@ -45,7 +45,7 @@ static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res
// estimate the number of non zero entries
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);

// int t200 = rows/(log2(200)*1.39);
// int t = (rows*100)/139;

@ -131,7 +131,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
// estimate the number of non zero entries
float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);

// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)

@ -143,7 +143,7 @@ static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
for (Index j=0; j<cols; ++j)
{
// let's do a more accurate determination of the nnz ratio for the current column j of res
//float ratioColRes = std::min(ratioLhs * rhs.innerNonZeros(j), 1.f);
//float ratioColRes = (std::min)(ratioLhs * rhs.innerNonZeros(j), 1.f);
// FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
float ratioColRes = ratioRes;
tempVector.init(ratioColRes);
@ -65,7 +65,7 @@ template<typename MatrixType> void adjoint(const MatrixType& m)
// check basic properties of dot, norm, norm2
typedef typename NumTraits<Scalar>::Real RealScalar;

RealScalar ref = NumTraits<Scalar>::IsInteger ? 0 : std::max((s1 * v1 + s2 * v2).norm(),v3.norm());
RealScalar ref = NumTraits<Scalar>::IsInteger ? 0 : (std::max)((s1 * v1 + s2 * v2).norm(),v3.norm());
VERIFY(test_isApproxWithRef((s1 * v1 + s2 * v2).dot(v3), internal::conj(s1) * v1.dot(v3) + internal::conj(s2) * v2.dot(v3), ref));
VERIFY(test_isApproxWithRef(v3.dot(s1 * v1 + s2 * v2), s1*v3.dot(v1)+s2*v3.dot(v2), ref));
VERIFY_IS_APPROX(internal::conj(v1.dot(v2)), v2.dot(v1));

@ -76,7 +76,7 @@ template<typename MatrixType> void adjoint(const MatrixType& m)

// check compatibility of dot and adjoint

ref = NumTraits<Scalar>::IsInteger ? 0 : std::max(std::max(v1.norm(),v2.norm()),std::max((square * v2).norm(),(square.adjoint() * v1).norm()));
ref = NumTraits<Scalar>::IsInteger ? 0 : (std::max)((std::max)(v1.norm(),v2.norm()),(std::max)((square * v2).norm(),(square.adjoint() * v1).norm()));
VERIFY(test_isApproxWithRef(v1.dot(square * v2), (square.adjoint() * v1).dot(v2), ref));

// like in testBasicStuff, test operator() to check const-qualification
@ -61,7 +61,7 @@ template<typename MatrixType> void bandmatrix(const MatrixType& _m)
m.col(i).setConstant(static_cast<RealScalar>(i+1));
dm1.col(i).setConstant(static_cast<RealScalar>(i+1));
}
Index d = std::min(rows,cols);
Index d = (std::min)(rows,cols);
Index a = std::max<Index>(0,cols-d-supers);
Index b = std::max<Index>(0,rows-d-subs);
if(a>0) dm1.block(0,d+supers,rows,a).setZero();
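The two std::max<Index>(0, ...) lines in this hunk are deliberately left untouched: with an explicit template argument the token after max is '<' rather than '(', so a function-like max macro is never considered. A tiny standalone sketch of that distinction (not Eigen code):

#include <algorithm>

#define max(a, b) (((a) > (b)) ? (a) : (b))   // stand-in for the windows.h macro

long clampNonNegative(long v)
{
  // return std::max(0L, v);     // the macro would expand here and break the qualified call
  return std::max<long>(0, v);   // fine as written: 'max' is followed by '<', not '(',
                                 // so no extra parentheses are needed
}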
@ -28,6 +28,14 @@
#include "main.h"
#include <functional>

#ifdef min
#undef min
#endif

#ifdef max
#undef max
#endif

using namespace std;

template<typename Scalar> struct AddIfNull {
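The added block above drops any min/max macros that are still defined when this test is compiled, so the rest of the file can use those names without the parentheses. A generic sketch of the effect, outside the test suite:

// Generic sketch of the same guard: make sure no function-like min/max macro
// survives past this point in the translation unit.
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif

#include <algorithm>

int smallest(int a, int b)
{
  return std::min(a, b);   // unprotected calls are safe again from here on in this file
}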
@ -23,6 +23,9 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#define min(A,B) please_protect_your_min_with_parentheses
#define max(A,B) please_protect_your_max_with_parentheses

#include <cstdlib>
#include <cerrno>
#include <ctime>
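The test harness also works in the opposite direction: it defines min and max as macros that expand to an undeclared identifier, so any unprotected call that slips into Eigen or the tests fails to compile on every platform, not just on Windows. A simplified sketch of the idea (the real harness places these in its common test header):

#include <algorithm>   // standard headers first in this sketch, then poison the names

#define min(A, B) please_protect_your_min_with_parentheses
#define max(A, B) please_protect_your_max_with_parentheses

int good(int a, int b) { return (std::min)(a, b); }   // compiles: the macro never expands
// int bad(int a, int b) { return std::min(a, b); }   // would expand to the undeclared
//                                                    // identifier and fail to compile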
@ -429,7 +432,7 @@ void createRandomPIMatrixOfRank(typename MatrixType::Index desired_rank, typenam
MatrixBType b = MatrixBType::Random(cols,cols);

// set the diagonal such that only desired_rank non-zero entries reamain
const Index diag_size = std::min(d.rows(),d.cols());
const Index diag_size = (std::min)(d.rows(),d.cols());
if(diag_size != desired_rank)
d.diagonal().segment(desired_rank, diag_size-desired_rank) = VectorType::Zero(diag_size-desired_rank);

@ -148,7 +148,7 @@ int main()

inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
{
return mpfr::abs(a - b) <= mpfr::min(mpfr::abs(a), mpfr::abs(b)) * prec;
return mpfr::abs(a - b) <= (mpfr::min)(mpfr::abs(a), mpfr::abs(b)) * prec;
}

inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
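As the mpfr::min call shows, the parentheses work for any qualified name, not only std::. A small sketch with a hypothetical user namespace, assuming the macro is only introduced after the library header:

namespace num {                 // hypothetical library with its own min overload
  struct Wide { double v; };
  inline Wide min(const Wide& a, const Wide& b) { return a.v < b.v ? a : b; }
}

#define min(a, b) (((a) < (b)) ? (a) : (b))   // stand-in for windows.h, included afterwards

num::Wide smaller(const num::Wide& a, const num::Wide& b)
{
  // num::min(a, b) would hand the call to the macro; parenthesizing the qualified name
  // leaves ordinary overload resolution to find num::min.
  return (num::min)(a, b);
}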
@ -178,7 +178,7 @@ typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer
todo.pop();

for(; oBegin != oEnd; ++oBegin) //go through child objects
minimum = std::min(minimum, minimizer.minimumOnObject(*oBegin));
minimum = (std::min)(minimum, minimizer.minimumOnObject(*oBegin));

for(; vBegin != vEnd; ++vBegin) { //go through child volumes
Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin));

@ -274,12 +274,12 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini

for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree
for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
minimum = std::min(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
}

for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
Helper2 helper(*oBegin1, minimizer);
minimum = std::min(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
}
}

@ -288,7 +288,7 @@ typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Mini

for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
Helper1 helper(*oCur2, minimizer);
minimum = std::min(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
}

for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
|
@ -172,7 +172,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
|
|||||||
|
|
||||||
if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
|
if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
|
||||||
if (transition || iter.first()) gamma = 0.0;
|
if (transition || iter.first()) gamma = 0.0;
|
||||||
else gamma = std::max(0.0, (rho - old_z.dot(z)) / rho_1);
|
else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
|
||||||
p = z + gamma*p;
|
p = z + gamma*p;
|
||||||
|
|
||||||
++iter;
|
++iter;
|
||||||
@ -185,7 +185,7 @@ void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
|
|||||||
{
|
{
|
||||||
Scalar bb = C.row(i).dot(p) - f[i];
|
Scalar bb = C.row(i).dot(p) - f[i];
|
||||||
if (bb > 0.0)
|
if (bb > 0.0)
|
||||||
lambda = std::min(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
|
lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
x += lambda * p;
|
x += lambda * p;
|
||||||
|
@ -141,7 +141,7 @@ class IterationController
|
|||||||
bool converged(double nr)
|
bool converged(double nr)
|
||||||
{
|
{
|
||||||
m_res = internal::abs(nr);
|
m_res = internal::abs(nr);
|
||||||
m_resminreach = std::min(m_resminreach, m_res);
|
m_resminreach = (std::min)(m_resminreach, m_res);
|
||||||
return converged();
|
return converged();
|
||||||
}
|
}
|
||||||
template<typename VectorType> bool converged(const VectorType &v)
|
template<typename VectorType> bool converged(const VectorType &v)
|
||||||
|
@ -127,10 +127,10 @@ bool MatrixFunctionAtomic<MatrixType>::taylorConverged(Index s, const MatrixType
|
|||||||
for (Index r = 0; r < n; r++) {
|
for (Index r = 0; r < n; r++) {
|
||||||
RealScalar mx = 0;
|
RealScalar mx = 0;
|
||||||
for (Index i = 0; i < n; i++)
|
for (Index i = 0; i < n; i++)
|
||||||
mx = std::max(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
|
mx = (std::max)(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
|
||||||
if (r != 0)
|
if (r != 0)
|
||||||
rfactorial *= RealScalar(r);
|
rfactorial *= RealScalar(r);
|
||||||
delta = std::max(delta, mx / rfactorial);
|
delta = (std::max)(delta, mx / rfactorial);
|
||||||
}
|
}
|
||||||
const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
|
const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
|
||||||
if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)
|
if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)
|
||||||
|
@ -255,7 +255,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
|
|||||||
|
|
||||||
/* on the first iteration, adjust the initial step bound. */
|
/* on the first iteration, adjust the initial step bound. */
|
||||||
if (iter == 1)
|
if (iter == 1)
|
||||||
delta = std::min(delta,pnorm);
|
delta = (std::min)(delta,pnorm);
|
||||||
|
|
||||||
/* evaluate the function at x + p and calculate its norm. */
|
/* evaluate the function at x + p and calculate its norm. */
|
||||||
if ( functor(wa2, wa4) < 0)
|
if ( functor(wa2, wa4) < 0)
|
||||||
@ -289,7 +289,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
|
|||||||
ncfail = 0;
|
ncfail = 0;
|
||||||
++ncsuc;
|
++ncsuc;
|
||||||
if (ratio >= Scalar(.5) || ncsuc > 1)
|
if (ratio >= Scalar(.5) || ncsuc > 1)
|
||||||
delta = std::max(delta, pnorm / Scalar(.5));
|
delta = (std::max)(delta, pnorm / Scalar(.5));
|
||||||
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
|
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
|
||||||
delta = pnorm / Scalar(.5);
|
delta = pnorm / Scalar(.5);
|
||||||
}
|
}
|
||||||
@ -322,7 +322,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
|
|||||||
/* tests for termination and stringent tolerances. */
|
/* tests for termination and stringent tolerances. */
|
||||||
if (nfev >= parameters.maxfev)
|
if (nfev >= parameters.maxfev)
|
||||||
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
|
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
|
||||||
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
|
if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
|
||||||
return HybridNonLinearSolverSpace::TolTooSmall;
|
return HybridNonLinearSolverSpace::TolTooSmall;
|
||||||
if (nslow2 == 5)
|
if (nslow2 == 5)
|
||||||
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
|
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
|
||||||
@ -449,7 +449,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
|
|||||||
/* calculate the jacobian matrix. */
|
/* calculate the jacobian matrix. */
|
||||||
if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
|
if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
|
||||||
return HybridNonLinearSolverSpace::UserAsked;
|
return HybridNonLinearSolverSpace::UserAsked;
|
||||||
nfev += std::min(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
|
nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
|
||||||
|
|
||||||
wa2 = fjac.colwise().blueNorm();
|
wa2 = fjac.colwise().blueNorm();
|
||||||
|
|
||||||
@ -496,7 +496,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
|
|||||||
|
|
||||||
/* on the first iteration, adjust the initial step bound. */
|
/* on the first iteration, adjust the initial step bound. */
|
||||||
if (iter == 1)
|
if (iter == 1)
|
||||||
delta = std::min(delta,pnorm);
|
delta = (std::min)(delta,pnorm);
|
||||||
|
|
||||||
/* evaluate the function at x + p and calculate its norm. */
|
/* evaluate the function at x + p and calculate its norm. */
|
||||||
if ( functor(wa2, wa4) < 0)
|
if ( functor(wa2, wa4) < 0)
|
||||||
@ -530,7 +530,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
|
|||||||
ncfail = 0;
|
ncfail = 0;
|
||||||
++ncsuc;
|
++ncsuc;
|
||||||
if (ratio >= Scalar(.5) || ncsuc > 1)
|
if (ratio >= Scalar(.5) || ncsuc > 1)
|
||||||
delta = std::max(delta, pnorm / Scalar(.5));
|
delta = (std::max)(delta, pnorm / Scalar(.5));
|
||||||
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
|
if (internal::abs(ratio - 1.) <= Scalar(.1)) {
|
||||||
delta = pnorm / Scalar(.5);
|
delta = pnorm / Scalar(.5);
|
||||||
}
|
}
|
||||||
@ -563,7 +563,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
|
|||||||
/* tests for termination and stringent tolerances. */
|
/* tests for termination and stringent tolerances. */
|
||||||
if (nfev >= parameters.maxfev)
|
if (nfev >= parameters.maxfev)
|
||||||
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
|
return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
|
||||||
if (Scalar(.1) * std::max(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
|
if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
|
||||||
return HybridNonLinearSolverSpace::TolTooSmall;
|
return HybridNonLinearSolverSpace::TolTooSmall;
|
||||||
if (nslow2 == 5)
|
if (nslow2 == 5)
|
||||||
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
|
return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
|
||||||
|
@ -263,7 +263,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
|
|||||||
if (fnorm != 0.)
|
if (fnorm != 0.)
|
||||||
for (Index j = 0; j < n; ++j)
|
for (Index j = 0; j < n; ++j)
|
||||||
if (wa2[permutation.indices()[j]] != 0.)
|
if (wa2[permutation.indices()[j]] != 0.)
|
||||||
gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
|
gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
|
||||||
|
|
||||||
/* test for convergence of the gradient norm. */
|
/* test for convergence of the gradient norm. */
|
||||||
if (gnorm <= parameters.gtol)
|
if (gnorm <= parameters.gtol)
|
||||||
@ -285,7 +285,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
|
|||||||
|
|
||||||
/* on the first iteration, adjust the initial step bound. */
|
/* on the first iteration, adjust the initial step bound. */
|
||||||
if (iter == 1)
|
if (iter == 1)
|
||||||
delta = std::min(delta,pnorm);
|
delta = (std::min)(delta,pnorm);
|
||||||
|
|
||||||
/* evaluate the function at x + p and calculate its norm. */
|
/* evaluate the function at x + p and calculate its norm. */
|
||||||
if ( functor(wa2, wa4) < 0)
|
if ( functor(wa2, wa4) < 0)
|
||||||
@ -321,7 +321,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
|
|||||||
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
|
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
|
||||||
temp = Scalar(.1);
|
temp = Scalar(.1);
|
||||||
/* Computing MIN */
|
/* Computing MIN */
|
||||||
delta = temp * std::min(delta, pnorm / Scalar(.1));
|
delta = temp * (std::min)(delta, pnorm / Scalar(.1));
|
||||||
par /= temp;
|
par /= temp;
|
||||||
} else if (!(par != 0. && ratio < Scalar(.75))) {
|
} else if (!(par != 0. && ratio < Scalar(.75))) {
|
||||||
delta = pnorm / Scalar(.5);
|
delta = pnorm / Scalar(.5);
|
||||||
@ -510,7 +510,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
|
|||||||
if (fnorm != 0.)
|
if (fnorm != 0.)
|
||||||
for (j = 0; j < n; ++j)
|
for (j = 0; j < n; ++j)
|
||||||
if (wa2[permutation.indices()[j]] != 0.)
|
if (wa2[permutation.indices()[j]] != 0.)
|
||||||
gnorm = std::max(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
|
gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
|
||||||
|
|
||||||
/* test for convergence of the gradient norm. */
|
/* test for convergence of the gradient norm. */
|
||||||
if (gnorm <= parameters.gtol)
|
if (gnorm <= parameters.gtol)
|
||||||
@ -532,7 +532,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
|
|||||||
|
|
||||||
/* on the first iteration, adjust the initial step bound. */
|
/* on the first iteration, adjust the initial step bound. */
|
||||||
if (iter == 1)
|
if (iter == 1)
|
||||||
delta = std::min(delta,pnorm);
|
delta = (std::min)(delta,pnorm);
|
||||||
|
|
||||||
/* evaluate the function at x + p and calculate its norm. */
|
/* evaluate the function at x + p and calculate its norm. */
|
||||||
if ( functor(wa2, wa4) < 0)
|
if ( functor(wa2, wa4) < 0)
|
||||||
@ -568,7 +568,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
|
|||||||
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
|
if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
|
||||||
temp = Scalar(.1);
|
temp = Scalar(.1);
|
||||||
/* Computing MIN */
|
/* Computing MIN */
|
||||||
delta = temp * std::min(delta, pnorm / Scalar(.1));
|
delta = temp * (std::min)(delta, pnorm / Scalar(.1));
|
||||||
par /= temp;
|
par /= temp;
|
||||||
} else if (!(par != 0. && ratio < Scalar(.75))) {
|
} else if (!(par != 0. && ratio < Scalar(.75))) {
|
||||||
delta = pnorm / Scalar(.5);
|
delta = pnorm / Scalar(.5);
|
||||||
|
@ -93,7 +93,7 @@ algo_end:
|
|||||||
|
|
||||||
/* form appropriate convex combination of the gauss-newton */
|
/* form appropriate convex combination of the gauss-newton */
|
||||||
/* direction and the scaled gradient direction. */
|
/* direction and the scaled gradient direction. */
|
||||||
temp = (1.-alpha) * std::min(sgnorm,delta);
|
temp = (1.-alpha) * (std::min)(sgnorm,delta);
|
||||||
x = temp * wa1 + alpha * x;
|
x = temp * wa1 + alpha * x;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -26,7 +26,7 @@ DenseIndex fdjac1(
|
|||||||
Matrix< Scalar, Dynamic, 1 > wa1(n);
|
Matrix< Scalar, Dynamic, 1 > wa1(n);
|
||||||
Matrix< Scalar, Dynamic, 1 > wa2(n);
|
Matrix< Scalar, Dynamic, 1 > wa2(n);
|
||||||
|
|
||||||
eps = sqrt(std::max(epsfcn,epsmch));
|
eps = sqrt((std::max)(epsfcn,epsmch));
|
||||||
msum = ml + mu + 1;
|
msum = ml + mu + 1;
|
||||||
if (msum >= n) {
|
if (msum >= n) {
|
||||||
/* computation of dense approximate jacobian. */
|
/* computation of dense approximate jacobian. */
|
||||||
@ -61,7 +61,7 @@ DenseIndex fdjac1(
|
|||||||
if (h == 0.) h = eps;
|
if (h == 0.) h = eps;
|
||||||
fjac.col(j).setZero();
|
fjac.col(j).setZero();
|
||||||
start = std::max<Index>(0,j-mu);
|
start = std::max<Index>(0,j-mu);
|
||||||
length = std::min(n-1, j+ml) - start + 1;
|
length = (std::min)(n-1, j+ml) - start + 1;
|
||||||
fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
|
fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -91,12 +91,12 @@ void lmpar(
|
|||||||
gnorm = wa1.stableNorm();
|
gnorm = wa1.stableNorm();
|
||||||
paru = gnorm / delta;
|
paru = gnorm / delta;
|
||||||
if (paru == 0.)
|
if (paru == 0.)
|
||||||
paru = dwarf / std::min(delta,Scalar(0.1));
|
paru = dwarf / (std::min)(delta,Scalar(0.1));
|
||||||
|
|
||||||
/* if the input par lies outside of the interval (parl,paru), */
|
/* if the input par lies outside of the interval (parl,paru), */
|
||||||
/* set par to the closer endpoint. */
|
/* set par to the closer endpoint. */
|
||||||
par = std::max(par,parl);
|
par = (std::max)(par,parl);
|
||||||
par = std::min(par,paru);
|
par = (std::min)(par,paru);
|
||||||
if (par == 0.)
|
if (par == 0.)
|
||||||
par = gnorm / dxnorm;
|
par = gnorm / dxnorm;
|
||||||
|
|
||||||
@ -106,7 +106,7 @@ void lmpar(
|
|||||||
|
|
||||||
/* evaluate the function at the current value of par. */
|
/* evaluate the function at the current value of par. */
|
||||||
if (par == 0.)
|
if (par == 0.)
|
||||||
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
||||||
wa1 = sqrt(par)* diag;
|
wa1 = sqrt(par)* diag;
|
||||||
|
|
||||||
Matrix< Scalar, Dynamic, 1 > sdiag(n);
|
Matrix< Scalar, Dynamic, 1 > sdiag(n);
|
||||||
@ -139,13 +139,13 @@ void lmpar(
|
|||||||
|
|
||||||
/* depending on the sign of the function, update parl or paru. */
|
/* depending on the sign of the function, update parl or paru. */
|
||||||
if (fp > 0.)
|
if (fp > 0.)
|
||||||
parl = std::max(parl,par);
|
parl = (std::max)(parl,par);
|
||||||
if (fp < 0.)
|
if (fp < 0.)
|
||||||
paru = std::min(paru,par);
|
paru = (std::min)(paru,par);
|
||||||
|
|
||||||
/* compute an improved estimate for par. */
|
/* compute an improved estimate for par. */
|
||||||
/* Computing MAX */
|
/* Computing MAX */
|
||||||
par = std::max(parl,par+parc);
|
par = (std::max)(parl,par+parc);
|
||||||
|
|
||||||
/* end of an iteration. */
|
/* end of an iteration. */
|
||||||
}
|
}
|
||||||
@ -227,12 +227,12 @@ void lmpar2(
|
|||||||
gnorm = wa1.stableNorm();
|
gnorm = wa1.stableNorm();
|
||||||
paru = gnorm / delta;
|
paru = gnorm / delta;
|
||||||
if (paru == 0.)
|
if (paru == 0.)
|
||||||
paru = dwarf / std::min(delta,Scalar(0.1));
|
paru = dwarf / (std::min)(delta,Scalar(0.1));
|
||||||
|
|
||||||
/* if the input par lies outside of the interval (parl,paru), */
|
/* if the input par lies outside of the interval (parl,paru), */
|
||||||
/* set par to the closer endpoint. */
|
/* set par to the closer endpoint. */
|
||||||
par = std::max(par,parl);
|
par = (std::max)(par,parl);
|
||||||
par = std::min(par,paru);
|
par = (std::min)(par,paru);
|
||||||
if (par == 0.)
|
if (par == 0.)
|
||||||
par = gnorm / dxnorm;
|
par = gnorm / dxnorm;
|
||||||
|
|
||||||
@ -243,7 +243,7 @@ void lmpar2(
|
|||||||
|
|
||||||
/* evaluate the function at the current value of par. */
|
/* evaluate the function at the current value of par. */
|
||||||
if (par == 0.)
|
if (par == 0.)
|
||||||
par = std::max(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
|
||||||
wa1 = sqrt(par)* diag;
|
wa1 = sqrt(par)* diag;
|
||||||
|
|
||||||
Matrix< Scalar, Dynamic, 1 > sdiag(n);
|
Matrix< Scalar, Dynamic, 1 > sdiag(n);
|
||||||
@ -275,12 +275,12 @@ void lmpar2(
|
|||||||
|
|
||||||
/* depending on the sign of the function, update parl or paru. */
|
/* depending on the sign of the function, update parl or paru. */
|
||||||
if (fp > 0.)
|
if (fp > 0.)
|
||||||
parl = std::max(parl,par);
|
parl = (std::max)(parl,par);
|
||||||
if (fp < 0.)
|
if (fp < 0.)
|
||||||
paru = std::min(paru,par);
|
paru = (std::min)(paru,par);
|
||||||
|
|
||||||
/* compute an improved estimate for par. */
|
/* compute an improved estimate for par. */
|
||||||
par = std::max(parl,par+parc);
|
par = (std::max)(parl,par+parc);
|
||||||
}
|
}
|
||||||
if (iter == 0)
|
if (iter == 0)
|
||||||
par = 0.;
|
par = 0.;
|
||||||
|
@ -80,7 +80,7 @@ public:
 Scalar h;
 int nfev=0;
 const typename InputType::Index n = _x.size();
-const Scalar eps = internal::sqrt((std::max(epsfcn,NumTraits<Scalar>::epsilon() )));
+const Scalar eps = internal::sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() )));
 ValueType val1, val2;
 InputType x = _x;
 // TODO : we should do this only if the size is not already known

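The protected line above picks the finite-difference step as the square root of the larger of the user-supplied epsfcn and the machine epsilon. A simplified double-precision sketch of that choice (illustrative names, not the NumericalDiff code):

    #include <algorithm>
    #include <cmath>
    #include <limits>

    double fd_step(double epsfcn)
    {
        // same formula as above, with max parenthesized so a windows.h max macro cannot expand
        return std::sqrt((std::max)(epsfcn, std::numeric_limits<double>::epsilon()));
    }
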
@ -221,11 +221,11 @@ protected:
 Index* upperProfile = new Index[upperProfileSize];
 Index* lowerProfile = new Index[lowerProfileSize];

-Index copyDiagSize = std::min(diagSize, m_diagSize);
+Index copyDiagSize = (std::min)(diagSize, m_diagSize);
-Index copyUpperSize = std::min(upperSize, m_upperSize);
+Index copyUpperSize = (std::min)(upperSize, m_upperSize);
-Index copyLowerSize = std::min(lowerSize, m_lowerSize);
+Index copyLowerSize = (std::min)(lowerSize, m_lowerSize);
-Index copyUpperProfileSize = std::min(upperProfileSize, m_upperProfileSize);
+Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize);
-Index copyLowerProfileSize = std::min(lowerProfileSize, m_lowerProfileSize);
+Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize);

 // copy
 memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar));

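The parenthesized calls keep Eigen compiling even when the including project has pulled in windows.h with its default macros. When a project controls that include itself, defining NOMINMAX first is an alternative that removes the macros entirely; a project-side sketch (not part of Eigen):

    #ifndef NOMINMAX
    #  define NOMINMAX      // ask windows.h not to define the min/max macros
    #endif
    #include <windows.h>
    #include <algorithm>

    int larger(int a, int b) { return std::max(a, b); }   // plain std::max is safe once NOMINMAX is set
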
@ -295,10 +295,10 @@ void SparseLU<MatrixType,UmfPack>::extractData() const
 umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());

 // allocate data
-m_l.resize(rows,std::min(rows,cols));
+m_l.resize(rows,(std::min)(rows,cols));
 m_l.resizeNonZeros(lnz);

-m_u.resize(std::min(rows,cols),cols);
+m_u.resize((std::min)(rows,cols),cols);
 m_u.resizeNonZeros(unz);

 m_p.resize(rows);

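On the two resize calls above: for an m-by-n factorization the L factor is m-by-min(m,n) and the U factor is min(m,n)-by-n, which is why both dimensions go through the protected (std::min). A tiny sketch of just that shape computation (illustrative only, not the Eigen code):

    #include <algorithm>

    void lu_factor_shapes(int rows, int cols, int& l_cols, int& u_rows)
    {
        // L is rows x min(rows, cols); U is min(rows, cols) x cols
        l_cols = (std::min)(rows, cols);
        u_rows = (std::min)(rows, cols);
    }
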
@ -143,10 +143,10 @@ struct TreeTest
 VectorType pt = VectorType::Random();
 BallPointStuff<Dim> i1(pt), i2(pt);

-double m1 = std::numeric_limits<double>::max(), m2 = m1;
+double m1 = (std::numeric_limits<double>::max)(), m2 = m1;

 for(int i = 0; i < (int)b.size(); ++i)
-m1 = std::min(m1, i1.minimumOnObject(b[i]));
+m1 = (std::min)(m1, i1.minimumOnObject(b[i]));

 m2 = BVMinimize(tree, i2);

@ -194,11 +194,11 @@ struct TreeTest

 BallPointStuff<Dim> i1, i2;

-double m1 = std::numeric_limits<double>::max(), m2 = m1;
+double m1 = (std::numeric_limits<double>::max)(), m2 = m1;

 for(int i = 0; i < (int)b.size(); ++i)
 for(int j = 0; j < (int)v.size(); ++j)
-m1 = std::min(m1, i1.minimumOnObjectObject(b[i], v[j]));
+m1 = (std::min)(m1, i1.minimumOnObjectObject(b[i], v[j]));

 m2 = BVMinimize(tree, vTree, i2);

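In the test above the macro victim is std::numeric_limits<double>::max, a static member function rather than an algorithm: the parentheses wrap the whole qualified name and the empty argument list follows. A minimal sketch (not the test code):

    #include <limits>

    double largest_double()
    {
        // 'max' is followed by ')' here, so a function-like max macro cannot expand
        return (std::numeric_limits<double>::max)();
    }
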
@ -70,7 +70,7 @@ complex<long double> promote(long double x) { return complex<long double>( x);
 {
 long double totalpower=0;
 long double difpower=0;
-size_t n = min( buf1.size(),buf2.size() );
+size_t n = (min)( buf1.size(),buf2.size() );
 for (size_t k=0;k<n;++k) {
 totalpower += (norm( buf1[k] ) + norm(buf2[k]) )/2.;
 difpower += norm(buf1[k] - buf2[k]);
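Here the call is unqualified, presumably because the FFT test brings min into scope with a using-declaration or using-directive, so the parentheses wrap the bare name. A sketch under that assumption:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    using std::min;   // assumption: stands in for however the test makes 'min' visible unqualified

    std::size_t common_length(const std::vector<float>& a, const std::vector<float>& b)
    {
        return (min)( a.size(), b.size() );   // still macro-proof with the unqualified name
    }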