diff --git a/CTestConfig.cmake b/CTestConfig.cmake index 263cda480..0ccd5a1ad 100644 --- a/CTestConfig.cmake +++ b/CTestConfig.cmake @@ -5,9 +5,9 @@ ## ENABLE_TESTING() ## INCLUDE(CTest) set(CTEST_PROJECT_NAME "Eigen") -set(CTEST_NIGHTLY_START_TIME "05:00:00 UTC") +set(CTEST_NIGHTLY_START_TIME "06:00:00 UTC") set(CTEST_DROP_METHOD "http") -set(CTEST_DROP_SITE "my.cdash.org") -set(CTEST_DROP_LOCATION "/submit.php?project=Eigen") +set(CTEST_DROP_SITE "eigen.tuxfamily.org") +set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen") set(CTEST_DROP_SITE_CDASH TRUE) diff --git a/Eigen/Core b/Eigen/Core index 15dae4af0..463529123 100644 --- a/Eigen/Core +++ b/Eigen/Core @@ -161,6 +161,7 @@ namespace Eigen { #include "src/Core/CwiseNullaryOp.h" #include "src/Core/CwiseUnaryView.h" #include "src/Core/Dot.h" +#include "src/Core/StableNorm.h" #include "src/Core/MapBase.h" #include "src/Core/Map.h" #include "src/Core/Block.h" diff --git a/Eigen/QR b/Eigen/QR index 97907d1e5..5f36d0987 100644 --- a/Eigen/QR +++ b/Eigen/QR @@ -41,7 +41,7 @@ namespace Eigen { // declare all classes for a given matrix type #define EIGEN_QR_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \ - PREFIX template class QR; \ + PREFIX template class HouseholderQR; \ PREFIX template class Tridiagonalization; \ PREFIX template class HessenbergDecomposition; \ PREFIX template class SelfAdjointEigenSolver diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 18029bab8..200b428af 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -62,7 +62,7 @@ template class LDLT typedef Matrix IntColVectorType; typedef Matrix IntRowVectorType; - /** + /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to @@ -83,7 +83,7 @@ template class LDLT inline TriangularView matrixL(void) const { ei_assert(m_isInitialized && "LDLT is not initialized."); - return m_matrix; + return m_matrix; } /** \returns a vector of integers, whose size is the number of rows of the matrix being decomposed, @@ -97,24 +97,24 @@ template class LDLT } /** \returns the coefficients of the diagonal matrix D */ - inline Diagonal vectorD(void) const - { + inline Diagonal vectorD(void) const + { ei_assert(m_isInitialized && "LDLT is not initialized."); - return m_matrix.diagonal(); + return m_matrix.diagonal(); } /** \returns true if the matrix is positive (semidefinite) */ - inline bool isPositive(void) const - { + inline bool isPositive(void) const + { ei_assert(m_isInitialized && "LDLT is not initialized."); - return m_sign == 1; + return m_sign == 1; } /** \returns true if the matrix is negative (semidefinite) */ - inline bool isNegative(void) const - { + inline bool isNegative(void) const + { ei_assert(m_isInitialized && "LDLT is not initialized."); - return m_sign == -1; + return m_sign == -1; } template diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h index ffe065e8b..cf7730170 100644 --- a/Eigen/src/Core/Block.h +++ b/Eigen/src/Core/Block.h @@ -33,8 +33,8 @@ * \param MatrixType the type of the object in which we are taking a block * \param BlockRows the number of rows of the block we are taking at compile time (optional) * \param BlockCols the number of columns of the block we are taking at compile time (optional) - * \param _PacketAccess allows to enforce aligned loads and stores if set to ForceAligned. - * The default is AsRequested. This parameter is internaly used by Eigen + * \param _PacketAccess allows to enforce aligned loads and stores if set to \b ForceAligned. 
+ * The default is \b AsRequested. This parameter is internaly used by Eigen * in expressions such as \code mat.block() += other; \endcode and most of * the time this is the only way it is used. * \param _DirectAccessStatus \internal used for partial specialization diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h index c5f2e8505..9e84d72bb 100644 --- a/Eigen/src/Core/Dot.h +++ b/Eigen/src/Core/Dot.h @@ -292,154 +292,6 @@ inline typename NumTraits::Scalar>::Real MatrixBase< return ei_sqrt(squaredNorm()); } -/** \returns the \em l2 norm of \c *this using a numerically more stable - * algorithm. - * - * \sa norm(), dot(), squaredNorm(), blueNorm() - */ -template -inline typename NumTraits::Scalar>::Real -MatrixBase::stableNorm() const -{ - return this->cwise().abs().redux(ei_scalar_hypot_op()); -} - -/** \internal Computes ibeta^iexp by binary expansion of iexp, - * exact if ibeta is the machine base */ -template inline T bexp(int ibeta, int iexp) -{ - T tbeta = T(ibeta); - T res = 1.0; - int n = iexp; - if (n<0) - { - n = - n; - tbeta = 1.0/tbeta; - } - for(;;) - { - if ((n % 2)==0) - res = res * tbeta; - n = n/2; - if (n==0) return res; - tbeta = tbeta*tbeta; - } - return res; -} - -/** \returns the \em l2 norm of \c *this using the Blue's algorithm. - * A Portable Fortran Program to Find the Euclidean Norm of a Vector, - * ACM TOMS, Vol 4, Issue 1, 1978. - * - * \sa norm(), dot(), squaredNorm(), stableNorm() - */ -template -inline typename NumTraits::Scalar>::Real -MatrixBase::blueNorm() const -{ - static int nmax; - static Scalar b1, b2, s1m, s2m, overfl, rbig, relerr; - int n; - Scalar ax, abig, amed, asml; - - if(nmax <= 0) - { - int nbig, ibeta, it, iemin, iemax, iexp; - Scalar abig, eps; - // This program calculates the machine-dependent constants - // bl, b2, slm, s2m, relerr overfl, nmax - // from the "basic" machine-dependent numbers - // nbig, ibeta, it, iemin, iemax, rbig. - // The following define the basic machine-dependent constants. - // For portability, the PORT subprograms "ilmaeh" and "rlmach" - // are used. For any specific computer, each of the assignment - // statements can be replaced - nbig = std::numeric_limits::max(); // largest integer - ibeta = NumTraits::Base; // base for floating-point numbers - it = NumTraits::Mantissa; // number of base-beta digits in mantissa - iemin = std::numeric_limits::min_exponent; // minimum exponent - iemax = std::numeric_limits::max_exponent; // maximum exponent - rbig = std::numeric_limits::max(); // largest floating-point number - - // Check the basic machine-dependent constants. 
- if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5) - || (it<=4 && ibeta <= 3 ) || it<2) - { - ei_assert(false && "the algorithm cannot be guaranteed on this computer"); - } - iexp = -((1-iemin)/2); - b1 = bexp(ibeta, iexp); // lower boundary of midrange - iexp = (iemax + 1 - it)/2; - b2 = bexp(ibeta,iexp); // upper boundary of midrange - - iexp = (2-iemin)/2; - s1m = bexp(ibeta,iexp); // scaling factor for lower range - iexp = - ((iemax+it)/2); - s2m = bexp(ibeta,iexp); // scaling factor for upper range - - overfl = rbig*s2m; // overfow boundary for abig - eps = bexp(ibeta, 1-it); - relerr = ei_sqrt(eps); // tolerance for neglecting asml - abig = 1.0/eps - 1.0; - if (Scalar(nbig)>abig) nmax = abig; // largest safe n - else nmax = nbig; - } - n = size(); - if(n==0) - return 0; - asml = Scalar(0); - amed = Scalar(0); - abig = Scalar(0); - for(int j=0; j b2) abig += ei_abs2(ax*s2m); - else if(ax < b1) asml += ei_abs2(ax*s1m); - else amed += ei_abs2(ax); - } - if(abig > Scalar(0)) - { - abig = ei_sqrt(abig); - if(abig > overfl) - { - ei_assert(false && "overflow"); - return rbig; - } - if(amed > Scalar(0)) - { - abig = abig/s2m; - amed = ei_sqrt(amed); - } - else - { - return abig/s2m; - } - - } - else if(asml > Scalar(0)) - { - if (amed > Scalar(0)) - { - abig = ei_sqrt(amed); - amed = ei_sqrt(asml) / s1m; - } - else - { - return ei_sqrt(asml)/s1m; - } - } - else - { - return ei_sqrt(amed); - } - asml = std::min(abig, amed); - abig = std::max(abig, amed); - if(asml <= abig*relerr) - return abig; - else - return abig * ei_sqrt(Scalar(1) + ei_abs2(asml/abig)); -} - /** \returns an expression of the quotient of *this by its own norm. * * \only_for_vectors diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h index 89badb353..a4c9604df 100644 --- a/Eigen/src/Core/Functors.h +++ b/Eigen/src/Core/Functors.h @@ -124,9 +124,6 @@ template struct ei_scalar_hypot_op EIGEN_EMPTY_STRUCT { // typedef typename NumTraits::Real result_type; EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const { -// typedef typename NumTraits::Real RealScalar; -// RealScalar _x = ei_abs(x); -// RealScalar _y = ei_abs(y); Scalar p = std::max(_x, _y); Scalar q = std::min(_x, _y); Scalar qp = q/p; diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h index 721f2d476..e643144ff 100644 --- a/Eigen/src/Core/MapBase.h +++ b/Eigen/src/Core/MapBase.h @@ -173,8 +173,14 @@ template class MapBase using Base::operator=; using Base::operator*=; - using Base::operator+=; - using Base::operator-=; + + template + Derived& operator+=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) + { return Base::operator+=(other); } + + template + Derived& operator-=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) + { return Base::operator-=(other); } template Derived& operator+=(const MatrixBase& other) diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h index d5c508128..f21364c70 100644 --- a/Eigen/src/Core/Matrix.h +++ b/Eigen/src/Core/Matrix.h @@ -571,10 +571,16 @@ class Matrix template EIGEN_STRONG_INLINE Matrix& _set(const MatrixBase& other) { - _resize_to_match(other); - return Base::operator=(other); + _set_selector(other.derived(), typename ei_meta_if<(int(OtherDerived::Flags) & EvalBeforeAssigningBit), ei_meta_true, ei_meta_false>::ret()); + return *this; } + template + EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_true&) { _set_noalias(other.eval()); } + + template + EIGEN_STRONG_INLINE void 
_set_selector(const OtherDerived& other, const ei_meta_false&) { _set_noalias(other); } + /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which * is the case when creating a new matrix) so one can enforce lazy evaluation. * diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index 3df518d1a..ac2b2532f 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -437,6 +437,7 @@ template class MatrixBase RealScalar norm() const; RealScalar stableNorm() const; RealScalar blueNorm() const; + RealScalar hypotNorm() const; const PlainMatrixType normalized() const; void normalize(); diff --git a/Eigen/src/Core/NumTraits.h b/Eigen/src/Core/NumTraits.h index dec07a036..24afe54c5 100644 --- a/Eigen/src/Core/NumTraits.h +++ b/Eigen/src/Core/NumTraits.h @@ -70,9 +70,7 @@ template<> struct NumTraits HasFloatingPoint = 1, ReadCost = 1, AddCost = 1, - MulCost = 1, - Base = 2, - Mantissa = 23 + MulCost = 1 }; }; @@ -85,9 +83,7 @@ template<> struct NumTraits HasFloatingPoint = 1, ReadCost = 1, AddCost = 1, - MulCost = 1, - Base = 2, - Mantissa = 52 + MulCost = 1 }; }; diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index 402f597d2..b46440ec0 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -287,6 +287,18 @@ MatrixBase::operator*(const MatrixBase &other) const return typename ProductReturnType::Type(derived(), other.derived()); } +/** replaces \c *this by \c *this * \a other. + * + * \returns a reference to \c *this + */ +template +template +inline Derived & +MatrixBase::operator*=(const MatrixBase &other) +{ + return derived() = derived() * other.derived(); +} + /*************************************************************************** * Normal product .coeff() implementation (with meta-unrolling) ***************************************************************************/ diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h new file mode 100644 index 000000000..22809633d --- /dev/null +++ b/Eigen/src/Core/StableNorm.h @@ -0,0 +1,194 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#ifndef EIGEN_STABLENORM_H +#define EIGEN_STABLENORM_H + +template +inline void ei_stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale) +{ + Scalar max = bl.cwise().abs().maxCoeff(); + if (max>scale) + { + ssq = ssq * ei_abs2(scale/max); + scale = max; + invScale = Scalar(1)/scale; + } + // TODO if the max is much much smaller than the current scale, + // then we can neglect this sub vector + ssq += (bl*invScale).squaredNorm(); +} + +/** \returns the \em l2 norm of \c *this avoiding underflow and overflow. + * This version use a blockwise two passes algorithm: + * 1 - find the absolute largest coefficient \c s + * 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way + * + * For architecture/scalar types supporting vectorization, this version + * is faster than blueNorm(). Otherwise the blueNorm() is much faster. + * + * \sa norm(), blueNorm(), hypotNorm() + */ +template +inline typename NumTraits::Scalar>::Real +MatrixBase::stableNorm() const +{ + const int blockSize = 4096; + RealScalar scale = 0; + RealScalar invScale; + RealScalar ssq = 0; // sum of square + enum { + Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? ForceAligned : AsRequested + }; + int n = size(); + int bi=0; + if ((int(Flags)&DirectAccessBit) && !(int(Flags)&AlignedBit)) + { + bi = ei_alignmentOffset(&const_cast_derived().coeffRef(0), n); + if (bi>0) + ei_stable_norm_kernel(start(bi), ssq, scale, invScale); + } + for (; bi(derived(),bi,std::min(blockSize, n - bi)), ssq, scale, invScale); + return scale * ei_sqrt(ssq); +} + +/** \returns the \em l2 norm of \c *this using the Blue's algorithm. + * A Portable Fortran Program to Find the Euclidean Norm of a Vector, + * ACM TOMS, Vol 4, Issue 1, 1978. + * + * For architecture/scalar types without vectorization, this version + * is much faster than stableNorm(). Otherwise the stableNorm() is faster. + * + * \sa norm(), stableNorm(), hypotNorm() + */ +template +inline typename NumTraits::Scalar>::Real +MatrixBase::blueNorm() const +{ + static int nmax = -1; + static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr; + if(nmax <= 0) + { + int nbig, ibeta, it, iemin, iemax, iexp; + RealScalar abig, eps; + // This program calculates the machine-dependent constants + // bl, b2, slm, s2m, relerr overfl, nmax + // from the "basic" machine-dependent numbers + // nbig, ibeta, it, iemin, iemax, rbig. + // The following define the basic machine-dependent constants. + // For portability, the PORT subprograms "ilmaeh" and "rlmach" + // are used. For any specific computer, each of the assignment + // statements can be replaced + nbig = std::numeric_limits::max(); // largest integer + ibeta = std::numeric_limits::radix; // base for floating-point numbers + it = std::numeric_limits::digits; // number of base-beta digits in mantissa + iemin = std::numeric_limits::min_exponent; // minimum exponent + iemax = std::numeric_limits::max_exponent; // maximum exponent + rbig = std::numeric_limits::max(); // largest floating-point number + + // Check the basic machine-dependent constants. 
+ if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5) + || (it<=4 && ibeta <= 3 ) || it<2) + { + ei_assert(false && "the algorithm cannot be guaranteed on this computer"); + } + iexp = -((1-iemin)/2); + b1 = std::pow(double(ibeta),iexp); // lower boundary of midrange + iexp = (iemax + 1 - it)/2; + b2 = std::pow(double(ibeta),iexp); // upper boundary of midrange + + iexp = (2-iemin)/2; + s1m = std::pow(double(ibeta),iexp); // scaling factor for lower range + iexp = - ((iemax+it)/2); + s2m = std::pow(double(ibeta),iexp); // scaling factor for upper range + + overfl = rbig*s2m; // overfow boundary for abig + eps = std::pow(double(ibeta), 1-it); + relerr = ei_sqrt(eps); // tolerance for neglecting asml + abig = 1.0/eps - 1.0; + if (RealScalar(nbig)>abig) nmax = abig; // largest safe n + else nmax = nbig; + } + int n = size(); + RealScalar ab2 = b2 / RealScalar(n); + RealScalar asml = RealScalar(0); + RealScalar amed = RealScalar(0); + RealScalar abig = RealScalar(0); + for(int j=0; j ab2) abig += ei_abs2(ax*s2m); + else if(ax < b1) asml += ei_abs2(ax*s1m); + else amed += ei_abs2(ax); + } + if(abig > RealScalar(0)) + { + abig = ei_sqrt(abig); + if(abig > overfl) + { + ei_assert(false && "overflow"); + return rbig; + } + if(amed > RealScalar(0)) + { + abig = abig/s2m; + amed = ei_sqrt(amed); + } + else + return abig/s2m; + } + else if(asml > RealScalar(0)) + { + if (amed > RealScalar(0)) + { + abig = ei_sqrt(amed); + amed = ei_sqrt(asml) / s1m; + } + else + return ei_sqrt(asml)/s1m; + } + else + return ei_sqrt(amed); + asml = std::min(abig, amed); + abig = std::max(abig, amed); + if(asml <= abig*relerr) + return abig; + else + return abig * ei_sqrt(RealScalar(1) + ei_abs2(asml/abig)); +} + +/** \returns the \em l2 norm of \c *this avoiding undeflow and overflow. + * This version use a concatenation of hypot() calls, and it is very slow. + * + * \sa norm(), stableNorm() + */ +template +inline typename NumTraits::Scalar>::Real +MatrixBase::hypotNorm() const +{ + return this->cwise().abs().redux(ei_scalar_hypot_op()); +} + +#endif // EIGEN_STABLENORM_H diff --git a/Eigen/src/QR/SelfAdjointEigenSolver.h b/Eigen/src/QR/SelfAdjointEigenSolver.h index b003fb4d7..167fedebf 100644 --- a/Eigen/src/QR/SelfAdjointEigenSolver.h +++ b/Eigen/src/QR/SelfAdjointEigenSolver.h @@ -383,18 +383,24 @@ static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int st int kn1 = (k+1)*n; #endif // let's do the product manually to avoid the need of temporaries... - for (int i=0; i aux = Map, ForceAligned >(matrixQ+kn,n); + Map, ForceAligned >(matrixQ+kn,n) + = Map, ForceAligned >(matrixQ+kn,n) * c - s * Map >(matrixQ+kn1,n); + + Map, ForceAligned >(matrixQ+kn1,n) + = Map, ForceAligned >(matrixQ+kn1,n) * c - s * aux;//Map >(matrixQ+kn,n); +// for (int i=0; i class SVD { return (b >= Scalar(0.0) ? 
ei_abs(a) : -ei_abs(a)); } - + protected: /** \internal */ MatrixUType m_matU; @@ -254,11 +254,14 @@ void SVD::compute(const MatrixType& matrix) if (g != Scalar(0.0)) { g = Scalar(1.0)/g; - for (j=l; j +#include +#include "BenchTimer.h" +using namespace Eigen; +using namespace std; + +template +EIGEN_DONT_INLINE typename T::Scalar sqsumNorm(const T& v) +{ + return v.norm(); +} + +template +EIGEN_DONT_INLINE typename T::Scalar hypotNorm(const T& v) +{ + return v.hypotNorm(); +} + +template +EIGEN_DONT_INLINE typename T::Scalar blueNorm(const T& v) +{ + return v.blueNorm(); +} + +template +EIGEN_DONT_INLINE typename T::Scalar lapackNorm(T& v) +{ + typedef typename T::Scalar Scalar; + int n = v.size(); + Scalar scale = 0; + Scalar ssq = 1; + for (int i=0;i= ax) + { + ssq += ei_abs2(ax/scale); + } + else + { + ssq = Scalar(1) + ssq * ei_abs2(scale/ax); + scale = ax; + } + } + return scale * ei_sqrt(ssq); +} + +template +EIGEN_DONT_INLINE typename T::Scalar twopassNorm(T& v) +{ + typedef typename T::Scalar Scalar; + Scalar s = v.cwise().abs().maxCoeff(); + return s*(v/s).norm(); +} + +template +EIGEN_DONT_INLINE typename T::Scalar bl2passNorm(T& v) +{ + return v.stableNorm(); +} + +template +EIGEN_DONT_INLINE typename T::Scalar divacNorm(T& v) +{ + int n =v.size() / 2; + for (int i=0;i0) + { + for (int i=0;i +EIGEN_DONT_INLINE typename T::Scalar pblueNorm(const T& v) +{ + #ifndef EIGEN_VECTORIZE + return v.blueNorm(); + #else + typedef typename T::Scalar Scalar; + + static int nmax = 0; + static Scalar b1, b2, s1m, s2m, overfl, rbig, relerr; + int n; + + if(nmax <= 0) + { + int nbig, ibeta, it, iemin, iemax, iexp; + Scalar abig, eps; + + nbig = std::numeric_limits::max(); // largest integer + ibeta = std::numeric_limits::radix; //NumTraits::Base; // base for floating-point numbers + it = std::numeric_limits::digits; //NumTraits::Mantissa; // number of base-beta digits in mantissa + iemin = std::numeric_limits::min_exponent; // minimum exponent + iemax = std::numeric_limits::max_exponent; // maximum exponent + rbig = std::numeric_limits::max(); // largest floating-point number + + // Check the basic machine-dependent constants. 
+ if(iemin > 1 - 2*it || 1+it>iemax || (it==2 && ibeta<5) + || (it<=4 && ibeta <= 3 ) || it<2) + { + ei_assert(false && "the algorithm cannot be guaranteed on this computer"); + } + iexp = -((1-iemin)/2); + b1 = std::pow(ibeta, iexp); // lower boundary of midrange + iexp = (iemax + 1 - it)/2; + b2 = std::pow(ibeta,iexp); // upper boundary of midrange + + iexp = (2-iemin)/2; + s1m = std::pow(ibeta,iexp); // scaling factor for lower range + iexp = - ((iemax+it)/2); + s2m = std::pow(ibeta,iexp); // scaling factor for upper range + + overfl = rbig*s2m; // overfow boundary for abig + eps = std::pow(ibeta, 1-it); + relerr = ei_sqrt(eps); // tolerance for neglecting asml + abig = 1.0/eps - 1.0; + if (Scalar(nbig)>abig) nmax = abig; // largest safe n + else nmax = nbig; + } + + typedef typename ei_packet_traits::type Packet; + const int ps = ei_packet_traits::size; + Packet pasml = ei_pset1(Scalar(0)); + Packet pamed = ei_pset1(Scalar(0)); + Packet pabig = ei_pset1(Scalar(0)); + Packet ps2m = ei_pset1(s2m); + Packet ps1m = ei_pset1(s1m); + Packet pb2 = ei_pset1(b2); + Packet pb1 = ei_pset1(b1); + for(int j=0; j(j)); + Packet ax_s2m = ei_pmul(ax,ps2m); + Packet ax_s1m = ei_pmul(ax,ps1m); + Packet maskBig = ei_plt(pb2,ax); + Packet maskSml = ei_plt(ax,pb1); + +// Packet maskMed = ei_pand(maskSml,maskBig); +// Packet scale = ei_pset1(Scalar(0)); +// scale = ei_por(scale, ei_pand(maskBig,ps2m)); +// scale = ei_por(scale, ei_pand(maskSml,ps1m)); +// scale = ei_por(scale, ei_pandnot(ei_pset1(Scalar(1)),maskMed)); +// ax = ei_pmul(ax,scale); +// ax = ei_pmul(ax,ax); +// pabig = ei_padd(pabig, ei_pand(maskBig, ax)); +// pasml = ei_padd(pasml, ei_pand(maskSml, ax)); +// pamed = ei_padd(pamed, ei_pandnot(ax,maskMed)); + + + pabig = ei_padd(pabig, ei_pand(maskBig, ei_pmul(ax_s2m,ax_s2m))); + pasml = ei_padd(pasml, ei_pand(maskSml, ei_pmul(ax_s1m,ax_s1m))); + pamed = ei_padd(pamed, ei_pandnot(ei_pmul(ax,ax),ei_pand(maskSml,maskBig))); + } + Scalar abig = ei_predux(pabig); + Scalar asml = ei_predux(pasml); + Scalar amed = ei_predux(pamed); + if(abig > Scalar(0)) + { + abig = ei_sqrt(abig); + if(abig > overfl) + { + ei_assert(false && "overflow"); + return rbig; + } + if(amed > Scalar(0)) + { + abig = abig/s2m; + amed = ei_sqrt(amed); + } + else + { + return abig/s2m; + } + + } + else if(asml > Scalar(0)) + { + if (amed > Scalar(0)) + { + abig = ei_sqrt(amed); + amed = ei_sqrt(asml) / s1m; + } + else + { + return ei_sqrt(asml)/s1m; + } + } + else + { + return ei_sqrt(amed); + } + asml = std::min(abig, amed); + abig = std::max(abig, amed); + if(asml <= abig*relerr) + return abig; + else + return abig * ei_sqrt(Scalar(1) + ei_abs2(asml/abig)); + #endif +} + +#define BENCH_PERF(NRM) { \ + Eigen::BenchTimer tf, td, tcf; tf.reset(); td.reset(); tcf.reset();\ + for (int k=0; k()); + double yd = based * ei_abs(ei_random()); + VectorXf vf = VectorXf::Ones(s) * yf; + VectorXd vd = VectorXd::Ones(s) * yd; + + std::cout << "reference\t" << ei_sqrt(double(s))*yf << "\t" << ei_sqrt(double(s))*yd << "\n"; + std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\n"; + std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\n"; + std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\n"; + std::cout << "pblueNorm\t" << pblueNorm(vf) << "\t" << pblueNorm(vd) << "\n"; + std::cout << "lapackNorm\t" << lapackNorm(vf) << "\t" << lapackNorm(vd) << "\n"; + std::cout << "twopassNorm\t" << twopassNorm(vf) << "\t" << twopassNorm(vd) << "\n"; + std::cout << "bl2passNorm\t" << 
bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\n"; +} + +void check_accuracy_var(int ef0, int ef1, int ed0, int ed1, int s) +{ + VectorXf vf(s); + VectorXd vd(s); + for (int i=0; i()) * std::pow(double(10), ei_random(ef0,ef1)); + vd[i] = ei_abs(ei_random()) * std::pow(double(10), ei_random(ed0,ed1)); + } + + //std::cout << "reference\t" << ei_sqrt(double(s))*yf << "\t" << ei_sqrt(double(s))*yd << "\n"; + std::cout << "sqsumNorm\t" << sqsumNorm(vf) << "\t" << sqsumNorm(vd) << "\t" << sqsumNorm(vf.cast()) << "\t" << sqsumNorm(vd.cast()) << "\n"; + std::cout << "hypotNorm\t" << hypotNorm(vf) << "\t" << hypotNorm(vd) << "\t" << hypotNorm(vf.cast()) << "\t" << hypotNorm(vd.cast()) << "\n"; + std::cout << "blueNorm\t" << blueNorm(vf) << "\t" << blueNorm(vd) << "\t" << blueNorm(vf.cast()) << "\t" << blueNorm(vd.cast()) << "\n"; + std::cout << "pblueNorm\t" << pblueNorm(vf) << "\t" << pblueNorm(vd) << "\t" << blueNorm(vf.cast()) << "\t" << blueNorm(vd.cast()) << "\n"; + std::cout << "lapackNorm\t" << lapackNorm(vf) << "\t" << lapackNorm(vd) << "\t" << lapackNorm(vf.cast()) << "\t" << lapackNorm(vd.cast()) << "\n"; + std::cout << "twopassNorm\t" << twopassNorm(vf) << "\t" << twopassNorm(vd) << "\t" << twopassNorm(vf.cast()) << "\t" << twopassNorm(vd.cast()) << "\n"; +// std::cout << "bl2passNorm\t" << bl2passNorm(vf) << "\t" << bl2passNorm(vd) << "\t" << bl2passNorm(vf.cast()) << "\t" << bl2passNorm(vd.cast()) << "\n"; +} + +int main(int argc, char** argv) +{ + int tries = 10; + int iters = 100000; + double y = 1.1345743233455785456788e12 * ei_random(); + VectorXf v = VectorXf::Ones(1024) * y; + +// return 0; + int s = 10000; + double basef_ok = 1.1345743233455785456788e15; + double based_ok = 1.1345743233455785456788e95; + + double basef_under = 1.1345743233455785456788e-27; + double based_under = 1.1345743233455785456788e-303; + + double basef_over = 1.1345743233455785456788e+27; + double based_over = 1.1345743233455785456788e+302; + + std::cout.precision(20); + + std::cerr << "\nNo under/overflow:\n"; + check_accuracy(basef_ok, based_ok, s); + + std::cerr << "\nUnderflow:\n"; + check_accuracy(basef_under, based_under, s); + + std::cerr << "\nOverflow:\n"; + check_accuracy(basef_over, based_over, s); + + std::cerr << "\nVarying (over):\n"; + for (int k=0; k<1; ++k) + { + check_accuracy_var(20,27,190,302,s); + std::cout << "\n"; + } + + std::cerr << "\nVarying (under):\n"; + for (int k=0; k<1; ++k) + { + check_accuracy_var(-27,20,-302,-190,s); + std::cout << "\n"; + } + + std::cout.precision(4); + std::cerr << "Performance (out of cache):\n"; + { + int iters = 1; + VectorXf vf = VectorXf::Random(1024*1024*32) * y; + VectorXd vd = VectorXd::Random(1024*1024*32) * y; + VectorXcf vcf = VectorXcf::Random(1024*1024*32) * y; + BENCH_PERF(sqsumNorm); + BENCH_PERF(blueNorm); +// BENCH_PERF(pblueNorm); +// BENCH_PERF(lapackNorm); +// BENCH_PERF(hypotNorm); +// BENCH_PERF(twopassNorm); + BENCH_PERF(bl2passNorm); + } + + std::cerr << "\nPerformance (in cache):\n"; + { + int iters = 100000; + VectorXf vf = VectorXf::Random(512) * y; + VectorXd vd = VectorXd::Random(512) * y; + VectorXcf vcf = VectorXcf::Random(512) * y; + BENCH_PERF(sqsumNorm); + BENCH_PERF(blueNorm); +// BENCH_PERF(pblueNorm); +// BENCH_PERF(lapackNorm); +// BENCH_PERF(hypotNorm); +// BENCH_PERF(twopassNorm); + BENCH_PERF(bl2passNorm); + } +} diff --git a/test/adjoint.cpp b/test/adjoint.cpp index 1f4aa7427..47e1bd740 100644 --- a/test/adjoint.cpp +++ b/test/adjoint.cpp @@ -76,8 +76,8 @@ template void adjoint(const MatrixType& m) { 
     VERIFY_IS_MUCH_SMALLER_THAN(vzero.norm(), static_cast(1));
     VERIFY_IS_APPROX(v1.norm(), v1.stableNorm());
-    // NOTE disabled because it currently compiles for float and double only
-    // VERIFY_IS_APPROX(v1.blueNorm(), v1.stableNorm());
+    VERIFY_IS_APPROX(v1.blueNorm(), v1.stableNorm());
+    VERIFY_IS_APPROX(v1.hypotNorm(), v1.stableNorm());
   }
 
   // check compatibility of dot and adjoint
diff --git a/test/mixingtypes.cpp b/test/mixingtypes.cpp
index ec8a0f190..d14232bd4 100644
--- a/test/mixingtypes.cpp
+++ b/test/mixingtypes.cpp
@@ -82,7 +82,7 @@ template void mixingtypes(int size = SizeAtCompileType)
 void test_mixingtypes()
 {
   // check that our operator new is indeed called:
-  CALL_SUBTEST(mixingtypes<3>());
-  CALL_SUBTEST(mixingtypes<4>());
+  CALL_SUBTEST(mixingtypes<3>(3));
+  CALL_SUBTEST(mixingtypes<4>(4));
   CALL_SUBTEST(mixingtypes(20));
 }
diff --git a/test/product_large.cpp b/test/product_large.cpp
index 77ae7b587..9b53e7b89 100644
--- a/test/product_large.cpp
+++ b/test/product_large.cpp
@@ -42,4 +42,10 @@ void test_product_large()
     m = (v+v).asDiagonal() * m;
     VERIFY_IS_APPROX(m, MatrixXf::Constant(N,3,2));
   }
+
+  {
+    // test deferred resizing in Matrix::operator=
+    MatrixXf a = MatrixXf::Random(10,4), b = MatrixXf::Random(4,10), c = a;
+    VERIFY_IS_APPROX((a = a * b), (c * b).eval());
+  }
 }
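
For reference, a minimal usage sketch (not part of the patch; the vector size and the 1e155 fill value are arbitrary choices made only to force overflow) contrasting the plain norm() with the three safe variants that this change set declares in MatrixBase.h and implements in the new StableNorm.h:

#include <iostream>
#include <Eigen/Core>
using namespace Eigen;

int main()
{
  // Squaring 1e155 overflows double precision (1e310 > DBL_MAX), so the
  // plain norm() = sqrt(squaredNorm()) returns inf, while the rescaling
  // variants all recover roughly 1e155 * sqrt(1000) ~= 3.16e156.
  VectorXd v = VectorXd::Constant(1000, 1e155);

  std::cout << "norm():       " << v.norm()       << "\n"; // inf (overflow)
  std::cout << "stableNorm(): " << v.stableNorm() << "\n"; // blockwise two-pass (StableNorm.h)
  std::cout << "blueNorm():   " << v.blueNorm()   << "\n"; // Blue's algorithm, ACM TOMS 1978
  std::cout << "hypotNorm():  " << v.hypotNorm()  << "\n"; // chained hypot() calls, accurate but slow
  return 0;
}

The change set also adds MatrixBase::operator*=(other), defined in Product.h as "derived() = derived() * other.derived()", and reroutes Matrix assignment through _set_selector() so that right-hand sides flagged with EvalBeforeAssigningBit are evaluated before the destination is resized; the new "deferred resizing" block in test/product_large.cpp checks exactly that case with a = a * b.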