Gael Guennebaud 2010-04-08 13:29:31 +02:00
commit 1ad6c79467
40 changed files with 2468 additions and 460 deletions

View File

@ -36,6 +36,7 @@ namespace Eigen {
*/
#include "src/Eigenvalues/Tridiagonalization.h"
#include "src/Eigenvalues/RealSchur.h"
#include "src/Eigenvalues/EigenSolver.h"
#include "src/Eigenvalues/SelfAdjointEigenSolver.h"
#include "src/Eigenvalues/HessenbergDecomposition.h"

View File

@ -296,8 +296,7 @@ template<typename Derived>
bool LLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
{
ei_assert(m_isInitialized && "LLT is not initialized.");
const int size = m_matrix.rows();
ei_assert(size==bAndX.rows());
ei_assert(m_matrix.rows()==bAndX.rows());
matrixL().solveInPlace(bAndX);
matrixU().solveInPlace(bAndX);
return true;
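For context, solveInPlace() overwrites the right-hand side with the solution: matrixL() performs the forward substitution and matrixU() the back substitution of the Cholesky solve. A minimal usage sketch in the style of the documentation snippets (illustrative only, not part of this commit; A is assumed selfadjoint positive definite):

MatrixXd A = MatrixXd::Random(4,4);
A = A * A.adjoint() + MatrixXd::Identity(4,4);  // make A positive definite
VectorXd b = VectorXd::Random(4);
LLT<MatrixXd> llt(A);
llt.solveInPlace(b);  // b now holds the solution x of A x = b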

View File

@ -584,7 +584,7 @@ template<typename Derived> class DenseBase
#endif
// disable the use of evalTo for dense objects with a nice compilation error
template<typename Dest> inline void evalTo(Dest& dst) const
template<typename Dest> inline void evalTo(Dest& ) const
{
EIGEN_STATIC_ASSERT((ei_is_same_type<Dest,void>::ret),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
}
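Because Dest can never be void, the static assertion above fires for every dense destination type; a hypothetical call such as the following would be rejected at compile time with the THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS message (sketch for illustration only):

MatrixXd src = MatrixXd::Random(3,3);
MatrixXd dst(3,3);
// src.evalTo(dst);  // uncommenting this line triggers the static assertion above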

View File

@ -274,7 +274,7 @@ template<typename Scalar, typename NewType>
struct ei_scalar_cast_op {
EIGEN_EMPTY_STRUCT_CTOR(ei_scalar_cast_op)
typedef NewType result_type;
EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return static_cast<NewType>(a); }
EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return ei_cast<Scalar, NewType>(a); }
};
template<typename Scalar, typename NewType>
struct ei_functor_traits<ei_scalar_cast_op<Scalar,NewType> >
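This functor is the one used behind MatrixBase::cast<NewType>(), so after this change a coefficient-wise cast goes through ei_cast() and can be customized per scalar type (see the ei_cast_impl hunk below). A small usage sketch, illustrative only:

VectorXd vd = VectorXd::Random(5);
VectorXf vf = vd.cast<float>();  // each coefficient is converted via ei_scalar_cast_op<double,float>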

View File

@ -126,6 +126,16 @@ DenseBase<Derived>::format(const IOFormat& fmt) const
return WithFormat<Derived>(derived(), fmt);
}
template<typename Scalar>
struct ei_significant_decimals_impl
{
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline int run()
{
return ei_cast<RealScalar,int>(std::ceil(-ei_log(NumTraits<RealScalar>::epsilon())/ei_log(RealScalar(10))));
}
};
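As a rough check of the formula above: for double, epsilon() is about 2.22e-16, so -log10(epsilon) is roughly 15.65 and run() returns 16; for float, epsilon() is about 1.19e-7, giving 7. (These are the usual IEEE-754 values; the exact result depends on NumTraits for the scalar at hand.)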
/** \internal
* print the matrix \a _m to the output stream \a s using the output format \a fmt */
template<typename Derived>
@ -145,9 +155,7 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm
{
if (NumTraits<Scalar>::HasFloatingPoint)
{
typedef typename NumTraits<Scalar>::Real RealScalar;
RealScalar explicit_precision_fp = std::ceil(-ei_log(NumTraits<Scalar>::epsilon())/ei_log(10.0));
explicit_precision = static_cast<std::streamsize>(explicit_precision_fp);
explicit_precision = ei_significant_decimals_impl<Scalar>::run();
}
else
{

View File

@ -44,6 +44,23 @@ template<typename T> inline typename NumTraits<T>::Real ei_hypot(T x, T y)
return p * ei_sqrt(T(1) + qp*qp);
}
// the point of wrapping these casts in this helper template struct is to allow users to specialize it to custom types
// that may not have the needed conversion operators (especially as c++98 doesn't have explicit conversion operators).
template<typename OldType, typename NewType> struct ei_cast_impl
{
static inline NewType run(const OldType& x)
{
return static_cast<NewType>(x);
}
};
template<typename OldType, typename NewType> inline NewType ei_cast(const OldType& x)
{
return ei_cast_impl<OldType, NewType>::run(x);
}
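To illustrate the extension point described in the comment above, a user-defined scalar type can specialize ei_cast_impl instead of providing a conversion operator. A hypothetical sketch (MyFixedPoint and its layout are made up for illustration):

struct MyFixedPoint { int raw; };  // the value represented is raw / 256

template<> struct ei_cast_impl<MyFixedPoint, float>
{
  static inline float run(const MyFixedPoint& x)
  {
    return float(x.raw) / 256.f;  // custom conversion instead of static_cast
  }
};

// ei_cast<MyFixedPoint,float>(x) now compiles even though MyFixedPoint has no operator float().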
/**************
*** int ***
**************/

View File

@ -369,10 +369,14 @@ static EIGEN_DONT_INLINE EIGEN_UNUSED Packet4f ei_pcos(Packet4f x)
// For detail see here: http://www.beyond3d.com/content/articles/8/
static EIGEN_UNUSED Packet4f ei_psqrt(Packet4f _x)
{
Packet4f half = ei_pmul(_x, ei_pset1(.5f));
Packet4f x = _mm_rsqrt_ps(_x);
x = ei_pmul(x, ei_psub(ei_pset1(1.5f), ei_pmul(half, ei_pmul(x,x))));
return ei_pmul(_x,x);
Packet4f half = ei_pmul(_x, ei_pset1(.5f));
/* select only the inverse sqrt of non-zero inputs */
Packet4f non_zero_mask = _mm_cmpgt_ps(_x, ei_pset1(std::numeric_limits<float>::epsilon()));
Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
x = ei_pmul(x, ei_psub(ei_pset1(1.5f), ei_pmul(half, ei_pmul(x,x))));
return ei_pmul(_x,x);
}
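The vectorized code above is one Newton-Raphson refinement of the hardware reciprocal square root estimate; in scalar form the same computation reads roughly as follows (sketch for illustration only):

#include <cmath>

float approx_sqrt(float a)
{
  float x = 1.f / std::sqrt(a);       // stand-in for the _mm_rsqrt_ps estimate
  x = x * (1.5f - 0.5f * a * x * x);  // one Newton-Raphson step for 1/sqrt(a)
  return a * x;                       // a * (1/sqrt(a)) = sqrt(a)
}

The new non_zero_mask forces the estimate to zero when the input is (nearly) zero; without it, rsqrt(0) = inf propagates through the Newton step and ei_psqrt(0) would return NaN instead of 0.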
#endif // EIGEN_MATH_FUNCTIONS_SSE_H

View File

@ -86,7 +86,7 @@ template<typename _MatrixType> class ComplexSchur
/** \brief Default constructor.
*
* \param [in] size The size of the matrix whose Schur decomposition will be computed.
* \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed.
*
* The default constructor is useful in cases in which the user
* intends to perform decompositions via compute(). The \p size
@ -95,7 +95,7 @@ template<typename _MatrixType> class ComplexSchur
*
* \sa compute() for an example.
*/
ComplexSchur(int size = RowsAtCompileTime==Dynamic ? 0 : RowsAtCompileTime)
ComplexSchur(int size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size,size), m_matU(size,size), m_isInitialized(false), m_matUisUptodate(false)
{}
@ -157,7 +157,7 @@ template<typename _MatrixType> class ComplexSchur
*/
const ComplexMatrixType& matrixT() const
{
ei_assert(m_isInitialized && "ComplexShur is not initialized.");
ei_assert(m_isInitialized && "ComplexSchur is not initialized.");
return m_matT;
}

View File

@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@ -25,20 +26,51 @@
#ifndef EIGEN_EIGENSOLVER_H
#define EIGEN_EIGENSOLVER_H
#include "./RealSchur.h"
/** \eigenvalues_module \ingroup Eigenvalues_Module
* \nonstableyet
*
* \class EigenSolver
*
* \brief Eigen values/vectors solver for non selfadjoint matrices
* \brief Computes eigenvalues and eigenvectors of general matrices
*
* \param MatrixType the type of the matrix of which we are computing the eigen decomposition
* \tparam _MatrixType the type of the matrix of which we are computing the
* eigendecomposition; this is expected to be an instantiation of the Matrix
* class template. Currently, only real matrices are supported.
*
* Currently it only support real matrices.
* The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
* \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$. If
* \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
* \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
* V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
* have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition.
*
* The eigenvalues and eigenvectors of a matrix may be complex, even when the
* matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D
* \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the
* matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to
* have blocks of the form
* \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f]
* (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal. These
* blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call
* this variant of the eigendecomposition the pseudo-eigendecomposition.
*
* Call the function compute() to compute the eigenvalues and eigenvectors of
* a given matrix. Alternatively, you can use the
* EigenSolver(const MatrixType&) constructor which computes the eigenvalues
* and eigenvectors at construction time. Once the eigenvalue and eigenvectors
* are computed, they can be retrieved with the eigenvalues() and
* eigenvectors() functions. The pseudoEigenvalueMatrix() and
* pseudoEigenvectors() methods allow the construction of the
* pseudo-eigendecomposition.
*
* The documentation for EigenSolver(const MatrixType&) contains an example of
* the typical use of this class.
*
* \note this code was adapted from JAMA (public domain)
*
* \sa MatrixBase::eigenvalues(), SelfAdjointEigenSolver
* \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class EigenSolver
{
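To see why the 2-by-2 blocks described in the class documentation encode a complex conjugate pair: the block [u v; -v u] has characteristic polynomial (lambda - u)^2 + v^2, so its eigenvalues are lambda = u +/- i v. For instance, the block with u = 0, v = 1 has eigenvalues +/- i.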
@ -52,21 +84,54 @@ template<typename _MatrixType> class EigenSolver
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef std::complex<RealScalar> Complex;
typedef Matrix<Complex, ColsAtCompileTime, 1, Options, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Complex, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
typedef Matrix<RealScalar, ColsAtCompileTime, 1, Options, MaxColsAtCompileTime, 1> RealVectorType;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via EigenSolver::compute(const MatrixType&).
*/
/** \brief Complex scalar type for \p _MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #ComplexScalar.
* The length of the vector is the size of \p _MatrixType.
*/
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options, MaxColsAtCompileTime, 1> EigenvalueType;
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of \p _MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
/** \brief Default constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via EigenSolver::compute(const MatrixType&).
*
* \sa compute() for an example.
*/
EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false) {}
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
*
* This constructor calls compute() to compute the eigenvalues
* and eigenvectors.
*
* Example: \include EigenSolver_EigenSolver_MatrixType.cpp
* Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out
*
* \sa compute()
*/
EigenSolver(const MatrixType& matrix)
: m_eivec(matrix.rows(), matrix.cols()),
m_eivalues(matrix.cols()),
@ -75,39 +140,42 @@ template<typename _MatrixType> class EigenSolver
compute(matrix);
}
/** \brief Returns the eigenvectors of given matrix.
*
* \returns %Matrix whose columns are the (possibly complex) eigenvectors.
*
* \pre Either the constructor EigenSolver(const MatrixType&) or the
* member function compute(const MatrixType&) has been called before.
*
* Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
* to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
* eigenvectors are normalized to have (Euclidean) norm equal to one. The
* matrix returned by this function is the matrix \f$ V \f$ in the
* eigendecomposition \f$ A = V D V^{-1} \f$, if it exists.
*
* Example: \include EigenSolver_eigenvectors.cpp
* Output: \verbinclude EigenSolver_eigenvectors.out
*
* \sa eigenvalues(), pseudoEigenvectors()
*/
EigenvectorType eigenvectors() const;
EigenvectorType eigenvectors(void) const;
/** \returns a real matrix V of pseudo eigenvectors.
/** \brief Returns the pseudo-eigenvectors of given matrix.
*
* Let D be the block diagonal matrix with the real eigenvalues in 1x1 blocks,
* and any complex values u+iv in 2x2 blocks [u v ; -v u]. Then, the matrices D
* and V satisfy A*V = V*D.
* \returns Const reference to matrix whose columns are the pseudo-eigenvectors.
*
* More precisely, if the diagonal matrix of the eigen values is:\n
* \f$
* \left[ \begin{array}{cccccc}
* u+iv & & & & & \\
* & u-iv & & & & \\
* & & a+ib & & & \\
* & & & a-ib & & \\
* & & & & x & \\
* & & & & & y \\
* \end{array} \right]
* \f$ \n
* then, we have:\n
* \f$
* D =\left[ \begin{array}{cccccc}
* u & v & & & & \\
* -v & u & & & & \\
* & & a & b & & \\
* & & -b & a & & \\
* & & & & x & \\
* & & & & & y \\
* \end{array} \right]
* \f$
* \pre Either the constructor EigenSolver(const MatrixType&) or
* the member function compute(const MatrixType&) has been called
* before.
*
* \sa pseudoEigenvalueMatrix()
* The real matrix \f$ V \f$ returned by this function and the
* block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
* satisfy \f$ AV = VD \f$.
*
* Example: \include EigenSolver_pseudoEigenvectors.cpp
* Output: \verbinclude EigenSolver_pseudoEigenvectors.out
*
* \sa pseudoEigenvalueMatrix(), eigenvectors()
*/
const MatrixType& pseudoEigenvectors() const
{
@ -115,21 +183,71 @@ template<typename _MatrixType> class EigenSolver
return m_eivec;
}
/** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.
*
* \returns A block-diagonal matrix.
*
* \pre Either the constructor EigenSolver(const MatrixType&) or the
* member function compute(const MatrixType&) has been called before.
*
* The matrix \f$ D \f$ returned by this function is real and
* block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
* blocks of the form
* \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$.
* The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by
* pseudoEigenvectors() satisfy \f$ AV = VD \f$.
*
* \sa pseudoEigenvectors() for an example, eigenvalues()
*/
MatrixType pseudoEigenvalueMatrix() const;
/** \returns the eigenvalues as a column vector */
/** \brief Returns the eigenvalues of given matrix.
*
* \returns Column vector containing the eigenvalues.
*
* \pre Either the constructor EigenSolver(const MatrixType&) or the
* member function compute(const MatrixType&) has been called before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix.
*
* Example: \include EigenSolver_eigenvalues.cpp
* Output: \verbinclude EigenSolver_eigenvalues.out
*
* \sa eigenvectors(), pseudoEigenvalueMatrix(),
* MatrixBase::eigenvalues()
*/
EigenvalueType eigenvalues() const
{
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
return m_eivalues;
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \returns Reference to \c *this
*
* This function computes the eigenvalues and eigenvectors of \p matrix.
* The eigenvalues() and eigenvectors() functions can be used to retrieve
* the computed eigendecomposition.
*
* The matrix is first reduced to Schur form. The Schur decomposition is
* then used to compute the eigenvalues and eigenvectors.
*
* The cost of the computation is dominated by the cost of the Schur
* decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$ is the size of
* the matrix.
*
* This method reuses the allocated data in the EigenSolver object.
*
* Example: \include EigenSolver_compute.cpp
* Output: \verbinclude EigenSolver_compute.out
*/
EigenSolver& compute(const MatrixType& matrix);
private:
void orthes(MatrixType& matH, RealVectorType& ort);
void hqr2(MatrixType& matH);
void hqr2_step2(MatrixType& matH);
protected:
MatrixType m_eivec;
@ -137,10 +255,6 @@ template<typename _MatrixType> class EigenSolver
bool m_isInitialized;
};
/** \returns the real block diagonal matrix D of the eigenvalues.
*
* See pseudoEigenvectors() for the details.
*/
template<typename MatrixType>
MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
{
@ -161,12 +275,8 @@ MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
return matD;
}
/** \returns the normalized complex eigenvectors as a matrix of column vectors.
*
* \sa eigenvalues(), pseudoEigenvectors()
*/
template<typename MatrixType>
typename EigenSolver<MatrixType>::EigenvectorType EigenSolver<MatrixType>::eigenvectors(void) const
typename EigenSolver<MatrixType>::EigenvectorType EigenSolver<MatrixType>::eigenvectors() const
{
ei_assert(m_isInitialized && "EigenSolver is not initialized.");
int n = m_eivec.cols();
@ -176,15 +286,15 @@ typename EigenSolver<MatrixType>::EigenvectorType EigenSolver<MatrixType>::eigen
if (ei_isMuchSmallerThan(ei_abs(ei_imag(m_eivalues.coeff(j))), ei_abs(ei_real(m_eivalues.coeff(j)))))
{
// we have a real eigen value
matV.col(j) = m_eivec.col(j).template cast<Complex>();
matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
}
else
{
// we have a pair of complex eigen values
for (int i=0; i<n; ++i)
{
matV.coeffRef(i,j) = Complex(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
matV.coeffRef(i,j+1) = Complex(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
}
matV.col(j).normalize();
matV.col(j+1).normalize();
@ -198,86 +308,20 @@ template<typename MatrixType>
EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix)
{
assert(matrix.cols() == matrix.rows());
int n = matrix.cols();
m_eivalues.resize(n,1);
m_eivec.resize(n,n);
MatrixType matH = matrix;
RealVectorType ort(n);
// Reduce to real Schur form.
RealSchur<MatrixType> rs(matrix);
MatrixType matH = rs.matrixT();
m_eivec = rs.matrixU();
m_eivalues = rs.eigenvalues();
// Reduce to Hessenberg form.
orthes(matH, ort);
// Reduce Hessenberg to real Schur form.
hqr2(matH);
// Compute eigenvectors.
hqr2_step2(matH);
m_isInitialized = true;
return *this;
}
// Nonsymmetric reduction to Hessenberg form.
template<typename MatrixType>
void EigenSolver<MatrixType>::orthes(MatrixType& matH, RealVectorType& ort)
{
// This is derived from the Algol procedures orthes and ortran,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutines in EISPACK.
int n = m_eivec.cols();
int low = 0;
int high = n-1;
for (int m = low+1; m <= high-1; ++m)
{
// Scale column.
RealScalar scale = matH.block(m, m-1, high-m+1, 1).cwiseAbs().sum();
if (scale != 0.0)
{
// Compute Householder transformation.
RealScalar h = 0.0;
// FIXME could be rewritten, but this one looks better wrt cache
for (int i = high; i >= m; i--)
{
ort.coeffRef(i) = matH.coeff(i,m-1)/scale;
h += ort.coeff(i) * ort.coeff(i);
}
RealScalar g = ei_sqrt(h);
if (ort.coeff(m) > 0)
g = -g;
h = h - ort.coeff(m) * g;
ort.coeffRef(m) = ort.coeff(m) - g;
// Apply Householder similarity transformation
// H = (I-u*u'/h)*H*(I-u*u')/h)
int bSize = high-m+1;
matH.block(m, m, bSize, n-m).noalias() -= ((ort.segment(m, bSize)/h)
* (ort.segment(m, bSize).transpose() * matH.block(m, m, bSize, n-m)));
matH.block(0, m, high+1, bSize).noalias() -= ((matH.block(0, m, high+1, bSize) * ort.segment(m, bSize))
* (ort.segment(m, bSize)/h).transpose());
ort.coeffRef(m) = scale*ort.coeff(m);
matH.coeffRef(m,m-1) = scale*g;
}
}
// Accumulate transformations (Algol's ortran).
m_eivec.setIdentity();
for (int m = high-1; m >= low+1; m--)
{
if (matH.coeff(m,m-1) != 0.0)
{
ort.segment(m+1, high-m) = matH.col(m-1).segment(m+1, high-m);
int bSize = high-m+1;
m_eivec.block(m, m, bSize, bSize).noalias() += ( (ort.segment(m, bSize) / (matH.coeff(m,m-1) * ort.coeff(m)))
* (ort.segment(m, bSize).transpose() * m_eivec.block(m, m, bSize, bSize)) );
}
}
}
// Complex scalar division.
template<typename Scalar>
std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
@ -298,289 +342,29 @@ std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
}
// Nonsymmetric reduction from Hessenberg to real Schur form.
template<typename MatrixType>
void EigenSolver<MatrixType>::hqr2(MatrixType& matH)
void EigenSolver<MatrixType>::hqr2_step2(MatrixType& matH)
{
// This is derived from the Algol procedure hqr2,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
const int nn = m_eivec.cols();
const int low = 0;
const int high = nn-1;
const Scalar eps = ei_pow(Scalar(2),ei_is_same_type<Scalar,float>::ret ? Scalar(-23) : Scalar(-52));
Scalar p, q, r=0, s=0, t, w, x, y, z=0;
// Initialize
int nn = m_eivec.cols();
int n = nn-1;
int low = 0;
int high = nn-1;
Scalar eps = ei_pow(Scalar(2),ei_is_same_type<Scalar,float>::ret ? Scalar(-23) : Scalar(-52));
Scalar exshift = 0.0;
Scalar p=0,q=0,r=0,s=0,z=0,t,w,x,y;
// Store roots isolated by balanc and compute matrix norm
// FIXME to be efficient the following would requires a triangular reduxion code
// Scalar norm = matH.upper().cwiseAbs().sum() + matH.corner(BottomLeft,n,n).diagonal().cwiseAbs().sum();
// inefficient! this is already computed in RealSchur
Scalar norm = 0.0;
for (int j = 0; j < nn; ++j)
{
// FIXME what's the purpose of the following since the condition is always false
if ((j < low) || (j > high))
{
m_eivalues.coeffRef(j) = Complex(matH.coeff(j,j), 0.0);
}
norm += matH.row(j).segment(std::max(j-1,0), nn-std::max(j-1,0)).cwiseAbs().sum();
}
// Outer loop over eigenvalue index
int iter = 0;
while (n >= low)
{
// Look for single small sub-diagonal element
int l = n;
while (l > low)
{
s = ei_abs(matH.coeff(l-1,l-1)) + ei_abs(matH.coeff(l,l));
if (s == 0.0)
s = norm;
if (ei_abs(matH.coeff(l,l-1)) < eps * s)
break;
l--;
}
// Check for convergence
// One root found
if (l == n)
{
matH.coeffRef(n,n) = matH.coeff(n,n) + exshift;
m_eivalues.coeffRef(n) = Complex(matH.coeff(n,n), 0.0);
n--;
iter = 0;
}
else if (l == n-1) // Two roots found
{
w = matH.coeff(n,n-1) * matH.coeff(n-1,n);
p = (matH.coeff(n-1,n-1) - matH.coeff(n,n)) * Scalar(0.5);
q = p * p + w;
z = ei_sqrt(ei_abs(q));
matH.coeffRef(n,n) = matH.coeff(n,n) + exshift;
matH.coeffRef(n-1,n-1) = matH.coeff(n-1,n-1) + exshift;
x = matH.coeff(n,n);
// Scalar pair
if (q >= 0)
{
if (p >= 0)
z = p + z;
else
z = p - z;
m_eivalues.coeffRef(n-1) = Complex(x + z, 0.0);
m_eivalues.coeffRef(n) = Complex(z!=0.0 ? x - w / z : m_eivalues.coeff(n-1).real(), 0.0);
x = matH.coeff(n,n-1);
s = ei_abs(x) + ei_abs(z);
p = x / s;
q = z / s;
r = ei_sqrt(p * p+q * q);
p = p / r;
q = q / r;
// Row modification
for (int j = n-1; j < nn; ++j)
{
z = matH.coeff(n-1,j);
matH.coeffRef(n-1,j) = q * z + p * matH.coeff(n,j);
matH.coeffRef(n,j) = q * matH.coeff(n,j) - p * z;
}
// Column modification
for (int i = 0; i <= n; ++i)
{
z = matH.coeff(i,n-1);
matH.coeffRef(i,n-1) = q * z + p * matH.coeff(i,n);
matH.coeffRef(i,n) = q * matH.coeff(i,n) - p * z;
}
// Accumulate transformations
for (int i = low; i <= high; ++i)
{
z = m_eivec.coeff(i,n-1);
m_eivec.coeffRef(i,n-1) = q * z + p * m_eivec.coeff(i,n);
m_eivec.coeffRef(i,n) = q * m_eivec.coeff(i,n) - p * z;
}
}
else // Complex pair
{
m_eivalues.coeffRef(n-1) = Complex(x + p, z);
m_eivalues.coeffRef(n) = Complex(x + p, -z);
}
n = n - 2;
iter = 0;
}
else // No convergence yet
{
// Form shift
x = matH.coeff(n,n);
y = 0.0;
w = 0.0;
if (l < n)
{
y = matH.coeff(n-1,n-1);
w = matH.coeff(n,n-1) * matH.coeff(n-1,n);
}
// Wilkinson's original ad hoc shift
if (iter == 10)
{
exshift += x;
for (int i = low; i <= n; ++i)
matH.coeffRef(i,i) -= x;
s = ei_abs(matH.coeff(n,n-1)) + ei_abs(matH.coeff(n-1,n-2));
x = y = Scalar(0.75) * s;
w = Scalar(-0.4375) * s * s;
}
// MATLAB's new ad hoc shift
if (iter == 30)
{
s = Scalar((y - x) / 2.0);
s = s * s + w;
if (s > 0)
{
s = ei_sqrt(s);
if (y < x)
s = -s;
s = Scalar(x - w / ((y - x) / 2.0 + s));
for (int i = low; i <= n; ++i)
matH.coeffRef(i,i) -= s;
exshift += s;
x = y = w = Scalar(0.964);
}
}
iter = iter + 1; // (Could check iteration count here.)
// Look for two consecutive small sub-diagonal elements
int m = n-2;
while (m >= l)
{
z = matH.coeff(m,m);
r = x - z;
s = y - z;
p = (r * s - w) / matH.coeff(m+1,m) + matH.coeff(m,m+1);
q = matH.coeff(m+1,m+1) - z - r - s;
r = matH.coeff(m+2,m+1);
s = ei_abs(p) + ei_abs(q) + ei_abs(r);
p = p / s;
q = q / s;
r = r / s;
if (m == l) {
break;
}
if (ei_abs(matH.coeff(m,m-1)) * (ei_abs(q) + ei_abs(r)) <
eps * (ei_abs(p) * (ei_abs(matH.coeff(m-1,m-1)) + ei_abs(z) +
ei_abs(matH.coeff(m+1,m+1)))))
{
break;
}
m--;
}
for (int i = m+2; i <= n; ++i)
{
matH.coeffRef(i,i-2) = 0.0;
if (i > m+2)
matH.coeffRef(i,i-3) = 0.0;
}
// Double QR step involving rows l:n and columns m:n
for (int k = m; k <= n-1; ++k)
{
int notlast = (k != n-1);
if (k != m) {
p = matH.coeff(k,k-1);
q = matH.coeff(k+1,k-1);
r = notlast ? matH.coeff(k+2,k-1) : Scalar(0);
x = ei_abs(p) + ei_abs(q) + ei_abs(r);
if (x != 0.0)
{
p = p / x;
q = q / x;
r = r / x;
}
}
if (x == 0.0)
break;
s = ei_sqrt(p * p + q * q + r * r);
if (p < 0)
s = -s;
if (s != 0)
{
if (k != m)
matH.coeffRef(k,k-1) = -s * x;
else if (l != m)
matH.coeffRef(k,k-1) = -matH.coeff(k,k-1);
p = p + s;
x = p / s;
y = q / s;
z = r / s;
q = q / p;
r = r / p;
// Row modification
for (int j = k; j < nn; ++j)
{
p = matH.coeff(k,j) + q * matH.coeff(k+1,j);
if (notlast)
{
p = p + r * matH.coeff(k+2,j);
matH.coeffRef(k+2,j) = matH.coeff(k+2,j) - p * z;
}
matH.coeffRef(k,j) = matH.coeff(k,j) - p * x;
matH.coeffRef(k+1,j) = matH.coeff(k+1,j) - p * y;
}
// Column modification
for (int i = 0; i <= std::min(n,k+3); ++i)
{
p = x * matH.coeff(i,k) + y * matH.coeff(i,k+1);
if (notlast)
{
p = p + z * matH.coeff(i,k+2);
matH.coeffRef(i,k+2) = matH.coeff(i,k+2) - p * r;
}
matH.coeffRef(i,k) = matH.coeff(i,k) - p;
matH.coeffRef(i,k+1) = matH.coeff(i,k+1) - p * q;
}
// Accumulate transformations
for (int i = low; i <= high; ++i)
{
p = x * m_eivec.coeff(i,k) + y * m_eivec.coeff(i,k+1);
if (notlast)
{
p = p + z * m_eivec.coeff(i,k+2);
m_eivec.coeffRef(i,k+2) = m_eivec.coeff(i,k+2) - p * r;
}
m_eivec.coeffRef(i,k) = m_eivec.coeff(i,k) - p;
m_eivec.coeffRef(i,k+1) = m_eivec.coeff(i,k+1) - p * q;
}
} // (s != 0)
} // k loop
} // check convergence
} // while (n >= low)
// Backsubstitute to find vectors of upper triangular form
if (norm == 0.0)
{
return;
}
for (n = nn-1; n >= 0; n--)
for (int n = nn-1; n >= 0; n--)
{
p = m_eivalues.coeff(n).real();
q = m_eivalues.coeff(n).imag();

View File

@ -30,14 +30,30 @@
*
* \class HessenbergDecomposition
*
* \brief Reduces a squared matrix to an Hessemberg form
* \brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation
*
* \param MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
* \tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
*
* This class performs an Hessenberg decomposition of a matrix \f$ A \f$ such that:
* \f$ A = Q H Q^* \f$ where \f$ Q \f$ is unitary and \f$ H \f$ a Hessenberg matrix.
* This class performs a Hessenberg decomposition of a matrix \f$ A \f$. In
* the real case, the Hessenberg decomposition consists of an orthogonal
* matrix \f$ Q \f$ and a Hessenberg matrix \f$ H \f$ such that \f$ A = Q H
* Q^T \f$. An orthogonal matrix is a matrix whose inverse equals its
* transpose (\f$ Q^{-1} = Q^T \f$). A Hessenberg matrix has zeros below the
* subdiagonal, so it is almost upper triangular. The Hessenberg decomposition
* of a complex matrix is \f$ A = Q H Q^* \f$ with \f$ Q \f$ unitary (that is,
* \f$ Q^{-1} = Q^* \f$).
*
* \sa class Tridiagonalization, class Qr
* Call the function compute() to compute the Hessenberg decomposition of a
* given matrix. Alternatively, you can use the
* HessenbergDecomposition(const MatrixType&) constructor which computes the
* Hessenberg decomposition at construction time. Once the decomposition is
* computed, you can use the matrixH() and matrixQ() functions to construct
* the matrices H and Q in the decomposition.
*
* The documentation for matrixH() contains an example of the typical use of
* this class.
*
* \sa class ComplexSchur, class Tridiagonalization, \ref QR_Module "QR Module"
*/
template<typename _MatrixType> class HessenbergDecomposition
{
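For a 4-by-4 matrix, the Hessenberg pattern described above is

  [ x x x x ]
  [ x x x x ]
  [ 0 x x x ]
  [ 0 0 x x ]

that is, only the first subdiagonal below the main diagonal may be non-zero.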
@ -51,13 +67,28 @@ template<typename _MatrixType> class HessenbergDecomposition
MaxSize = MatrixType::MaxRowsAtCompileTime,
MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, SizeMinusOne, 1, Options, MaxSizeMinusOne, 1> CoeffVectorType;
typedef Matrix<Scalar, 1, Size, Options, 1, MaxSize> VectorType;
/** This constructor initializes a HessenbergDecomposition object for
* further use with HessenbergDecomposition::compute()
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
/** \brief Type for vector of Householder coefficients.
*
* This is a column vector with entries of type #Scalar. The length of the
* vector is one less than the size of \p _MatrixType, if it is a
* fixed-size type.
*/
typedef Matrix<Scalar, SizeMinusOne, 1, Options, MaxSizeMinusOne, 1> CoeffVectorType;
/** \brief Default constructor; the decomposition will be computed later.
*
* \param [in] size The size of the matrix whose Hessenberg decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
HessenbergDecomposition(int size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size)
@ -66,6 +97,15 @@ template<typename _MatrixType> class HessenbergDecomposition
m_hCoeffs.resize(size-1);
}
/** \brief Constructor; computes Hessenberg decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
*
* This constructor calls compute() to compute the Hessenberg
* decomposition.
*
* \sa matrixH() for an example.
*/
HessenbergDecomposition(const MatrixType& matrix)
: m_matrix(matrix)
{
@ -75,9 +115,21 @@ template<typename _MatrixType> class HessenbergDecomposition
_compute(m_matrix, m_hCoeffs);
}
/** Computes or re-compute the Hessenberg decomposition for the matrix \a matrix.
/** \brief Computes Hessenberg decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
*
* This method allows to re-use the allocated data.
* The Hessenberg decomposition is computed by bringing the columns of the
* matrix successively in the required form using Householder reflections
* (see, e.g., Algorithm 7.4.2 in Golub \& Van Loan, <i>%Matrix
* Computations</i>). The cost is \f$ 10n^3/3 \f$ flops, where \f$ n \f$
* denotes the size of the given matrix.
*
* This method reuses the allocated data in the HessenbergDecomposition
* object.
*
* Example: \include HessenbergDecomposition_compute.cpp
* Output: \verbinclude HessenbergDecomposition_compute.out
*/
void compute(const MatrixType& matrix)
{
@ -88,36 +140,95 @@ template<typename _MatrixType> class HessenbergDecomposition
_compute(m_matrix, m_hCoeffs);
}
/** \returns a const reference to the householder coefficients allowing to
* reconstruct the matrix Q from the packed data.
/** \brief Returns the Householder coefficients.
*
* \sa packedMatrix()
* \returns a const reference to the vector of Householder coefficients
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The Householder coefficients allow the reconstruction of the matrix
* \f$ Q \f$ in the Hessenberg decomposition from the packed data.
*
* \sa packedMatrix(), \ref Householder_Module "Householder module"
*/
const CoeffVectorType& householderCoefficients() const { return m_hCoeffs; }
/** \returns a const reference to the internal representation of the decomposition.
/** \brief Returns the internal representation of the decomposition
*
* \returns a const reference to a matrix with the internal representation
* of the decomposition.
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The returned matrix contains the following information:
* - the upper part and lower sub-diagonal represent the Hessenberg matrix H
* - the rest of the lower part contains the Householder vectors that, combined with
* Householder coefficients returned by householderCoefficients(),
* allows to reconstruct the matrix Q as follow:
* Q = H_{N-1} ... H_1 H_0
* where the matrices H are the Householder transformation:
* H_i = (I - h_i * v_i * v_i')
* where h_i == householderCoefficients()[i] and v_i is a Householder vector:
* v_i = [ 0, ..., 0, 1, M(i+2,i), ..., M(N-1,i) ]
* allow the reconstruction of the matrix Q as
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
* Here, the matrices \f$ H_i \f$ are the Householder transformations
* \f$ H_i = (I - h_i v_i v_i^T) \f$
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
* \f$ v_i \f$ is the Householder vector defined by
* \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
* with M the matrix returned by this function.
*
* See LAPACK for further details on this packed storage.
*
* Example: \include HessenbergDecomposition_packedMatrix.cpp
* Output: \verbinclude HessenbergDecomposition_packedMatrix.out
*
* \sa householderCoefficients()
*/
const MatrixType& packedMatrix(void) const { return m_matrix; }
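As an illustration of the packed storage described above, for N = 4 the returned matrix has the layout

  [ H  H  H  H ]
  [ H  H  H  H ]
  [ v0 H  H  H ]
  [ v0 v1 H  H ]

where the entries marked H form the Hessenberg matrix (upper part plus subdiagonal) and the entries marked v0, v1 hold the essential parts of the Householder vectors; the implicit leading 1 of each vector is not stored.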
/** \brief Reconstructs the orthogonal matrix Q in the decomposition
*
* \returns the matrix Q
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* This function reconstructs the matrix Q from the Householder
* coefficients and the packed matrix stored internally. This
* reconstruction requires \f$ 4n^3 / 3 \f$ flops.
*
* \sa matrixH() for an example
*/
MatrixType matrixQ() const;
/** \brief Constructs the Hessenberg matrix H in the decomposition
*
* \returns the matrix H
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* This function copies the matrix H from internal data. The upper part
* (including the subdiagonal) of the packed matrix as returned by
* packedMatrix() contains the matrix H. This function copies those
* entries in a newly created matrix and sets the remaining entries to
* zero. It may sometimes be sufficient to directly use the packed matrix
* instead of creating a new one.
*
* Example: \include HessenbergDecomposition_matrixH.cpp
* Output: \verbinclude HessenbergDecomposition_matrixH.out
*
* \sa matrixQ(), packedMatrix()
*/
MatrixType matrixH() const;
private:
static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs);
typedef Matrix<Scalar, 1, Size, Options, 1, MaxSize> VectorType;
typedef typename NumTraits<Scalar>::Real RealScalar;
protected:
MatrixType m_matrix;
@ -134,7 +245,7 @@ template<typename _MatrixType> class HessenbergDecomposition
*
* The result is written in the lower triangular part of \a matA.
*
* Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
* Implemented from Golub's "%Matrix Computations", algorithm 8.3.1.
*
* \sa packedMatrix()
*/
@ -167,7 +278,6 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
}
}
/** reconstructs and returns the matrix Q */
template<typename MatrixType>
typename HessenbergDecomposition<MatrixType>::MatrixType
HessenbergDecomposition<MatrixType>::matrixQ() const
@ -185,11 +295,6 @@ HessenbergDecomposition<MatrixType>::matrixQ() const
#endif // EIGEN_HIDE_HEAVY_CODE
/** constructs and returns the matrix H.
* Note that the matrix H is equivalent to the upper part of the packed matrix
* (including the lower sub-diagonal). Therefore, it might be often sufficient
* to directly use the packed matrix instead of creating a new one.
*/
template<typename MatrixType>
typename HessenbergDecomposition<MatrixType>::MatrixType
HessenbergDecomposition<MatrixType>::matrixH() const

View File

@ -0,0 +1,388 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_REAL_SCHUR_H
#define EIGEN_REAL_SCHUR_H
#include "./HessenbergDecomposition.h"
/** \eigenvalues_module \ingroup Eigenvalues_Module
* \nonstableyet
*
* \class RealSchur
*
* \brief Performs a real Schur decomposition of a square matrix
*/
template<typename _MatrixType> class RealSchur
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options, MaxColsAtCompileTime, 1> EigenvalueType;
/** \brief Constructor; computes Schur decomposition of given matrix. */
RealSchur(const MatrixType& matrix)
: m_matT(matrix.rows(),matrix.cols()),
m_matU(matrix.rows(),matrix.cols()),
m_eivalues(matrix.rows()),
m_isInitialized(false)
{
compute(matrix);
}
/** \brief Returns the orthogonal matrix in the Schur decomposition. */
const MatrixType& matrixU() const
{
ei_assert(m_isInitialized && "RealSchur is not initialized.");
return m_matU;
}
/** \brief Returns the quasi-triangular matrix in the Schur decomposition. */
const MatrixType& matrixT() const
{
ei_assert(m_isInitialized && "RealSchur is not initialized.");
return m_matT;
}
/** \brief Returns vector of eigenvalues.
*
* This function will likely be removed. */
const EigenvalueType& eigenvalues() const
{
ei_assert(m_isInitialized && "RealSchur is not initialized.");
return m_eivalues;
}
/** \brief Computes Schur decomposition of given matrix. */
void compute(const MatrixType& matrix);
private:
MatrixType m_matT;
MatrixType m_matU;
EigenvalueType m_eivalues;
bool m_isInitialized;
void hqr2();
};
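A minimal usage sketch for the new class, in the style of the other documentation snippets (illustrative only, not an example shipped with this commit):

MatrixXd A = MatrixXd::Random(5,5);
RealSchur<MatrixXd> schurOfA(A);
MatrixXd U = schurOfA.matrixU();  // orthogonal
MatrixXd T = schurOfA.matrixT();  // quasi-triangular (real Schur form)
cout << "U * T * U^T - A is close to zero:" << endl
     << U * T * U.transpose() - A << endl;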
template<typename MatrixType>
void RealSchur<MatrixType>::compute(const MatrixType& matrix)
{
assert(matrix.cols() == matrix.rows());
// Reduce to Hessenberg form
// TODO skip Q if skipU = true
HessenbergDecomposition<MatrixType> hess(matrix);
m_matT = hess.matrixH();
m_matU = hess.matrixQ();
// Reduce to Real Schur form
hqr2();
m_isInitialized = true;
}
template<typename MatrixType>
void RealSchur<MatrixType>::hqr2()
{
// This is derived from the Algol procedure hqr2,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
// Initialize
const int size = m_matU.cols();
int n = size-1;
const int low = 0;
const int high = size-1;
Scalar exshift = 0.0;
Scalar p=0,q=0,r=0,s=0,z=0,w,x,y;
// Compute matrix norm
// FIXME to be efficient the following would requires a triangular reduxion code
// Scalar norm = m_matT.upper().cwiseAbs().sum() + m_matT.corner(BottomLeft,n,n).diagonal().cwiseAbs().sum();
Scalar norm = 0.0;
for (int j = 0; j < size; ++j)
{
norm += m_matT.row(j).segment(std::max(j-1,0), size-std::max(j-1,0)).cwiseAbs().sum();
}
// Outer loop over eigenvalue index
int iter = 0;
while (n >= low)
{
// Look for single small sub-diagonal element
int l = n;
while (l > low)
{
s = ei_abs(m_matT.coeff(l-1,l-1)) + ei_abs(m_matT.coeff(l,l));
if (s == 0.0)
s = norm;
if (ei_abs(m_matT.coeff(l,l-1)) < NumTraits<Scalar>::epsilon() * s)
break;
l--;
}
// Check for convergence
// One root found
if (l == n)
{
m_matT.coeffRef(n,n) = m_matT.coeff(n,n) + exshift;
m_eivalues.coeffRef(n) = ComplexScalar(m_matT.coeff(n,n), 0.0);
n--;
iter = 0;
}
else if (l == n-1) // Two roots found
{
w = m_matT.coeff(n,n-1) * m_matT.coeff(n-1,n);
p = (m_matT.coeff(n-1,n-1) - m_matT.coeff(n,n)) * Scalar(0.5);
q = p * p + w;
z = ei_sqrt(ei_abs(q));
m_matT.coeffRef(n,n) = m_matT.coeff(n,n) + exshift;
m_matT.coeffRef(n-1,n-1) = m_matT.coeff(n-1,n-1) + exshift;
x = m_matT.coeff(n,n);
// Scalar pair
if (q >= 0)
{
if (p >= 0)
z = p + z;
else
z = p - z;
m_eivalues.coeffRef(n-1) = ComplexScalar(x + z, 0.0);
m_eivalues.coeffRef(n) = ComplexScalar(z!=0.0 ? x - w / z : m_eivalues.coeff(n-1).real(), 0.0);
x = m_matT.coeff(n,n-1);
s = ei_abs(x) + ei_abs(z);
p = x / s;
q = z / s;
r = ei_sqrt(p * p+q * q);
p = p / r;
q = q / r;
// Row modification
for (int j = n-1; j < size; ++j)
{
z = m_matT.coeff(n-1,j);
m_matT.coeffRef(n-1,j) = q * z + p * m_matT.coeff(n,j);
m_matT.coeffRef(n,j) = q * m_matT.coeff(n,j) - p * z;
}
// Column modification
for (int i = 0; i <= n; ++i)
{
z = m_matT.coeff(i,n-1);
m_matT.coeffRef(i,n-1) = q * z + p * m_matT.coeff(i,n);
m_matT.coeffRef(i,n) = q * m_matT.coeff(i,n) - p * z;
}
// Accumulate transformations
for (int i = low; i <= high; ++i)
{
z = m_matU.coeff(i,n-1);
m_matU.coeffRef(i,n-1) = q * z + p * m_matU.coeff(i,n);
m_matU.coeffRef(i,n) = q * m_matU.coeff(i,n) - p * z;
}
}
else // Complex pair
{
m_eivalues.coeffRef(n-1) = ComplexScalar(x + p, z);
m_eivalues.coeffRef(n) = ComplexScalar(x + p, -z);
}
n = n - 2;
iter = 0;
}
else // No convergence yet
{
// Form shift
x = m_matT.coeff(n,n);
y = 0.0;
w = 0.0;
if (l < n)
{
y = m_matT.coeff(n-1,n-1);
w = m_matT.coeff(n,n-1) * m_matT.coeff(n-1,n);
}
// Wilkinson's original ad hoc shift
if (iter == 10)
{
exshift += x;
for (int i = low; i <= n; ++i)
m_matT.coeffRef(i,i) -= x;
s = ei_abs(m_matT.coeff(n,n-1)) + ei_abs(m_matT.coeff(n-1,n-2));
x = y = Scalar(0.75) * s;
w = Scalar(-0.4375) * s * s;
}
// MATLAB's new ad hoc shift
if (iter == 30)
{
s = Scalar((y - x) / 2.0);
s = s * s + w;
if (s > 0)
{
s = ei_sqrt(s);
if (y < x)
s = -s;
s = Scalar(x - w / ((y - x) / 2.0 + s));
for (int i = low; i <= n; ++i)
m_matT.coeffRef(i,i) -= s;
exshift += s;
x = y = w = Scalar(0.964);
}
}
iter = iter + 1; // (Could check iteration count here.)
// Look for two consecutive small sub-diagonal elements
int m = n-2;
while (m >= l)
{
z = m_matT.coeff(m,m);
r = x - z;
s = y - z;
p = (r * s - w) / m_matT.coeff(m+1,m) + m_matT.coeff(m,m+1);
q = m_matT.coeff(m+1,m+1) - z - r - s;
r = m_matT.coeff(m+2,m+1);
s = ei_abs(p) + ei_abs(q) + ei_abs(r);
p = p / s;
q = q / s;
r = r / s;
if (m == l) {
break;
}
if (ei_abs(m_matT.coeff(m,m-1)) * (ei_abs(q) + ei_abs(r)) <
NumTraits<Scalar>::epsilon() * (ei_abs(p) * (ei_abs(m_matT.coeff(m-1,m-1)) + ei_abs(z) +
ei_abs(m_matT.coeff(m+1,m+1)))))
{
break;
}
m--;
}
for (int i = m+2; i <= n; ++i)
{
m_matT.coeffRef(i,i-2) = 0.0;
if (i > m+2)
m_matT.coeffRef(i,i-3) = 0.0;
}
// Double QR step involving rows l:n and columns m:n
for (int k = m; k <= n-1; ++k)
{
int notlast = (k != n-1);
if (k != m) {
p = m_matT.coeff(k,k-1);
q = m_matT.coeff(k+1,k-1);
r = notlast ? m_matT.coeff(k+2,k-1) : Scalar(0);
x = ei_abs(p) + ei_abs(q) + ei_abs(r);
if (x != 0.0)
{
p = p / x;
q = q / x;
r = r / x;
}
}
if (x == 0.0)
break;
s = ei_sqrt(p * p + q * q + r * r);
if (p < 0)
s = -s;
if (s != 0)
{
if (k != m)
m_matT.coeffRef(k,k-1) = -s * x;
else if (l != m)
m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);
p = p + s;
x = p / s;
y = q / s;
z = r / s;
q = q / p;
r = r / p;
// Row modification
for (int j = k; j < size; ++j)
{
p = m_matT.coeff(k,j) + q * m_matT.coeff(k+1,j);
if (notlast)
{
p = p + r * m_matT.coeff(k+2,j);
m_matT.coeffRef(k+2,j) = m_matT.coeff(k+2,j) - p * z;
}
m_matT.coeffRef(k,j) = m_matT.coeff(k,j) - p * x;
m_matT.coeffRef(k+1,j) = m_matT.coeff(k+1,j) - p * y;
}
// Column modification
for (int i = 0; i <= std::min(n,k+3); ++i)
{
p = x * m_matT.coeff(i,k) + y * m_matT.coeff(i,k+1);
if (notlast)
{
p = p + z * m_matT.coeff(i,k+2);
m_matT.coeffRef(i,k+2) = m_matT.coeff(i,k+2) - p * r;
}
m_matT.coeffRef(i,k) = m_matT.coeff(i,k) - p;
m_matT.coeffRef(i,k+1) = m_matT.coeff(i,k+1) - p * q;
}
// Accumulate transformations
for (int i = low; i <= high; ++i)
{
p = x * m_matU.coeff(i,k) + y * m_matU.coeff(i,k+1);
if (notlast)
{
p = p + z * m_matU.coeff(i,k+2);
m_matU.coeffRef(i,k+2) = m_matU.coeff(i,k+2) - p * r;
}
m_matU.coeffRef(i,k) = m_matU.coeff(i,k) - p;
m_matU.coeffRef(i,k+1) = m_matU.coeff(i,k+1) - p * q;
}
} // (s != 0)
} // k loop
} // check convergence
} // while (n >= low)
}
#endif // EIGEN_REAL_SCHUR_H

View File

@ -439,8 +439,7 @@ struct ei_solve_retval<PartialPivLU<_MatrixType>, Rhs>
* Step 3: replace c by the solution x to Ux = c.
*/
const int size = dec().matrixLU().rows();
ei_assert(rhs().rows() == size);
ei_assert(rhs().rows() == dec().matrixLU().rows());
// Step 1
dst = dec().permutationP() * rhs();

View File

@ -477,3 +477,8 @@ DIV.eimainmenu {
/* border-top: solid; */
/* border-bottom: solid; */
}
/* center version number on main page */
H3.version {
text-align: center;
}

View File

@ -0,0 +1,16 @@
MatrixXd A = MatrixXd::Random(6,6);
cout << "Here is a random 6x6 matrix, A:" << endl << A << endl << endl;
EigenSolver<MatrixXd> es(A);
cout << "The eigenvalues of A are:" << endl << es.eigenvalues() << endl;
cout << "The matrix of eigenvectors, V, is:" << endl << es.eigenvectors() << endl << endl;
complex<double> lambda = es.eigenvalues()[0];
cout << "Consider the first eigenvalue, lambda = " << lambda << endl;
VectorXcd v = es.eigenvectors().col(0);
cout << "If v is the corresponding eigenvector, then lambda * v = " << endl << lambda * v << endl;
cout << "... and A * v = " << endl << A.cast<complex<double> >() * v << endl << endl;
MatrixXcd D = es.eigenvalues().asDiagonal();
MatrixXcd V = es.eigenvectors();
cout << "Finally, V * D * V^(-1) = " << endl << V * D * V.inverse() << endl;

View File

@ -0,0 +1,6 @@
EigenSolver<MatrixXf> es;
MatrixXf A = MatrixXf::Random(4,4);
es.compute(A);
cout << "The eigenvalues of A are: " << es.eigenvalues().transpose() << endl;
es.compute(A + MatrixXf::Identity(4,4)); // re-use es to compute eigenvalues of A+I
cout << "The eigenvalues of A+I are: " << es.eigenvalues().transpose() << endl;

View File

@ -0,0 +1,4 @@
MatrixXd ones = MatrixXd::Ones(3,3);
EigenSolver<MatrixXd> es(ones);
cout << "The eigenvalues of the 3x3 matrix of ones are:"
<< endl << es.eigenvalues() << endl;

View File

@ -0,0 +1,4 @@
MatrixXd ones = MatrixXd::Ones(3,3);
EigenSolver<MatrixXd> es(ones);
cout << "The first eigenvector of the 3x3 matrix of ones is:"
<< endl << es.eigenvectors().col(0) << endl;

View File

@ -0,0 +1,9 @@
MatrixXd A = MatrixXd::Random(6,6);
cout << "Here is a random 6x6 matrix, A:" << endl << A << endl << endl;
EigenSolver<MatrixXd> es(A);
MatrixXd D = es.pseudoEigenvalueMatrix();
MatrixXd V = es.pseudoEigenvectors();
cout << "The pseudo-eigenvalue matrix D is:" << endl << D << endl;
cout << "The pseudo-eigenvector matrix V is:" << endl << V << endl;
cout << "Finally, V * D * V^(-1) = " << endl << V * D * V.inverse() << endl;

View File

@ -0,0 +1,6 @@
MatrixXcf A = MatrixXcf::Random(4,4);
HessenbergDecomposition<MatrixXcf> hd(4);
hd.compute(A);
cout << "The matrix H in the decomposition of A is:" << endl << hd.matrixH() << endl;
hd.compute(2*A); // re-use hd to compute and store decomposition of 2A
cout << "The matrix H in the decomposition of 2A is:" << endl << hd.matrixH() << endl;

View File

@ -0,0 +1,8 @@
Matrix4f A = MatrixXf::Random(4,4);
cout << "Here is a random 4x4 matrix:" << endl << A << endl;
HessenbergDecomposition<MatrixXf> hessOfA(A);
MatrixXf H = hessOfA.matrixH();
cout << "The Hessenberg matrix H is:" << endl << H << endl;
MatrixXf Q = hessOfA.matrixQ();
cout << "The orthogonal matrix Q is:" << endl << Q << endl;
cout << "Q H Q^T is:" << endl << Q * H * Q.transpose() << endl;

View File

@ -0,0 +1,9 @@
Matrix4d A = Matrix4d::Random(4,4);
cout << "Here is a random 4x4 matrix:" << endl << A << endl;
HessenbergDecomposition<Matrix4d> hessOfA(A);
Matrix4d pm = hessOfA.packedMatrix();
cout << "The packed matrix M is:" << endl << pm << endl;
cout << "The upper Hessenberg part corresponds to the matrix H, which is:"
<< endl << hessOfA.matrixH() << endl;
Vector3d hc = hessOfA.householderCoefficients();
cout << "The vector of Householder coefficients is:" << endl << hc << endl;

View File

@ -43,6 +43,9 @@ namespace Eigen {
/** \ingroup Unsupported_modules
* \defgroup NumericalDiff_Module */
/** \ingroup Unsupported_modules
* \defgroup Polynomials_Module */
/** \ingroup Unsupported_modules
* \defgroup Skyline_Module */

View File

@ -138,7 +138,9 @@ ei_add_test(qr)
ei_add_test(qr_colpivoting)
ei_add_test(qr_fullpivoting)
ei_add_test(upperbidiagonalization)
ei_add_test(hessenberg " " "${GSL_LIBRARIES}")
ei_add_test(hessenberg)
ei_add_test(schur_real)
ei_add_test(schur_complex)
ei_add_test(eigensolver_selfadjoint " " "${GSL_LIBRARIES}")
ei_add_test(eigensolver_generic " " "${GSL_LIBRARIES}")
ei_add_test(eigensolver_complex)

View File

@ -33,6 +33,7 @@
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>
#include <gsl/gsl_poly.h>
namespace Eigen {
@ -69,6 +70,27 @@ template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex> struct
gsl_eigen_gensymmv_free(w);
free(a);
}
template<class EIGEN_VECTOR, class EIGEN_ROOTS>
static void eigen_poly_solve(const EIGEN_VECTOR& poly, EIGEN_ROOTS& roots )
{
const int deg = poly.size()-1;
double *z = new double[2*deg];
double *a = new double[poly.size()];
for( int i=0; i<poly.size(); ++i ){ a[i] = poly[i]; }
gsl_poly_complex_workspace * w = gsl_poly_complex_workspace_alloc (poly.size());
gsl_poly_complex_solve(a, poly.size(), w, z);
gsl_poly_complex_workspace_free (w);
for( int i=0; i<deg; ++i )
{
roots[i].real() = z[2*i];
roots[i].imag() = z[2*i+1];
}
delete[] a;
delete[] z;
}
};
template<typename Scalar> struct GslTraits<Scalar,true>

View File

@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@ -28,19 +29,36 @@
template<typename Scalar,int Size> void hessenberg(int size = Size)
{
typedef Matrix<Scalar,Size,Size> MatrixType;
MatrixType m = MatrixType::Random(size,size);
HessenbergDecomposition<MatrixType> hess(m);
VERIFY_IS_APPROX(m, hess.matrixQ() * hess.matrixH() * hess.matrixQ().adjoint());
// Test basic functionality: A = U H U* and H is Hessenberg
for(int counter = 0; counter < g_repeat; ++counter) {
MatrixType m = MatrixType::Random(size,size);
HessenbergDecomposition<MatrixType> hess(m);
VERIFY_IS_APPROX(m, hess.matrixQ() * hess.matrixH() * hess.matrixQ().adjoint());
MatrixType H = hess.matrixH();
for(int row = 2; row < size; ++row) {
for(int col = 0; col < row-1; ++col) {
VERIFY(H(row,col) == (typename MatrixType::Scalar)0);
}
}
}
// Test whether compute() and constructor returns same result
MatrixType A = MatrixType::Random(size, size);
HessenbergDecomposition<MatrixType> cs1;
cs1.compute(A);
HessenbergDecomposition<MatrixType> cs2(A);
VERIFY_IS_EQUAL(cs1.matrixQ(), cs2.matrixQ());
VERIFY_IS_EQUAL(cs1.matrixH(), cs2.matrixH());
// TODO: Add tests for packedMatrix() and householderCoefficients()
}
void test_hessenberg()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(( hessenberg<std::complex<double>,1>() ));
CALL_SUBTEST_2(( hessenberg<std::complex<double>,2>() ));
CALL_SUBTEST_3(( hessenberg<std::complex<float>,4>() ));
CALL_SUBTEST_4(( hessenberg<float,Dynamic>(ei_random<int>(1,320)) ));
CALL_SUBTEST_5(( hessenberg<std::complex<double>,Dynamic>(ei_random<int>(1,320)) ));
}
CALL_SUBTEST_1(( hessenberg<std::complex<double>,1>() ));
CALL_SUBTEST_2(( hessenberg<std::complex<double>,2>() ));
CALL_SUBTEST_3(( hessenberg<std::complex<float>,4>() ));
CALL_SUBTEST_4(( hessenberg<float,Dynamic>(ei_random<int>(1,320)) ));
CALL_SUBTEST_5(( hessenberg<std::complex<double>,Dynamic>(ei_random<int>(1,320)) ));
}

test/schur_complex.cpp Normal file
View File

@ -0,0 +1,67 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra. Eigen itself is part of the KDE project.
//
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#include "main.h"
#include <Eigen/Eigenvalues>
template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTime)
{
typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
// Test basic functionality: T is triangular and A = U T U*
for(int counter = 0; counter < g_repeat; ++counter) {
MatrixType A = MatrixType::Random(size, size);
ComplexSchur<MatrixType> schurOfA(A);
ComplexMatrixType U = schurOfA.matrixU();
ComplexMatrixType T = schurOfA.matrixT();
for(int row = 1; row < size; ++row) {
for(int col = 0; col < row; ++col) {
VERIFY(T(row,col) == (typename MatrixType::Scalar)0);
}
}
VERIFY_IS_APPROX(A.template cast<ComplexScalar>(), U * T * U.adjoint());
}
// Test asserts when not initialized
ComplexSchur<MatrixType> csUninitialized;
VERIFY_RAISES_ASSERT(csUninitialized.matrixT());
VERIFY_RAISES_ASSERT(csUninitialized.matrixU());
// Test whether compute() and constructor returns same result
MatrixType A = MatrixType::Random(size, size);
ComplexSchur<MatrixType> cs1;
cs1.compute(A);
ComplexSchur<MatrixType> cs2(A);
VERIFY_IS_EQUAL(cs1.matrixT(), cs2.matrixT());
VERIFY_IS_EQUAL(cs1.matrixU(), cs2.matrixU());
}
void test_schur_complex()
{
CALL_SUBTEST_1(( schur<Matrix4cd>() ));
CALL_SUBTEST_2(( schur<MatrixXcf>(ei_random<int>(1,50)) ));
CALL_SUBTEST_3(( schur<Matrix<std::complex<float>, 1, 1> >() ));
CALL_SUBTEST_4(( schur<Matrix<float, 3, 3, Eigen::RowMajor> >() ));
}

test/schur_real.cpp Normal file
View File

@ -0,0 +1,75 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra. Eigen itself is part of the KDE project.
//
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#include "main.h"
#include <Eigen/Eigenvalues>
template<typename MatrixType> void verifyIsQuasiTriangular(const MatrixType& T)
{
const int size = T.cols();
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
// The "zeros" in the real Schur decomposition are only approximately zero
RealScalar norm = T.norm();
  // Check T is upper Hessenberg (zero below the first subdiagonal)
for(int row = 2; row < size; ++row) {
for(int col = 0; col < row - 1; ++col) {
VERIFY_IS_MUCH_SMALLER_THAN(T(row,col), norm);
}
}
// Check that any non-zero on the subdiagonal is followed by a zero and is
// part of a 2x2 diagonal block with imaginary eigenvalues.
for(int row = 1; row < size; ++row) {
if (!test_ei_isMuchSmallerThan(T(row,row-1), norm)) {
VERIFY(row == size-1 || test_ei_isMuchSmallerThan(T(row+1,row), norm));
Scalar tr = T(row-1,row-1) + T(row,row);
Scalar det = T(row-1,row-1) * T(row,row) - T(row-1,row) * T(row,row-1);
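      // the 2x2 block has complex conjugate eigenvalues exactly when the discriminant tr^2 - 4*det is negative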
VERIFY(4 * det > tr * tr);
}
}
}
template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTime)
{
// Test basic functionality: T is quasi-triangular and A = U T U*
for(int counter = 0; counter < g_repeat; ++counter) {
MatrixType A = MatrixType::Random(size, size);
RealSchur<MatrixType> schurOfA(A);
MatrixType U = schurOfA.matrixU();
MatrixType T = schurOfA.matrixT();
verifyIsQuasiTriangular(T);
VERIFY_IS_APPROX(A, U * T * U.transpose());
}
}
void test_schur_real()
{
CALL_SUBTEST_1(( schur<Matrix4f>() ));
CALL_SUBTEST_2(( schur<MatrixXd>(ei_random<int>(1,50)) ));
CALL_SUBTEST_3(( schur<Matrix<float, 1, 1> >() ));
CALL_SUBTEST_4(( schur<Matrix<double, 3, 3, Eigen::RowMajor> >() ));
}

View File

@ -1,4 +1,4 @@
set(Eigen_HEADERS AdolcForward BVH IterativeSolvers MatrixFunctions MoreVectorization AutoDiff AlignedVector3)
set(Eigen_HEADERS AdolcForward BVH IterativeSolvers MatrixFunctions MoreVectorization AutoDiff AlignedVector3 Polynomials)
install(FILES
${Eigen_HEADERS}

View File

@ -320,7 +320,7 @@ class FFT
// if the vector is strided, then we need to copy it to a packed temporary
Matrix<src_type,1,Dynamic> tmp;
if ( resize_input ) {
size_t ncopy = min(src.size(),src.size() + resize_input);
size_t ncopy = std::min(src.size(),src.size() + resize_input);
tmp.setZero(src.size() + resize_input);
if ( realfft && HasFlag(HalfSpectrum) ) {
// pad at the Nyquist bin

View File

@ -0,0 +1,137 @@
#ifndef EIGEN_POLYNOMIALS_MODULE_H
#define EIGEN_POLYNOMIALS_MODULE_H
#include <Eigen/Core>
#include <Eigen/src/Core/util/DisableMSVCWarnings.h>
#include <Eigen/QR>
// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
#ifndef EIGEN_HIDE_HEAVY_CODE
#define EIGEN_HIDE_HEAVY_CODE
#endif
#elif defined EIGEN_HIDE_HEAVY_CODE
#undef EIGEN_HIDE_HEAVY_CODE
#endif
namespace Eigen {
/** \ingroup Unsupported_modules
* \defgroup Polynomials_Module Polynomials module
*
* \nonstableyet
*
* \brief This module provides a QR based polynomial solver.
*
* To use this module, add
* \code
* #include <unsupported/Eigen/Polynomials>
* \endcode
* at the start of your source file.
*/
#include "src/Polynomials/PolynomialUtils.h"
#include "src/Polynomials/Companion.h"
#include "src/Polynomials/PolynomialSolver.h"
/**
\page polynomials Polynomials defines functions for dealing with polynomials
and a QR based polynomial solver.
\ingroup Polynomials_Module
The remainder of the page first documents the functions for evaluating and building
polynomials and for computing bounds on their roots, and then describes the QR based
polynomial solver.
\section polynomialUtils Convenient functions to deal with polynomials
\subsection roots_to_monicPolynomial
The function
\code
void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly )
\endcode
computes the coefficients \f$ a_i \f$ of
\f$ p(x) = a_0 + a_{1}x + ... + a_{n-1}x^{n-1} + x^n \f$
where \f$ p \f$ is known through its roots i.e. \f$ p(x) = (x-r_1)(x-r_2)...(x-r_n) \f$.
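For instance, a minimal sketch (the root values below are arbitrary, chosen only for illustration):
\code
Eigen::Vector3d roots(1., 2., 3.);
Eigen::Vector4d poly;
roots_to_monicPolynomial( roots, poly );  // poly now holds the coefficients of (x-1)(x-2)(x-3), i.e. [-6, 11, -6, 1]
\endcode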
\subsection poly_eval
The function
\code
T poly_eval( const Polynomials& poly, const T& x )
\endcode
evaluates a polynomial at a given point using a stabilized H&ouml;rner method.
The following code first computes, in the monomial basis, the coefficients of the monic polynomial that has the provided roots;
then, it evaluates the computed polynomial at those roots using a stabilized H&ouml;rner method.
\include PolynomialUtils1.cpp
Output: \verbinclude PolynomialUtils1.out
\subsection cauchy_bounds Cauchy bounds
The function
\code
Real cauchy_max_bound( const Polynomial& poly )
\endcode
provides a maximum bound (the Cauchy one: \f$C(p)\f$) for the absolute value of a root of the given polynomial i.e.
\f$ \forall r_i \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$,
\f$ |r_i| \le C(p) = \sum_{k=0}^{d} \left | \frac{a_k}{a_d} \right | \f$
The leading coefficient of \f$ p \f$ should be non-zero: \f$a_d \neq 0\f$.
The function
\code
Real cauchy_min_bound( const Polynomial& poly )
\endcode
provides a minimum bound (the Cauchy one: \f$c(p)\f$) for the absolute value of a non zero root of the given polynomial i.e.
\f$ \forall r_i \neq 0 \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$,
\f$ |r_i| \ge c(p) = \left( \sum_{k=0}^{d} \left | \frac{a_k}{a_0} \right | \right)^{-1} \f$
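Both bounds can be queried directly; a minimal sketch (the polynomial \f$ 1 + 3x^2 \f$ is arbitrary):
\code
Eigen::Vector3d poly(1., 0., 3.);        // 1 + 3x^2
double M = cauchy_max_bound( poly );     // every root r satisfies |r| <= M
double m = cauchy_min_bound( poly );     // every non-zero root r satisfies |r| >= m
\endcode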
\section qr_solver QR polynomial solver class
Computes the complex roots of a polynomial by computing the eigenvalues of the associated companion matrix with the QR algorithm.
The roots of \f$ p(x) = a_0 + a_1 x + a_2 x^2 + a_{3} x^3 + x^4 \f$ are the eigenvalues of the companion matrix
\f$
\left [
\begin{array}{cccc}
0 & 0 & 0 & -a_0 \\
1 & 0 & 0 & -a_1 \\
0 & 1 & 0 & -a_2 \\
0 & 0 & 1 & -a_3
\end{array} \right ]
\f$
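This relation can be checked directly with EigenSolver; a minimal sketch (not part of the module, with arbitrary coefficient values):
\code
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include <iostream>
int main()
{
  double a0 = 1., a1 = -2., a2 = 0.5, a3 = 3.;
  Eigen::Matrix4d C;
  C << 0, 0, 0, -a0,
       1, 0, 0, -a1,
       0, 1, 0, -a2,
       0, 0, 1, -a3;
  Eigen::EigenSolver<Eigen::Matrix4d> es( C );
  std::cout << es.eigenvalues() << std::endl;   // the four roots of p
}
\endcode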
However, the QR algorithm is not guaranteed to converge when there are several eigenvalues with the same modulus.
Therefore the current polynomial solver is guaranteed to provide a correct result only when the complex roots \f$r_1,r_2,...,r_d\f$ have distinct moduli, i.e.
\f$ \forall i,j \in [1;d],~ \| r_i \| \neq \| r_j \| \f$.
With 32-bit (float) floating-point types this problem shows up frequently.
However, for 64-bit (double) floating-point types and small polynomial degrees (<20),
correct accuracy is almost always reached even in these cases.
\include PolynomialSolver1.cpp
In the above example:
-# a simple use of the polynomial solver is shown;
-# the accuracy problem with the QR algorithm is presented: a polynomial with almost conjugate roots is provided to the solver.
Those roots have almost the same modulus, therefore the QR algorithm fails to converge: the accuracy
of the last root is poor;
-# a simple way to circumvent the problem is shown: use doubles instead of floats.
Output: \verbinclude PolynomialSolver1.out
*/
} // namespace Eigen
#include <Eigen/src/Core/util/EnableMSVCWarnings.h>
#endif // EIGEN_POLYNOMIALS_MODULE_H
/* vim: set filetype=cpp et sw=2 ts=2 ai: */

View File

@ -5,3 +5,4 @@ ADD_SUBDIRECTORY(MoreVectorization)
# ADD_SUBDIRECTORY(FFT)
# ADD_SUBDIRECTORY(Skyline)
ADD_SUBDIRECTORY(MatrixFunctions)
ADD_SUBDIRECTORY(Polynomials)

View File

@ -0,0 +1,6 @@
FILE(GLOB Eigen_Polynomials_SRCS "*.h")
INSTALL(FILES
${Eigen_Polynomials_SRCS}
DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Polynomials COMPONENT Devel
)

View File

@ -0,0 +1,281 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_COMPANION_H
#define EIGEN_COMPANION_H
// This file requires the user to include
// * Eigen/Core
// * Eigen/src/PolynomialSolver.h
#ifndef EIGEN_PARSED_BY_DOXYGEN
template <typename T>
T ei_radix(){ return 2; }
template <typename T>
T ei_radix2(){ return ei_radix<T>()*ei_radix<T>(); }
template<int Size>
struct ei_decrement_if_fixed_size
{
enum {
ret = (Size == Dynamic) ? Dynamic : Size-1 };
};
#endif
template< typename _Scalar, int _Deg >
class ei_companion
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
enum {
Deg = _Deg,
Deg_1=ei_decrement_if_fixed_size<Deg>::ret
};
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, Deg, 1> RightColumn;
//typedef DiagonalMatrix< Scalar, Deg_1, Deg_1 > BottomLeftDiagonal;
typedef Matrix<Scalar, Deg_1, 1> BottomLeftDiagonal;
typedef Matrix<Scalar, Deg, Deg> DenseCompanionMatrixType;
typedef Matrix< Scalar, _Deg, Deg_1 > LeftBlock;
typedef Matrix< Scalar, Deg_1, Deg_1 > BottomLeftBlock;
typedef Matrix< Scalar, 1, Deg_1 > LeftBlockFirstRow;
public:
EIGEN_STRONG_INLINE const _Scalar operator()( int row, int col ) const
{
if( m_bl_diag.rows() > col )
{
if( 0 < row ){ return m_bl_diag[col]; }
else{ return 0; }
}
else{ return m_monic[row]; }
}
public:
template<typename VectorType>
void setPolynomial( const VectorType& poly )
{
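      // the last column of the companion matrix stores the low-order coefficients, negated and divided by the leading coefficient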
const int deg = poly.size()-1;
m_monic = -1/poly[deg] * poly.head(deg);
//m_bl_diag.setIdentity( deg-1 );
m_bl_diag.setOnes(deg-1);
}
template<typename VectorType>
ei_companion( const VectorType& poly ){
setPolynomial( poly ); }
public:
DenseCompanionMatrixType denseMatrix() const
{
const int deg = m_monic.size();
const int deg_1 = deg-1;
DenseCompanionMatrixType companion(deg,deg);
companion <<
( LeftBlock(deg,deg_1)
<< LeftBlockFirstRow::Zero(1,deg_1),
BottomLeftBlock::Identity(deg-1,deg-1)*m_bl_diag.asDiagonal() ).finished()
, m_monic;
return companion;
}
protected:
/** Helper function for the balancing algorithm.
* \returns true if the row and the column, having colNorm and rowNorm
* as norms, are balanced, false otherwise.
   * colB and rowB are respectively the multipliers for
* the column and the row in order to balance them.
* */
bool balanced( Scalar colNorm, Scalar rowNorm,
bool& isBalanced, Scalar& colB, Scalar& rowB );
/** Helper function for the balancing algorithm.
* \returns true if the row and the column, having colNorm and rowNorm
* as norms, are balanced, false otherwise.
   * colB and rowB are respectively the multipliers for
* the column and the row in order to balance them.
* */
bool balancedR( Scalar colNorm, Scalar rowNorm,
bool& isBalanced, Scalar& colB, Scalar& rowB );
public:
/**
* Balancing algorithm from B. N. PARLETT and C. REINSCH (1969)
* "Balancing a matrix for calculation of eigenvalues and eigenvectors"
* adapted to the case of companion matrices.
   * A matrix with non-zero rows and non-zero columns is balanced
   * for a certain norm if, for all i, the i-th row and the i-th column
   * have the same norm.
*/
void balance();
protected:
RightColumn m_monic;
BottomLeftDiagonal m_bl_diag;
};
template< typename _Scalar, int _Deg >
inline
bool ei_companion<_Scalar,_Deg>::balanced( Scalar colNorm, Scalar rowNorm,
bool& isBalanced, Scalar& colB, Scalar& rowB )
{
if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; }
else
{
//To find the balancing coefficients, if the radix is 2,
//one finds \f$ \sigma \f$ such that
// \f$ 2^{2\sigma-1} < rowNorm / colNorm \le 2^{2\sigma+1} \f$
// then the balancing coefficient for the row is \f$ 1/2^{\sigma} \f$
// and the balancing coefficient for the column is \f$ 2^{\sigma} \f$
rowB = rowNorm / ei_radix<Scalar>();
colB = Scalar(1);
const Scalar s = colNorm + rowNorm;
while (colNorm < rowB)
{
colB *= ei_radix<Scalar>();
colNorm *= ei_radix2<Scalar>();
}
rowB = rowNorm * ei_radix<Scalar>();
while (colNorm >= rowB)
{
colB /= ei_radix<Scalar>();
colNorm /= ei_radix2<Scalar>();
}
//This line is used to avoid insubstantial balancing
if ((rowNorm + colNorm) < Scalar(0.95) * s * colB)
{
isBalanced = false;
rowB = Scalar(1) / colB;
return false;
}
else{
return true; }
}
}
template< typename _Scalar, int _Deg >
inline
bool ei_companion<_Scalar,_Deg>::balancedR( Scalar colNorm, Scalar rowNorm,
bool& isBalanced, Scalar& colB, Scalar& rowB )
{
if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; }
else
{
/**
* Set the norm of the column and the row to the geometric mean
* of the row and column norm
*/
const _Scalar q = colNorm/rowNorm;
if( !ei_isApprox( q, _Scalar(1) ) )
{
rowB = ei_sqrt( colNorm/rowNorm );
colB = Scalar(1)/rowB;
isBalanced = false;
return false;
}
else{
return true; }
}
}
template< typename _Scalar, int _Deg >
void ei_companion<_Scalar,_Deg>::balance()
{
EIGEN_STATIC_ASSERT( 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE );
const int deg = m_monic.size();
const int deg_1 = deg-1;
bool hasConverged=false;
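  // iterate diagonal similarity scalings until no row/column pair needs further rebalancing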
while( !hasConverged )
{
hasConverged = true;
Scalar colNorm,rowNorm;
Scalar colB,rowB;
//First row, first column excluding the diagonal
//==============================================
colNorm = ei_abs(m_bl_diag[0]);
rowNorm = ei_abs(m_monic[0]);
//Compute balancing of the row and the column
if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) )
{
m_bl_diag[0] *= colB;
m_monic[0] *= rowB;
}
//Middle rows and columns excluding the diagonal
//==============================================
for( int i=1; i<deg_1; ++i )
{
// column norm, excluding the diagonal
colNorm = ei_abs(m_bl_diag[i]);
// row norm, excluding the diagonal
rowNorm = ei_abs(m_bl_diag[i-1]) + ei_abs(m_monic[i]);
//Compute balancing of the row and the column
if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) )
{
m_bl_diag[i] *= colB;
m_bl_diag[i-1] *= rowB;
m_monic[i] *= rowB;
}
}
//Last row, last column excluding the diagonal
//============================================
const int ebl = m_bl_diag.size()-1;
VectorBlock<RightColumn,Deg_1> headMonic( m_monic, 0, deg_1 );
colNorm = headMonic.array().abs().sum();
rowNorm = ei_abs( m_bl_diag[ebl] );
//Compute balancing of the row and the column
if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) )
{
headMonic *= colB;
m_bl_diag[ebl] *= rowB;
}
}
}
#endif // EIGEN_COMPANION_H

View File

@ -0,0 +1,395 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_POLYNOMIAL_SOLVER_H
#define EIGEN_POLYNOMIAL_SOLVER_H
/** \ingroup Polynomials_Module
 * \class PolynomialSolverBase
*
* \brief Defined to be inherited by polynomial solvers: it provides
* convenient methods such as
* - real roots,
* - greatest, smallest complex roots,
* - real roots with greatest, smallest absolute real value,
* - greatest, smallest real roots.
*
 * It stores the set of roots as a vector of complex numbers.
*
*/
template< typename _Scalar, int _Deg >
class PolynomialSolverBase
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef std::complex<RealScalar> RootType;
typedef Matrix<RootType,_Deg,1> RootsType;
protected:
template< typename OtherPolynomial >
inline void setPolynomial( const OtherPolynomial& poly ){
m_roots.resize(poly.size()); }
public:
template< typename OtherPolynomial >
inline PolynomialSolverBase( const OtherPolynomial& poly ){
      setPolynomial( poly ); }
inline PolynomialSolverBase(){}
public:
/** \returns the complex roots of the polynomial */
inline const RootsType& roots() const { return m_roots; }
public:
    /** Clears and fills the back insertion sequence with the real roots of the polynomial,
     * i.e. the real parts of the complex roots whose imaginary part has an absolute
     * value smaller than absImaginaryThreshold.
* absImaginaryThreshold takes the dummy_precision associated
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
*
* \param[out] bi_seq : the back insertion sequence (stl concept)
* \param[in] absImaginaryThreshold : the maximum bound of the imaginary part of a complex
* number that is considered as real.
* */
template<typename Stl_back_insertion_sequence>
inline void realRoots( Stl_back_insertion_sequence& bi_seq,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
bi_seq.clear();
for( int i=0; i<m_roots.size(); ++i )
{
if( ei_abs( m_roots[i].imag() ) < absImaginaryThreshold ){
bi_seq.push_back( m_roots[i].real() ); }
}
}
protected:
template<typename squaredNormBinaryPredicate>
inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const
{
int res=0;
RealScalar norm2 = ei_abs2( m_roots[0] );
for( int i=1; i<m_roots.size(); ++i )
{
const RealScalar currNorm2 = ei_abs2( m_roots[i] );
if( pred( currNorm2, norm2 ) ){
res=i; norm2=currNorm2; }
}
return m_roots[res];
}
public:
/**
* \returns the complex root with greatest norm.
*/
inline const RootType& greatestRoot() const
{
std::greater<Scalar> greater;
return selectComplexRoot_withRespectToNorm( greater );
}
/**
* \returns the complex root with smallest norm.
*/
inline const RootType& smallestRoot() const
{
std::less<Scalar> less;
return selectComplexRoot_withRespectToNorm( less );
}
protected:
template<typename squaredRealPartBinaryPredicate>
inline const RealScalar& selectRealRoot_withRespectToAbsRealPart(
squaredRealPartBinaryPredicate& pred,
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
hasArealRoot = false;
int res=0;
RealScalar abs2;
for( int i=0; i<m_roots.size(); ++i )
{
if( ei_abs( m_roots[i].imag() ) < absImaginaryThreshold )
{
if( !hasArealRoot )
{
hasArealRoot = true;
res = i;
abs2 = m_roots[i].real() * m_roots[i].real();
}
else
{
const RealScalar currAbs2 = m_roots[i].real() * m_roots[i].real();
if( pred( currAbs2, abs2 ) )
{
abs2 = currAbs2;
res = i;
}
}
}
else
{
if( ei_abs( m_roots[i].imag() ) < ei_abs( m_roots[res].imag() ) ){
res = i; }
}
}
return m_roots[res].real();
}
template<typename RealPartBinaryPredicate>
inline const RealScalar& selectRealRoot_withRespectToRealPart(
RealPartBinaryPredicate& pred,
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
hasArealRoot = false;
int res=0;
RealScalar val;
for( int i=0; i<m_roots.size(); ++i )
{
if( ei_abs( m_roots[i].imag() ) < absImaginaryThreshold )
{
if( !hasArealRoot )
{
hasArealRoot = true;
res = i;
val = m_roots[i].real();
}
else
{
const RealScalar curr = m_roots[i].real();
if( pred( curr, val ) )
{
val = curr;
res = i;
}
}
}
else
{
if( ei_abs( m_roots[i].imag() ) < ei_abs( m_roots[res].imag() ) ){
res = i; }
}
}
return m_roots[res].real();
}
public:
/**
* \returns a real root with greatest absolute magnitude.
* A real root is defined as the real part of a complex root with absolute imaginary
   * part smaller than absImaginaryThreshold.
* absImaginaryThreshold takes the dummy_precision associated
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
* If no real root is found the boolean hasArealRoot is set to false and the real part of
* the root with smallest absolute imaginary part is returned instead.
*
* \param[out] hasArealRoot : boolean true if a real root is found according to the
* absImaginaryThreshold criterion, false otherwise.
* \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
* whether or not a root is real.
*/
inline const RealScalar& absGreatestRealRoot(
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
std::greater<Scalar> greater;
return selectRealRoot_withRespectToAbsRealPart( greater, hasArealRoot, absImaginaryThreshold );
}
/**
* \returns a real root with smallest absolute magnitude.
* A real root is defined as the real part of a complex root with absolute imaginary
   * part smaller than absImaginaryThreshold.
* absImaginaryThreshold takes the dummy_precision associated
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
* If no real root is found the boolean hasArealRoot is set to false and the real part of
* the root with smallest absolute imaginary part is returned instead.
*
* \param[out] hasArealRoot : boolean true if a real root is found according to the
* absImaginaryThreshold criterion, false otherwise.
* \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
* whether or not a root is real.
*/
inline const RealScalar& absSmallestRealRoot(
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
std::less<Scalar> less;
return selectRealRoot_withRespectToAbsRealPart( less, hasArealRoot, absImaginaryThreshold );
}
/**
* \returns the real root with greatest value.
* A real root is defined as the real part of a complex root with absolute imaginary
   * part smaller than absImaginaryThreshold.
* absImaginaryThreshold takes the dummy_precision associated
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
* If no real root is found the boolean hasArealRoot is set to false and the real part of
* the root with smallest absolute imaginary part is returned instead.
*
* \param[out] hasArealRoot : boolean true if a real root is found according to the
* absImaginaryThreshold criterion, false otherwise.
* \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
* whether or not a root is real.
*/
inline const RealScalar& greatestRealRoot(
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
std::greater<Scalar> greater;
return selectRealRoot_withRespectToRealPart( greater, hasArealRoot, absImaginaryThreshold );
}
/**
* \returns the real root with smallest value.
* A real root is defined as the real part of a complex root with absolute imaginary
   * part smaller than absImaginaryThreshold.
* absImaginaryThreshold takes the dummy_precision associated
* with the _Scalar template parameter of the PolynomialSolver class as the default value.
* If no real root is found the boolean hasArealRoot is set to false and the real part of
* the root with smallest absolute imaginary part is returned instead.
*
* \param[out] hasArealRoot : boolean true if a real root is found according to the
* absImaginaryThreshold criterion, false otherwise.
* \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
* whether or not a root is real.
*/
inline const RealScalar& smallestRealRoot(
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
std::less<Scalar> less;
return selectRealRoot_withRespectToRealPart( less, hasArealRoot, absImaginaryThreshold );
}
protected:
RootsType m_roots;
};
#define EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( BASE ) \
typedef typename BASE::Scalar Scalar; \
typedef typename BASE::RealScalar RealScalar; \
typedef typename BASE::RootType RootType; \
typedef typename BASE::RootsType RootsType;
/** \ingroup Polynomials_Module
*
* \class PolynomialSolver
*
* \brief A polynomial solver
*
* Computes the complex roots of a real polynomial.
*
* \param _Scalar the scalar type, i.e., the type of the polynomial coefficients
* \param _Deg the degree of the polynomial, can be a compile time value or Dynamic.
* Notice that the number of polynomial coefficients is _Deg+1.
*
* This class implements a polynomial solver and provides convenient methods such as
* - real roots,
* - greatest, smallest complex roots,
* - real roots with greatest, smallest absolute real value.
* - greatest, smallest real roots.
*
   * WARNING: this polynomial solver is experimental, part of the unsupported Eigen modules.
*
*
   * Currently, a QR algorithm is used to compute the eigenvalues of the companion matrix of
   * the polynomial, which yield its roots.
   * This assumes that the complex moduli of the roots are all distinct, e.g. there should
   * be no multiple roots or conjugate roots.
   * With 32-bit (float) floating-point types this problem shows up frequently.
   * However, for 64-bit (double) floating-point types and small polynomial degrees (<20),
   * correct accuracy is almost always reached even in these cases.
*/
template< typename _Scalar, int _Deg >
class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
typedef PolynomialSolverBase<_Scalar,_Deg> PS_Base;
EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base )
typedef Matrix<Scalar,_Deg,_Deg> CompanionMatrixType;
typedef EigenSolver<CompanionMatrixType> EigenSolverType;
public:
/** Computes the complex roots of a new polynomial. */
template< typename OtherPolynomial >
void compute( const OtherPolynomial& poly )
{
assert( Scalar(0) != poly[poly.size()-1] );
ei_companion<Scalar,_Deg> companion( poly );
companion.balance();
m_eigenSolver.compute( companion.denseMatrix() );
m_roots = m_eigenSolver.eigenvalues();
}
public:
template< typename OtherPolynomial >
inline PolynomialSolver( const OtherPolynomial& poly ){
compute( poly ); }
inline PolynomialSolver(){}
protected:
using PS_Base::m_roots;
EigenSolverType m_eigenSolver;
};
template< typename _Scalar >
class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
{
public:
typedef PolynomialSolverBase<_Scalar,1> PS_Base;
EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base )
public:
/** Computes the complex roots of a new polynomial. */
template< typename OtherPolynomial >
void compute( const OtherPolynomial& poly )
{
assert( Scalar(0) != poly[poly.size()-1] );
m_roots[0] = -poly[0]/poly[poly.size()-1];
}
protected:
using PS_Base::m_roots;
};
#endif // EIGEN_POLYNOMIAL_SOLVER_H

View File

@ -0,0 +1,153 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_POLYNOMIAL_UTILS_H
#define EIGEN_POLYNOMIAL_UTILS_H
/** \ingroup Polynomials_Module
 * \returns the evaluation of the polynomial at x using the Horner algorithm.
*
* \param[in] poly : the vector of coefficients of the polynomial ordered
* by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
* e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
* \param[in] x : the value to evaluate the polynomial at.
*
* <i><b>Note for stability:</b></i>
* <dd> \f$ |x| \le 1 \f$ </dd>
*/
template <typename Polynomials, typename T>
inline
T poly_eval_horner( const Polynomials& poly, const T& x )
{
T val=poly[poly.size()-1];
for( int i=poly.size()-2; i>=0; --i ){
val = val*x + poly[i]; }
return val;
}
/** \ingroup Polynomials_Module
 * \returns the evaluation of the polynomial at x using a stabilized Horner algorithm.
*
* \param[in] poly : the vector of coefficients of the polynomial ordered
* by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
* e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
* \param[in] x : the value to evaluate the polynomial at.
*/
template <typename Polynomials, typename T>
inline
T poly_eval( const Polynomials& poly, const T& x )
{
typedef typename NumTraits<T>::Real Real;
if( ei_abs2( x ) <= Real(1) ){
return poly_eval_horner( poly, x ); }
else
{
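    // for |x| > 1, evaluate the reversed polynomial at 1/x and rescale by x^(n-1) to avoid loss of accuracy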
T val=poly[0];
T inv_x = T(1)/x;
for( int i=1; i<poly.size(); ++i ){
val = val*inv_x + poly[i]; }
return std::pow(x,(T)(poly.size()-1)) * val;
}
}
/** \ingroup Polynomials_Module
* \returns a maximum bound for the absolute value of any root of the polynomial.
*
* \param[in] poly : the vector of coefficients of the polynomial ordered
* by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
* e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
*
* <i><b>Precondition:</b></i>
* <dd> the leading coefficient of the input polynomial poly must be non zero </dd>
*/
template <typename Polynomial>
inline
typename NumTraits<typename Polynomial::Scalar>::Real cauchy_max_bound( const Polynomial& poly )
{
typedef typename Polynomial::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real Real;
assert( Scalar(0) != poly[poly.size()-1] );
const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1];
Real cb(0);
for( int i=0; i<poly.size()-1; ++i ){
cb += ei_abs(poly[i]*inv_leading_coeff); }
return cb + Real(1);
}
/** \ingroup Polynomials_Module
* \returns a minimum bound for the absolute value of any non zero root of the polynomial.
* \param[in] poly : the vector of coefficients of the polynomial ordered
* by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
* e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
*/
template <typename Polynomial>
inline
typename NumTraits<typename Polynomial::Scalar>::Real cauchy_min_bound( const Polynomial& poly )
{
typedef typename Polynomial::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real Real;
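  // find the lowest-degree non-zero coefficient: the bound is expressed relative to it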
int i=0;
while( i<poly.size()-1 && Scalar(0) == poly(i) ){ ++i; }
if( poly.size()-1 == i ){
return Real(1); }
const Scalar inv_min_coeff = Scalar(1)/poly[i];
Real cb(1);
for( int j=i+1; j<poly.size(); ++j ){
cb += ei_abs(poly[j]*inv_min_coeff); }
return Real(1)/cb;
}
/** \ingroup Polynomials_Module
* Given the roots of a polynomial compute the coefficients in the
 * monomial basis of the monic polynomial with the same roots and minimal degree.
* If RootVector is a vector of complexes, Polynomial should also be a vector
* of complexes.
* \param[in] rv : a vector containing the roots of a polynomial.
* \param[out] poly : the vector of coefficients of the polynomial ordered
* by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
* e.g. \f$ 3 + x^2 \f$ is stored as a vector \f$ [ 3, 0, 1 ] \f$.
*/
template <typename RootVector, typename Polynomial>
void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly )
{
typedef typename Polynomial::Scalar Scalar;
poly.setZero( rv.size()+1 );
poly[0] = -rv[0]; poly[1] = Scalar(1);
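  // multiply the current polynomial by (x - rv[i]), updating the coefficients in place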
for( int i=1; i<(int)rv.size(); ++i )
{
for( int j=i+1; j>0; --j ){ poly[j] = poly[j-1] - rv[i]*poly[j]; }
poly[0] = -rv[i]*poly[0];
}
}
#endif // EIGEN_POLYNOMIAL_UTILS_H

View File

@ -0,0 +1,53 @@
#include <unsupported/Eigen/Polynomials>
#include <vector>
#include <iostream>
using namespace Eigen;
using namespace std;
int main()
{
typedef Matrix<double,5,1> Vector5d;
Vector5d roots = Vector5d::Random();
cout << "Roots: " << roots.transpose() << endl;
Eigen::Matrix<double,6,1> polynomial;
roots_to_monicPolynomial( roots, polynomial );
PolynomialSolver<double,5> psolve( polynomial );
cout << "Complex roots: " << psolve.roots().transpose() << endl;
std::vector<double> realRoots;
psolve.realRoots( realRoots );
Map<Vector5d> mapRR( &realRoots[0] );
cout << "Real roots: " << mapRR.transpose() << endl;
cout << endl;
cout << "Illustration of the convergence problem with the QR algorithm: " << endl;
cout << "---------------------------------------------------------------" << endl;
Eigen::Matrix<float,7,1> hardCase_polynomial;
hardCase_polynomial <<
-0.957, 0.9219, 0.3516, 0.9453, -0.4023, -0.5508, -0.03125;
cout << "Hard case polynomial defined by floats: " << hardCase_polynomial.transpose() << endl;
PolynomialSolver<float,6> psolvef( hardCase_polynomial );
cout << "Complex roots: " << psolvef.roots().transpose() << endl;
Eigen::Matrix<float,6,1> evals;
for( int i=0; i<6; ++i ){ evals[i] = std::abs( poly_eval( hardCase_polynomial, psolvef.roots()[i] ) ); }
cout << "Norms of the evaluations of the polynomial at the roots: " << evals.transpose() << endl << endl;
cout << "Using double's almost always solves the problem for small degrees: " << endl;
cout << "-------------------------------------------------------------------" << endl;
PolynomialSolver<double,6> psolve6d( hardCase_polynomial.cast<double>() );
cout << "Complex roots: " << psolve6d.roots().transpose() << endl;
for( int i=0; i<6; ++i )
{
std::complex<float> castedRoot( psolve6d.roots()[i].real(), psolve6d.roots()[i].imag() );
evals[i] = std::abs( poly_eval( hardCase_polynomial, castedRoot ) );
}
cout << "Norms of the evaluations of the polynomial at the roots: " << evals.transpose() << endl << endl;
cout.precision(10);
cout << "The last root in float then in double: " << psolvef.roots()[5] << "\t" << psolve6d.roots()[5] << endl;
std::complex<float> castedRoot( psolve6d.roots()[5].real(), psolve6d.roots()[5].imag() );
cout << "Norm of the difference: " << ei_abs( psolvef.roots()[5] - castedRoot ) << endl;
}

View File

@ -0,0 +1,20 @@
#include <unsupported/Eigen/Polynomials>
#include <iostream>
using namespace Eigen;
using namespace std;
int main()
{
Vector4d roots = Vector4d::Random();
cout << "Roots: " << roots.transpose() << endl;
Eigen::Matrix<double,5,1> polynomial;
roots_to_monicPolynomial( roots, polynomial );
cout << "Polynomial: ";
for( int i=0; i<4; ++i ){ cout << polynomial[i] << ".x^" << i << "+ "; }
cout << polynomial[4] << ".x^4" << endl;
Vector4d evaluation;
for( int i=0; i<4; ++i ){
evaluation[i] = poly_eval( polynomial, roots[i] ); }
cout << "Evaluation of the polynomial at the roots: " << evaluation.transpose();
}

View File

@ -24,3 +24,19 @@ if(FFTW_FOUND)
ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT " "-lfftw3 -lfftw3f -lfftw3l" )
endif(FFTW_FOUND)
find_package(GSL)
if(GSL_FOUND AND GSL_VERSION_MINOR LESS 9)
set(GSL_FOUND "")
endif(GSL_FOUND AND GSL_VERSION_MINOR LESS 9)
if(GSL_FOUND)
add_definitions("-DHAS_GSL" ${GSL_DEFINITIONS})
include_directories(${GSL_INCLUDE_DIR})
ei_add_property(EIGEN_TESTED_BACKENDS "GSL, ")
else(GSL_FOUND)
ei_add_property(EIGEN_MISSING_BACKENDS "GSL, ")
set(GSL_LIBRARIES " ")
endif(GSL_FOUND)
ei_add_test(polynomialutils)
ei_add_test(polynomialsolver " " "${GSL_LIBRARIES}" )

View File

@ -0,0 +1,263 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#include "main.h"
#include <unsupported/Eigen/Polynomials>
#include <iostream>
#include <algorithm>
#ifdef HAS_GSL
#include "gsl_helper.h"
#endif
using namespace std;
template<int Size>
struct ei_increment_if_fixed_size
{
enum {
ret = (Size == Dynamic) ? Dynamic : Size+1
};
};
template<int Deg, typename POLYNOMIAL, typename SOLVER>
bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve )
{
typedef typename POLYNOMIAL::Scalar Scalar;
typedef typename SOLVER::RootsType RootsType;
typedef Matrix<Scalar,Deg,1> EvalRootsType;
const int deg = pols.size()-1;
psolve.compute( pols );
const RootsType& roots( psolve.roots() );
EvalRootsType evr( deg );
for( int i=0; i<roots.size(); ++i ){
evr[i] = std::abs( poly_eval( pols, roots[i] ) ); }
bool evalToZero = evr.isZero( test_precision<Scalar>() );
if( !evalToZero )
{
cerr << "WRONG root: " << endl;
cerr << "Polynomial: " << pols.transpose() << endl;
cerr << "Roots found: " << roots.transpose() << endl;
cerr << "Abs value of the polynomial at the roots: " << evr.transpose() << endl;
cerr << endl;
}
#ifdef HAS_GSL
if (ei_is_same_type< Scalar, double>::ret)
{
typedef GslTraits<Scalar> Gsl;
RootsType gslRoots(deg);
Gsl::eigen_poly_solve( pols, gslRoots );
EvalRootsType gslEvr( deg );
for( int i=0; i<gslRoots.size(); ++i )
{
gslEvr[i] = std::abs( poly_eval( pols, gslRoots[i] ) );
}
bool gslEvalToZero = gslEvr.isZero( test_precision<Scalar>() );
if( !evalToZero )
{
if( !gslEvalToZero ){
cerr << "GSL also failed" << endl; }
else{
cerr << "GSL did NOT failed" << endl; }
cerr << "GSL roots found: " << gslRoots.transpose() << endl;
cerr << "Abs value of the polynomial at the GSL roots: " << gslEvr.transpose() << endl;
cerr << endl;
}
}
#endif //< HAS_GSL
std::vector<Scalar> rootModuli( roots.size() );
Map< EvalRootsType > aux( &rootModuli[0], roots.size() );
aux = roots.array().abs();
std::sort( rootModuli.begin(), rootModuli.end() );
bool distinctModuli=true;
for( size_t i=1; i<rootModuli.size() && distinctModuli; ++i )
{
if( ei_isApprox( rootModuli[i], rootModuli[i-1] ) ){
distinctModuli = false; }
}
VERIFY( evalToZero || !distinctModuli );
return distinctModuli;
}
template<int Deg, typename POLYNOMIAL>
void evalSolver( const POLYNOMIAL& pols )
{
typedef typename POLYNOMIAL::Scalar Scalar;
typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType;
PolynomialSolverType psolve;
aux_evalSolver<Deg, POLYNOMIAL, PolynomialSolverType>( pols, psolve );
}
template< int Deg, typename POLYNOMIAL, typename ROOTS, typename REAL_ROOTS >
void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const REAL_ROOTS& real_roots )
{
typedef typename POLYNOMIAL::Scalar Scalar;
typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType;
PolynomialSolverType psolve;
if( aux_evalSolver<Deg, POLYNOMIAL, PolynomialSolverType>( pols, psolve ) )
{
//It is supposed that
// 1) the roots found are correct
// 2) the roots have distinct moduli
typedef typename POLYNOMIAL::Scalar Scalar;
typedef typename REAL_ROOTS::Scalar Real;
typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType;
typedef typename PolynomialSolverType::RootsType RootsType;
typedef Matrix<Scalar,Deg,1> EvalRootsType;
//Test realRoots
std::vector< Real > calc_realRoots;
psolve.realRoots( calc_realRoots );
VERIFY( calc_realRoots.size() == (size_t)real_roots.size() );
const Scalar psPrec = ei_sqrt( test_precision<Scalar>() );
for( size_t i=0; i<calc_realRoots.size(); ++i )
{
bool found = false;
for( size_t j=0; j<calc_realRoots.size()&& !found; ++j )
{
        if( ei_isApprox( calc_realRoots[i], real_roots[j], psPrec ) ){
found = true; }
}
VERIFY( found );
}
//Test greatestRoot
VERIFY( ei_isApprox( roots.array().abs().maxCoeff(),
ei_abs( psolve.greatestRoot() ), psPrec ) );
//Test smallestRoot
VERIFY( ei_isApprox( roots.array().abs().minCoeff(),
ei_abs( psolve.smallestRoot() ), psPrec ) );
bool hasRealRoot;
//Test absGreatestRealRoot
Real r = psolve.absGreatestRealRoot( hasRealRoot );
VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
if( hasRealRoot ){
VERIFY( ei_isApprox( real_roots.array().abs().maxCoeff(), ei_abs(r), psPrec ) ); }
//Test absSmallestRealRoot
r = psolve.absSmallestRealRoot( hasRealRoot );
VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
if( hasRealRoot ){
VERIFY( ei_isApprox( real_roots.array().abs().minCoeff(), ei_abs( r ), psPrec ) ); }
//Test greatestRealRoot
r = psolve.greatestRealRoot( hasRealRoot );
VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
if( hasRealRoot ){
VERIFY( ei_isApprox( real_roots.array().maxCoeff(), r, psPrec ) ); }
//Test smallestRealRoot
r = psolve.smallestRealRoot( hasRealRoot );
VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
if( hasRealRoot ){
VERIFY( ei_isApprox( real_roots.array().minCoeff(), r, psPrec ) ); }
}
}
template<typename _Scalar, int _Deg>
void polynomialsolver(int deg)
{
typedef ei_increment_if_fixed_size<_Deg> Dim;
typedef Matrix<_Scalar,Dim::ret,1> PolynomialType;
typedef Matrix<_Scalar,_Deg,1> EvalRootsType;
cout << "Standard cases" << endl;
PolynomialType pols = PolynomialType::Random(deg+1);
evalSolver<_Deg,PolynomialType>( pols );
cout << "Hard cases" << endl;
_Scalar multipleRoot = ei_random<_Scalar>();
EvalRootsType allRoots = EvalRootsType::Constant(deg,multipleRoot);
roots_to_monicPolynomial( allRoots, pols );
evalSolver<_Deg,PolynomialType>( pols );
cout << "Test sugar" << endl;
EvalRootsType realRoots = EvalRootsType::Random(deg);
roots_to_monicPolynomial( realRoots, pols );
evalSolverSugarFunction<_Deg>(
pols,
realRoots.template cast <
std::complex<
typename NumTraits<_Scalar>::Real
>
>(),
realRoots );
}
template<typename _Scalar> void polynomialsolver_scalar()
{
CALL_SUBTEST_1( (polynomialsolver<_Scalar,1>(1)) );
CALL_SUBTEST_2( (polynomialsolver<_Scalar,2>(2)) );
CALL_SUBTEST_3( (polynomialsolver<_Scalar,3>(3)) );
CALL_SUBTEST_4( (polynomialsolver<_Scalar,4>(4)) );
CALL_SUBTEST_5( (polynomialsolver<_Scalar,5>(5)) );
CALL_SUBTEST_6( (polynomialsolver<_Scalar,6>(6)) );
CALL_SUBTEST_7( (polynomialsolver<_Scalar,7>(7)) );
CALL_SUBTEST_8( (polynomialsolver<_Scalar,8>(8)) );
CALL_SUBTEST_9( (polynomialsolver<_Scalar,Dynamic>(
ei_random<int>(9,45)
)) );
}
void test_polynomialsolver()
{
for(int i = 0; i < g_repeat; i++)
{
polynomialsolver_scalar<double>();
polynomialsolver_scalar<float>();
}
}

View File

@ -0,0 +1,124 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#include "main.h"
#include <unsupported/Eigen/Polynomials>
#include <iostream>
using namespace std;
template<int Size>
struct ei_increment_if_fixed_size
{
enum {
ret = (Size == Dynamic) ? Dynamic : Size+1
};
};
template<typename _Scalar, int _Deg>
void realRoots_to_monicPolynomial_test(int deg)
{
typedef ei_increment_if_fixed_size<_Deg> Dim;
typedef Matrix<_Scalar,Dim::ret,1> PolynomialType;
typedef Matrix<_Scalar,_Deg,1> EvalRootsType;
PolynomialType pols(deg+1);
EvalRootsType roots = EvalRootsType::Random(deg);
roots_to_monicPolynomial( roots, pols );
EvalRootsType evr( deg );
for( int i=0; i<roots.size(); ++i ){
evr[i] = std::abs( poly_eval( pols, roots[i] ) ); }
bool evalToZero = evr.isZero( test_precision<_Scalar>() );
if( !evalToZero ){
cerr << evr.transpose() << endl; }
VERIFY( evalToZero );
}
template<typename _Scalar> void realRoots_to_monicPolynomial_scalar()
{
CALL_SUBTEST_2( (realRoots_to_monicPolynomial_test<_Scalar,2>(2)) );
CALL_SUBTEST_3( (realRoots_to_monicPolynomial_test<_Scalar,3>(3)) );
CALL_SUBTEST_4( (realRoots_to_monicPolynomial_test<_Scalar,4>(4)) );
CALL_SUBTEST_5( (realRoots_to_monicPolynomial_test<_Scalar,5>(5)) );
CALL_SUBTEST_6( (realRoots_to_monicPolynomial_test<_Scalar,6>(6)) );
CALL_SUBTEST_7( (realRoots_to_monicPolynomial_test<_Scalar,7>(7)) );
CALL_SUBTEST_8( (realRoots_to_monicPolynomial_test<_Scalar,17>(17)) );
CALL_SUBTEST_9( (realRoots_to_monicPolynomial_test<_Scalar,Dynamic>(
ei_random<int>(18,26) )) );
}
template<typename _Scalar, int _Deg>
void CauchyBounds(int deg)
{
typedef ei_increment_if_fixed_size<_Deg> Dim;
typedef Matrix<_Scalar,Dim::ret,1> PolynomialType;
typedef Matrix<_Scalar,_Deg,1> EvalRootsType;
PolynomialType pols(deg+1);
EvalRootsType roots = EvalRootsType::Random(deg);
roots_to_monicPolynomial( roots, pols );
_Scalar M = cauchy_max_bound( pols );
_Scalar m = cauchy_min_bound( pols );
_Scalar Max = roots.array().abs().maxCoeff();
_Scalar min = roots.array().abs().minCoeff();
bool eval = (M >= Max) && (m <= min);
if( !eval )
{
cerr << "Roots: " << roots << endl;
cerr << "Bounds: (" << m << ", " << M << ")" << endl;
cerr << "Min,Max: (" << min << ", " << Max << ")" << endl;
}
VERIFY( eval );
}
template<typename _Scalar> void CauchyBounds_scalar()
{
CALL_SUBTEST_2( (CauchyBounds<_Scalar,2>(2)) );
CALL_SUBTEST_3( (CauchyBounds<_Scalar,3>(3)) );
CALL_SUBTEST_4( (CauchyBounds<_Scalar,4>(4)) );
CALL_SUBTEST_5( (CauchyBounds<_Scalar,5>(5)) );
CALL_SUBTEST_6( (CauchyBounds<_Scalar,6>(6)) );
CALL_SUBTEST_7( (CauchyBounds<_Scalar,7>(7)) );
CALL_SUBTEST_8( (CauchyBounds<_Scalar,17>(17)) );
CALL_SUBTEST_9( (CauchyBounds<_Scalar,Dynamic>(
ei_random<int>(18,26) )) );
}
void test_polynomialutils()
{
for(int i = 0; i < g_repeat; i++)
{
realRoots_to_monicPolynomial_scalar<double>();
realRoots_to_monicPolynomial_scalar<float>();
CauchyBounds_scalar<double>();
CauchyBounds_scalar<float>();
}
}