From 064f3eff959f92190b057ae989137713afb34820 Mon Sep 17 00:00:00 2001 From: giacomo po Date: Thu, 30 Aug 2012 10:01:34 +0200 Subject: [PATCH 1/9] first working version. Still no preconditioning --- Eigen/src/IterativeLinearSolvers/MINRES.h | 273 ++++++++++++++++++++++ 1 file changed, 273 insertions(+) create mode 100644 Eigen/src/IterativeLinearSolvers/MINRES.h diff --git a/Eigen/src/IterativeLinearSolvers/MINRES.h b/Eigen/src/IterativeLinearSolvers/MINRES.h new file mode 100644 index 000000000..ca93ebc32 --- /dev/null +++ b/Eigen/src/IterativeLinearSolvers/MINRES.h @@ -0,0 +1,273 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Giacomo Po +// Copyright (C) 2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_MINRES_H_ +#define EIGEN_MINRES_H_ + + +namespace Eigen { + + namespace internal { + + /** \internal Low-level MINRES algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. + * \param precond A preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error. + */ + template + EIGEN_DONT_INLINE + void minres(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, int& iters, + typename Dest::RealScalar& tol_error) + { + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + // initialize + const int maxIters(iters); // initialize maxIters to iters + const int N(mat.cols()); // the size of the matrix + const RealScalar threshold(tol_error); // convergence threshold + VectorType v(VectorType::Zero(N)); + VectorType v_hat(rhs-mat*x); + RealScalar beta(v_hat.norm()); + RealScalar c(1.0); // the cosine of the Givens rotation + RealScalar c_old(1.0); + RealScalar s(0.0); // the sine of the Givens rotation + RealScalar s_old(0.0); // the sine of the Givens rotation + VectorType w(VectorType::Zero(N)); + VectorType w_old(w); + RealScalar eta(beta); + RealScalar norm_rMR=beta; + const RealScalar norm_r0(beta); + + int n = 0; + while ( n < maxIters ){ + + + // Lanczos + VectorType v_old(v); + v=v_hat/beta; + VectorType Av(mat*v); + RealScalar alpha(v.transpose()*Av); + v_hat=Av-alpha*v-beta*v_old; + RealScalar beta_old(beta); + beta=v_hat.norm(); + + // QR + RealScalar c_oold(c_old); + c_old=c; + RealScalar s_oold(s_old); + s_old=s; + RealScalar r1_hat=c_old *alpha-c_oold*s_old *beta_old; + RealScalar r1 =std::pow(std::pow(r1_hat,2)+std::pow(beta,2),0.5); + RealScalar r2 =s_old *alpha+c_oold*c_old*beta_old; + RealScalar r3 =s_oold*beta_old; + + // Givens rotation + c=r1_hat/r1; + s=beta/r1; + + // update + VectorType w_oold(w_old); + w_old=w; + w=(v-r3*w_oold-r2*w_old) /r1; + x += c*eta*w; + norm_rMR *= std::fabs(s); + eta=-s*eta; + //if(norm_rMR/norm_r0 < threshold){ + if ( (mat*x-rhs).norm()/rhs.norm() < threshold){ + break; + } + n++; + } + tol_error = (mat*x-rhs).norm()/rhs.norm(); // return error DOES mat*x NEED TO BE RECOMPUTED??? 
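+            // (Background, in exact arithmetic: the loop above builds the Lanczos three-term
+            //  recurrence A*v_k = beta_k*v_{k-1} + alpha_k*v_k + beta_{k+1}*v_{k+1}, and the
+            //  Givens cosines/sines c,s implicitly QR-factorize the resulting tridiagonal
+            //  matrix, which is why only the two previous directions w_old and w_oold need to
+            //  be kept when updating x.)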
+ iters = n; // return number of iterations + } + + } + + template< typename _MatrixType, int _UpLo=Lower, + typename _Preconditioner = DiagonalPreconditioner > + class MINRES; + + namespace internal { + + template< typename _MatrixType, int _UpLo, typename _Preconditioner> + struct traits > + { + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; + }; + + } + + /** \ingroup IterativeLinearSolvers_Module + * \brief A minimal residual solver for sparse symmetric problems + * + * This class allows to solve for A.x = b sparse linear problems using the MINRES algorithm + * of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite). + * The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + * \code + * int n = 10000; + * VectorXd x(n), b(n); + * SparseMatrix A(n,n); + * // fill A and b + * MINRES > mr; + * mr.compute(A); + * x = mr.solve(b); + * std::cout << "#iterations: " << mr.iterations() << std::endl; + * std::cout << "estimated error: " << mr.error() << std::endl; + * // update b, and solve again + * x = mr.solve(b); + * \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. Here is a step by + * step execution example starting with a random guess and printing the evolution + * of the estimated error: + * * \code + * x = VectorXd::Random(n); + * mr.setMaxIterations(1); + * int i = 0; + * do { + * x = mr.solveWithGuess(b,x); + * std::cout << i << " : " << mr.error() << std::endl; + * ++i; + * } while (mr.info()!=Success && i<100); + * \endcode + * Note that such a step by step excution is slightly slower. + * + * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ + template< typename _MatrixType, int _UpLo, typename _Preconditioner> + class MINRES : public IterativeSolverBase > + { + + typedef IterativeSolverBase Base; + using Base::mp_matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; + public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + + enum {UpLo = _UpLo}; + + public: + + /** Default constructor. */ + MINRES() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. 
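+      *
+      * A minimal usage sketch of this shortcut (assuming a filled, symmetric
+      * SparseMatrix<double> A and a VectorXd b of matching size):
+      * \code
+      * MINRES<SparseMatrix<double> > mr(A);
+      * VectorXd x = mr.solve(b);
+      * \endcode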
+ */ + MINRES(const MatrixType& A) : Base(A) {} + + /** Destructor. */ + ~MINRES(){} + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A + * \a x0 as an initial solution. + * + * \sa compute() + */ + template + inline const internal::solve_retval_with_guess + solveWithGuess(const MatrixBase& b, const Guess& x0) const + { + eigen_assert(m_isInitialized && "MINRES is not initialized."); + eigen_assert(Base::rows()==b.rows() + && "MINRES::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval_with_guess + (*this, b.derived(), x0); + } + + /** \internal */ + template + void _solveWithGuess(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + for(int j=0; jtemplate selfadjointView(), b.col(j), xj, + Base::m_preconditioner, m_iterations, m_error); + } + + m_isInitialized = true; + m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; + } + + /** \internal */ + template + void _solve(const Rhs& b, Dest& x) const + { + x.setOnes(); + _solveWithGuess(b,x); + } + + protected: + + }; + + namespace internal { + + template + struct solve_retval, Rhs> + : solve_retval_base, Rhs> + { + typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } + }; + + } // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MINRES_H + From 5f3880c5a875a55345ae87550afba20f21c422ca Mon Sep 17 00:00:00 2001 From: giacomo po Date: Thu, 30 Aug 2012 13:10:08 +0200 Subject: [PATCH 2/9] some optimization in MINRES, not sure about convergence criterion --- Eigen/src/IterativeLinearSolvers/MINRES.h | 25 ++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/Eigen/src/IterativeLinearSolvers/MINRES.h b/Eigen/src/IterativeLinearSolvers/MINRES.h index ca93ebc32..a87b7cb28 100644 --- a/Eigen/src/IterativeLinearSolvers/MINRES.h +++ b/Eigen/src/IterativeLinearSolvers/MINRES.h @@ -38,7 +38,9 @@ namespace Eigen { // initialize const int maxIters(iters); // initialize maxIters to iters const int N(mat.cols()); // the size of the matrix - const RealScalar threshold(tol_error); // convergence threshold + const RealScalar rhsNorm2(rhs.squaredNorm()); +// const RealScalar threshold(tol_error); // threshold for original convergence criterion, see below + const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold VectorType v(VectorType::Zero(N)); VectorType v_hat(rhs-mat*x); RealScalar beta(v_hat.norm()); @@ -52,14 +54,19 @@ namespace Eigen { RealScalar norm_rMR=beta; const RealScalar norm_r0(beta); + VectorType v_old(N), Av(N), w_oold(N); // preallocate temporaty vectors used in iteration + RealScalar residualNorm2; // not needed for original convergnce criterion + int n = 0; while ( n < maxIters ){ // Lanczos - VectorType v_old(v); + // VectorType v_old(v); // now pre-allocated + v_old = v; v=v_hat/beta; - VectorType Av(mat*v); +// VectorType Av(mat*v); // now pre-allocated + Av = mat*v; RealScalar alpha(v.transpose()*Av); v_hat=Av-alpha*v-beta*v_old; RealScalar beta_old(beta); @@ -80,19 +87,23 @@ namespace Eigen { s=beta/r1; // update - VectorType w_oold(w_old); + // VectorType w_oold(w_old); // now pre-allocated + w_oold = w_old; + w_old=w; w=(v-r3*w_oold-r2*w_old) /r1; x += c*eta*w; norm_rMR *= std::fabs(s); eta=-s*eta; - //if(norm_rMR/norm_r0 < threshold){ - if ( (mat*x-rhs).norm()/rhs.norm() < threshold){ + + 
residualNorm2 = (mat*x-rhs).squaredNorm(); // DOES mat*x NEED TO BE RECOMPUTED ???? + //if(norm_rMR/norm_r0 < threshold){ // original convergence criterion, does not require "mat*x" + if ( residualNorm2 < threshold2){ break; } n++; } - tol_error = (mat*x-rhs).norm()/rhs.norm(); // return error DOES mat*x NEED TO BE RECOMPUTED??? + tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error iters = n; // return number of iterations } From 751501eade51d9843b84b18743601c46c76fcb8e Mon Sep 17 00:00:00 2001 From: giacomo po Date: Sat, 1 Sep 2012 21:59:06 +0200 Subject: [PATCH 3/9] added preconditioner with preconditioned-Lanczos iteration --- Eigen/src/IterativeLinearSolvers/MINRES.h | 134 ++++++++++++++++------ 1 file changed, 97 insertions(+), 37 deletions(-) diff --git a/Eigen/src/IterativeLinearSolvers/MINRES.h b/Eigen/src/IterativeLinearSolvers/MINRES.h index a87b7cb28..5bc4773d7 100644 --- a/Eigen/src/IterativeLinearSolvers/MINRES.h +++ b/Eigen/src/IterativeLinearSolvers/MINRES.h @@ -35,73 +35,133 @@ namespace Eigen { typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix VectorType; + + // initialize const int maxIters(iters); // initialize maxIters to iters const int N(mat.cols()); // the size of the matrix const RealScalar rhsNorm2(rhs.squaredNorm()); // const RealScalar threshold(tol_error); // threshold for original convergence criterion, see below const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold - VectorType v(VectorType::Zero(N)); - VectorType v_hat(rhs-mat*x); - RealScalar beta(v_hat.norm()); + + +// VectorType v(VectorType::Zero(N)); +// VectorType v_hat(rhs-mat*x); + + // Compute initial residual + VectorType residual(rhs-mat*x); + + + // Initialize preconditioned Lanczos + VectorType v_old(N); // will be initialized inside loop + VectorType v = VectorType::Zero(N); //initialize v + VectorType v_new = residual; //initialize v_new + VectorType w(N); // will be initialized inside loop + VectorType w_new = precond.solve(v_new); // initialize w_new + RealScalar beta; // will be initialized inside loop + RealScalar beta_new = sqrt(v_new.dot(w_new)); + v_new /= beta_new; + w_new /= beta_new; + + + + // RealScalar beta(v_hat.norm()); RealScalar c(1.0); // the cosine of the Givens rotation RealScalar c_old(1.0); RealScalar s(0.0); // the sine of the Givens rotation RealScalar s_old(0.0); // the sine of the Givens rotation - VectorType w(VectorType::Zero(N)); - VectorType w_old(w); - RealScalar eta(beta); + VectorType p_oold(VectorType::Zero(N)); // initialize p_oold=0 + VectorType p_old(p_oold); // initialize p_old=0 + VectorType p(N); // will be initialized in loop + + //RealScalar eta(beta); // CHANGE THIS RealScalar norm_rMR=beta; const RealScalar norm_r0(beta); - VectorType v_old(N), Av(N), w_oold(N); // preallocate temporaty vectors used in iteration + RealScalar eta(1.0); + + // VectorType v_old(N), Av(N), w_oold(N); // preallocate temporaty vectors used in iteration RealScalar residualNorm2; // not needed for original convergnce criterion int n = 0; while ( n < maxIters ){ - // Lanczos - // VectorType v_old(v); // now pre-allocated - v_old = v; - v=v_hat/beta; -// VectorType Av(mat*v); // now pre-allocated - Av = mat*v; - RealScalar alpha(v.transpose()*Av); - v_hat=Av-alpha*v-beta*v_old; - RealScalar beta_old(beta); - beta=v_hat.norm(); + // Preconditioned Lanczos + /* Note that there are 4 variants on the Lanczos algorithm. These are + * described in Paige, C. C. (1972). 
Computational variants of + * the Lanczos method for the eigenproblem. IMA Journal of Applied + * Mathematics, 10(3), 373–381. The current implementation corresonds + * to the case A(2,7) in the paper. It also corresponds to + * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear + * Systems, 2003 p.173. For the preconditioned version see + * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987). + */ + beta = beta_new; + v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter + v = v_new; // update + w = w_new; // update + v_new.noalias() = mat*w - beta*v_old; // compute v_new + const RealScalar alpha = v_new.dot(w); + v_new -= alpha*v; // overwrite v_new + w_new = precond.solve(v_new); // overwrite w_new + beta_new = sqrt(v_new.dot(w_new)); // compute beta_new + v_new /= beta_new; // overwrite v_new + w_new /= beta_new; // overwrite w_new - // QR - RealScalar c_oold(c_old); - c_old=c; - RealScalar s_oold(s_old); - s_old=s; - RealScalar r1_hat=c_old *alpha-c_oold*s_old *beta_old; - RealScalar r1 =std::pow(std::pow(r1_hat,2)+std::pow(beta,2),0.5); - RealScalar r2 =s_old *alpha+c_oold*c_old*beta_old; - RealScalar r3 =s_oold*beta_old; +// +// +// +// +// +// +// +// +// // VectorType v_old(v); // now pre-allocated +// v_old = v; +// v=v_hat/beta; +//// VectorType Av(mat*v); // now pre-allocated +// Av = mat*v; +// RealScalar alpha(v.transpose()*Av); +// v_hat=Av-alpha*v-beta*v_old; +// RealScalar beta_old(beta); +// beta=v_hat.norm(); - // Givens rotation - c=r1_hat/r1; - s=beta/r1; + // Apply QR +// RealScalar c_oold(c_old); // store old-old cosine +// c_old=c; // store old cosine +// RealScalar s_oold(s_old); // store old-old sine +// s_old=s; // store old sine +// const RealScalar r1_hat=c_old *alpha-c_oold*s_old *beta_old; +// const RealScalar r1 =std::pow(std::pow(r1_hat,2)+std::pow(beta,2),0.5); + const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration + const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration - // update + // Compute new Givens rotation + const RealScalar r1_hat=c*alpha-c_old*s*beta; + const RealScalar r1 =std::pow(std::pow(r1_hat,2)+std::pow(beta_new,2),0.5); + c_old = c; // store for next iteration + s_old = s; // store for next iteration + c=r1_hat/r1; // new cosine + s=beta/r1; // new sine + + // update w // VectorType w_oold(w_old); // now pre-allocated - w_oold = w_old; - - w_old=w; - w=(v-r3*w_oold-r2*w_old) /r1; - x += c*eta*w; + p_oold = p_old; + p_old = p; + p=(w-r2*p_old-r3*p_oold) /r1; + // update x + x += c*eta*p; norm_rMR *= std::fabs(s); - eta=-s*eta; residualNorm2 = (mat*x-rhs).squaredNorm(); // DOES mat*x NEED TO BE RECOMPUTED ???? 
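            // (In exact arithmetic mat*x would not need to be recomputed here: the MINRES
            //  residual satisfies ||r_k|| = |s_k| * ||r_{k-1}||, so the squared residual norm
            //  can be tracked recursively by multiplying by s*s at every iteration instead of
            //  forming mat*x - rhs explicitly.)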
//if(norm_rMR/norm_r0 < threshold){ // original convergence criterion, does not require "mat*x" if ( residualNorm2 < threshold2){ break; } - n++; + + eta=-s*eta; // update eta + n++; // increment iteration } tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error iters = n; // return number of iterations From 8c5e4fae6186004b8121276f30dd75a8b217eec9 Mon Sep 17 00:00:00 2001 From: giacomo po Date: Sat, 22 Sep 2012 15:29:00 -0700 Subject: [PATCH 4/9] working preconditioned MINRES solver --- Eigen/IterativeLinearSolvers | 1 + Eigen/src/IterativeLinearSolvers/MINRES.h | 96 +++++++---------------- 2 files changed, 28 insertions(+), 69 deletions(-) diff --git a/Eigen/IterativeLinearSolvers b/Eigen/IterativeLinearSolvers index 315c2dd1e..741bac824 100644 --- a/Eigen/IterativeLinearSolvers +++ b/Eigen/IterativeLinearSolvers @@ -34,6 +34,7 @@ #include "src/IterativeLinearSolvers/ConjugateGradient.h" #include "src/IterativeLinearSolvers/BiCGSTAB.h" #include "src/IterativeLinearSolvers/IncompleteLUT.h" +#include "src/IterativeLinearSolvers/MINRES.h" #include "src/Core/util/ReenableStupidWarnings.h" diff --git a/Eigen/src/IterativeLinearSolvers/MINRES.h b/Eigen/src/IterativeLinearSolvers/MINRES.h index 5bc4773d7..d5527a163 100644 --- a/Eigen/src/IterativeLinearSolvers/MINRES.h +++ b/Eigen/src/IterativeLinearSolvers/MINRES.h @@ -21,7 +21,7 @@ namespace Eigen { * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. - * \param precond A preconditioner being able to efficiently solve for an + * \param precond A right preconditioner being able to efficiently solve for an * approximation of Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. 
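  *
  * Convergence is declared when the estimated residual satisfies
  * ||A*x - b|| <= tol_error * ||b||; internally squared quantities are compared,
  * i.e. residualNorm2 < threshold2 with threshold2 = tol_error^2 * ||b||^2.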
@@ -35,22 +35,16 @@ namespace Eigen { typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix VectorType; - - + // initialize const int maxIters(iters); // initialize maxIters to iters const int N(mat.cols()); // the size of the matrix const RealScalar rhsNorm2(rhs.squaredNorm()); -// const RealScalar threshold(tol_error); // threshold for original convergence criterion, see below const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold - - -// VectorType v(VectorType::Zero(N)); -// VectorType v_hat(rhs-mat*x); // Compute initial residual - VectorType residual(rhs-mat*x); - + const VectorType residual(rhs-mat*x); + RealScalar residualNorm2(residual.squaredNorm()); // not needed for original convergnce criterion // Initialize preconditioned Lanczos VectorType v_old(N); // will be initialized inside loop @@ -59,34 +53,25 @@ namespace Eigen { VectorType w(N); // will be initialized inside loop VectorType w_new = precond.solve(v_new); // initialize w_new RealScalar beta; // will be initialized inside loop - RealScalar beta_new = sqrt(v_new.dot(w_new)); + RealScalar beta_new2 = v_new.dot(w_new); + assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); + RealScalar beta_new = sqrt(beta_new2); + RealScalar beta_one = beta_new; v_new /= beta_new; w_new /= beta_new; - - - - // RealScalar beta(v_hat.norm()); + // Initialize other variables RealScalar c(1.0); // the cosine of the Givens rotation RealScalar c_old(1.0); RealScalar s(0.0); // the sine of the Givens rotation RealScalar s_old(0.0); // the sine of the Givens rotation - VectorType p_oold(VectorType::Zero(N)); // initialize p_oold=0 - VectorType p_old(p_oold); // initialize p_old=0 - VectorType p(N); // will be initialized in loop - - //RealScalar eta(beta); // CHANGE THIS - RealScalar norm_rMR=beta; - const RealScalar norm_r0(beta); - + VectorType p_oold(N); // will be initialized in loop + VectorType p_old(VectorType::Zero(N)); // initialize p_old=0 + VectorType p(p_old); // initialize p=0 RealScalar eta(1.0); - - // VectorType v_old(N), Av(N), w_oold(N); // preallocate temporaty vectors used in iteration - RealScalar residualNorm2; // not needed for original convergnce criterion - + int n = 0; while ( n < maxIters ){ - // Preconditioned Lanczos /* Note that there are 4 variants on the Lanczos algorithm. These are * described in Paige, C. C. (1972). 
Computational variants of @@ -105,57 +90,29 @@ namespace Eigen { const RealScalar alpha = v_new.dot(w); v_new -= alpha*v; // overwrite v_new w_new = precond.solve(v_new); // overwrite w_new - beta_new = sqrt(v_new.dot(w_new)); // compute beta_new - v_new /= beta_new; // overwrite v_new - w_new /= beta_new; // overwrite w_new + beta_new2 = v_new.dot(w_new); // compute beta_new + assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); + beta_new = sqrt(beta_new2); // compute beta_new + v_new /= beta_new; // overwrite v_new for next iteration + w_new /= beta_new; // overwrite w_new for next iteration -// -// -// -// -// -// -// -// -// // VectorType v_old(v); // now pre-allocated -// v_old = v; -// v=v_hat/beta; -//// VectorType Av(mat*v); // now pre-allocated -// Av = mat*v; -// RealScalar alpha(v.transpose()*Av); -// v_hat=Av-alpha*v-beta*v_old; -// RealScalar beta_old(beta); -// beta=v_hat.norm(); - - // Apply QR -// RealScalar c_oold(c_old); // store old-old cosine -// c_old=c; // store old cosine -// RealScalar s_oold(s_old); // store old-old sine -// s_old=s; // store old sine -// const RealScalar r1_hat=c_old *alpha-c_oold*s_old *beta_old; -// const RealScalar r1 =std::pow(std::pow(r1_hat,2)+std::pow(beta,2),0.5); + // Givens rotation const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration - - // Compute new Givens rotation const RealScalar r1_hat=c*alpha-c_old*s*beta; - const RealScalar r1 =std::pow(std::pow(r1_hat,2)+std::pow(beta_new,2),0.5); + const RealScalar r1 =sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) ); c_old = c; // store for next iteration s_old = s; // store for next iteration c=r1_hat/r1; // new cosine - s=beta/r1; // new sine + s=beta_new/r1; // new sine - // update w - // VectorType w_oold(w_old); // now pre-allocated + // Update solution p_oold = p_old; p_old = p; p=(w-r2*p_old-r3*p_oold) /r1; - // update x - x += c*eta*p; - norm_rMR *= std::fabs(s); + x += beta_one*c*eta*p; + residualNorm2 *= s*s; - residualNorm2 = (mat*x-rhs).squaredNorm(); // DOES mat*x NEED TO BE RECOMPUTED ???? - //if(norm_rMR/norm_r0 < threshold){ // original convergence criterion, does not require "mat*x" if ( residualNorm2 < threshold2){ break; } @@ -170,7 +127,8 @@ namespace Eigen { } template< typename _MatrixType, int _UpLo=Lower, - typename _Preconditioner = DiagonalPreconditioner > + typename _Preconditioner = IdentityPreconditioner> +// typename _Preconditioner = IdentityPreconditioner > // preconditioner must be positive definite class MINRES; namespace internal { @@ -313,7 +271,7 @@ namespace Eigen { template void _solve(const Rhs& b, Dest& x) const { - x.setOnes(); + x.setZero(); _solveWithGuess(b,x); } From dd7ff3f4934b173fe337916fc9225facbaf955c3 Mon Sep 17 00:00:00 2001 From: giacomo po Date: Mon, 24 Sep 2012 07:47:38 -0700 Subject: [PATCH 5/9] moved MINRES to unsupported. Made unit test. 
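
After this move the solver is no longer reachable through Eigen/IterativeLinearSolvers;
user code pulls it in through the unsupported module header instead. A minimal include
sketch (illustrative, not part of the diff below):

    #include <Eigen/Sparse>
    #include <unsupported/Eigen/IterativeSolvers>   // provides Eigen::MINRES (next to GMRES)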
--- Eigen/IterativeLinearSolvers | 1 - unsupported/Eigen/IterativeSolvers | 1 + .../Eigen/src/IterativeSolvers}/MINRES.h | 0 unsupported/test/CMakeLists.txt | 2 +- unsupported/test/minres.cpp | 33 +++++++++++++++++++ 5 files changed, 35 insertions(+), 2 deletions(-) rename {Eigen/src/IterativeLinearSolvers => unsupported/Eigen/src/IterativeSolvers}/MINRES.h (100%) create mode 100644 unsupported/test/minres.cpp diff --git a/Eigen/IterativeLinearSolvers b/Eigen/IterativeLinearSolvers index 741bac824..315c2dd1e 100644 --- a/Eigen/IterativeLinearSolvers +++ b/Eigen/IterativeLinearSolvers @@ -34,7 +34,6 @@ #include "src/IterativeLinearSolvers/ConjugateGradient.h" #include "src/IterativeLinearSolvers/BiCGSTAB.h" #include "src/IterativeLinearSolvers/IncompleteLUT.h" -#include "src/IterativeLinearSolvers/MINRES.h" #include "src/Core/util/ReenableStupidWarnings.h" diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers index 6c6946d91..7a5776d9c 100644 --- a/unsupported/Eigen/IterativeSolvers +++ b/unsupported/Eigen/IterativeSolvers @@ -34,6 +34,7 @@ #include "../../Eigen/Householder" #include "src/IterativeSolvers/GMRES.h" //#include "src/IterativeSolvers/SSORPreconditioner.h" +#include "src/IterativeSolvers/MINRES.h" //@} diff --git a/Eigen/src/IterativeLinearSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h similarity index 100% rename from Eigen/src/IterativeLinearSolvers/MINRES.h rename to unsupported/Eigen/src/IterativeSolvers/MINRES.h diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt index ff0137ec6..1e8ba7240 100644 --- a/unsupported/test/CMakeLists.txt +++ b/unsupported/test/CMakeLists.txt @@ -85,4 +85,4 @@ ei_add_test(polynomialutils) ei_add_test(kronecker_product) ei_add_test(splines) ei_add_test(gmres) - +ei_add_test(minres) diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp new file mode 100644 index 000000000..46946ca8b --- /dev/null +++ b/unsupported/test/minres.cpp @@ -0,0 +1,33 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Giacomo Po +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "../../test/sparse_solver.h" +#include + +template void test_minres_T() +{ + minres, DiagonalPreconditioner > minres_colmajor_diag; + minres, IdentityPreconditioner > minres_colmajor_I; + minres, IncompleteLUT > minres_colmajor_ilut; + //minres, SSORPreconditioner > minres_colmajor_ssor; + + CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) ); +// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_I) ); + CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) ); + //CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ssor) ); +} + +void test_minres() +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(test_minres_T()); + CALL_SUBTEST_2(test_minres_T >()); + } +} From 18c41aa04f4d04a9c4c4d170150bc0daa92a5650 Mon Sep 17 00:00:00 2001 From: giacomo po Date: Mon, 24 Sep 2012 08:33:11 -0700 Subject: [PATCH 6/9] Some minor optimization. 
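
The per-iteration temporaries introduced below (const VectorType v_old(v), w(w_new), ...)
still copy a full vector on every pass. One possible alternative, sketched here under the
assumption that all Lanczos vectors are preallocated outside the loop, and *not* used by
this patch, is to rotate them with Eigen's swap(), which exchanges the heap storage of
dynamically sized vectors instead of copying elements:

    #include <Eigen/Dense>

    // Hypothetical helper; names are illustrative only.
    void rotateLanczosVectors(Eigen::VectorXd& v_old, Eigen::VectorXd& v, Eigen::VectorXd& v_new)
    {
        v_old.swap(v);   // v_old now holds the previous v (pointer swap, no element copy)
        v.swap(v_new);   // v now holds the previous v_new; v_new becomes scratch and is
                         // fully overwritten by the next  v_new = mat*w - beta*v_old  step
    }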
--- .../Eigen/src/IterativeSolvers/MINRES.h | 53 +++++++++++-------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h index d5527a163..01ab319a1 100644 --- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -40,23 +40,24 @@ namespace Eigen { const int maxIters(iters); // initialize maxIters to iters const int N(mat.cols()); // the size of the matrix const RealScalar rhsNorm2(rhs.squaredNorm()); - const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold + const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) - // Compute initial residual - const VectorType residual(rhs-mat*x); - RealScalar residualNorm2(residual.squaredNorm()); // not needed for original convergnce criterion +// // Compute initial residual +// const VectorType residual(rhs-mat*x); +// RealScalar residualNorm2(residual.squaredNorm()); // Initialize preconditioned Lanczos - VectorType v_old(N); // will be initialized inside loop - VectorType v = VectorType::Zero(N); //initialize v - VectorType v_new = residual; //initialize v_new - VectorType w(N); // will be initialized inside loop - VectorType w_new = precond.solve(v_new); // initialize w_new - RealScalar beta; // will be initialized inside loop - RealScalar beta_new2 = v_new.dot(w_new); +// VectorType v_old(N); // will be initialized inside loop + VectorType v( VectorType::Zero(N) ); //initialize v + VectorType v_new(rhs-mat*x); //initialize v_new + RealScalar residualNorm2(v_new.squaredNorm()); +// VectorType w(N); // will be initialized inside loop + VectorType w_new(precond.solve(v_new)); // initialize w_new +// RealScalar beta; // will be initialized inside loop + RealScalar beta_new2(v_new.dot(w_new)); assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); - RealScalar beta_new = sqrt(beta_new2); - RealScalar beta_one = beta_new; + RealScalar beta_new(sqrt(beta_new2)); + const RealScalar beta_one(beta_new); v_new /= beta_new; w_new /= beta_new; // Initialize other variables @@ -64,13 +65,15 @@ namespace Eigen { RealScalar c_old(1.0); RealScalar s(0.0); // the sine of the Givens rotation RealScalar s_old(0.0); // the sine of the Givens rotation - VectorType p_oold(N); // will be initialized in loop +// VectorType p_oold(N); // will be initialized in loop VectorType p_old(VectorType::Zero(N)); // initialize p_old=0 VectorType p(p_old); // initialize p=0 RealScalar eta(1.0); - int n = 0; - while ( n < maxIters ){ + //int n = 0; + iters = 0; +// while ( n < maxIters ){ + while ( iters < maxIters ){ // Preconditioned Lanczos /* Note that there are 4 variants on the Lanczos algorithm. These are @@ -78,14 +81,16 @@ namespace Eigen { * the Lanczos method for the eigenproblem. IMA Journal of Applied * Mathematics, 10(3), 373–381. The current implementation corresonds * to the case A(2,7) in the paper. It also corresponds to - * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear + * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear * Systems, 2003 p.173. For the preconditioned version see * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987). 
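                 *
                 * With a preconditioner M (applied through precond.solve), the vectors below
                 * are scaled so that v_k.dot(w_k) = v_k.dot(M^{-1}*v_k) = 1; this is why
                 * beta_new2 = v_new.dot(w_new) must stay non-negative and the preconditioner
                 * has to be symmetric positive definite (hence the
                 * "PRECONDITIONER IS NOT POSITIVE DEFINITE" asserts).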
*/ - beta = beta_new; - v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter + const RealScalar beta(beta_new); +// v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter + const VectorType v_old(v); v = v_new; // update - w = w_new; // update +// w = w_new; // update + const VectorType w(w_new); v_new.noalias() = mat*w - beta*v_old; // compute v_new const RealScalar alpha = v_new.dot(w); v_new -= alpha*v; // overwrite v_new @@ -107,7 +112,8 @@ namespace Eigen { s=beta_new/r1; // new sine // Update solution - p_oold = p_old; +// p_oold = p_old; + const VectorType p_oold(p_old); p_old = p; p=(w-r2*p_old-r3*p_oold) /r1; x += beta_one*c*eta*p; @@ -118,10 +124,11 @@ namespace Eigen { } eta=-s*eta; // update eta - n++; // increment iteration + // n++; // increment iteration + iters++; } tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error - iters = n; // return number of iterations + // iters = n; // return number of iterations } } From fd0441baee4b8ccbe404b01b1f24a0f90c52ecc9 Mon Sep 17 00:00:00 2001 From: giacomo po Date: Mon, 24 Sep 2012 09:20:40 -0700 Subject: [PATCH 7/9] some clean-up and new comments. --- .../Eigen/src/IterativeSolvers/MINRES.h | 24 +++++++------------ 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h index 01ab319a1..46d7bedc1 100644 --- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h +++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -41,10 +41,6 @@ namespace Eigen { const int N(mat.cols()); // the size of the matrix const RealScalar rhsNorm2(rhs.squaredNorm()); const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) - -// // Compute initial residual -// const VectorType residual(rhs-mat*x); -// RealScalar residualNorm2(residual.squaredNorm()); // Initialize preconditioned Lanczos // VectorType v_old(N); // will be initialized inside loop @@ -70,16 +66,14 @@ namespace Eigen { VectorType p(p_old); // initialize p=0 RealScalar eta(1.0); - //int n = 0; - iters = 0; -// while ( n < maxIters ){ + iters = 0; // reset iters while ( iters < maxIters ){ // Preconditioned Lanczos /* Note that there are 4 variants on the Lanczos algorithm. These are * described in Paige, C. C. (1972). Computational variants of * the Lanczos method for the eigenproblem. IMA Journal of Applied - * Mathematics, 10(3), 373–381. The current implementation corresonds + * Mathematics, 10(3), 373–381. The current implementation corresponds * to the case A(2,7) in the paper. It also corresponds to * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear * Systems, 2003 p.173. 
For the preconditioned version see @@ -87,10 +81,10 @@ namespace Eigen { */ const RealScalar beta(beta_new); // v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter - const VectorType v_old(v); + const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT v = v_new; // update // w = w_new; // update - const VectorType w(w_new); + const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT v_new.noalias() = mat*w - beta*v_old; // compute v_new const RealScalar alpha = v_new.dot(w); v_new -= alpha*v; // overwrite v_new @@ -113,9 +107,9 @@ namespace Eigen { // Update solution // p_oold = p_old; - const VectorType p_oold(p_old); + const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT p_old = p; - p=(w-r2*p_old-r3*p_oold) /r1; + p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED? x += beta_one*c*eta*p; residualNorm2 *= s*s; @@ -124,11 +118,9 @@ namespace Eigen { } eta=-s*eta; // update eta - // n++; // increment iteration - iters++; + iters++; // increment iteration number (for output purposes) } - tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error - // iters = n; // return number of iterations + tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error. Note that this is the estimated error. The real error |Ax-b|/|b| may be slightly larger } } From 01cb88fff8d7f11993c037fa0d8bb7a3d0ff10fb Mon Sep 17 00:00:00 2001 From: giacomo po Date: Thu, 27 Sep 2012 17:44:54 -0700 Subject: [PATCH 8/9] compiling (but failing) unit test --- unsupported/test/minres.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp index 46946ca8b..faa799e06 100644 --- a/unsupported/test/minres.cpp +++ b/unsupported/test/minres.cpp @@ -13,14 +13,14 @@ template void test_minres_T() { - minres, DiagonalPreconditioner > minres_colmajor_diag; - minres, IdentityPreconditioner > minres_colmajor_I; - minres, IncompleteLUT > minres_colmajor_ilut; +// MINRES, Lower, DiagonalPreconditioner > minres_colmajor_diag; + MINRES, Lower, IdentityPreconditioner > minres_colmajor_I; +// MINRES, Lower, IncompleteLUT > minres_colmajor_ilut; //minres, SSORPreconditioner > minres_colmajor_ssor; - CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) ); -// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_I) ); - CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) ); +// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) ); + CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_I) ); + // CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) ); //CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ssor) ); } @@ -28,6 +28,6 @@ void test_minres() { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1(test_minres_T()); - CALL_SUBTEST_2(test_minres_T >()); + // CALL_SUBTEST_2(test_minres_T >()); } } From bf81276dadc4dc15b3dfae947aca6f39420e325e Mon Sep 17 00:00:00 2001 From: giacomo po Date: Mon, 1 Oct 2012 12:23:03 -0700 Subject: [PATCH 9/9] spd test instead of square test. Still missing complex version of MINRES. 
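
MINRES requires a symmetric (possibly indefinite) matrix, while check_sparse_square_solving
exercises general square problems that the solver cannot be expected to handle;
check_sparse_spd_solving restricts the test to symmetric positive definite systems, which the
current real-valued implementation supports. The complex (Hermitian) case is still missing
both an implementation and coverage, e.g. the commented-out
test_minres_T<std::complex<double> >() subtest.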
---
 unsupported/test/minres.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp
index faa799e06..eb187d445 100644
--- a/unsupported/test/minres.cpp
+++ b/unsupported/test/minres.cpp
@@ -19,7 +19,7 @@ template<typename T> void test_minres_T()
     //minres<SparseMatrix<T>, SSORPreconditioner<T> > minres_colmajor_ssor;
 
 //  CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) );
-    CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_I) );
+    CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_I) );
     // CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) );
     //CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ssor) );
 }
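
With the series applied, a complete toy program exercising the unsupported solver could look
as follows (a sketch: the 1D-Laplacian test matrix, problem size and tolerance are
illustrative and not taken from the patches):

    #include <iostream>
    #include <vector>
    #include <Eigen/Sparse>
    #include <unsupported/Eigen/IterativeSolvers>   // provides Eigen::MINRES

    int main()
    {
        const int n = 1000;
        std::vector<Eigen::Triplet<double> > coeffs;
        for (int i = 0; i < n; ++i) {
            coeffs.push_back(Eigen::Triplet<double>(i, i, 2.0));          // symmetric positive
            if (i + 1 < n) {                                              // definite tridiagonal
                coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
                coeffs.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
            }
        }
        Eigen::SparseMatrix<double> A(n, n);
        A.setFromTriplets(coeffs.begin(), coeffs.end());
        Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

        Eigen::MINRES<Eigen::SparseMatrix<double> > mr;   // defaults: Lower, IdentityPreconditioner
        mr.setTolerance(1e-10);
        mr.compute(A);
        Eigen::VectorXd x = mr.solve(b);

        std::cout << "#iterations: " << mr.iterations()
                  << ", estimated error: " << mr.error() << std::endl;
        return 0;
    }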