diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index b4ba37a36..3552d5a16 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -370,11 +370,8 @@ template class MatrixBase /////////// SVD module /////////// - template - inline JacobiSVD jacobiSvd() const; - - template - inline BDCSVD bdcSvd() const; + inline JacobiSVD jacobiSvd(unsigned int computationOptions = 0) const; + inline BDCSVD bdcSvd(unsigned int computationOptions = 0) const; /////////// Geometry module /////////// diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h index 16ed58560..72fe3307c 100644 --- a/Eigen/src/Core/util/Constants.h +++ b/Eigen/src/Core/util/Constants.h @@ -423,14 +423,14 @@ enum DecompositionOptions { /** \ingroup enums * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */ enum QRPreconditioners { - /** Use a QR decomposition with column pivoting as the first step. */ - ColPivHouseholderQRPreconditioner = 0x0, /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */ - NoQRPreconditioner = 0x40, + NoQRPreconditioner, /** Use a QR decomposition without pivoting as the first step. */ - HouseholderQRPreconditioner = 0x80, + HouseholderQRPreconditioner, + /** Use a QR decomposition with column pivoting as the first step. */ + ColPivHouseholderQRPreconditioner, /** Use a QR decomposition with full pivoting as the first step. */ - FullPivHouseholderQRPreconditioner = 0xC0 + FullPivHouseholderQRPreconditioner }; #ifdef Success diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h index 1c88fa92b..6b0ac502b 100644 --- a/Eigen/src/Core/util/ForwardDeclarations.h +++ b/Eigen/src/Core/util/ForwardDeclarations.h @@ -277,8 +277,8 @@ template class ColPivHouseholderQR; template class FullPivHouseholderQR; template class CompleteOrthogonalDecomposition; template class SVDBase; -template class JacobiSVD; -template class BDCSVD; +template class JacobiSVD; +template class BDCSVD; template class LLT; template class LDLT; template class HouseholderSequence; diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h index 0d23df34a..db0a48585 100644 --- a/Eigen/src/Geometry/Hyperplane.h +++ b/Eigen/src/Geometry/Hyperplane.h @@ -108,7 +108,7 @@ public: if(norm <= v0.norm() * v1.norm() * NumTraits::epsilon()) { Matrix m; m << v0.transpose(), v1.transpose(); - JacobiSVD, ComputeFullV> svd(m); + JacobiSVD > svd(m, ComputeFullV); result.normal() = svd.matrixV().col(2); } else diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h index 52e8a81f0..e291bf30f 100644 --- a/Eigen/src/Geometry/Quaternion.h +++ b/Eigen/src/Geometry/Quaternion.h @@ -651,7 +651,7 @@ EIGEN_DEVICE_FUNC inline Derived& QuaternionBase::setFromTwoVectors(con { c = numext::maxi(c,Scalar(-1)); Matrix m; m << v0.transpose(), v1.transpose(); - JacobiSVD, ComputeFullV> svd(m); + JacobiSVD > svd(m, ComputeFullV); Vector3 axis = svd.matrixV().col(2); Scalar w2 = (Scalar(1)+c)*Scalar(0.5); diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h index c64fc5f0b..27ea9625e 100644 --- a/Eigen/src/Geometry/Transform.h +++ b/Eigen/src/Geometry/Transform.h @@ -1105,7 +1105,7 @@ template EIGEN_DEVICE_FUNC void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const { // Note that JacobiSVD is faster than BDCSVD for small matrices. 
- JacobiSVD svd(linear()); + JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant() < Scalar(0) ? Scalar(-1) : Scalar(1); // so x has absolute value 1 VectorType sv(svd.singularValues()); @@ -1135,7 +1135,7 @@ template EIGEN_DEVICE_FUNC void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const { // Note that JacobiSVD is faster than BDCSVD for small matrices. - JacobiSVD svd(linear()); + JacobiSVD svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant() < Scalar(0) ? Scalar(-1) : Scalar(1); // so x has absolute value 1 VectorType sv(svd.singularValues()); diff --git a/Eigen/src/Geometry/Umeyama.h b/Eigen/src/Geometry/Umeyama.h index 736ccb644..0a9dd355c 100644 --- a/Eigen/src/Geometry/Umeyama.h +++ b/Eigen/src/Geometry/Umeyama.h @@ -127,7 +127,7 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo // Eq. (38) const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose(); - JacobiSVD svd(sigma); + JacobiSVD svd(sigma, ComputeFullU | ComputeFullV); // Initialize the resulting transformation with an identity matrix... TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1); diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h index 48288a50d..8bb30cd68 100644 --- a/Eigen/src/SVD/BDCSVD.h +++ b/Eigen/src/SVD/BDCSVD.h @@ -38,14 +38,14 @@ namespace Eigen { #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]"); #endif - -template class BDCSVD; + +template class BDCSVD; namespace internal { -template -struct traits > - : svd_traits +template +struct traits > + : traits { typedef MatrixType_ MatrixType; }; @@ -61,11 +61,6 @@ struct traits > * \brief class Bidiagonal Divide and Conquer SVD * * \tparam MatrixType_ the type of the matrix of which we are computing the SVD decomposition - * - * \tparam Options this optional parameter allows one to specify options for computing unitaries \a U and \a V. - * Possible values are #ComputeThinU, #ComputeThinV, #ComputeFullU, #ComputeFullV. - * It is not possible to request the thin and full version of U or V. - * By default, unitaries are not computed. * * This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization, * and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD. @@ -80,8 +75,8 @@ struct traits > * * \sa class JacobiSVD */ -template -class BDCSVD : public SVDBase > +template +class BDCSVD : public SVDBase > { typedef SVDBase Base; @@ -132,20 +127,26 @@ public: * according to the specified problem size. * \sa BDCSVD() */ - BDCSVD(Index rows, Index cols) + BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0) : m_algoswap(16), m_numIters(0) { - allocate(rows, cols); + allocate(rows, cols, computationOptions); } /** \brief Constructor performing the decomposition of given matrix. * * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. 
*/ - BDCSVD(const MatrixType& matrix) + BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0) : m_algoswap(16), m_numIters(0) { - compute(matrix); + compute(matrix, computationOptions); } ~BDCSVD() @@ -155,8 +156,25 @@ public: /** \brief Method performing the decomposition of given matrix using custom options. * * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non - default) FullPivHouseholderQR preconditioner. */ - BDCSVD& compute(const MatrixType& matrix); + BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). + */ + BDCSVD& compute(const MatrixType& matrix) + { + return compute(matrix, this->m_computationOptions); + } void setSwitchSize(int s) { @@ -165,7 +183,7 @@ public: } private: - void allocate(Index rows, Index cols); + void allocate(Index rows, Index cols, unsigned int computationOptions); void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift); void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V); void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus); @@ -178,8 +196,6 @@ private: void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev); void structured_update(Block A, const MatrixXr &B, Index n1); static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift); - template - void computeBaseCase(SVDType& svd, Index n, Index firstCol, Index firstRowW, Index firstColW, Index shift); protected: MatrixXr m_naiveU, m_naiveV; @@ -192,10 +208,10 @@ protected: using Base::m_singularValues; using Base::m_diagSize; - using Base::ShouldComputeFullU; - using Base::ShouldComputeFullV; - using Base::ShouldComputeThinU; - using Base::ShouldComputeThinV; + using Base::m_computeFullU; + using Base::m_computeFullV; + using Base::m_computeThinU; + using Base::m_computeThinV; using Base::m_matrixU; using Base::m_matrixV; using Base::m_info; @@ -208,12 +224,12 @@ public: // Method to allocate and initialize matrix and attributes -template -void BDCSVD::allocate(Eigen::Index rows, Eigen::Index cols) +template +void BDCSVD::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions) { m_isTranspose = (cols > rows); - if (Base::allocate(rows, cols)) + if (Base::allocate(rows, cols, computationOptions)) return; m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize ); @@ -231,13 +247,13 @@ void BDCSVD::allocate(Eigen::Index rows, Eigen::Index cols) m_workspaceI.resize(3*m_diagSize); }// end allocate -template -BDCSVD& BDCSVD::compute(const MatrixType& matrix) +template +BDCSVD& BDCSVD::compute(const MatrixType& matrix, unsigned int 
computationOptions) { #ifdef EIGEN_BDCSVD_DEBUG_VERBOSE std::cout << "\n\n\n======================================================================================================================\n\n\n"; #endif - allocate(matrix.rows(), matrix.cols()); + allocate(matrix.rows(), matrix.cols(), computationOptions); using std::abs; const RealScalar considerZero = (std::numeric_limits::min)(); @@ -246,7 +262,7 @@ BDCSVD& BDCSVD::compute(const MatrixTy if(matrix.cols() < m_algoswap) { // FIXME this line involves temporaries - JacobiSVD jsvd(matrix); + JacobiSVD jsvd(matrix,computationOptions); m_isInitialized = true; m_info = jsvd.info(); if (m_info == Success || m_info == NoConvergence) { @@ -317,21 +333,21 @@ BDCSVD& BDCSVD::compute(const MatrixTy }// end compute -template +template template -void BDCSVD::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV) +void BDCSVD::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV) { // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa if (computeU()) { - Index Ucols = ShouldComputeThinU ? m_diagSize : householderU.cols(); + Index Ucols = m_computeThinU ? m_diagSize : householderU.cols(); m_matrixU = MatrixX::Identity(householderU.cols(), Ucols); m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast().topLeftCorner(m_diagSize, m_diagSize); householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer } if (computeV()) { - Index Vcols = ShouldComputeThinV ? m_diagSize : householderV.cols(); + Index Vcols = m_computeThinV ? m_diagSize : householderV.cols(); m_matrixV = MatrixX::Identity(householderV.cols(), Vcols); m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast().topLeftCorner(m_diagSize, m_diagSize); householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer @@ -346,8 +362,8 @@ void BDCSVD::copyUV(const HouseholderU &householderU, const * We can thus pack them prior to the the matrix product. However, this is only worth the effort if the matrix is large * enough. */ -template -void BDCSVD::structured_update(Block A, const MatrixXr &B, Index n1) +template +void BDCSVD::structured_update(Block A, const MatrixXr &B, Index n1) { Index n = A.rows(); if(n>100) @@ -387,26 +403,7 @@ void BDCSVD::structured_update(Block -template -void BDCSVD::computeBaseCase(SVDType& svd, Index n, Index firstCol, Index firstRowW, Index firstColW, Index shift) -{ - svd.compute(m_computed.block(firstCol, firstCol, n + 1, n)); - m_info = svd.info(); - if (m_info != Success && m_info != NoConvergence) return; - if (m_compU) - m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = svd.matrixU(); - else - { - m_naiveU.row(0).segment(firstCol, n + 1).real() = svd.matrixU().row(0); - m_naiveU.row(1).segment(firstCol, n + 1).real() = svd.matrixU().row(n); - } - if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = svd.matrixV(); - m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); - m_computed.diagonal().segment(firstCol + shift, n) = svd.singularValues().head(n); -} - -// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the +// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the // place of the submatrix we are currently working on. 
//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU; @@ -416,8 +413,8 @@ void BDCSVD::computeBaseCase(SVDType& svd, Index n, Index f //@param firstColW : Same as firstRowW with the column. //@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix // to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper. -template -void BDCSVD::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) +template +void BDCSVD::divide(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) { // requires rows = cols + 1; using std::pow; @@ -435,17 +432,20 @@ void BDCSVD::divide(Eigen::Index firstCol, Eigen::Index las // matrices. if (n < m_algoswap) { - // FIXME this block involves temporaries - if (m_compV) - { - JacobiSVD baseSvd; - computeBaseCase(baseSvd, n, firstCol, firstRowW, firstColW, shift); - } + // FIXME this line involves temporaries + JacobiSVD b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0)); + m_info = b.info(); + if (m_info != Success && m_info != NoConvergence) return; + if (m_compU) + m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU(); else { - JacobiSVD baseSvd; - computeBaseCase(baseSvd, n, firstCol, firstRowW, firstColW, shift); + m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0); + m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n); } + if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV(); + m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero(); + m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n); return; } // We use the divide and conquer algorithm @@ -597,8 +597,8 @@ void BDCSVD::divide(Eigen::Index firstCol, Eigen::Index las // TODO Opportunities for optimization: better root finding algo, better stopping criterion, better // handling of round-off errors, be consistent in ordering // For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf -template -void BDCSVD::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) +template +void BDCSVD::computeSVDofM(Eigen::Index firstCol, Eigen::Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V) { const RealScalar considerZero = (std::numeric_limits::min)(); using std::abs; @@ -725,8 +725,8 @@ void BDCSVD::computeSVDofM(Eigen::Index firstCol, Eigen::In #endif } -template -typename BDCSVD::RealScalar BDCSVD::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift) +template +typename BDCSVD::RealScalar BDCSVD::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift) { Index m = perm.size(); RealScalar res = Literal(1); @@ -741,9 +741,9 @@ typename BDCSVD::RealScalar BDCSVD::se } -template -void BDCSVD::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, - VectorType& singVals, ArrayRef shifts, ArrayRef mus) +template +void BDCSVD::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, + 
VectorType& singVals, ArrayRef shifts, ArrayRef mus) { using std::abs; using std::swap; @@ -987,8 +987,8 @@ void BDCSVD::computeSingVals(const ArrayRef& col0, const Ar // zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1) -template -void BDCSVD::perturbCol0 +template +void BDCSVD::perturbCol0 (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat) { @@ -1067,8 +1067,8 @@ void BDCSVD::perturbCol0 } // compute singular vectors -template -void BDCSVD::computeSingVecs +template +void BDCSVD::computeSingVecs (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V) { @@ -1113,8 +1113,8 @@ void BDCSVD::computeSingVecs // page 12_13 // i >= 1, di almost null and zi non null. // We use a rotation to zero out zi applied to the left of M -template -void BDCSVD::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size) +template +void BDCSVD::deflation43(Eigen::Index firstCol, Eigen::Index shift, Eigen::Index i, Eigen::Index size) { using std::abs; using std::sqrt; @@ -1142,8 +1142,8 @@ void BDCSVD::deflation43(Eigen::Index firstCol, Eigen::Inde // i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M) // We apply two rotations to have zj = 0; // TODO deflation44 is still broken and not properly tested -template -void BDCSVD::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size) +template +void BDCSVD::deflation44(Eigen::Index firstColu , Eigen::Index firstColm, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index i, Eigen::Index j, Eigen::Index size) { using std::abs; using std::sqrt; @@ -1182,8 +1182,8 @@ void BDCSVD::deflation44(Eigen::Index firstColu , Eigen::In // acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive] -template -void BDCSVD::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) +template +void BDCSVD::deflation(Eigen::Index firstCol, Eigen::Index lastCol, Eigen::Index k, Eigen::Index firstRowW, Eigen::Index firstColW, Eigen::Index shift) { using std::sqrt; using std::abs; @@ -1361,11 +1361,10 @@ void BDCSVD::deflation(Eigen::Index firstCol, Eigen::Index * \sa class BDCSVD */ template -template -BDCSVD::PlainObject, Options> -MatrixBase::bdcSvd() const +BDCSVD::PlainObject> +MatrixBase::bdcSvd(unsigned int computationOptions) const { - return BDCSVD(*this); + return BDCSVD(*this, computationOptions); } } // end namespace Eigen diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h index 9b765e02d..91c95ec89 100644 --- a/Eigen/src/SVD/JacobiSVD.h +++ b/Eigen/src/SVD/JacobiSVD.h @@ -13,20 +13,12 @@ #include "./InternalHeaderCheck.h" -namespace Eigen { +namespace Eigen { namespace internal { - -enum OptionsMasks { - QRPreconditionerBits = NoQRPreconditioner | - HouseholderQRPreconditioner | - ColPivHouseholderQRPreconditioner | - FullPivHouseholderQRPreconditioner -}; - // forward declaration (needed by ICC) // the empty body is required by MSVC -template::IsComplex> struct svd_precondition_2x2_block_to_be_real {}; @@ -54,16 +46,16 @@ struct qr_preconditioner_should_do_anything }; }; -template::ret > struct qr_preconditioner_impl {}; -template -class 
qr_preconditioner_impl +template +class qr_preconditioner_impl { public: - void allocate(const JacobiSVD&) {} - bool run(JacobiSVD&, const MatrixType&) + void allocate(const JacobiSVD&) {} + bool run(JacobiSVD&, const MatrixType&) { return false; } @@ -71,71 +63,65 @@ public: /*** preconditioner using FullPivHouseholderQR ***/ -template -class qr_preconditioner_impl +template +class qr_preconditioner_impl { public: typedef typename MatrixType::Scalar Scalar; - typedef JacobiSVD SVDType; - enum { - WorkspaceSize = MatrixType::RowsAtCompileTime, - MaxWorkspaceSize = MatrixType::MaxRowsAtCompileTime + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime }; + typedef Matrix WorkspaceType; - typedef Matrix WorkspaceType; - - void allocate(const SVDType& svd) + void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } - if (svd.ShouldComputeFullU) m_workspace.resize(svd.rows()); + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); } - bool run(SVDType& svd, const MatrixType& matrix) + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.ShouldComputeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); + if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace); if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation(); return true; } return false; } - private: typedef FullPivHouseholderQR QRType; QRType m_qr; WorkspaceType m_workspace; }; -template -class qr_preconditioner_impl +template +class qr_preconditioner_impl { public: typedef typename MatrixType::Scalar Scalar; - typedef JacobiSVD SVDType; - enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MatrixOptions = MatrixType::Options + Options = MatrixType::Options }; typedef typename internal::make_proper_matrix_type< - Scalar, ColsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime + Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime >::type TransposeTypeWithSameStorageOrder; - void allocate(const SVDType& svd) + void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { @@ -143,66 +129,54 @@ public: ::new (&m_qr) QRType(svd.cols(), svd.rows()); } m_adjoint.resize(svd.cols(), svd.rows()); - if (svd.ShouldComputeFullV) m_workspace.resize(svd.cols()); + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); } - bool run(SVDType& svd, const MatrixType& matrix) + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { m_adjoint = matrix.adjoint(); m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.ShouldComputeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); + if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace); if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation(); return true; } else return false; } - private: typedef FullPivHouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; - typename plain_row_type::type 
m_workspace; + typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using ColPivHouseholderQR ***/ -template -class qr_preconditioner_impl +template +class qr_preconditioner_impl { public: - typedef typename MatrixType::Scalar Scalar; - typedef JacobiSVD SVDType; - - enum - { - WorkspaceSize = internal::traits::MatrixUColsAtCompileTime, - MaxWorkspaceSize = internal::traits::MatrixUMaxColsAtCompileTime - }; - - typedef Matrix WorkspaceType; - - void allocate(const SVDType& svd) + void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } - if (svd.ShouldComputeFullU) m_workspace.resize(svd.rows()); - else if (svd.ShouldComputeThinU) m_workspace.resize(svd.cols()); + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); } - bool run(SVDType& svd, const MatrixType& matrix) + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.ShouldComputeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); - else if(svd.ShouldComputeThinU) + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); @@ -216,46 +190,40 @@ public: private: typedef ColPivHouseholderQR QRType; QRType m_qr; - WorkspaceType m_workspace; + typename internal::plain_col_type::type m_workspace; }; -template -class qr_preconditioner_impl +template +class qr_preconditioner_impl { public: typedef typename MatrixType::Scalar Scalar; - typedef JacobiSVD SVDType; - enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MatrixOptions = MatrixType::Options, - WorkspaceSize = internal::traits::MatrixVColsAtCompileTime, - MaxWorkspaceSize = internal::traits::MatrixVMaxColsAtCompileTime + Options = MatrixType::Options }; - typedef Matrix WorkspaceType; - typedef typename internal::make_proper_matrix_type< - Scalar, ColsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime + Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime >::type TransposeTypeWithSameStorageOrder; - void allocate(const SVDType& svd) + void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } - if (svd.ShouldComputeFullV) m_workspace.resize(svd.cols()); - else if (svd.ShouldComputeThinV) m_workspace.resize(svd.rows()); + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); m_adjoint.resize(svd.cols(), svd.rows()); } - bool run(SVDType& svd, const MatrixType& matrix) + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { @@ -263,8 +231,8 @@ public: m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.ShouldComputeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); - else 
if(svd.ShouldComputeThinV) + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); @@ -279,45 +247,34 @@ private: typedef ColPivHouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; - WorkspaceType m_workspace; + typename internal::plain_row_type::type m_workspace; }; /*** preconditioner using HouseholderQR ***/ -template -class qr_preconditioner_impl +template +class qr_preconditioner_impl { public: - typedef typename MatrixType::Scalar Scalar; - typedef JacobiSVD SVDType; - - enum - { - WorkspaceSize = internal::traits::MatrixUColsAtCompileTime, - MaxWorkspaceSize = internal::traits::MatrixUMaxColsAtCompileTime - }; - - typedef Matrix WorkspaceType; - - void allocate(const SVDType& svd) + void allocate(const JacobiSVD& svd) { if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.rows(), svd.cols()); } - if (svd.ShouldComputeFullU) m_workspace.resize(svd.rows()); - else if (svd.ShouldComputeThinU) m_workspace.resize(svd.cols()); + if (svd.m_computeFullU) m_workspace.resize(svd.rows()); + else if (svd.m_computeThinU) m_workspace.resize(svd.cols()); } - bool run(SVDType& svd, const MatrixType& matrix) + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.rows() > matrix.cols()) { m_qr.compute(matrix); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView(); - if(svd.ShouldComputeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); - else if(svd.ShouldComputeThinU) + if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace); + else if(svd.m_computeThinU) { svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace); @@ -327,50 +284,43 @@ public: } return false; } - private: typedef HouseholderQR QRType; QRType m_qr; - WorkspaceType m_workspace; + typename internal::plain_col_type::type m_workspace; }; -template -class qr_preconditioner_impl +template +class qr_preconditioner_impl { public: typedef typename MatrixType::Scalar Scalar; - typedef JacobiSVD SVDType; - enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MatrixOptions = MatrixType::Options, - WorkspaceSize = internal::traits::MatrixVColsAtCompileTime, - MaxWorkspaceSize = internal::traits::MatrixVMaxColsAtCompileTime + Options = MatrixType::Options }; - typedef Matrix WorkspaceType; - typedef typename internal::make_proper_matrix_type< - Scalar, ColsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime + Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime >::type TransposeTypeWithSameStorageOrder; - void allocate(const SVDType& svd) + void allocate(const JacobiSVD& svd) { if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols()) { m_qr.~QRType(); ::new (&m_qr) QRType(svd.cols(), svd.rows()); } - if (svd.ShouldComputeFullV) m_workspace.resize(svd.cols()); - else if (svd.ShouldComputeThinV) m_workspace.resize(svd.rows()); + if (svd.m_computeFullV) m_workspace.resize(svd.cols()); + else if (svd.m_computeThinV) m_workspace.resize(svd.rows()); m_adjoint.resize(svd.cols(), svd.rows()); 
} - bool run(SVDType& svd, const MatrixType& matrix) + bool run(JacobiSVD& svd, const MatrixType& matrix) { if(matrix.cols() > matrix.rows()) { @@ -378,8 +328,8 @@ public: m_qr.compute(m_adjoint); svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView().adjoint(); - if(svd.ShouldComputeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); - else if(svd.ShouldComputeThinV) + if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace); + else if(svd.m_computeThinV) { svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows()); m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace); @@ -394,7 +344,7 @@ private: typedef HouseholderQR QRType; QRType m_qr; TransposeTypeWithSameStorageOrder m_adjoint; - WorkspaceType m_workspace; + typename internal::plain_row_type::type m_workspace; }; /*** 2x2 SVD implementation @@ -402,18 +352,18 @@ private: *** JacobiSVD consists in performing a series of 2x2 SVD subproblems ***/ -template -struct svd_precondition_2x2_block_to_be_real +template +struct svd_precondition_2x2_block_to_be_real { - typedef JacobiSVD SVD; + typedef JacobiSVD SVD; typedef typename MatrixType::RealScalar RealScalar; static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; } }; -template -struct svd_precondition_2x2_block_to_be_real +template +struct svd_precondition_2x2_block_to_be_real { - typedef JacobiSVD SVD; + typedef JacobiSVD SVD; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry) @@ -475,9 +425,9 @@ struct svd_precondition_2x2_block_to_be_real } }; -template -struct traits > - : svd_traits +template +struct traits > + : traits { typedef MatrixType_ MatrixType; }; @@ -492,9 +442,8 @@ struct traits > * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix * * \tparam MatrixType_ the type of the matrix of which we are computing the SVD decomposition - * \tparam Options this optional parameter allows one to specify the type of QR decomposition that will be used internally - * for the R-SVD step for non-square matrices. Additionally, it allows one to specify whether to compute - * thin or full unitaries \a U and \a V. See discussion of possible values below. + * \tparam QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally + * for the R-SVD step for non-square matrices. See discussion of possible values below. * * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product * \f[ A = U S V^* \f] @@ -523,7 +472,7 @@ struct traits > * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to * terminate in finite (and reasonable) time. * - * The possible QR preconditioners that can be set with Options template parameter are: + * The possible values for QRPreconditioner are: * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR. * \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR. * Contrary to other QRs, it doesn't allow computing thin unitaries. @@ -536,16 +485,10 @@ struct traits > * faster compilation and smaller executable code. 
It won't significantly speed up computation, since JacobiSVD is always checking * if QR preconditioning is needed before applying it anyway. * - * One may also use the Options template parameter to specify how the unitaries should be computed. The options are #ComputeThinU, - * #ComputeThinV, #ComputeFullU, #ComputeFullV. It is not possible to request both a thin and full unitary. - * So, it is not possible to use ComputeThinU | ComputeFullU or ComputeThinV | ComputeFullV. By default, unitaries will not be computed. - * - * You can set the QRPreconditioner and unitary options together: JacobiSVD - * * \sa MatrixBase::jacobiSvd() */ -template class JacobiSVD - : public SVDBase > +template class JacobiSVD + : public SVDBase > { typedef SVDBase Base; public: @@ -554,7 +497,6 @@ template class JacobiSVD typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; enum { - QRPreconditioner = Options & internal::QRPreconditionerBits, RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), @@ -567,6 +509,9 @@ template class JacobiSVD typedef typename Base::MatrixUType MatrixUType; typedef typename Base::MatrixVType MatrixVType; typedef typename Base::SingularValuesType SingularValuesType; + + typedef typename internal::plain_row_type::type RowType; + typedef typename internal::plain_col_type::type ColType; typedef Matrix WorkMatrixType; @@ -579,31 +524,55 @@ template class JacobiSVD JacobiSVD() {} + /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem size. * \sa JacobiSVD() */ - JacobiSVD(Index rows, Index cols) + JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) { - allocate(rows, cols); + allocate(rows, cols, computationOptions); } /** \brief Constructor performing the decomposition of given matrix. * * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non-default) FullPivHouseholderQR preconditioner. */ - explicit JacobiSVD(const MatrixType& matrix) + explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) { - compute(matrix); + compute(matrix, computationOptions); } /** \brief Method performing the decomposition of given matrix using custom options. * * \param matrix the matrix to decompose + * \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed. + * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU, + * #ComputeFullV, #ComputeThinV. + * + * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not + * available with the (non-default) FullPivHouseholderQR preconditioner. 
*/ - JacobiSVD& compute(const MatrixType& matrix); + JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions); + + /** \brief Method performing the decomposition of given matrix using current options. + * + * \param matrix the matrix to decompose + * + * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int). + */ + JacobiSVD& compute(const MatrixType& matrix) + { + return compute(matrix, m_computationOptions); + } using Base::computeU; using Base::computeV; @@ -612,7 +581,7 @@ template class JacobiSVD using Base::rank; private: - void allocate(Index rows, Index cols); + void allocate(Index rows, Index cols, unsigned int computationOptions); protected: using Base::m_matrixU; @@ -622,49 +591,84 @@ template class JacobiSVD using Base::m_isInitialized; using Base::m_isAllocated; using Base::m_usePrescribedThreshold; + using Base::m_computeFullU; + using Base::m_computeThinU; + using Base::m_computeFullV; + using Base::m_computeThinV; + using Base::m_computationOptions; using Base::m_nonzeroSingularValues; using Base::m_rows; using Base::m_cols; using Base::m_diagSize; using Base::m_prescribedThreshold; - using Base::ShouldComputeFullU; - using Base::ShouldComputeThinU; - using Base::ShouldComputeFullV; - using Base::ShouldComputeThinV; WorkMatrixType m_workMatrix; - EIGEN_STATIC_ASSERT(EIGEN_IMPLIES((ShouldComputeThinU != 0 || ShouldComputeThinV != 0), QRPreconditioner != FullPivHouseholderQRPreconditioner), - "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. " - "Use the ColPivHouseholderQR preconditioner instead.") - - template + template friend struct internal::svd_precondition_2x2_block_to_be_real; - template + template friend struct internal::qr_preconditioner_impl; - internal::qr_preconditioner_impl m_qr_precond_morecols; - internal::qr_preconditioner_impl m_qr_precond_morerows; + internal::qr_preconditioner_impl m_qr_precond_morecols; + internal::qr_preconditioner_impl m_qr_precond_morerows; MatrixType m_scaledMatrix; }; -template -void JacobiSVD::allocate(Eigen::Index rows, Eigen::Index cols) +template +void JacobiSVD::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions) { - if (Base::allocate(rows, cols)) - return; + eigen_assert(rows >= 0 && cols >= 0); + if (m_isAllocated && + rows == m_rows && + cols == m_cols && + computationOptions == m_computationOptions) + { + return; + } + + m_rows = rows; + m_cols = cols; + m_info = Success; + m_isInitialized = false; + m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U"); + eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns."); + if (QRPreconditioner == FullPivHouseholderQRPreconditioner) + { + eigen_assert(!(m_computeThinU || m_computeThinV) && + "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. 
" + "Use the ColPivHouseholderQR preconditioner instead."); + } + m_diagSize = (std::min)(m_rows, m_cols); + m_singularValues.resize(m_diagSize); + if(RowsAtCompileTime==Dynamic) + m_matrixU.resize(m_rows, m_computeFullU ? m_rows + : m_computeThinU ? m_diagSize + : 0); + if(ColsAtCompileTime==Dynamic) + m_matrixV.resize(m_cols, m_computeFullV ? m_cols + : m_computeThinV ? m_diagSize + : 0); m_workMatrix.resize(m_diagSize, m_diagSize); + if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols); } -template -JacobiSVD& -JacobiSVD::compute(const MatrixType& matrix) +template +JacobiSVD& +JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) { using std::abs; - allocate(matrix.rows(), matrix.cols()); + allocate(matrix.rows(), matrix.cols(), computationOptions); // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations, // only worsening the precision of U and V as we accumulate more rotations @@ -693,10 +697,10 @@ JacobiSVD::compute(const MatrixType& matrix) else { m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale; - if(ShouldComputeFullU) m_matrixU.setIdentity(m_rows,m_rows); - if(ShouldComputeThinU) m_matrixU.setIdentity(m_rows,m_diagSize); - if(ShouldComputeFullV) m_matrixV.setIdentity(m_cols,m_cols); - if(ShouldComputeThinV) m_matrixV.setIdentity(m_cols, m_diagSize); + if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows); + if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize); + if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols); + if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize); } /*** step 2. The main Jacobi SVD iteration. 
***/ @@ -722,7 +726,7 @@ JacobiSVD::compute(const MatrixType& matrix) finished = false; // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal // the complex to real operation returns true if the updated 2x2 block is not already diagonal - if(internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q, maxDiagEntry)) + if(internal::svd_precondition_2x2_block_to_be_real::run(m_workMatrix, *this, p, q, maxDiagEntry)) { JacobiRotation j_left, j_right; internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right); @@ -799,11 +803,10 @@ JacobiSVD::compute(const MatrixType& matrix) * \sa class JacobiSVD */ template -template -JacobiSVD::PlainObject, Options> -MatrixBase::jacobiSvd() const +JacobiSVD::PlainObject> +MatrixBase::jacobiSvd(unsigned int computationOptions) const { - return JacobiSVD(*this); + return JacobiSVD(*this, computationOptions); } } // end namespace Eigen diff --git a/Eigen/src/SVD/JacobiSVD_LAPACKE.h b/Eigen/src/SVD/JacobiSVD_LAPACKE.h index 688df3155..611ae8ce4 100644 --- a/Eigen/src/SVD/JacobiSVD_LAPACKE.h +++ b/Eigen/src/SVD/JacobiSVD_LAPACKE.h @@ -39,15 +39,15 @@ namespace Eigen { /** \internal Specialization for the data types supported by LAPACKe */ -#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW, OPTIONS) \ +#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \ template<> inline \ -JacobiSVD, OPTIONS>& \ -JacobiSVD, OPTIONS>::compute(const Matrix& matrix) \ +JacobiSVD, ColPivHouseholderQRPreconditioner>& \ +JacobiSVD, ColPivHouseholderQRPreconditioner>::compute(const Matrix& matrix, unsigned int computationOptions) \ { \ typedef Matrix MatrixType; \ /*typedef MatrixType::Scalar Scalar;*/ \ /*typedef MatrixType::RealScalar RealScalar;*/ \ - allocate(matrix.rows(), matrix.cols()); \ + allocate(matrix.rows(), matrix.cols(), computationOptions); \ \ /*const RealScalar precision = RealScalar(2) * NumTraits::epsilon();*/ \ m_nonzeroSingularValues = m_diagSize; \ @@ -56,14 +56,14 @@ JacobiSVD, OPTION lapack_int matrix_order = LAPACKE_COLROW; \ char jobu, jobvt; \ LAPACKE_TYPE *u, *vt, dummy; \ - jobu = (ShouldComputeFullU) ? 'A' : (ShouldComputeThinU) ? 'S' : 'N'; \ - jobvt = (ShouldComputeFullV) ? 'A' : (ShouldComputeThinV) ? 'S' : 'N'; \ + jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \ + jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \ if (computeU()) { \ ldu = internal::convert_index(m_matrixU.outerStride()); \ u = (LAPACKE_TYPE*)m_matrixU.data(); \ } else { ldu=1; u=&dummy; }\ MatrixType localV; \ - lapack_int vt_rows = (ShouldComputeFullV) ? internal::convert_index(m_cols) : (ShouldComputeThinV) ? internal::convert_index(m_diagSize) : 1; \ + lapack_int vt_rows = (m_computeFullV) ? internal::convert_index(m_cols) : (m_computeThinV) ? 
internal::convert_index(m_diagSize) : 1; \ if (computeV()) { \ localV.resize(vt_rows, m_cols); \ ldvt = internal::convert_index(localV.outerStride()); \ @@ -78,26 +78,15 @@ JacobiSVD, OPTION return *this; \ } -#define EIGEN_LAPACK_SVD_OPTIONS(OPTIONS) \ - EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \ - EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \ - EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \ - EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR, OPTIONS) \ -\ - EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR, OPTIONS) \ - EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR, OPTIONS) \ - EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR, OPTIONS) \ - EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR, OPTIONS) +EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR) +EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR) +EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR) +EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR) -EIGEN_LAPACK_SVD_OPTIONS(0) -EIGEN_LAPACK_SVD_OPTIONS(ComputeThinU) -EIGEN_LAPACK_SVD_OPTIONS(ComputeThinV) -EIGEN_LAPACK_SVD_OPTIONS(ComputeFullU) -EIGEN_LAPACK_SVD_OPTIONS(ComputeFullV) -EIGEN_LAPACK_SVD_OPTIONS(ComputeThinU | ComputeThinV) -EIGEN_LAPACK_SVD_OPTIONS(ComputeFullU | ComputeFullV) -EIGEN_LAPACK_SVD_OPTIONS(ComputeThinU | ComputeFullV) -EIGEN_LAPACK_SVD_OPTIONS(ComputeFullU | ComputeThinV) +EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR) +EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR) } // end namespace Eigen diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h index 1ca3ac0b9..7ecaf21eb 100644 --- a/Eigen/src/SVD/SVDBase.h +++ b/Eigen/src/SVD/SVDBase.h @@ -21,7 +21,6 @@ namespace Eigen { namespace internal { - template struct traits > : traits { @@ -30,32 +29,6 @@ template struct traits > typedef int StorageIndex; enum { Flags = 0 }; }; - -template -struct svd_traits : traits -{ - enum { - ShouldComputeFullU = Options & ComputeFullU, - ShouldComputeThinU = Options & ComputeThinU, - ShouldComputeFullV = Options & ComputeFullV, - ShouldComputeThinV = Options & ComputeThinV, - DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime), - MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::MaxRowsAtCompileTime,MatrixType::MaxColsAtCompileTime), - MatrixUColsAtCompileTime = ShouldComputeFullU ? MatrixType::RowsAtCompileTime - : ShouldComputeThinU ? DiagSizeAtCompileTime - : Dynamic, - MatrixVColsAtCompileTime = ShouldComputeFullV ? MatrixType::ColsAtCompileTime - : ShouldComputeThinV ? DiagSizeAtCompileTime - : Dynamic, - MatrixUMaxColsAtCompileTime = ShouldComputeFullU ? MatrixType::MaxRowsAtCompileTime - : ShouldComputeThinU ? MaxDiagSizeAtCompileTime - : Dynamic, - MatrixVMaxColsAtCompileTime = ShouldComputeFullV ? MatrixType::MaxColsAtCompileTime - : ShouldComputeThinV ? 
MaxDiagSizeAtCompileTime - : Dynamic - }; -}; - } /** \ingroup SVD_Module @@ -102,33 +75,19 @@ public: typedef typename Eigen::internal::traits::StorageIndex StorageIndex; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 enum { - ShouldComputeFullU = internal::traits::ShouldComputeFullU, - ShouldComputeThinU = internal::traits::ShouldComputeThinU, - ShouldComputeFullV = internal::traits::ShouldComputeFullV, - ShouldComputeThinV = internal::traits::ShouldComputeThinV, RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, + DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime), MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, - MatrixOptions = MatrixType::Options, - MatrixUColsAtCompileTime = internal::traits::MatrixUColsAtCompileTime, - MatrixVColsAtCompileTime = internal::traits::MatrixVColsAtCompileTime, - MatrixUMaxColsAtCompileTime = internal::traits::MatrixUMaxColsAtCompileTime, - MatrixVMaxColsAtCompileTime = internal::traits::MatrixVMaxColsAtCompileTime + MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime), + MatrixOptions = MatrixType::Options }; - EIGEN_STATIC_ASSERT(!(ShouldComputeFullU != 0 && ShouldComputeThinU != 0), "SVDBase: Cannot request both full and thin U") - EIGEN_STATIC_ASSERT(!(ShouldComputeFullV != 0 && ShouldComputeThinV != 0), "SVDBase: Cannot request both full and thin V") - - typedef typename internal::make_proper_matrix_type< - Scalar, RowsAtCompileTime, MatrixUColsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MatrixUMaxColsAtCompileTime - >::type MatrixUType; - typedef typename internal::make_proper_matrix_type< - Scalar, ColsAtCompileTime, MatrixVColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MatrixVMaxColsAtCompileTime - >::type MatrixVType; - + typedef Matrix MatrixUType; + typedef Matrix MatrixVType; typedef typename internal::plain_diag_type::type SingularValuesType; - + Derived& derived() { return *static_cast(this); } const Derived& derived() const { return *static_cast(this); } @@ -248,9 +207,9 @@ public: } /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */ - EIGEN_CONSTEXPR inline bool computeU() const { return ShouldComputeFullU != 0 || ShouldComputeThinU != 0; } + inline bool computeU() const { return m_computeFullU || m_computeThinU; } /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */ - EIGEN_CONSTEXPR inline bool computeV() const { return ShouldComputeFullV != 0 || ShouldComputeThinV != 0; } + inline bool computeV() const { return m_computeFullV || m_computeThinV; } inline Index rows() const { return m_rows; } inline Index cols() const { return m_cols; } @@ -307,13 +266,16 @@ protected: } // return true if already allocated - bool allocate(Index rows, Index cols); + bool allocate(Index rows, Index cols, unsigned int computationOptions) ; MatrixUType m_matrixU; MatrixVType m_matrixV; SingularValuesType m_singularValues; ComputationInfo m_info; bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; + bool m_computeFullU, m_computeThinU; + bool m_computeFullV, m_computeThinV; + unsigned int m_computationOptions; Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; RealScalar m_prescribedThreshold; @@ -326,8 +288,15 @@ protected: m_isInitialized(false), m_isAllocated(false), m_usePrescribedThreshold(false), + m_computeFullU(false), + m_computeThinU(false), 
+ m_computeFullV(false), + m_computeThinV(false), + m_computationOptions(0), m_rows(-1), m_cols(-1), m_diagSize(0) { } + + }; #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -361,14 +330,15 @@ void SVDBase::_solve_impl_transposed(const RhsType &rhs, DstType &dst) } #endif -template -bool SVDBase::allocate(Index rows, Index cols) +template +bool SVDBase::allocate(Index rows, Index cols, unsigned int computationOptions) { eigen_assert(rows >= 0 && cols >= 0); if (m_isAllocated && rows == m_rows && - cols == m_cols) + cols == m_cols && + computationOptions == m_computationOptions) { return true; } @@ -378,13 +348,22 @@ bool SVDBase::allocate(Index rows, Index cols) m_info = Success; m_isInitialized = false; m_isAllocated = true; + m_computationOptions = computationOptions; + m_computeFullU = (computationOptions & ComputeFullU) != 0; + m_computeThinU = (computationOptions & ComputeThinU) != 0; + m_computeFullV = (computationOptions & ComputeFullV) != 0; + m_computeThinV = (computationOptions & ComputeThinV) != 0; + eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U"); + eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V"); + eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) && + "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns."); m_diagSize = (std::min)(m_rows, m_cols); m_singularValues.resize(m_diagSize); if(RowsAtCompileTime==Dynamic) - m_matrixU.resize(m_rows, ShouldComputeFullU ? m_rows : ShouldComputeThinU ? m_diagSize : 0); + m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0); if(ColsAtCompileTime==Dynamic) - m_matrixV.resize(m_cols, ShouldComputeFullV ? m_cols : ShouldComputeThinV ? m_diagSize : 0); + m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? 
m_diagSize : 0); return false; } diff --git a/bench/dense_solvers.cpp b/bench/dense_solvers.cpp index 11c755b30..24343dcd8 100644 --- a/bench/dense_solvers.cpp +++ b/bench/dense_solvers.cpp @@ -38,6 +38,8 @@ void bench(int id, int rows, int size = Size) A = A*A.adjoint(); BenchTimer t_llt, t_ldlt, t_lu, t_fplu, t_qr, t_cpqr, t_cod, t_fpqr, t_jsvd, t_bdcsvd; + int svd_opt = ComputeThinU|ComputeThinV; + int tries = 5; int rep = 1000/size; if(rep==0) rep = 1; @@ -51,8 +53,8 @@ void bench(int id, int rows, int size = Size) ColPivHouseholderQR cpqr(A.rows(),A.cols()); CompleteOrthogonalDecomposition cod(A.rows(),A.cols()); FullPivHouseholderQR fpqr(A.rows(),A.cols()); - JacobiSVD jsvd(A.rows(),A.cols()); - BDCSVD bdcsvd(A.rows(),A.cols()); + JacobiSVD jsvd(A.rows(),A.cols()); + BDCSVD bdcsvd(A.rows(),A.cols()); BENCH(t_llt, tries, rep, compute_norm_equation(llt,A)); BENCH(t_ldlt, tries, rep, compute_norm_equation(ldlt,A)); @@ -65,9 +67,9 @@ void bench(int id, int rows, int size = Size) if(size*rows<=10000000) BENCH(t_fpqr, tries, rep, compute(fpqr,A)); if(size<500) // JacobiSVD is really too slow for too large matrices - BENCH(t_jsvd, tries, rep, jsvd.compute(A)); + BENCH(t_jsvd, tries, rep, jsvd.compute(A,svd_opt)); // if(size*rows<=20000000) - BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A)); + BENCH(t_bdcsvd, tries, rep, bdcsvd.compute(A,svd_opt)); results["LLT"][id] = t_llt.best(); results["LDLT"][id] = t_ldlt.best(); diff --git a/doc/UsingBlasLapackBackends.dox b/doc/UsingBlasLapackBackends.dox index 457282056..caa597122 100644 --- a/doc/UsingBlasLapackBackends.dox +++ b/doc/UsingBlasLapackBackends.dox @@ -101,8 +101,8 @@ m1.colPivHouseholderQr(); ?geqp3 \endcode Singular value decomposition \n \c EIGEN_USE_LAPACKE \code -JacobiSVD svd; -svd.compute(m1); +JacobiSVD svd; +svd.compute(m1, ComputeThinV); \endcode\code ?gesvd \endcode diff --git a/doc/examples/TutorialLinAlgSVDSolve.cpp b/doc/examples/TutorialLinAlgSVDSolve.cpp index 7bbbeae4f..f109f04e5 100644 --- a/doc/examples/TutorialLinAlgSVDSolve.cpp +++ b/doc/examples/TutorialLinAlgSVDSolve.cpp @@ -11,5 +11,5 @@ int main() VectorXf b = VectorXf::Random(3); cout << "Here is the right hand side b:\n" << b << endl; cout << "The least-squares solution is:\n" - << A.template bdcSvd().solve(b) << endl; + << A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl; } diff --git a/doc/snippets/JacobiSVD_basic.cpp b/doc/snippets/JacobiSVD_basic.cpp index 6c21bafc2..ab24b9bca 100644 --- a/doc/snippets/JacobiSVD_basic.cpp +++ b/doc/snippets/JacobiSVD_basic.cpp @@ -1,6 +1,6 @@ MatrixXf m = MatrixXf::Random(3,2); cout << "Here is the matrix m:" << endl << m << endl; -JacobiSVD svd(m); +JacobiSVD svd(m, ComputeThinU | ComputeThinV); cout << "Its singular values are:" << endl << svd.singularValues() << endl; cout << "Its left singular vectors are the columns of the thin U matrix:" << endl << svd.matrixU() << endl; cout << "Its right singular vectors are the columns of the thin V matrix:" << endl << svd.matrixV() << endl; diff --git a/lapack/svd.cpp b/lapack/svd.cpp index 424f253e5..77b302b6b 100644 --- a/lapack/svd.cpp +++ b/lapack/svd.cpp @@ -10,7 +10,6 @@ #include "lapack_common.h" #include - // computes the singular values/vectors a general M-by-N matrix A using divide-and-conquer EIGEN_LAPACK_FUNC(gesdd,(char *jobz, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *ldvt, Scalar* /*work*/, int* lwork, EIGEN_LAPACK_ARG_IF_COMPLEX(RealScalar */*rwork*/) int * /*iwork*/, int *info)) @@ -48,97 +47,40 @@ 
EIGEN_LAPACK_FUNC(gesdd,(char *jobz, int *m, int* n, Scalar* a, int *lda, RealSc PlainMatrixType mat(*m,*n); mat = matrix(a,*m,*n,*lda); + + int option = *jobz=='A' ? ComputeFullU|ComputeFullV + : *jobz=='S' ? ComputeThinU|ComputeThinV + : *jobz=='O' ? ComputeThinU|ComputeThinV + : 0; + + BDCSVD svd(mat,option); + + make_vector(s,diag_size) = svd.singularValues().head(diag_size); if(*jobz=='A') { - BDCSVD svd(mat); - make_vector(s,diag_size) = svd.singularValues().head(diag_size); - matrix(u,*m,*m,*ldu) = svd.matrixU(); - matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); + matrix(u,*m,*m,*ldu) = svd.matrixU(); + matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); } else if(*jobz=='S') { - BDCSVD svd(mat); - make_vector(s,diag_size) = svd.singularValues().head(diag_size); matrix(u,*m,diag_size,*ldu) = svd.matrixU(); matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint(); } else if(*jobz=='O' && *m>=*n) { - BDCSVD svd(mat); - make_vector(s,diag_size) = svd.singularValues().head(diag_size); - matrix(a,*m,*n,*lda) = svd.matrixU(); - matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); + matrix(a,*m,*n,*lda) = svd.matrixU(); + matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); } else if(*jobz=='O') { - BDCSVD svd(mat); - make_vector(s,diag_size) = svd.singularValues().head(diag_size); matrix(u,*m,*m,*ldu) = svd.matrixU(); matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint(); } - else - { - BDCSVD svd(mat); - make_vector(s,diag_size) = svd.singularValues().head(diag_size); - } return 0; } - -template -void gesvdAssignmentHelper(MatrixType& mat, char* jobu, char* jobv, int* m, int* n, int diag_size, Scalar* a, int* lda, RealScalar* s, Scalar* u, int* ldu, Scalar* vt, int* ldvt) -{ - JacobiSVD svd(mat); - make_vector(s,diag_size) = svd.singularValues().head(diag_size); - { - if(*jobu=='A') matrix(u,*m,*m,*ldu) = svd.matrixU(); - else if(*jobu=='S') matrix(u,*m,diag_size,*ldu) = svd.matrixU(); - else if(*jobu=='O') matrix(a,*m,diag_size,*lda) = svd.matrixU(); - } - { - if(*jobv=='A') matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); - else if(*jobv=='S') matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint(); - else if(*jobv=='O') matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint(); - } -} - -template -void gesvdSetVOptions(MatrixType& mat, char* jobu, char* jobv, Args... args) -{ - if (*jobv=='A') - { - gesvdAssignmentHelper(mat, jobu, jobv, args...); - } - else if (*jobv=='S' || *jobv=='O') - { - gesvdAssignmentHelper(mat, jobu, jobv, args...); - } - else - { - gesvdAssignmentHelper(mat, jobu, jobv, args...); - } -} - - -template -void gesvdSetUOptions(MatrixType& mat, char* jobu, char* jobv, Args... args) -{ - if (*jobu=='A') - { - gesvdSetVOptions(mat, jobu, jobv, args...); - } - else if (*jobu=='S' || *jobu=='O') - { - gesvdSetVOptions(mat, jobu, jobv, args...); - } - else - { - gesvdSetVOptions(mat, jobu, jobv, args...); - } -} - // computes the singular values/vectors a general M-by-N matrix A using two sided jacobi algorithm EIGEN_LAPACK_FUNC(gesvd,(char *jobu, char *jobv, int *m, int* n, Scalar* a, int *lda, RealScalar *s, Scalar *u, int *ldu, Scalar *vt, int *ldvt, Scalar* /*work*/, int* lwork, EIGEN_LAPACK_ARG_IF_COMPLEX(RealScalar */*rwork*/) int *info)) @@ -175,8 +117,22 @@ EIGEN_LAPACK_FUNC(gesvd,(char *jobu, char *jobv, int *m, int* n, Scalar* a, int PlainMatrixType mat(*m,*n); mat = matrix(a,*m,*n,*lda); - - gesvdSetUOptions(mat, jobu, jobv, m, n, diag_size, a, lda, s, u, ldu, vt, ldvt); + int option = (*jobu=='A' ? ComputeFullU : *jobu=='S' || *jobu=='O' ? 
ComputeThinU : 0) + | (*jobv=='A' ? ComputeFullV : *jobv=='S' || *jobv=='O' ? ComputeThinV : 0); + + JacobiSVD svd(mat,option); + + make_vector(s,diag_size) = svd.singularValues().head(diag_size); + { + if(*jobu=='A') matrix(u,*m,*m,*ldu) = svd.matrixU(); + else if(*jobu=='S') matrix(u,*m,diag_size,*ldu) = svd.matrixU(); + else if(*jobu=='O') matrix(a,*m,diag_size,*lda) = svd.matrixU(); + } + { + if(*jobv=='A') matrix(vt,*n,*n,*ldvt) = svd.matrixV().adjoint(); + else if(*jobv=='S') matrix(vt,diag_size,*n,*ldvt) = svd.matrixV().adjoint(); + else if(*jobv=='O') matrix(a,diag_size,*n,*lda) = svd.matrixV().adjoint(); + } return 0; -} \ No newline at end of file +} diff --git a/test/bdcsvd.cpp b/test/bdcsvd.cpp index b4f92bf2d..f98cdcaf6 100644 --- a/test/bdcsvd.cpp +++ b/test/bdcsvd.cpp @@ -19,11 +19,26 @@ #include #include + #define SVD_DEFAULT(M) BDCSVD #define SVD_FOR_MIN_NORM(M) BDCSVD -#define SVD_STATIC_OPTIONS(M, O) BDCSVD #include "svd_common.h" +// Check all variants of JacobiSVD +template +void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true) +{ + MatrixType m; + if(pickrandom) { + m.resizeLike(a); + svd_fill_random(m); + } + else + m = a; + + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); +} + template void bdcsvd_method() { @@ -34,23 +49,28 @@ void bdcsvd_method() VERIFY_IS_APPROX(m.bdcSvd().singularValues(), RealVecType::Ones()); VERIFY_RAISES_ASSERT(m.bdcSvd().matrixU()); VERIFY_RAISES_ASSERT(m.bdcSvd().matrixV()); - VERIFY_IS_APPROX(m.template bdcSvd().solve(m), m); - VERIFY_IS_APPROX(m.template bdcSvd().transpose().solve(m), m); - VERIFY_IS_APPROX(m.template bdcSvd().adjoint().solve(m), m); + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).solve(m), m); + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); + VERIFY_IS_APPROX(m.bdcSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); } -// compare the Singular values returned with Jacobi and Bdc +// Compare the Singular values returned with Jacobi and Bdc. template -void compare_bdc_jacobi(const MatrixType& a = MatrixType(), int algoswap = 16, bool random = true) +void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0, int algoswap = 16, bool random = true) { MatrixType m = random ? MatrixType::Random(a.rows(), a.cols()) : a; - BDCSVD bdc_svd(m.rows(), m.cols()); + BDCSVD bdc_svd(m.rows(), m.cols(), computationOptions); bdc_svd.setSwitchSize(algoswap); bdc_svd.compute(m); - + JacobiSVD jacobi_svd(m); VERIFY_IS_APPROX(bdc_svd.singularValues(), jacobi_svd.singularValues()); + + if(computationOptions & ComputeFullU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU()); + if(computationOptions & ComputeThinU) VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU()); + if(computationOptions & ComputeFullV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV()); + if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV()); } // Verifies total deflation is **not** triggered. 
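For readers skimming the tests above: compare_bdc_jacobi relies on the run-time form of the API, in which a BDCSVD is preallocated for a given problem size and set of options and is then fed a matrix via compute(). A minimal illustrative sketch follows (not part of the patch; the function name and the matrix passed in are made up):

#include <Eigen/Dense>
using namespace Eigen;

// Preallocate a BDCSVD for a known problem size; the unitaries to compute are
// selected at run time through the computationOptions bit-field.
void factor_once(const MatrixXd& m)
{
  BDCSVD<MatrixXd> bdc(m.rows(), m.cols(), ComputeThinU | ComputeThinV);
  bdc.setSwitchSize(16);              // blocks smaller than this are handled by JacobiSVD
  bdc.compute(m);                     // reuses the options given at construction
  VectorXd sv = bdc.singularValues(); // always available after compute()
  MatrixXd U  = bdc.matrixU();        // valid because ComputeThinU was requested
  MatrixXd V  = bdc.matrixV();        // valid because ComputeThinV was requested
}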
@@ -71,59 +91,41 @@ void compare_bdc_jacobi_instance(bool structure_as_m, int algoswap = 16) -20.794, 8.68496, -4.83103, -8.4981, -10.5451, 23.9072; } - compare_bdc_jacobi(m, algoswap, false); -} - -template -void bdcsvd_all_options(const MatrixType& input = MatrixType()) -{ - MatrixType m = input; - svd_fill_random(m); - svd_option_checks(m); + compare_bdc_jacobi(m, 0, algoswap, false); } EIGEN_DECLARE_TEST(bdcsvd) { - CALL_SUBTEST_3(( svd_verify_assert() )); - CALL_SUBTEST_4(( svd_verify_assert() )); - CALL_SUBTEST_7(( svd_verify_assert >() )); - CALL_SUBTEST_7(( svd_verify_assert >() )); - CALL_SUBTEST_9(( svd_verify_assert, 20, 27> >() )); + CALL_SUBTEST_3(( svd_verify_assert >(Matrix3f()) )); + CALL_SUBTEST_4(( svd_verify_assert >(Matrix4d()) )); + CALL_SUBTEST_7(( svd_verify_assert >(MatrixXf(10,12)) )); + CALL_SUBTEST_8(( svd_verify_assert >(MatrixXcd(7,5)) )); - CALL_SUBTEST_101(( svd_all_trivial_2x2(bdcsvd_all_options) )); - CALL_SUBTEST_102(( svd_all_trivial_2x2(bdcsvd_all_options) )); + CALL_SUBTEST_101(( svd_all_trivial_2x2(bdcsvd) )); + CALL_SUBTEST_102(( svd_all_trivial_2x2(bdcsvd) )); for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_3(( bdcsvd() )); + CALL_SUBTEST_4(( bdcsvd() )); + CALL_SUBTEST_5(( bdcsvd >() )); + int r = internal::random(1, EIGEN_TEST_MAX_SIZE/2), c = internal::random(1, EIGEN_TEST_MAX_SIZE/2); TEST_SET_BUT_UNUSED_VARIABLE(r) TEST_SET_BUT_UNUSED_VARIABLE(c) - CALL_SUBTEST_7(( compare_bdc_jacobi(MatrixXf(r,c)) )); - CALL_SUBTEST_10(( compare_bdc_jacobi(MatrixXd(r,c)) )); - CALL_SUBTEST_8(( compare_bdc_jacobi(MatrixXcd(r,c)) )); + CALL_SUBTEST_6(( bdcsvd(Matrix(r,2)) )); + CALL_SUBTEST_7(( bdcsvd(MatrixXf(r,c)) )); + CALL_SUBTEST_7(( compare_bdc_jacobi(MatrixXf(r,c)) )); + CALL_SUBTEST_10(( bdcsvd(MatrixXd(r,c)) )); + CALL_SUBTEST_10(( compare_bdc_jacobi(MatrixXd(r,c)) )); + CALL_SUBTEST_8(( bdcsvd(MatrixXcd(r,c)) )); + CALL_SUBTEST_8(( compare_bdc_jacobi(MatrixXcd(r,c)) )); + // Test on inf/nan matrix - CALL_SUBTEST_7( (svd_inf_nan()) ); - CALL_SUBTEST_10( (svd_inf_nan()) ); - - // Verify some computations using all combinations of the Options template parameter. 
- CALL_SUBTEST_3(( bdcsvd_all_options() )); - CALL_SUBTEST_3(( bdcsvd_all_options >() )); - CALL_SUBTEST_4(( bdcsvd_all_options >() )); - CALL_SUBTEST_4(( bdcsvd_all_options >() )); - CALL_SUBTEST_5(( bdcsvd_all_options >(Matrix(r, 30)) )); - CALL_SUBTEST_5(( bdcsvd_all_options >(Matrix(20, c)) )); - CALL_SUBTEST_7(( bdcsvd_all_options(MatrixXf(r, c)) )); - CALL_SUBTEST_8(( bdcsvd_all_options(MatrixXcd(r, c)) )); - CALL_SUBTEST_10(( bdcsvd_all_options(MatrixXd(r, c)) )); - CALL_SUBTEST_14(( bdcsvd_all_options>() )); - CALL_SUBTEST_14(( bdcsvd_all_options>() )); - - CALL_SUBTEST_15(( svd_check_max_size_matrix, ColPivHouseholderQRPreconditioner>(r, c) )); - CALL_SUBTEST_15(( svd_check_max_size_matrix, HouseholderQRPreconditioner>(r, c) )); - CALL_SUBTEST_15(( svd_check_max_size_matrix, ColPivHouseholderQRPreconditioner>(r, c) )); - CALL_SUBTEST_15(( svd_check_max_size_matrix, HouseholderQRPreconditioner>(r, c) )); + CALL_SUBTEST_7( (svd_inf_nan, MatrixXf>()) ); + CALL_SUBTEST_10( (svd_inf_nan, MatrixXd>()) ); } // test matrixbase method diff --git a/test/boostmultiprec.cpp b/test/boostmultiprec.cpp index e2fc9a811..e83e97044 100644 --- a/test/boostmultiprec.cpp +++ b/test/boostmultiprec.cpp @@ -200,8 +200,8 @@ EIGEN_DECLARE_TEST(boostmultiprec) TEST_SET_BUT_UNUSED_VARIABLE(s) } - CALL_SUBTEST_9(( jacobisvd_all_options(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); - CALL_SUBTEST_10(( bdcsvd_all_options(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); + CALL_SUBTEST_9(( jacobisvd(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); + CALL_SUBTEST_10(( bdcsvd(Mat(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); CALL_SUBTEST_11(( test_simplicial_cholesky_T() )); } diff --git a/test/geo_alignedbox.cpp b/test/geo_alignedbox.cpp index e4dab32e0..7b1684f29 100644 --- a/test/geo_alignedbox.cpp +++ b/test/geo_alignedbox.cpp @@ -211,7 +211,7 @@ MatrixType randomRotationMatrix() // https://www.isprs-ann-photogramm-remote-sens-spatial-inf-sci.net/III-7/103/2016/isprs-annals-III-7-103-2016.pdf const MatrixType rand = MatrixType::Random(); const MatrixType q = rand.householderQr().householderQ(); - const JacobiSVD svd(q); + const JacobiSVD svd = q.jacobiSvd(ComputeFullU | ComputeFullV); const typename MatrixType::Scalar det = (svd.matrixU() * svd.matrixV().transpose()).determinant(); MatrixType diag = rand.Identity(); diag(MatrixType::RowsAtCompileTime - 1, MatrixType::ColsAtCompileTime - 1) = det; diff --git a/test/jacobisvd.cpp b/test/jacobisvd.cpp index 8966b4c64..5b15c5a27 100644 --- a/test/jacobisvd.cpp +++ b/test/jacobisvd.cpp @@ -16,9 +16,49 @@ #define SVD_DEFAULT(M) JacobiSVD #define SVD_FOR_MIN_NORM(M) JacobiSVD -#define SVD_STATIC_OPTIONS(M, O) JacobiSVD #include "svd_common.h" +// Check all variants of JacobiSVD +template +void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) +{ + MatrixType m = a; + if(pickrandom) + svd_fill_random(m); + + CALL_SUBTEST(( svd_test_all_computation_options >(m, true) )); // check full only + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); + if(m.rows()==m.cols()) + CALL_SUBTEST(( svd_test_all_computation_options >(m, false) )); +} 
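As the helper above iterates over preconditioners, note that with the run-time options API the QR preconditioner stays a template parameter while the unitaries are requested per call. A hedged sketch, illustrative only and not part of the patch:

#include <Eigen/Dense>
using namespace Eigen;

void preconditioner_variants(const MatrixXcd& m)
{
  // Default preconditioner (column-pivoting QR) with thin unitaries.
  JacobiSVD<MatrixXcd, ColPivHouseholderQRPreconditioner> colpiv(m, ComputeThinU | ComputeThinV);
  // Unpivoted QR also accepts thin unitaries for dynamic-size matrices.
  JacobiSVD<MatrixXcd, HouseholderQRPreconditioner> nopiv(m, ComputeThinU | ComputeThinV);
  // Full-pivoting QR only supports full unitaries.
  JacobiSVD<MatrixXcd, FullPivHouseholderQRPreconditioner> fullpiv(m, ComputeFullU | ComputeFullV);
  // No preconditioner is only meaningful for square input.
  if (m.rows() == m.cols())
  {
    JacobiSVD<MatrixXcd, NoQRPreconditioner> none(m, ComputeFullU | ComputeFullV);
  }
}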
+ +template void jacobisvd_verify_assert(const MatrixType& m) +{ + svd_verify_assert >(m); + svd_verify_assert >(m, true); + svd_verify_assert >(m); + svd_verify_assert >(m); + Index rows = m.rows(); + Index cols = m.cols(); + + enum { + ColsAtCompileTime = MatrixType::ColsAtCompileTime + }; + + + MatrixType a = MatrixType::Zero(rows, cols); + a.setZero(); + + if (ColsAtCompileTime == Dynamic) + { + JacobiSVD svd_fullqr; + VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV)) + VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV)) + VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV)) + } +} + template void jacobisvd_method() { @@ -29,47 +69,9 @@ void jacobisvd_method() VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones()); VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU()); VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV()); - VERIFY_IS_APPROX(m.template jacobiSvd().solve(m), m); - VERIFY_IS_APPROX(m.template jacobiSvd().transpose().solve(m), m); - VERIFY_IS_APPROX(m.template jacobiSvd().adjoint().solve(m), m); -} - -template -void jacobisvd_all_options(const MatrixType& input = MatrixType()) -{ - MatrixType m = input; - svd_fill_random(m); - svd_option_checks(m); - svd_option_checks(m); - svd_option_checks(m); - svd_option_checks_full_only(m); // FullPiv only used when computing full unitaries -} - -template -void jacobisvd_verify_assert(const MatrixType& m = MatrixType()) -{ - svd_verify_assert(m); - svd_verify_assert(m); - svd_verify_assert(m); - svd_verify_assert_full_only(m); -} - -template -void jacobisvd_verify_inputs(const MatrixType& m = MatrixType()) { - // check defaults - typedef JacobiSVD DefaultSVD; - DefaultSVD defaultSvd(m); - VERIFY((int)DefaultSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner); - VERIFY(!defaultSvd.computeU()); - VERIFY(!defaultSvd.computeV()); - - // ColPivHouseholderQR is always default in presence of other options. 
- VERIFY(( (int)JacobiSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner )); - VERIFY(( (int)JacobiSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner )); - VERIFY(( (int)JacobiSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner )); - VERIFY(( (int)JacobiSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner )); - VERIFY(( (int)JacobiSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner )); - VERIFY(( (int)JacobiSVD::QRPreconditioner == (int)ColPivHouseholderQRPreconditioner )); + VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m); + VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).transpose().solve(m), m); + VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).adjoint().solve(m), m); } namespace Foo { @@ -89,63 +91,45 @@ void msvc_workaround() EIGEN_DECLARE_TEST(jacobisvd) { - CALL_SUBTEST_4(( jacobisvd_verify_inputs() )); - CALL_SUBTEST_7(( jacobisvd_verify_inputs(Matrix(10, 12)) )); - CALL_SUBTEST_8(( jacobisvd_verify_inputs, 7, 5> >() )); - - CALL_SUBTEST_3(( jacobisvd_verify_assert() )); - CALL_SUBTEST_4(( jacobisvd_verify_assert() )); - CALL_SUBTEST_7(( jacobisvd_verify_assert>() )); - CALL_SUBTEST_7(( jacobisvd_verify_assert>() )); - CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10, 12)) )); - CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7, 5)) )); + CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) )); + CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) )); + CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) )); + CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) )); - CALL_SUBTEST_11(svd_all_trivial_2x2(jacobisvd_all_options)); - CALL_SUBTEST_12(svd_all_trivial_2x2(jacobisvd_all_options)); + CALL_SUBTEST_11(svd_all_trivial_2x2(jacobisvd)); + CALL_SUBTEST_12(svd_all_trivial_2x2(jacobisvd)); for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_3(( jacobisvd() )); + CALL_SUBTEST_4(( jacobisvd() )); + CALL_SUBTEST_5(( jacobisvd >() )); + CALL_SUBTEST_6(( jacobisvd >(Matrix(10,2)) )); int r = internal::random(1, 30), c = internal::random(1, 30); TEST_SET_BUT_UNUSED_VARIABLE(r) TEST_SET_BUT_UNUSED_VARIABLE(c) - - // Verify some computations using all combinations of the Options template parameter. 
- CALL_SUBTEST_3(( jacobisvd_all_options() )); - CALL_SUBTEST_3(( jacobisvd_all_options >() )); - CALL_SUBTEST_4(( jacobisvd_all_options() )); - CALL_SUBTEST_4(( jacobisvd_all_options >() )); - CALL_SUBTEST_4(( jacobisvd_all_options >() )); - CALL_SUBTEST_5(( jacobisvd_all_options >(Matrix(r, 16)) )); - CALL_SUBTEST_5(( jacobisvd_all_options >(Matrix(10, c)) )); - CALL_SUBTEST_7(( jacobisvd_all_options( MatrixXf(r, c)) )); - CALL_SUBTEST_8(( jacobisvd_all_options( MatrixXcd(r, c)) )); - CALL_SUBTEST_10(( jacobisvd_all_options( MatrixXd(r, c)) )); - CALL_SUBTEST_14(( jacobisvd_all_options>() )); - CALL_SUBTEST_14(( jacobisvd_all_options>() )); - MatrixXcd noQRTest = MatrixXcd(r, r); - svd_fill_random(noQRTest); - CALL_SUBTEST_16(( svd_option_checks(noQRTest) )); - - CALL_SUBTEST_15(( svd_check_max_size_matrix, ColPivHouseholderQRPreconditioner>(r, c) )); - CALL_SUBTEST_15(( svd_check_max_size_matrix, HouseholderQRPreconditioner>(r, c) )); - CALL_SUBTEST_15(( svd_check_max_size_matrix, ColPivHouseholderQRPreconditioner>(r, c) )); - CALL_SUBTEST_15(( svd_check_max_size_matrix, HouseholderQRPreconditioner>(r, c) )); + CALL_SUBTEST_10(( jacobisvd(MatrixXd(r,c)) )); + CALL_SUBTEST_7(( jacobisvd(MatrixXf(r,c)) )); + CALL_SUBTEST_8(( jacobisvd(MatrixXcd(r,c)) )); + (void) r; + (void) c; // Test on inf/nan matrix - CALL_SUBTEST_7( (svd_inf_nan()) ); - CALL_SUBTEST_10( (svd_inf_nan()) ); + CALL_SUBTEST_7( (svd_inf_nan, MatrixXf>()) ); + CALL_SUBTEST_10( (svd_inf_nan, MatrixXd>()) ); - CALL_SUBTEST_13(( jacobisvd_verify_assert>() )); - CALL_SUBTEST_13(( jacobisvd_verify_assert>() )); - CALL_SUBTEST_13(( jacobisvd_verify_assert>(Matrix(r)) )); - CALL_SUBTEST_13(( jacobisvd_verify_assert>(Matrix(c)) )); + // bug1395 test compile-time vectors as input + CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix()) )); + CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix()) )); + CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix(r)) )); + CALL_SUBTEST_13(( jacobisvd_verify_assert(Matrix(c)) )); } - CALL_SUBTEST_7(( jacobisvd_all_options(MatrixXd(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); - CALL_SUBTEST_8(( jacobisvd_all_options(MatrixXcd(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) )); + CALL_SUBTEST_7(( jacobisvd(MatrixXf(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); + CALL_SUBTEST_8(( jacobisvd(MatrixXcd(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) )); // test matrixbase method CALL_SUBTEST_1(( jacobisvd_method() )); diff --git a/test/nomalloc.cpp b/test/nomalloc.cpp index 689a4ccf4..cb4c073e9 100644 --- a/test/nomalloc.cpp +++ b/test/nomalloc.cpp @@ -152,7 +152,7 @@ void ctms_decompositions() x = fpQR.solve(b); // SVD module - Eigen::JacobiSVD jSVD; jSVD.compute(A); + Eigen::JacobiSVD jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV); } void test_zerosized() { diff --git a/test/qr_colpivoting.cpp b/test/qr_colpivoting.cpp index e25167138..06f16438f 100644 --- a/test/qr_colpivoting.cpp +++ b/test/qr_colpivoting.cpp @@ -55,7 +55,7 @@ void cod() { MatrixType exact_solution = MatrixType::Random(cols, cols2); MatrixType rhs = matrix * exact_solution; MatrixType cod_solution = cod.solve(rhs); - JacobiSVD svd(matrix); + JacobiSVD svd(matrix, ComputeThinU | ComputeThinV); MatrixType 
svd_solution = svd.solve(rhs); VERIFY_IS_APPROX(cod_solution, svd_solution); @@ -88,7 +88,7 @@ void cod_fixedsize() { exact_solution.setRandom(Cols, Cols2); Matrix rhs = matrix * exact_solution; Matrix cod_solution = cod.solve(rhs); - JacobiSVD svd(matrix); + JacobiSVD svd(matrix, ComputeFullU | ComputeFullV); Matrix svd_solution = svd.solve(rhs); VERIFY_IS_APPROX(cod_solution, svd_solution); diff --git a/test/svd_common.h b/test/svd_common.h index 4091c6d31..eae4c0bfe 100644 --- a/test/svd_common.h +++ b/test/svd_common.h @@ -16,10 +16,6 @@ #error a macro SVD_FOR_MIN_NORM(MatrixType) must be defined prior to including svd_common.h #endif -#ifndef SVD_STATIC_OPTIONS -#error a macro SVD_STATIC_OPTIONS(MatrixType, Options) must be defined prior to including svd_common.h -#endif - #include "svd_fill.h" #include "solverbase.h" @@ -59,8 +55,9 @@ void svd_check_full(const MatrixType& m, const SvdType& svd) } // Compare partial SVD defined by computationOptions to a full SVD referenceSvd -template +template void svd_compare_to_full(const MatrixType& m, + unsigned int computationOptions, const SvdType& referenceSvd) { typedef typename MatrixType::RealScalar RealScalar; @@ -69,18 +66,18 @@ void svd_compare_to_full(const MatrixType& m, Index diagSize = (std::min)(rows, cols); RealScalar prec = test_precision(); - SVD_STATIC_OPTIONS(MatrixType, Options) svd(m); + SvdType svd(m, computationOptions); VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues()); - if(Options & (ComputeFullV|ComputeThinV)) + if(computationOptions & (ComputeFullV|ComputeThinV)) { VERIFY( (svd.matrixV().adjoint()*svd.matrixV()).isIdentity(prec) ); VERIFY_IS_APPROX( svd.matrixV().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint(), referenceSvd.matrixV().leftCols(diagSize) * referenceSvd.singularValues().asDiagonal() * referenceSvd.matrixV().leftCols(diagSize).adjoint()); } - if(Options & (ComputeFullU|ComputeThinU)) + if(computationOptions & (ComputeFullU|ComputeThinU)) { VERIFY( (svd.matrixU().adjoint()*svd.matrixU()).isIdentity(prec) ); VERIFY_IS_APPROX( svd.matrixU().leftCols(diagSize) * svd.singularValues().cwiseAbs2().asDiagonal() * svd.matrixU().leftCols(diagSize).adjoint(), @@ -88,18 +85,19 @@ void svd_compare_to_full(const MatrixType& m, } // The following checks are not critical. - // For instance, with Dived&Conquer SVD, if only the factor 'V' is computed then different matrix-matrix product implementation will be used + // For instance, with Dived&Conquer SVD, if only the factor 'V' is computedt then different matrix-matrix product implementation will be used // and the resulting 'V' factor might be significantly different when the SVD decomposition is not unique, especially with single precision float. 
++g_test_level; - if(Options & ComputeFullU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU()); - if(Options & ComputeThinU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize)); - if(Options & ComputeFullV) VERIFY_IS_APPROX(svd.matrixV().cwiseAbs(), referenceSvd.matrixV().cwiseAbs()); - if(Options & ComputeThinV) VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize)); + if(computationOptions & ComputeFullU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU()); + if(computationOptions & ComputeThinU) VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize)); + if(computationOptions & ComputeFullV) VERIFY_IS_APPROX(svd.matrixV().cwiseAbs(), referenceSvd.matrixV().cwiseAbs()); + if(computationOptions & ComputeThinV) VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize)); --g_test_level; } +// template -void svd_least_square(const MatrixType& m) +void svd_least_square(const MatrixType& m, unsigned int computationOptions) { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; @@ -115,7 +113,7 @@ void svd_least_square(const MatrixType& m) typedef Matrix SolutionType; RhsType rhs = RhsType::Random(rows, internal::random(1, cols)); - SvdType svd(m); + SvdType svd(m, computationOptions); if(internal::is_same::value) svd.setThreshold(1e-8); else if(internal::is_same::value) svd.setThreshold(2e-4); @@ -164,9 +162,9 @@ void svd_least_square(const MatrixType& m) } } -// check minimal norm solutions, the input matrix m is only used to recover problem size -template -void svd_min_norm(const MatrixType& m) +// check minimal norm solutions, the inoput matrix m is only used to recover problem size +template +void svd_min_norm(const MatrixType& m, unsigned int computationOptions) { typedef typename MatrixType::Scalar Scalar; Index cols = m.cols(); @@ -201,7 +199,7 @@ void svd_min_norm(const MatrixType& m) tmp.tail(cols-rank).setZero(); SolutionType x21 = qr.householderQ() * tmp; // now check with SVD - SVD_STATIC_OPTIONS(MatrixType2, Options) svd2(m2); + SVD_FOR_MIN_NORM(MatrixType2) svd2(m2, computationOptions); SolutionType x22 = svd2.solve(rhs2); VERIFY_IS_APPROX(m2*x21, rhs2); VERIFY_IS_APPROX(m2*x22, rhs2); @@ -214,7 +212,7 @@ void svd_min_norm(const MatrixType& m) Matrix C = Matrix::Random(rows3,rank); MatrixType3 m3 = C * m2; RhsType3 rhs3 = C * rhs2; - SVD_STATIC_OPTIONS(MatrixType3, Options) svd3(m3); + SVD_FOR_MIN_NORM(MatrixType3) svd3(m3, computationOptions); SolutionType x3 = svd3.solve(rhs3); VERIFY_IS_APPROX(m3*x3, rhs3); VERIFY_IS_APPROX(m3*x21, rhs3); @@ -241,6 +239,57 @@ void svd_test_solvers(const MatrixType& m, const SolverType& solver) { check_solverbase(m, solver, rows, cols, cols2); } +// Check full, compare_to_full, least_square, and min_norm for all possible compute-options +template +void svd_test_all_computation_options(const MatrixType& m, bool full_only) +{ +// if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols()) +// return; + STATIC_CHECK(( internal::is_same::value )); + + SvdType fullSvd(m, ComputeFullU|ComputeFullV); + CALL_SUBTEST(( svd_check_full(m, fullSvd) )); + CALL_SUBTEST(( svd_least_square(m, ComputeFullU | ComputeFullV) )); + CALL_SUBTEST(( svd_min_norm(m, ComputeFullU | ComputeFullV) )); + + #if defined __INTEL_COMPILER + // remark #111: statement is unreachable + #pragma warning disable 111 + #endif + + svd_test_solvers(m, fullSvd); + + if(full_only) + return; + + CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullU, 
fullSvd) )); + CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullV, fullSvd) )); + CALL_SUBTEST(( svd_compare_to_full(m, 0, fullSvd) )); + + if (MatrixType::ColsAtCompileTime == Dynamic) { + // thin U/V are only available with dynamic number of columns + CALL_SUBTEST(( svd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) )); + CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinV, fullSvd) )); + CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) )); + CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU , fullSvd) )); + CALL_SUBTEST(( svd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) )); + + CALL_SUBTEST(( svd_least_square(m, ComputeFullU | ComputeThinV) )); + CALL_SUBTEST(( svd_least_square(m, ComputeThinU | ComputeFullV) )); + CALL_SUBTEST(( svd_least_square(m, ComputeThinU | ComputeThinV) )); + + CALL_SUBTEST(( svd_min_norm(m, ComputeFullU | ComputeThinV) )); + CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeFullV) )); + CALL_SUBTEST(( svd_min_norm(m, ComputeThinU | ComputeThinV) )); + + // test reconstruction + Index diagSize = (std::min)(m.rows(), m.cols()); + SvdType svd(m, ComputeThinU | ComputeThinV); + VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint()); + } +} + + // work around stupid msvc error when constructing at compile time an expression that involves // a division by zero, even if the numeric type has floating point template @@ -249,32 +298,31 @@ EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); } // workaround aggressive optimization in ICC template EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; } - // This function verifies we don't iterate infinitely on nan/inf values, // and that info() returns InvalidInput. 
-template +template void svd_inf_nan() { - SVD_STATIC_OPTIONS(MatrixType, ComputeFullU | ComputeFullV) svd; + SvdType svd; typedef typename MatrixType::Scalar Scalar; Scalar some_inf = Scalar(1) / zero(); VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf)); - svd.compute(MatrixType::Constant(10,10,some_inf)); + svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV); VERIFY(svd.info() == InvalidInput); Scalar nan = std::numeric_limits::quiet_NaN(); VERIFY(nan != nan); - svd.compute(MatrixType::Constant(10,10,nan)); + svd.compute(MatrixType::Constant(10,10,nan), ComputeFullU | ComputeFullV); VERIFY(svd.info() == InvalidInput); MatrixType m = MatrixType::Zero(10,10); m(internal::random(0,9), internal::random(0,9)) = some_inf; - svd.compute(m); + svd.compute(m, ComputeFullU | ComputeFullV); VERIFY(svd.info() == InvalidInput); m = MatrixType::Zero(10,10); m(internal::random(0,9), internal::random(0,9)) = nan; - svd.compute(m); + svd.compute(m, ComputeFullU | ComputeFullV); VERIFY(svd.info() == InvalidInput); // regression test for bug 791 @@ -282,7 +330,7 @@ void svd_inf_nan() m << 0, 2*NumTraits::epsilon(), 0.5, 0, -0.5, 0, nan, 0, 0; - svd.compute(m); + svd.compute(m, ComputeFullU | ComputeFullV); VERIFY(svd.info() == InvalidInput); m.resize(4,4); @@ -290,7 +338,7 @@ void svd_inf_nan() 0, 3, 1, 2e-308, 1, 0, 1, nan, 0, nan, nan, 0; - svd.compute(m); + svd.compute(m, ComputeFullU | ComputeFullV); VERIFY(svd.info() == InvalidInput); } @@ -307,8 +355,8 @@ void svd_underoverflow() Matrix2d M; M << -7.90884e-313, -4.94e-324, 0, 5.60844e-313; - SVD_STATIC_OPTIONS(Matrix2d, ComputeFullU | ComputeFullV) svd; - svd.compute(M); + SVD_DEFAULT(Matrix2d) svd; + svd.compute(M,ComputeFullU|ComputeFullV); CALL_SUBTEST( svd_check_full(M,svd) ); // Check all 2x2 matrices made with the following coefficients: @@ -319,7 +367,7 @@ void svd_underoverflow() do { M << value_set(id(0)), value_set(id(1)), value_set(id(2)), value_set(id(3)); - svd.compute(M); + svd.compute(M,ComputeFullU|ComputeFullV); CALL_SUBTEST( svd_check_full(M,svd) ); id(k)++; @@ -342,13 +390,15 @@ void svd_underoverflow() 3.7841695601406358e+307, 2.4331702789740617e+306, -3.5235707140272905e+307, -8.7190887618028355e+307, -7.3453213709232193e+307, -2.4367363684472105e+307; - SVD_STATIC_OPTIONS(Matrix3d, ComputeFullU|ComputeFullV) svd3; - svd3.compute(M3); // just check we don't loop indefinitely + SVD_DEFAULT(Matrix3d) svd3; + svd3.compute(M3,ComputeFullU|ComputeFullV); // just check we don't loop indefinitely CALL_SUBTEST( svd_check_full(M3,svd3) ); } +// void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) + template -void svd_all_trivial_2x2( void (*cb)(const MatrixType&) ) +void svd_all_trivial_2x2( void (*cb)(const MatrixType&,bool) ) { MatrixType M; VectorXd value_set(3); @@ -359,7 +409,7 @@ void svd_all_trivial_2x2( void (*cb)(const MatrixType&) ) { M << value_set(id(0)), value_set(id(1)), value_set(id(2)), value_set(id(3)); - cb(M); + cb(M,false); id(k)++; if(id(k)>=value_set.size()) @@ -384,10 +434,22 @@ void svd_preallocate() internal::set_is_malloc_allowed(true); svd.compute(m); VERIFY_IS_APPROX(svd.singularValues(), v); - VERIFY_RAISES_ASSERT(svd.matrixU()); - VERIFY_RAISES_ASSERT(svd.matrixV()); - SVD_STATIC_OPTIONS(MatrixXf, ComputeFullU | ComputeFullV) svd2(3,3); + SVD_DEFAULT(MatrixXf) svd2(3,3); + internal::set_is_malloc_allowed(false); + svd2.compute(m); + internal::set_is_malloc_allowed(true); + VERIFY_IS_APPROX(svd2.singularValues(), v); + 
VERIFY_RAISES_ASSERT(svd2.matrixU()); + VERIFY_RAISES_ASSERT(svd2.matrixV()); + svd2.compute(m, ComputeFullU | ComputeFullV); + VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity()); + VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity()); + internal::set_is_malloc_allowed(false); + svd2.compute(m); + internal::set_is_malloc_allowed(true); + + SVD_DEFAULT(MatrixXf) svd3(3,3,ComputeFullU|ComputeFullV); internal::set_is_malloc_allowed(false); svd2.compute(m); internal::set_is_malloc_allowed(true); @@ -395,168 +457,65 @@ void svd_preallocate() VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity()); VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity()); internal::set_is_malloc_allowed(false); - svd2.compute(m); + svd2.compute(m, ComputeFullU|ComputeFullV); internal::set_is_malloc_allowed(true); } -template -void svd_verify_assert_full_only(const MatrixType& m = MatrixType()) +template +void svd_verify_assert(const MatrixType& m, bool fullOnly = false) { - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime - }; - - typedef Matrix RhsType; - RhsType rhs = RhsType::Zero(m.rows()); - - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner) svd0; - VERIFY_RAISES_ASSERT(( svd0.matrixU() )); - VERIFY_RAISES_ASSERT(( svd0.singularValues() )); - VERIFY_RAISES_ASSERT(( svd0.matrixV() )); - VERIFY_RAISES_ASSERT(( svd0.solve(rhs) )); - VERIFY_RAISES_ASSERT(( svd0.transpose().solve(rhs) )); - VERIFY_RAISES_ASSERT(( svd0.adjoint().solve(rhs) )); - - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner) svd1(m); - VERIFY_RAISES_ASSERT(( svd1.matrixU() )); - VERIFY_RAISES_ASSERT(( svd1.matrixV() )); - VERIFY_RAISES_ASSERT(( svd1.solve(rhs))); - - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU) svdFullU(m); - VERIFY_RAISES_ASSERT(( svdFullU.matrixV() )); - VERIFY_RAISES_ASSERT(( svdFullU.solve(rhs))); - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullV) svdFullV(m); - VERIFY_RAISES_ASSERT(( svdFullV.matrixU() )); - VERIFY_RAISES_ASSERT(( svdFullV.solve(rhs))); -} - -template -void svd_verify_assert(const MatrixType& m = MatrixType()) -{ - enum { - RowsAtCompileTime = MatrixType::RowsAtCompileTime - }; - - typedef Matrix RhsType; - RhsType rhs = RhsType::Zero(m.rows()); - - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinU) svdThinU(m); - VERIFY_RAISES_ASSERT(( svdThinU.matrixV() )); - VERIFY_RAISES_ASSERT(( svdThinU.solve(rhs))); - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinV) svdThinV(m); - VERIFY_RAISES_ASSERT(( svdThinV.matrixU() )); - VERIFY_RAISES_ASSERT(( svdThinV.solve(rhs))); - - svd_verify_assert_full_only(m); -} - -template -void svd_compute_checks(const MatrixType& m) -{ - typedef SVD_STATIC_OPTIONS(MatrixType, Options) SVDType; + typedef typename MatrixType::Scalar Scalar; + Index rows = m.rows(); + Index cols = m.cols(); enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, - ColsAtCompileTime = MatrixType::ColsAtCompileTime, - DiagAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), - MatrixURowsAtCompileTime = SVDType::MatrixUType::RowsAtCompileTime, - MatrixUColsAtCompileTime = SVDType::MatrixUType::ColsAtCompileTime, - MatrixVRowsAtCompileTime = SVDType::MatrixVType::RowsAtCompileTime, - MatrixVColsAtCompileTime = SVDType::MatrixVType::ColsAtCompileTime + ColsAtCompileTime = MatrixType::ColsAtCompileTime }; - - SVDType staticSvd(m); - VERIFY(MatrixURowsAtCompileTime == RowsAtCompileTime); - VERIFY(MatrixVRowsAtCompileTime == ColsAtCompileTime); - if (Options & ComputeThinU) 
VERIFY(MatrixUColsAtCompileTime == DiagAtCompileTime); - if (Options & ComputeFullU) VERIFY(MatrixUColsAtCompileTime == RowsAtCompileTime); - if (Options & ComputeThinV) VERIFY(MatrixVColsAtCompileTime == DiagAtCompileTime); - if (Options & ComputeFullV) VERIFY(MatrixVColsAtCompileTime == ColsAtCompileTime); + typedef Matrix RhsType; + RhsType rhs(rows); + SvdType svd; + VERIFY_RAISES_ASSERT(svd.matrixU()) + VERIFY_RAISES_ASSERT(svd.singularValues()) + VERIFY_RAISES_ASSERT(svd.matrixV()) + VERIFY_RAISES_ASSERT(svd.solve(rhs)) + VERIFY_RAISES_ASSERT(svd.transpose().solve(rhs)) + VERIFY_RAISES_ASSERT(svd.adjoint().solve(rhs)) + MatrixType a = MatrixType::Zero(rows, cols); + a.setZero(); + svd.compute(a, 0); + VERIFY_RAISES_ASSERT(svd.matrixU()) + VERIFY_RAISES_ASSERT(svd.matrixV()) + svd.singularValues(); + VERIFY_RAISES_ASSERT(svd.solve(rhs)) - if (Options & (ComputeThinU|ComputeFullU)) VERIFY(staticSvd.computeU()); - else VERIFY(!staticSvd.computeU()); - if (Options & (ComputeThinV|ComputeFullV)) VERIFY(staticSvd.computeV()); - else VERIFY(!staticSvd.computeV()); + svd.compute(a, ComputeFullU); + svd.matrixU(); + VERIFY_RAISES_ASSERT(svd.matrixV()) + VERIFY_RAISES_ASSERT(svd.solve(rhs)) + svd.compute(a, ComputeFullV); + svd.matrixV(); + VERIFY_RAISES_ASSERT(svd.matrixU()) + VERIFY_RAISES_ASSERT(svd.solve(rhs)) - if (staticSvd.computeU()) VERIFY(staticSvd.matrixU().isUnitary()); - if (staticSvd.computeV()) VERIFY(staticSvd.matrixV().isUnitary()); - - if (staticSvd.computeU() && staticSvd.computeV()) + if (!fullOnly && ColsAtCompileTime == Dynamic) { - svd_test_solvers(m, staticSvd); - svd_least_square(m); - // svd_min_norm generates non-square matrices so it can't be used with NoQRPreconditioner - if ((Options & internal::QRPreconditionerBits) != NoQRPreconditioner) - svd_min_norm(m); + svd.compute(a, ComputeThinU); + svd.matrixU(); + VERIFY_RAISES_ASSERT(svd.matrixV()) + VERIFY_RAISES_ASSERT(svd.solve(rhs)) + svd.compute(a, ComputeThinV); + svd.matrixV(); + VERIFY_RAISES_ASSERT(svd.matrixU()) + VERIFY_RAISES_ASSERT(svd.solve(rhs)) + } + else + { + VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU)) + VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV)) } -} - -template -void svd_option_checks(const MatrixType& m) -{ - // singular values only - svd_compute_checks(m); - // Thin only - svd_compute_checks(m); - svd_compute_checks(m); - svd_compute_checks(m); - // Full only - svd_compute_checks(m); - svd_compute_checks(m); - svd_compute_checks(m); - // Mixed - svd_compute_checks(m); - svd_compute_checks(m); - - typedef SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV) FullSvdType; - FullSvdType fullSvd(m); - svd_check_full(m, fullSvd); - svd_compare_to_full(m, fullSvd); -} - -template -void svd_option_checks_full_only(const MatrixType& m) -{ - svd_compute_checks(m); - svd_compute_checks(m); - svd_compute_checks(m); - - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV) fullSvd(m); - svd_check_full(m, fullSvd); -} - -template -void svd_check_max_size_matrix(int initialRows, int initialCols) -{ - enum { - MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, - MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime - }; - - int rows = MaxRowsAtCompileTime == Dynamic ? initialRows : (std::min)(initialRows, (int)MaxRowsAtCompileTime); - int cols = MaxColsAtCompileTime == Dynamic ? 
initialCols : (std::min)(initialCols, (int)MaxColsAtCompileTime); - - MatrixType m(rows, cols); - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinU | ComputeThinV) thinSvd(m); - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeThinU | ComputeFullV) mixedSvd1(m); - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeThinV) mixedSvd2(m); - SVD_STATIC_OPTIONS(MatrixType, QRPreconditioner | ComputeFullU | ComputeFullV) fullSvd(m); - - MatrixType n(MaxRowsAtCompileTime, MaxColsAtCompileTime); - thinSvd.compute(n); - mixedSvd1.compute(n); - mixedSvd2.compute(n); - fullSvd.compute(n); - - MatrixX dynamicMatrix(MaxRowsAtCompileTime + 1, MaxColsAtCompileTime + 1); - - VERIFY_RAISES_ASSERT(thinSvd.compute(dynamicMatrix)); - VERIFY_RAISES_ASSERT(mixedSvd1.compute(dynamicMatrix)); - VERIFY_RAISES_ASSERT(mixedSvd2.compute(dynamicMatrix)); - VERIFY_RAISES_ASSERT(fullSvd.compute(dynamicMatrix)); } #undef SVD_DEFAULT #undef SVD_FOR_MIN_NORM -#undef SVD_STATIC_OPTIONS
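For reference, here is a minimal, self-contained sketch of the run-time computationOptions API exercised throughout this patch (sizes and variable names are illustrative, not taken from the patch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  MatrixXf A = MatrixXf::Random(5, 3);
  VectorXf b = VectorXf::Random(5);

  // Thin unitaries are sufficient for least-squares solving.
  JacobiSVD<MatrixXf> jsvd(A, ComputeThinU | ComputeThinV);
  std::cout << "singular values:\n" << jsvd.singularValues() << "\n";
  std::cout << "least-squares solution:\n" << jsvd.solve(b) << "\n";

  // The MatrixBase convenience methods take the same bit-field.
  std::cout << "via bdcSvd():\n" << A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << "\n";
  return 0;
}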