commit 934ce93886 (https://gitlab.com/libeigen/eigen.git): merge with default branch
@@ -10,9 +10,11 @@
  *
  *
  * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
- * Those decompositions are accessible via the following MatrixBase methods:
- *  - MatrixBase::llt(),
+ * Those decompositions are also accessible via the following methods:
+ *  - MatrixBase::llt()
  *  - MatrixBase::ldlt()
+ *  - SelfAdjointView::llt()
+ *  - SelfAdjointView::ldlt()
  *
  * \code
  * #include <Eigen/Cholesky>
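For context, a minimal standalone sketch (not part of this patch) showing the four accessors listed above; the matrix size and values are arbitrary:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Build an arbitrary symmetric positive definite matrix and a right-hand side.
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(4, 4);
  Eigen::MatrixXd A = M * M.transpose() + 4.0 * Eigen::MatrixXd::Identity(4, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);

  // Via MatrixBase:
  Eigen::VectorXd x1 = A.llt().solve(b);
  Eigen::VectorXd x2 = A.ldlt().solve(b);

  // Via SelfAdjointView (only the referenced triangle of A is read):
  Eigen::VectorXd x3 = A.selfadjointView<Eigen::Lower>().llt().solve(b);
  Eigen::VectorXd x4 = A.selfadjointView<Eigen::Lower>().ldlt().solve(b);

  std::cout << (A * x1 - b).norm() << " " << (A * x3 - b).norm() << "\n";  // both ~0
}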
@@ -43,7 +43,7 @@ namespace internal {
   * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
   * decomposition to determine whether a system of equations has a solution.
   *
-  * \sa MatrixBase::ldlt(), class LLT
+  * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT
   */
 template<typename _MatrixType, int _UpLo> class LDLT
 {
@@ -179,7 +179,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
   * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
   * computes the least-square solution of \f$ A x = b \f$ if \f$ A \f$ is singular.
   *
-  * \sa MatrixBase::ldlt()
+  * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt()
   */
 template<typename Rhs>
 inline const internal::solve_retval<LDLT, Rhs>
@@ -582,6 +582,7 @@ MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
 #ifndef __CUDACC__
 /** \cholesky_module
   * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+  * \sa MatrixBase::ldlt()
   */
 template<typename MatrixType, unsigned int UpLo>
 inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
@@ -592,6 +593,7 @@ SelfAdjointView<MatrixType, UpLo>::ldlt() const
 
 /** \cholesky_module
   * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+  * \sa SelfAdjointView::ldlt()
   */
 template<typename Derived>
 inline const LDLT<typename MatrixBase<Derived>::PlainObject>
@@ -41,7 +41,7 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
   * Example: \include LLT_example.cpp
   * Output: \verbinclude LLT_example.out
   *
-  * \sa MatrixBase::llt(), class LDLT
+  * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
   */
 /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
  * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
@@ -115,7 +115,7 @@ template<typename _MatrixType, int _UpLo> class LLT
       * Example: \include LLT_solve.cpp
       * Output: \verbinclude LLT_solve.out
       *
-      * \sa solveInPlace(), MatrixBase::llt()
+      * \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt()
       */
     template<typename Rhs>
     inline const internal::solve_retval<LLT, Rhs>
@@ -468,6 +468,7 @@ MatrixType LLT<MatrixType,_UpLo>::reconstructedMatrix() const
 #ifndef __CUDACC__
 /** \cholesky_module
   * \returns the LLT decomposition of \c *this
+  * \sa SelfAdjointView::llt()
   */
 template<typename Derived>
 inline const LLT<typename MatrixBase<Derived>::PlainObject>
@@ -478,6 +479,7 @@ MatrixBase<Derived>::llt() const
 
 /** \cholesky_module
   * \returns the LLT decomposition of \c *this
+  * \sa SelfAdjointView::llt()
   */
 template<typename MatrixType, unsigned int UpLo>
 inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
@@ -669,6 +669,15 @@ bool (isfinite)(const T& x)
   return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
 }
 
+template<typename T>
+EIGEN_DEVICE_FUNC
+bool (isfinite)(const std::complex<T>& x)
+{
+  using std::real;
+  using std::imag;
+  return isfinite(real(x)) && isfinite(imag(x));
+}
+
 } // end namespace numext
 
 namespace internal {
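The new overload simply declares a complex value finite when both of its components are. A standalone sketch of the same idea using only the standard library (numext::isfinite itself is Eigen-internal):

#include <cmath>
#include <complex>
#include <iostream>
#include <limits>

template <typename T>
bool complex_isfinite(const std::complex<T>& x)
{
  // finite iff both the real and the imaginary part are finite
  return std::isfinite(x.real()) && std::isfinite(x.imag());
}

int main()
{
  std::complex<double> ok(1.0, 2.0);
  std::complex<double> bad(1.0, std::numeric_limits<double>::infinity());
  std::cout << complex_isfinite(ok) << " " << complex_isfinite(bad) << "\n";  // 1 0
}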
@@ -97,6 +97,7 @@ template<> struct packet_traits<int> : default_packet_traits
 // workaround gcc 4.2, 4.3 and 4.4 compilation issue
 EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
 EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32 (const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
 EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
 EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
 #endif
@@ -265,6 +265,8 @@ template<typename MatrixType, unsigned int UpLo>
 template<typename ProductDerived, typename _Lhs, typename _Rhs>
 TriangularView<MatrixType,UpLo>& TriangularView<MatrixType,UpLo>::assignProduct(const ProductBase<ProductDerived, _Lhs,_Rhs>& prod, const Scalar& alpha)
 {
+  eigen_assert(m_matrix.rows() == prod.rows() && m_matrix.cols() == prod.cols());
+
   general_product_to_triangular_selector<MatrixType, ProductDerived, UpLo, (_Lhs::ColsAtCompileTime==1) || (_Rhs::RowsAtCompileTime==1)>::run(m_matrix.const_cast_derived(), prod.derived(), alpha);
 
   return *this;
@@ -54,8 +54,25 @@
 #endif
 
 #if defined EIGEN_USE_MKL
-#include <mkl.h>
+# include <mkl.h>
+/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
+# ifndef INTEL_MKL_VERSION
+#   undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
+# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
+#   undef EIGEN_USE_MKL
+# endif
+# ifndef EIGEN_USE_MKL
+ /*If the MKL version is too old, undef everything*/
+#  undef EIGEN_USE_MKL_ALL
+#  undef EIGEN_USE_BLAS
+#  undef EIGEN_USE_LAPACKE
+#  undef EIGEN_USE_MKL_VML
+#  undef EIGEN_USE_LAPACKE_STRICT
+#  undef EIGEN_USE_LAPACKE
+# endif
+#endif
+
+#if defined EIGEN_USE_MKL
 #include <mkl_lapacke.h>
 #define EIGEN_MKL_VML_THRESHOLD 128
 
@@ -89,7 +89,7 @@ inline void throw_std_bad_alloc()
 #ifdef EIGEN_EXCEPTIONS
   throw std::bad_alloc();
 #else
-  std::size_t huge = -1;
+  std::size_t huge = static_cast<std::size_t>(-1);
   new int[huge];
 #endif
 }
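The rewritten initializer produces the same value: converting -1 to std::size_t already yields the largest representable size, so the deliberately doomed allocation above behaves the same and the explicit cast mainly silences sign-conversion warnings. A quick standalone check (outside Eigen):

#include <cstddef>
#include <cstdint>
#include <iostream>

int main()
{
  // -1 converted to an unsigned type wraps to the maximum value.
  std::size_t huge = static_cast<std::size_t>(-1);
  std::cout << (huge == SIZE_MAX) << "\n";  // prints 1
}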
@@ -275,10 +275,11 @@ template<typename _MatrixType> class EigenSolver
       */
     EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
 
+    /** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
     ComputationInfo info() const
     {
       eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
-      return m_realSchur.info();
+      return m_info;
     }
 
     /** \brief Sets the maximum number of iterations allowed. */
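A hypothetical caller-side sketch (not from this patch) of how the new report can be consumed: after construction or compute(), info() is checked instead of relying on an internal assertion. The exact non-Success value depends on where the problem is first detected:

#include <Eigen/Eigenvalues>
#include <iostream>
#include <limits>

int main()
{
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  a(1, 2) = std::numeric_limits<double>::quiet_NaN();  // deliberately poisoned input

  Eigen::EigenSolver<Eigen::MatrixXd> eig(a);
  if (eig.info() == Eigen::Success)
    std::cout << eig.eigenvalues().transpose() << "\n";
  else
    std::cout << "decomposition failed, info() = " << eig.info() << "\n";
}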
@@ -302,6 +303,7 @@ template<typename _MatrixType> class EigenSolver
     EigenvalueType m_eivalues;
     bool m_isInitialized;
     bool m_eigenvectorsOk;
+    ComputationInfo m_info;
     RealSchur<MatrixType> m_realSchur;
     MatrixType m_matT;
 
@@ -366,12 +368,16 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
 {
   using std::sqrt;
   using std::abs;
+  using std::max;
+  using numext::isfinite;
   eigen_assert(matrix.cols() == matrix.rows());
 
   // Reduce to real Schur form.
   m_realSchur.compute(matrix, computeEigenvectors);
 
-  if (m_realSchur.info() == Success)
+  m_info = m_realSchur.info();
+
+  if (m_info == Success)
   {
     m_matT = m_realSchur.matrixT();
     if (computeEigenvectors)
@@ -385,14 +391,40 @@ EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvect
       if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
       {
         m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
+        if(!isfinite(m_eivalues.coeffRef(i)))
+        {
+          m_isInitialized = true;
+          m_eigenvectorsOk = false;
+          m_info = NumericalIssue;
+          return *this;
+        }
         ++i;
       }
       else
       {
         Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
-        Scalar z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+        Scalar z;
+        // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+        // without overflow
+        {
+          Scalar t0 = m_matT.coeff(i+1, i);
+          Scalar t1 = m_matT.coeff(i, i+1);
+          Scalar maxval = (max)(abs(p),(max)(abs(t0),abs(t1)));
+          t0 /= maxval;
+          t1 /= maxval;
+          Scalar p0 = p/maxval;
+          z = maxval * sqrt(abs(p0 * p0 + t0 * t1));
+        }
+
         m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
         m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
+        if(!(isfinite(m_eivalues.coeffRef(i)) && isfinite(m_eivalues.coeffRef(i+1))))
+        {
+          m_isInitialized = true;
+          m_eigenvectorsOk = false;
+          m_info = NumericalIssue;
+          return *this;
+        }
         i += 2;
       }
     }
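The replacement computes the same quantity but divides by the largest magnitude first, so the squared terms cannot overflow. A standalone illustration of that rescaling idea (plain doubles, values chosen only to trigger the overflow):

#include <algorithm>
#include <cmath>
#include <iostream>

// sqrt(|p*p + t0*t1|) computed via rescaling; assumes the inputs are not all zero.
double stable_z(double p, double t0, double t1)
{
  double maxval = std::max(std::abs(p), std::max(std::abs(t0), std::abs(t1)));
  double p0 = p / maxval, s0 = t0 / maxval, s1 = t1 / maxval;
  return maxval * std::sqrt(std::abs(p0 * p0 + s0 * s1));
}

int main()
{
  double p = 1e200, t0 = 1e200, t1 = 1e200;
  std::cout << std::sqrt(std::abs(p * p + t0 * t1)) << "\n";  // naive form overflows: inf
  std::cout << stable_z(p, t0, t1) << "\n";                   // finite, ~1.4142e200
}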
@@ -581,7 +613,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
         }
         else
         {
-          eigen_assert(0 && "Internal bug in EigenSolver"); // this should not happen
+          eigen_assert(0 && "Internal bug in EigenSolver (INF or NaN has not been detected)"); // this should not happen
         }
       }
 
@@ -415,6 +415,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
                          JacobiRotation<RealScalar> *j_right)
 {
   using std::sqrt;
+  using std::abs;
   Matrix<RealScalar,2,2> m;
   m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
        numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
@@ -428,9 +429,11 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
   }
   else
   {
-    RealScalar u = d / t;
-    rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
-    rot1.s() = rot1.c() * u;
+    RealScalar t2d2 = numext::hypot(t,d);
+    rot1.c() = abs(t)/t2d2;
+    rot1.s() = d/t2d2;
+    if(t<RealScalar(0))
+      rot1.s() = -rot1.s();
   }
   m.applyOnTheLeft(0,1,rot1);
   j_right->makeJacobi(m,0,1);
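The rewritten branch builds the rotation from hypot(t,d) instead of the ratio d/t, which keeps the coefficients finite when that ratio overflows. A standalone comparison of the two formulations (plain doubles, not Eigen code):

#include <cmath>
#include <iostream>

int main()
{
  double t = 1.0, d = 1e200;

  // Former formulation: u*u overflows, so the sine degenerates.
  double u = d / t;
  double c_old = 1.0 / std::sqrt(1.0 + u * u);
  double s_old = c_old * u;

  // hypot-based formulation: overflow-safe.
  double t2d2 = std::hypot(t, d);
  double c_new = std::abs(t) / t2d2;
  double s_new = (t < 0.0 ? -d : d) / t2d2;

  std::cout << c_old << " " << s_old << "\n";  // 0 0   (not a unit rotation)
  std::cout << c_new << " " << s_new << "\n";  // 1e-200 1
}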
@@ -223,7 +223,7 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
 "note_about_using_kernel_to_study_multiple_solutions=If you need a complete analysis of the space of solutions, take the one solution obtained by this method and add to it elements of the kernel, as determined by kernel()." \
 "note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \
 "note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \
-"nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"
+"nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\""
 
 
 ALIASES += "eigenAutoToc= "
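The note_about_checking_solutions alias above recommends verifying a computed solution with isApprox(); spelled out as a runnable sketch (the choice of solver and tolerance here is arbitrary):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(3);

  Eigen::VectorXd result = A.fullPivLu().solve(b);

  // Check the residual instead of trusting the solver blindly.
  double precision = 1e-10;
  bool a_solution_exists = (A * result).isApprox(b, precision);
  std::cout << a_solution_exists << "\n";  // 1 for a random (almost surely invertible) A
}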
@@ -1583,6 +1583,7 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
 EIGEN_VECTORIZE \
 EIGEN_QT_SUPPORT \
 EIGEN_STRONG_INLINE=inline \
+EIGEN_DEVICE_FUNC= \
 "EIGEN2_SUPPORT_STAGE=99" \
 "EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template<typename OtherDerived> const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const;" \
 "EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<typename LHS::Scalar, typename RHS::Scalar >, const LHS, const RHS>"
@@ -32,7 +32,7 @@ Eigen also provides the \link MatrixBase::norm() norm() \endlink method, which r
 
 These operations can also operate on matrices; in that case, a n-by-p matrix is seen as a vector of size (n*p), so for example the \link MatrixBase::norm() norm() \endlink method returns the "Frobenius" or "Hilbert-Schmidt" norm. We refrain from speaking of the \f$\ell^2\f$ norm of a matrix because that can mean different things.
 
-If you want other \f$\ell^p\f$ norms, use the \link MatrixBase::lpNorm() lpNnorm<p>() \endlink method. The template parameter \a p can take the special value \a Infinity if you want the \f$\ell^\infty\f$ norm, which is the maximum of the absolute values of the coefficients.
+If you want other \f$\ell^p\f$ norms, use the \link MatrixBase::lpNorm() lpNorm<p>() \endlink method. The template parameter \a p can take the special value \a Infinity if you want the \f$\ell^\infty\f$ norm, which is the maximum of the absolute values of the coefficients.
 
 The following example demonstrates these methods.
 
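A quick usage sketch for the corrected method name (the vector values are arbitrary):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v(3);
  v << 1.0, -4.0, 2.0;

  std::cout << v.lpNorm<1>() << "\n";                // 7     (sum of absolute values)
  std::cout << v.lpNorm<2>() << "\n";                // ~4.58 (same as v.norm())
  std::cout << v.lpNorm<Eigen::Infinity>() << "\n";  // 4     (maximum absolute value)
}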
@@ -315,6 +315,9 @@ find_package(CUDA)
 if(CUDA_FOUND)
   
   set(CUDA_PROPAGATE_HOST_FLAGS OFF)
+  if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+    set(CUDA_NVCC_FLAGS "-ccbin /usr/bin/clang" CACHE STRING "nvcc flags" FORCE)
+  endif()
   cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR})
   set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")
 
@@ -82,10 +82,6 @@ template<typename MatrixType> void cholesky(const MatrixType& m)
     symm += a1 * a1.adjoint();
   }
 
-  // to test if really Cholesky only uses the upper triangular part, uncomment the following
-  // FIXME: currently that fails !!
-  //symm.template part<StrictlyLower>().setZero();
-
   {
     SquareMatrixType symmUp = symm.template triangularView<Upper>();
     SquareMatrixType symmLo = symm.template triangularView<Lower>();
@@ -129,7 +129,7 @@ void test_cuda_basic()
   CALL_SUBTEST( run_and_compare_to_cuda(prod<Matrix4f,Vector4f>(), nthreads, in, out) );
   
   CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix3f,Vector3f>(), nthreads, in, out) );
-  CALL_SUBTEST( run_and_compare_to_c<uda(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) );
+  CALL_SUBTEST( run_and_compare_to_cuda(diagonal<Matrix4f,Vector4f>(), nthreads, in, out) );
   
   CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix3f>(), nthreads, in, out) );
   CALL_SUBTEST( run_and_compare_to_cuda(eigenvalues<Matrix2f>(), nthreads, in, out) );
@@ -121,5 +121,18 @@ void test_eigensolver_generic()
     }
   );
 
+  // regression test for bug 793
+#ifdef EIGEN_TEST_PART_2
+  {
+    MatrixXd a(3,3);
+    a << 0, 0, 1,
+         1, 1, 1,
+         1, 1e+200, 1;
+    Eigen::EigenSolver<MatrixXd> eig(a);
+    VERIFY_IS_APPROX(a * eig.pseudoEigenvectors(), eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix());
+    VERIFY_IS_APPROX(a * eig.eigenvectors(), eig.eigenvectors() * eig.eigenvalues().asDiagonal());
+  }
+#endif
+
   TEST_SET_BUT_UNUSED_VARIABLE(s)
 }
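The same reconstruction check, written as a standalone program outside the test harness (VERIFY_IS_APPROX is replaced here by isApprox with its default precision):

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  Eigen::MatrixXd a(3, 3);
  a << 0, 0,      1,
       1, 1,      1,
       1, 1e+200, 1;  // the bug-793 matrix: large enough to overflow naive intermediates

  Eigen::EigenSolver<Eigen::MatrixXd> eig(a);

  bool real_form_ok = (a * eig.pseudoEigenvectors())
                          .isApprox(eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix());
  bool complex_form_ok = (a * eig.eigenvectors())
                             .isApprox(eig.eigenvectors() * eig.eigenvalues().asDiagonal());
  std::cout << real_form_ok << " " << complex_form_ok << "\n";  // the new test expects 1 1
}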
@@ -285,7 +285,7 @@ struct equal_op { template<typename A, typename B> constexpr static inli
 struct not_equal_op     { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a != b) { return a != b; } };
 struct lesser_op        { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a < b) { return a < b; } };
 struct lesser_equal_op  { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a <= b) { return a <= b; } };
-struct greater_op       { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a < b) { return a < b; } };
+struct greater_op       { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a > b) { return a > b; } };
 struct greater_equal_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a >= b) { return a >= b; } };
 
 /* generic unary operations */
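A minimal compile-time check of the corrected functor (the struct definition is copied from the hunk above; the surrounding Eigen machinery is omitted, and C++11 is assumed):

#include <iostream>

struct greater_op { template<typename A, typename B> constexpr static inline auto run(A a, B b) -> decltype(a > b) { return a > b; } };

int main()
{
  static_assert(greater_op::run(2, 1), "2 > 1 must hold");
  static_assert(!greater_op::run(1, 2), "1 > 2 must not hold");
  std::cout << greater_op::run(3.5, 2) << "\n";  // prints 1
}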
@@ -2,6 +2,7 @@ ADD_SUBDIRECTORY(AutoDiff)
 ADD_SUBDIRECTORY(BVH)
 ADD_SUBDIRECTORY(FFT)
 ADD_SUBDIRECTORY(IterativeSolvers)
+ADD_SUBDIRECTORY(LevenbergMarquardt)
 ADD_SUBDIRECTORY(MatrixFunctions)
 ADD_SUBDIRECTORY(MoreVectorization)
 ADD_SUBDIRECTORY(NonLinearOptimization)
@@ -2,5 +2,5 @@ FILE(GLOB Eigen_LevenbergMarquardt_SRCS "*.h")
 
 INSTALL(FILES
   ${Eigen_LevenbergMarquardt_SRCS}
-  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LevenbergMarquardt COMPONENT Devel
+  DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/LevenbergMarquardt COMPONENT Devel
   )