mirror of https://gitlab.com/libeigen/eigen.git
synced 2025-08-11 19:29:02 +08:00

merge with main branch

commit 2f593ee67c
@@ -1,6 +1,6 @@
 project(Eigen)

-cmake_minimum_required(VERSION 2.6.2)
+cmake_minimum_required(VERSION 2.8.2)

 # guard against in-source builds
@@ -150,11 +150,14 @@ if(NOT MSVC)
   ei_add_cxx_compiler_flag("-fno-check-new")
   ei_add_cxx_compiler_flag("-fno-common")
   ei_add_cxx_compiler_flag("-fstrict-aliasing")
-  ei_add_cxx_compiler_flag("-wd981")                # disbale ICC's "operands are evaluated in unspecified order" remark
+  ei_add_cxx_compiler_flag("-wd981")                # disable ICC's "operands are evaluated in unspecified order" remark
+  ei_add_cxx_compiler_flag("-wd2304")               # disbale ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor

   # The -ansi flag must be added last, otherwise it is also used as a linker flag by check_cxx_compiler_flag making it fails
+  # Moreover we should not set both -strict-ansi and -ansi
   check_cxx_compiler_flag("-strict-ansi" COMPILER_SUPPORT_STRICTANSI)
+  ei_add_cxx_compiler_flag("-Qunused-arguments")    # disable clang warning: argument unused during compilation: '-ansi'

   if(COMPILER_SUPPORT_STRICTANSI)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -strict-ansi")
   else()
@@ -1,4 +1,3 @@
-
 ## A tribute to Dynamic!
-set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "33331")
-set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS "33331")
+set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "2000")
+set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS "2000")
@@ -40,6 +40,12 @@
 // defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
 #include "src/Core/util/Macros.h"

+// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
+// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
+#if defined(__MINGW32__) && EIGEN_GNUC_AT_LEAST(4,6)
+#pragma GCC optimize ("-fno-ipa-cp-clone")
+#endif
+
 #include <complex>

 // this include file manages BLAS and MKL related macros
@@ -266,8 +272,8 @@ using std::ptrdiff_t;
 #include "src/Core/util/Constants.h"
 #include "src/Core/util/ForwardDeclarations.h"
 #include "src/Core/util/Meta.h"
-#include "src/Core/util/XprHelper.h"
 #include "src/Core/util/StaticAssert.h"
+#include "src/Core/util/XprHelper.h"
 #include "src/Core/util/Memory.h"

 #include "src/Core/NumTraits.h"
@@ -26,4 +26,4 @@
 #include "src/CholmodSupport/CholmodSupport.h"
 #include "src/SPQRSupport/SuiteSparseQRSupport.h"

-#endif
+#endif
Eigen/Sparse
@@ -1,15 +1,15 @@
 #ifndef EIGEN_SPARSE_MODULE_H
 #define EIGEN_SPARSE_MODULE_H

-/** defgroup Sparse_modules Sparse modules
+/** \defgroup Sparse_Module Sparse meta-module
   *
   * Meta-module including all related modules:
-  * - SparseCore
-  * - OrderingMethods
-  * - SparseCholesky
-  * - SparseLU
-  * - SparseQR
-  * - IterativeLinearSolvers
+  * - \ref SparseCore_Module
+  * - \ref OrderingMethods_Module
+  * - \ref SparseCholesky_Module
+  * - \ref SparseLU_Module
+  * - \ref SparseQR_Module
+  * - \ref IterativeLinearSolvers_Module
   *
   * \code
   * #include <Eigen/Sparse>
@@ -1,7 +1,17 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
 #ifndef EIGEN_SPARSECHOLESKY_MODULE_H
 #define EIGEN_SPARSECHOLESKY_MODULE_H

 #include "SparseCore"
+#include "OrderingMethods"

 #include "src/Core/util/DisableStupidWarnings.h"
@@ -26,7 +36,6 @@

 #include "src/misc/Solve.h"
 #include "src/misc/SparseSolve.h"
-
 #include "src/SparseCholesky/SimplicialCholesky.h"

 #ifndef EIGEN_MPL2_ONLY
@@ -20,6 +20,9 @@
   * Please, see the documentation of the SparseLU class for more details.
   */

+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
 // Ordering interface
 #include "OrderingMethods"

@@ -20,6 +20,10 @@
   *
   *
   */

+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"

+#include "OrderingMethods"
+#include "src/SparseCore/SparseColEtree.h"
 #include "src/SparseQR/SparseQR.h"
@@ -259,7 +259,7 @@ template<> struct ldlt_inplace<Lower>
     {
       transpositions.setIdentity();
       if(sign)
-        *sign = real(mat.coeff(0,0))>0 ? 1:-1;
+        *sign = numext::real(mat.coeff(0,0))>0 ? 1:-1;
       return true;
     }
@@ -278,22 +278,13 @@ template<> struct ldlt_inplace<Lower>
       // are compared; if any diagonal is negligible compared
       // to the largest overall, the algorithm bails.
       cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);

-        if(sign)
-          *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
       }
-      else if(sign)
-      {
-        // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
-        int newSign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
-        if(newSign != *sign)
-          *sign = 0;
-      }

       // Finish early if the matrix is not full rank.
       if(biggest_in_corner < cutoff)
       {
         for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
         if(sign) *sign = 0;
         break;
       }
@@ -309,11 +300,11 @@ template<> struct ldlt_inplace<Lower>
       for(int i=k+1;i<index_of_biggest_in_corner;++i)
       {
         Scalar tmp = mat.coeffRef(i,k);
-        mat.coeffRef(i,k) = conj(mat.coeffRef(index_of_biggest_in_corner,i));
-        mat.coeffRef(index_of_biggest_in_corner,i) = conj(tmp);
+        mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i));
+        mat.coeffRef(index_of_biggest_in_corner,i) = numext::conj(tmp);
       }
       if(NumTraits<Scalar>::IsComplex)
-        mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k));
+        mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k));
     }

     // partition the matrix:
@@ -334,6 +325,16 @@ template<> struct ldlt_inplace<Lower>
       }
       if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
         A21 /= mat.coeffRef(k,k);

+      if(sign)
+      {
+        // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
+        int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
+        if(k == 0)
+          *sign = newSign;
+        else if(*sign != newSign)
+          *sign = 0;
+      }
     }

     return true;
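Illustration (editor's sketch, not part of the commit): the pivot-sign rule the added block implements, restated as standalone C++; the helper name trackSign is hypothetical.

    #include <cstddef>
    #include <vector>

    // Take the sign from the first pivot; collapse to 0 as soon as any later
    // pivot disagrees, signalling an indefinite matrix (simplified from above).
    int trackSign(const std::vector<double>& pivots)
    {
      int sign = 0;
      for (std::size_t k = 0; k < pivots.size(); ++k)
      {
        int newSign = pivots[k] > 0 ? 1 : -1;
        if (k == 0)               sign = newSign;
        else if (sign != newSign) sign = 0;
      }
      return sign;
    }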
@@ -349,7 +350,7 @@ template<> struct ldlt_inplace<Lower>
   template<typename MatrixType, typename WDerived>
   static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1)
   {
-    using internal::isfinite;
+    using numext::isfinite;
     typedef typename MatrixType::Scalar Scalar;
     typedef typename MatrixType::RealScalar RealScalar;
     typedef typename MatrixType::Index Index;
@@ -367,9 +368,9 @@ template<> struct ldlt_inplace<Lower>
         break;

       // Update the diagonal terms
-      RealScalar dj = real(mat.coeff(j,j));
+      RealScalar dj = numext::real(mat.coeff(j,j));
       Scalar wj = w.coeff(j);
-      RealScalar swj2 = sigma*abs2(wj);
+      RealScalar swj2 = sigma*numext::abs2(wj);
       RealScalar gamma = dj*alpha + swj2;

       mat.coeffRef(j,j) += swj2/alpha;
@@ -380,7 +381,7 @@ template<> struct ldlt_inplace<Lower>
       Index rs = size-j-1;
       w.tail(rs) -= wj * mat.col(j).tail(rs);
       if(gamma != 0)
-        mat.col(j).tail(rs) += (sigma*conj(wj)/gamma)*w.tail(rs);
+        mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs);
     }
     return true;
   }
@@ -232,10 +232,10 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
   RealScalar beta = 1;
   for(Index j=0; j<n; ++j)
   {
-    RealScalar Ljj = real(mat.coeff(j,j));
-    RealScalar dj = abs2(Ljj);
+    RealScalar Ljj = numext::real(mat.coeff(j,j));
+    RealScalar dj = numext::abs2(Ljj);
     Scalar wj = temp.coeff(j);
-    RealScalar swj2 = sigma*abs2(wj);
+    RealScalar swj2 = sigma*numext::abs2(wj);
     RealScalar gamma = dj*beta + swj2;

     RealScalar x = dj + swj2/beta;
@@ -251,7 +251,7 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
     {
       temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs);
       if(gamma != 0)
-        mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*conj(wj)/gamma)*temp.tail(rs);
+        mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs);
     }
   }
 }
@@ -277,7 +277,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
       Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
       Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);

-      RealScalar x = real(mat.coeff(k,k));
+      RealScalar x = numext::real(mat.coeff(k,k));
       if (k>0) x -= A10.squaredNorm();
       if (x<=RealScalar(0))
         return k;
@@ -126,7 +126,7 @@ cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
   res.ncol  = mat.cols();
   res.nzmax = res.nrow * res.ncol;
   res.d     = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
-  res.x     = mat.derived().data();
+  res.x     = (void*)(mat.derived().data());
   res.z     = 0;

   internal::cholmod_configure_matrix<Scalar>(res);
@@ -295,7 +295,8 @@ class CholmodBase : internal::noncopyable
       eigen_assert(size==b.rows());

       // note: cd stands for Cholmod Dense
-      cholmod_dense b_cd = viewAsCholmod(b.const_cast_derived());
+      Rhs& b_ref(b.const_cast_derived());
+      cholmod_dense b_cd = viewAsCholmod(b_ref);
       cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
       if(!x_cd)
       {
@@ -312,6 +313,7 @@ class CholmodBase : internal::noncopyable
     {
       eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
       const Index size = m_cholmodFactor->n;
+      EIGEN_UNUSED_VARIABLE(size);
       eigen_assert(size==b.rows());

       // note: cs stands for Cholmod Sparse
@@ -344,7 +346,7 @@ class CholmodBase : internal::noncopyable
     }

     template<typename Stream>
-    void dumpMemory(Stream& s)
+    void dumpMemory(Stream& /*s*/)
     {}

   protected:
@@ -111,7 +111,7 @@ class Array
       * \sa resize(Index,Index)
       */
     EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE explicit Array() : Base()
+    EIGEN_STRONG_INLINE Array() : Base()
     {
       Base::_check_template_params();
       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
@@ -55,8 +55,7 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
     inline Index outerStride() const { return m_expression.outerStride(); }
     inline Index innerStride() const { return m_expression.innerStride(); }

-    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); }
     inline const Scalar* data() const { return m_expression.data(); }

     inline CoeffReturnType coeff(Index rowId, Index colId) const
@@ -175,7 +175,7 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
     inline Index outerStride() const { return m_expression.outerStride(); }
     inline Index innerStride() const { return m_expression.innerStride(); }

-    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); }
     inline const Scalar* data() const { return m_expression.data(); }

     inline CoeffReturnType coeff(Index rowId, Index colId) const
@@ -519,20 +519,20 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
 namespace internal {

 template<typename Derived, typename OtherDerived,
-         bool EvalBeforeAssigning = (int(OtherDerived::Flags) & EvalBeforeAssigningBit) != 0,
-         bool NeedToTranspose = Derived::IsVectorAtCompileTime
-                && OtherDerived::IsVectorAtCompileTime
-                && ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
-                     |  // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
-                        // revert to || as soon as not needed anymore.
-                    (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
-                && int(Derived::SizeAtCompileTime) != 1>
+         bool EvalBeforeAssigning = (int(internal::traits<OtherDerived>::Flags) & EvalBeforeAssigningBit) != 0,
+         bool NeedToTranspose = ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
+                     |  // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
+                        // revert to || as soon as not needed anymore.
+                    (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
+                && int(Derived::SizeAtCompileTime) != 1>
 struct assign_selector;

 template<typename Derived, typename OtherDerived>
 struct assign_selector<Derived,OtherDerived,false,false> {
   EIGEN_DEVICE_FUNC
   static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
+  template<typename ActualDerived, typename ActualOtherDerived>
+  static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { other.evalTo(dst); return dst; }
 };
 template<typename Derived, typename OtherDerived>
 struct assign_selector<Derived,OtherDerived,true,false> {
@@ -543,6 +543,8 @@ template<typename Derived, typename OtherDerived>
 struct assign_selector<Derived,OtherDerived,false,true> {
   EIGEN_DEVICE_FUNC
   static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
+  template<typename ActualDerived, typename ActualOtherDerived>
+  static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { Transpose<ActualDerived> dstTrans(dst); other.evalTo(dstTrans); return dst; }
 };
 template<typename Derived, typename OtherDerived>
 struct assign_selector<Derived,OtherDerived,true,true> {
@@ -582,16 +584,14 @@ template<typename Derived>
 template <typename OtherDerived>
 EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other)
 {
-  other.derived().evalTo(derived());
-  return derived();
+  return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
 }

 template<typename Derived>
 template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
 {
-  other.evalTo(derived());
-  return derived();
+  return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
 }

 } // end namespace Eigen
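Illustration (editor's sketch, not Eigen code): the compile-time dispatch pattern assign_selector relies on — boolean template parameters pick a full specialization, so the EvalBeforeAssigning/NeedToTranspose decision costs nothing at runtime.

    template<bool EvalFirst, bool TransposeFirst>
    struct selector;

    template<> struct selector<false,false> { static const char* run() { return "lazy assign"; } };
    template<> struct selector<true ,false> { static const char* run() { return "evaluate, then assign"; } };
    template<> struct selector<false,true > { static const char* run() { return "lazy assign of the transpose"; } };
    template<> struct selector<true ,true > { static const char* run() { return "evaluate, then assign the transpose"; } };

    // usage sketch: selector<false,true>::run() is selected entirely at compile time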
@@ -124,6 +124,8 @@ struct CommaInitializer
   *
   * Example: \include MatrixBase_set.cpp
   * Output: \verbinclude MatrixBase_set.out
+  *
+  * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
   *
   * \sa CommaInitializer::finished(), class CommaInitializer
   */
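Illustration (editor's sketch): the situation the added \note warns about — the comma-initializer operands are arguments of overloaded operator<< / operator, calls, so (before C++17's ordering guarantees) they may be evaluated in either order.

    #include <Eigen/Dense>
    #include <cstdio>

    int f() { std::puts("f"); return 1; }
    int g() { std::puts("g"); return 2; }

    int main()
    {
      Eigen::Matrix2i m;
      m << f(), g(),   // "g" may be printed before "f"
           3,   4;
    }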
@@ -56,8 +56,7 @@ template<typename ViewOp, typename MatrixType, typename StorageKind>
 class CwiseUnaryViewImpl;

 template<typename ViewOp, typename MatrixType>
-class CwiseUnaryView : internal::no_assignment_operator,
-  public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
+class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
 {
   public:
@@ -99,6 +98,7 @@ class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
     typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;

     EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)

     inline Scalar* data() { return &coeffRef(0); }
     inline const Scalar* data() const { return &coeff(0); }
@@ -13,6 +13,16 @@

 namespace Eigen {

+namespace internal {
+
+// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
+// This dummy function simply aims at checking that at compile time.
+static inline void check_DenseIndex_is_signed() {
+  EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+}
+
+} // end namespace internal
+
 /** \class DenseBase
   * \ingroup Core_Module
   *
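Illustration (editor's sketch): the same compile-time check written with C++11 static_assert instead of Eigen's EIGEN_STATIC_ASSERT macro — an empty inline function whose only job is to fail compilation if the index type were unsigned.

    #include <cstddef>
    #include <type_traits>

    static inline void check_index_is_signed()
    {
      static_assert(std::is_signed<std::ptrdiff_t>::value,
                    "the dense index type must be a signed type");
    }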
@@ -286,7 +296,7 @@ template<typename Derived> class DenseBase

     EIGEN_DEVICE_FUNC
     Eigen::Transpose<Derived> transpose();
-    typedef const Transpose<const Derived> ConstTransposeReturnType;
+    typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
     EIGEN_DEVICE_FUNC
     ConstTransposeReturnType transpose() const;
     EIGEN_DEVICE_FUNC
@@ -118,7 +118,7 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 {
     internal::plain_array<T,Size,_Options> m_data;
   public:
-    EIGEN_DEVICE_FUNC inline explicit DenseStorage() {}
+    EIGEN_DEVICE_FUNC inline DenseStorage() {}
     EIGEN_DEVICE_FUNC inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()) {}
     EIGEN_DEVICE_FUNC inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
@@ -135,7 +135,7 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
 {
   public:
-    inline explicit DenseStorage() {}
+    inline DenseStorage() {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
     inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
     inline void swap(DenseStorage& ) {}
@@ -164,7 +164,7 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
     DenseIndex m_rows;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_rows(0), m_cols(0) {}
+    inline DenseStorage() : m_rows(0), m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
     inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
@@ -184,7 +184,7 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
     internal::plain_array<T,Size,_Options> m_data;
     DenseIndex m_rows;
   public:
-    inline explicit DenseStorage() : m_rows(0) {}
+    inline DenseStorage() : m_rows(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
     inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
@@ -203,7 +203,7 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
     internal::plain_array<T,Size,_Options> m_data;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_cols(0) {}
+    inline DenseStorage() : m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
     inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
@@ -223,7 +223,7 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
     DenseIndex m_rows;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+    inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(0), m_rows(0), m_cols(0) {}
     inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
@@ -264,7 +264,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
     T *m_data;
     DenseIndex m_cols;
   public:
-    inline explicit DenseStorage() : m_data(0), m_cols(0) {}
+    inline DenseStorage() : m_data(0), m_cols(0) {}
     inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
     inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
@@ -300,7 +300,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
     T *m_data;
     DenseIndex m_rows;
   public:
-    inline explicit DenseStorage() : m_data(0), m_rows(0) {}
+    inline DenseStorage() : m_data(0), m_rows(0) {}
    inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
     inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
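Illustration (editor's sketch of one observable effect, not necessarily the commit's motivation): an explicit default constructor rejects copy-list-initialization, which the plain constructor accepts.

    struct WithExplicit { explicit WithExplicit() {} };
    struct Plain        { Plain() {} };

    // WithExplicit a = {};   // ill-formed: copy-list-initialization would use an explicit constructor
    Plain b = {};             // fine once `explicit` is removed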
@@ -75,7 +75,7 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)

     inline Index rows() const
-    { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+    { return m_index.value()<0 ? (std::min<Index>)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min<Index>)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }

     inline Index cols() const { return 1; }
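Illustration (editor's sketch): why the hunk spells out std::min<Index> — std::min deduces a single type from both arguments, so mixed-type calls fail to compile, while an explicit template argument converts both operands first.

    #include <algorithm>

    long smaller(int a, long b)
    {
      // return std::min(a, b);     // ill-formed: deduction yields both int and long
      return std::min<long>(a, b);  // OK: both operands are converted to long
    }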
@@ -172,7 +172,7 @@ MatrixBase<Derived>::diagonal()

 /** This is the const version of diagonal(). */
 template<typename Derived>
-inline const typename MatrixBase<Derived>::ConstDiagonalReturnType
+inline typename MatrixBase<Derived>::ConstDiagonalReturnType
 MatrixBase<Derived>::diagonal() const
 {
   return ConstDiagonalReturnType(derived());
@@ -26,14 +26,15 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
     MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,

     _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
-    _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
-                    ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)),
+    _ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft)
+                          ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)),
     _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
     // FIXME currently we need same types, but in the future the next rule should be the one
-    //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))),
-    _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))),
+    //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+    _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+    _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,

-    Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),
+    Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
     CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
   };
 };
@@ -54,13 +55,21 @@ class DiagonalProduct : internal::no_assignment_operator,
       eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
     }

-    inline Index rows() const { return m_matrix.rows(); }
-    inline Index cols() const { return m_matrix.cols(); }
+    EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }

-    const Scalar coeff(Index row, Index col) const
+    EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
     {
       return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
     }

+    EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const
+    {
+      enum {
+        StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+      };
+      return coeff(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+    }
+
     template<int LoadMode>
     EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
@@ -69,11 +78,19 @@ class DiagonalProduct : internal::no_assignment_operator,
         StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
       };
       const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;

       return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
         ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
        ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
     }

+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index idx) const
+    {
+      enum {
+        StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+      };
+      return packet<LoadMode>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+    }
+
   protected:
     template<int LoadMode>
@@ -88,7 +105,7 @@ class DiagonalProduct : internal::no_assignment_operator,
     {
       enum {
         InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
-        DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
+        DiagonalVectorPacketLoadMode = (LoadMode == Aligned && (((InnerSize%16) == 0) || (int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit)==AlignedBit) ? Aligned : Unaligned)
       };
       return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
                             m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
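Illustration (editor's sketch, hypothetical helper): the coefficient formula this whole class evaluates lazily; the new coeff(Index)/packet(Index) overloads merely remap a linear index onto it.

    #include <cstddef>
    #include <vector>

    // (D * M)(i,j) = d[i] * M(i,j): row i of M is scaled by the i-th diagonal entry.
    double diagProductCoeff(const std::vector<double>& d,
                            const std::vector<std::vector<double> >& M,
                            std::size_t i, std::size_t j)
    {
      return d[i] * M[i][j];
    }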
@@ -115,7 +115,7 @@ MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
 template<typename Derived>
 EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
 {
-  return internal::real((*this).cwiseAbs2().sum());
+  return numext::real((*this).cwiseAbs2().sum());
 }

 /** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
@@ -170,6 +170,7 @@ struct lpNorm_selector
   EIGEN_DEVICE_FUNC
   static inline RealScalar run(const MatrixBase<Derived>& m)
   {
+    using std::pow;
     return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
   }
 };
@@ -235,7 +236,7 @@ bool MatrixBase<Derived>::isOrthogonal
 {
   typename internal::nested<Derived,2>::type nested(derived());
   typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
-  return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
+  return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
 }

 /** \returns true if *this is approximately an unitary matrix,
@@ -171,7 +171,8 @@ struct functor_traits<scalar_hypot_op<Scalar> > {
   */
 template<typename Scalar, typename OtherScalar> struct scalar_binary_pow_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op)
-  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return internal::pow(a, b); }
+  EIGEN_DEVICE_FUNC
+  inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return numext::pow(a, b); }
 };
 template<typename Scalar, typename OtherScalar>
 struct functor_traits<scalar_binary_pow_op<Scalar,OtherScalar> > {
@@ -310,7 +311,8 @@ struct functor_traits<scalar_abs_op<Scalar> >
 template<typename Scalar> struct scalar_abs2_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs2(a); }
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
   { return internal::pmul(a,a); }
@@ -326,7 +328,8 @@ struct functor_traits<scalar_abs2_op<Scalar> >
   */
 template<typename Scalar> struct scalar_conjugate_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using internal::conj; return conj(a); }
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); }
   template<typename Packet>
   EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
 };
@@ -363,7 +366,8 @@ template<typename Scalar>
 struct scalar_real_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::real(a); }
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_real_op<Scalar> >
@@ -378,7 +382,8 @@ template<typename Scalar>
 struct scalar_imag_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::imag(a); }
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_imag_op<Scalar> >
@@ -393,7 +398,8 @@ template<typename Scalar>
 struct scalar_real_ref_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::real_ref(*const_cast<Scalar*>(&a)); }
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast<Scalar*>(&a)); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_real_ref_op<Scalar> >
@@ -408,7 +414,8 @@ template<typename Scalar>
 struct scalar_imag_ref_op {
   EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
   typedef typename NumTraits<Scalar>::Real result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::imag_ref(*const_cast<Scalar*>(&a)); }
+  EIGEN_DEVICE_FUNC
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast<Scalar*>(&a)); }
 };
 template<typename Scalar>
 struct functor_traits<scalar_imag_ref_op<Scalar> >
@@ -804,7 +811,8 @@ struct scalar_pow_op {
   // FIXME default copy constructors seems bugged with std::complex<>
   inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { }
   inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
-  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); }
+  EIGEN_DEVICE_FUNC
+  inline Scalar operator() (const Scalar& a) const { return numext::pow(a, m_exponent); }
   const Scalar m_exponent;
 };
 template<typename Scalar>
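Illustration (editor's sketch): the stateless-functor pattern these hunks touch — operator() is the scalar kernel, and the struct is handed to an expression such as unaryExpr(). The squaring functor below is hypothetical.

    template<typename Scalar> struct scalar_square_op {
      inline const Scalar operator()(const Scalar& a) const { return a * a; }
    };

    // usage sketch: mat.unaryExpr(scalar_square_op<double>())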
@@ -45,7 +45,7 @@ struct isMuchSmallerThan_object_selector
   EIGEN_DEVICE_FUNC
   static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
   {
-    return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum();
+    return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum();
   }
 };
@@ -65,7 +65,7 @@ struct isMuchSmallerThan_scalar_selector
   EIGEN_DEVICE_FUNC
   static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec)
   {
-    return x.cwiseAbs2().sum() <= abs2(prec * y);
+    return x.cwiseAbs2().sum() <= numext::abs2(prec * y);
   }
 };
@@ -435,7 +435,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>

     gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;

-    bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+    bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
     bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;

     RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
@@ -105,8 +105,9 @@ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
 pnegate(const Packet& a) { return -a; }

 /** \internal \returns conj(a) (coeff-wise) */
+
 template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-pconj(const Packet& a) { return conj(a); }
+pconj(const Packet& a) { return numext::conj(a); }

 /** \internal \returns a * b (coeff-wise) */
 template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
@@ -39,6 +39,7 @@ namespace Eigen
 {
   EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op)
   EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op)
   EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op)
   EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op)
   EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op)
@@ -86,6 +87,6 @@ namespace Eigen
   }
 }

-// TODO: cleanly disable those functions that are not supported on Array (internal::real_ref, internal::random, internal::isApprox...)
+// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...)

 #endif // EIGEN_GLOBAL_FUNCTIONS_H
@@ -55,7 +55,7 @@ struct IOFormat
            const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
            const std::string& _matPrefix="", const std::string& _matSuffix="")
   : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),
-    coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
+    rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
   {
     int i = int(matSuffix.length())-1;
     while (i>=0 && matSuffix[i]!='\n')
@@ -51,16 +51,15 @@ struct global_math_functions_filtering_base
   typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;
 };

-#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl<typename global_math_functions_filtering_base<scalar>::type>
-#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval<typename global_math_functions_filtering_base<scalar>::type>::type
+#define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>
+#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>::type

 /****************************************************************************
 * Implementation of real                                                    *
 ****************************************************************************/

-template<typename Scalar>
-struct real_impl
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct real_default_impl
 {
   typedef typename NumTraits<Scalar>::Real RealScalar;
   EIGEN_DEVICE_FUNC
@@ -70,36 +69,32 @@ struct real_impl
   }
 };

-template<typename RealScalar>
-struct real_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct real_default_impl<Scalar,true>
 {
   typedef typename NumTraits<Scalar>::Real RealScalar;
   EIGEN_DEVICE_FUNC
-  static inline RealScalar run(const std::complex<RealScalar>& x)
+  static inline RealScalar run(const Scalar& x)
   {
     using std::real;
     return real(x);
   }
 };

+template<typename Scalar> struct real_impl : real_default_impl<Scalar> {};
+
 template<typename Scalar>
 struct real_retval
 {
   typedef typename NumTraits<Scalar>::Real type;
 };

-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of imag                                                    *
 ****************************************************************************/

-template<typename Scalar>
-struct imag_impl
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct imag_default_impl
 {
   typedef typename NumTraits<Scalar>::Real RealScalar;
   EIGEN_DEVICE_FUNC
@@ -109,30 +104,26 @@ struct imag_impl
   }
 };

-template<typename RealScalar>
-struct imag_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct imag_default_impl<Scalar,true>
 {
   typedef typename NumTraits<Scalar>::Real RealScalar;
   EIGEN_DEVICE_FUNC
-  static inline RealScalar run(const std::complex<RealScalar>& x)
+  static inline RealScalar run(const Scalar& x)
   {
     using std::imag;
     return imag(x);
   }
 };

+template<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {};
+
 template<typename Scalar>
 struct imag_retval
 {
   typedef typename NumTraits<Scalar>::Real type;
 };

-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of real_ref                                                *
 ****************************************************************************/
@@ -159,20 +150,6 @@ struct real_ref_retval
   typedef typename NumTraits<Scalar>::Real & type;
 };

-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
-{
-  return real_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of imag_ref                                                *
 ****************************************************************************/
@@ -217,25 +194,11 @@ struct imag_ref_retval
   typedef typename NumTraits<Scalar>::Real & type;
 };

-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
-{
-  return imag_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of conj                                                    *
 ****************************************************************************/

-template<typename Scalar>
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
 struct conj_impl
 {
   EIGEN_DEVICE_FUNC
@@ -245,11 +208,11 @@ struct conj_impl
   }
 };

-template<typename RealScalar>
-struct conj_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct conj_impl<Scalar,true>
 {
   EIGEN_DEVICE_FUNC
-  static inline std::complex<RealScalar> run(const std::complex<RealScalar>& x)
+  static inline Scalar run(const Scalar& x)
   {
     using std::conj;
     return conj(x);
@@ -262,13 +225,6 @@ struct conj_retval
   typedef Scalar type;
 };

-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of abs2                                                    *
 ****************************************************************************/
@@ -300,13 +256,6 @@ struct abs2_retval
   typedef typename NumTraits<Scalar>::Real type;
 };

-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of norm1                                                   *
 ****************************************************************************/
@@ -343,13 +292,6 @@ struct norm1_retval
   typedef typename NumTraits<Scalar>::Real type;
 };

-template<typename Scalar>
-EIGEN_DEVICE_FUNC
-inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
-{
-  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
-}
-
 /****************************************************************************
 * Implementation of hypot                                                   *
 ****************************************************************************/
@@ -367,6 +309,7 @@ struct hypot_impl
     RealScalar _x = abs(x);
     RealScalar _y = abs(y);
     RealScalar p = (max)(_x, _y);
+    if(p==RealScalar(0)) return 0;
    RealScalar q = (min)(_x, _y);
     RealScalar qp = q/p;
     return p * sqrt(RealScalar(1) + qp*qp);
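Illustration (editor's sketch): hypot_impl's scaled evaluation as standalone C++, including the guard added above, without which hypot(0,0) would divide by zero.

    #include <cmath>

    double stableHypot(double x, double y)
    {
      double ax = std::abs(x), ay = std::abs(y);
      double p = ax > ay ? ax : ay;   // (max)(_x, _y)
      if (p == 0.0) return 0.0;       // the new early return
      double q = ax > ay ? ay : ax;   // (min)(_x, _y)
      double qp = q / p;              // qp <= 1, so qp*qp cannot overflow
      return p * std::sqrt(1.0 + qp * qp);
    }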
@@ -379,12 +322,6 @@ struct hypot_retval
   typedef typename NumTraits<Scalar>::Real type;
 };

-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
-}
-
 /****************************************************************************
 * Implementation of cast                                                    *
 ****************************************************************************/
@@ -447,12 +384,6 @@ struct atanh2_retval
   typedef Scalar type;
 };

-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y);
-}
-
 /****************************************************************************
 * Implementation of pow                                                     *
 ****************************************************************************/
@@ -496,12 +427,6 @@ struct pow_retval
   typedef Scalar type;
 };

-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
-{
-  return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
-}
-
 /****************************************************************************
 * Implementation of random                                                  *
 ****************************************************************************/
@@ -600,11 +525,10 @@ struct random_default_impl<Scalar, false, true>
 #else
     enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value,
            scalar_bits = sizeof(Scalar) * CHAR_BIT,
-           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits))
+           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)),
+           offset = NumTraits<Scalar>::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0
     };
-    Scalar x = Scalar(std::rand() >> shift);
-    Scalar offset = NumTraits<Scalar>::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0);
-    return x - offset;
+    return Scalar((std::rand() >> shift) - offset);
 #endif
   }
 };
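Illustration (editor's sketch): the new mapping for a 16-bit signed scalar, assuming RAND_MAX == 2^31 - 1. The subtraction now happens in int rather than in the narrow Scalar, where the old code could overflow.

    #include <cstdlib>

    int randomInt16()
    {
      const int rand_bits   = 31;                // assumes RAND_MAX == 2^31 - 1
      const int scalar_bits = 16;
      const int shift  = rand_bits - scalar_bits;
      const int offset = 1 << (scalar_bits - 1); // 2^15
      return (std::rand() >> shift) - offset;    // roughly uniform in [-2^15, 2^15)
    }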
@@ -636,6 +560,111 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
   return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
 }

 } // end namespace internal

+/****************************************************************************
+* Generic math function                                                     *
+****************************************************************************/
+
+namespace numext {
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
+{
+  return internal::real_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
+{
+  return internal::imag_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+EIGEN_DEVICE_FUNC
+inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
+}
+
+// std::isfinite is non standard, so let's define our own version,
+// even though it is not very efficient.
+template<typename T>
+EIGEN_DEVICE_FUNC
+bool (isfinite)(const T& x)
+{
+  return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
+}
+
+} // end namespace numext
+
+namespace internal {
+
 /****************************************************************************
 * Implementation of fuzzy comparisons                                       *
 ****************************************************************************/
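Usage sketch (editor's illustration, assuming an Eigen version that ships this namespace): one spelling that works for real and complex scalars alike, on host and — via EIGEN_DEVICE_FUNC — on device.

    #include <Eigen/Core>
    #include <complex>
    #include <iostream>

    int main()
    {
      std::complex<double> z(3.0, 4.0);
      std::cout << Eigen::numext::real(z) << "\n";   // 3
      std::cout << Eigen::numext::abs2(z) << "\n";   // 25
      std::cout << Eigen::numext::conj(2.0) << "\n"; // 2: a no-op on real scalars
    }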
@@ -697,12 +726,12 @@ struct scalar_fuzzy_default_impl<Scalar, true, false>
   template<typename OtherScalar>
   static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
   {
-    return abs2(x) <= abs2(y) * prec * prec;
+    return numext::abs2(x) <= numext::abs2(y) * prec * prec;
   }
   static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
   {
     EIGEN_USING_STD_MATH(min);
-    return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
+    return numext::abs2(x - y) <= (min)(numext::abs2(x), numext::abs2(y)) * prec * prec;
   }
 };
@@ -766,17 +795,7 @@ template<> struct scalar_fuzzy_impl<bool>

 };

-/****************************************************************************
-* Special functions                                                         *
-****************************************************************************/
-
-// std::isfinite is non standard, so let's define our own version,
-// even though it is not very efficient.
-template<typename T> bool (isfinite)(const T& x)
-{
-  return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
-}
-
 } // end namespace internal

 } // end namespace Eigen
@@ -205,7 +205,7 @@ class Matrix
       * \sa resize(Index,Index)
       */
     EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE explicit Matrix() : Base()
+    EIGEN_STRONG_INLINE Matrix() : Base()
     {
       Base::_check_template_params();
       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
@@ -232,8 +232,8 @@ template<typename Derived> class MatrixBase

     typedef Diagonal<Derived> DiagonalReturnType;
     DiagonalReturnType diagonal();
-    typedef const Diagonal<const Derived> ConstDiagonalReturnType;
-    const ConstDiagonalReturnType diagonal() const;
+    typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
+    ConstDiagonalReturnType diagonal() const;

     template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
     template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };
@@ -86,6 +86,10 @@ class NoAlias
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
     { return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+
+    template<typename OtherDerived>
+    ExpressionType& operator=(const ReturnByValue<OtherDerived>& func)
+    { return m_expression = func; }
 #endif

     EIGEN_DEVICE_FUNC
@@ -437,7 +437,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
     }

     EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage()
+    EIGEN_STRONG_INLINE PlainObjectBase() : m_storage()
     {
 //       _check_template_params();
 //       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
@@ -48,7 +48,7 @@ struct nested<ReturnByValue<Derived>, n, PlainObject>
 } // end namespace internal

 template<typename Derived> class ReturnByValue
-  : public internal::dense_xpr_base< ReturnByValue<Derived> >::type
+  : internal::no_assignment_operator, public internal::dense_xpr_base< ReturnByValue<Derived> >::type
 {
   public:
     typedef typename internal::traits<Derived>::ReturnType ReturnType;
@@ -214,9 +214,9 @@ struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), U
     triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount-1, ClearOpposite>::run(dst, src);

     if(row == col)
-      dst.coeffRef(row, col) = real(src.coeff(row, col));
+      dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
     else if(row < col)
-      dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+      dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
   }
 };
@@ -239,9 +239,9 @@ struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), U
     triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount-1, ClearOpposite>::run(dst, src);

     if(row == col)
-      dst.coeffRef(row, col) = real(src.coeff(row, col));
+      dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
     else if(row > col)
-      dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+      dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
   }
 };
@ -262,7 +262,7 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dyn
|
||||
for(Index i = 0; i < j; ++i)
|
||||
{
|
||||
dst.copyCoeff(i, j, src);
|
||||
dst.coeffRef(j,i) = conj(dst.coeff(i,j));
|
||||
dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
|
||||
}
|
||||
dst.copyCoeff(j, j, src);
|
||||
}
|
||||
@ -280,7 +280,7 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dyn
|
||||
for(Index j = 0; j < i; ++j)
|
||||
{
|
||||
dst.copyCoeff(i, j, src);
|
||||
dst.coeffRef(j,i) = conj(dst.coeff(i,j));
|
||||
dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
|
||||
}
|
||||
dst.copyCoeff(i, i, src);
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc
Scalar max = bl.cwiseAbs().maxCoeff();
if (max>scale)
{
ssq = ssq * abs2(scale/max);
ssq = ssq * numext::abs2(scale/max);
scale = max;
invScale = Scalar(1)/scale;
}
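The rescaling above maintains the invariant norm == scale * sqrt(ssq), so nothing large is ever squared. A minimal scalar sketch of the same accumulation (standalone, not Eigen's kernel):

#include <cmath>

void accumulate(double x, double& ssq, double& scale)
{
  double ax = std::abs(x);
  if (ax > scale)
  {
    // Re-express the running sum of squares relative to the larger scale:
    // ssq' = ssq * (scale/ax)^2, mirroring ssq * abs2(scale/max) above.
    if (scale > 0.0) ssq *= (scale/ax)*(scale/ax);
    scale = ax;
  }
  if (scale > 0.0) ssq += (ax/scale)*(ax/scale);
}
// Final result: norm = scale * std::sqrt(ssq).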
@ -84,9 +84,9 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
for(typename Derived::InnerIterator it(vec, 0); it; ++it)
{
RealScalar ax = abs(it.value());
if(ax > ab2) abig += internal::abs2(ax*s2m);
else if(ax < b1) asml += internal::abs2(ax*s1m);
else amed += internal::abs2(ax);
if(ax > ab2) abig += numext::abs2(ax*s2m);
else if(ax < b1) asml += numext::abs2(ax*s1m);
else amed += numext::abs2(ax);
}
if(abig > RealScalar(0))
{
@ -120,7 +120,7 @@ blueNorm_impl(const EigenBase<Derived>& _vec)
if(asml <= abig*relerr)
return abig;
else
return abig * sqrt(RealScalar(1) + internal::abs2(asml/abig));
return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));
}

} // end namespace internal
@ -107,6 +107,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>

typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)

EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
@ -215,7 +216,7 @@ DenseBase<Derived>::transpose()
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
inline const typename DenseBase<Derived>::ConstTransposeReturnType
inline typename DenseBase<Derived>::ConstTransposeReturnType
DenseBase<Derived>::transpose() const
{
return ConstTransposeReturnType(derived());
@ -261,7 +262,7 @@ struct inplace_transpose_selector;
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,true> { // square matrix
static void run(MatrixType& m) {
m.template triangularView<StrictlyUpper>().swap(m.transpose());
m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
}
};

@ -269,7 +270,7 @@ template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,false> { // non square matrix
static void run(MatrixType& m) {
if (m.rows()==m.cols())
m.template triangularView<StrictlyUpper>().swap(m.transpose());
m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
else
m = m.transpose().eval();
}
@ -396,7 +397,7 @@ struct checkTransposeAliasing_impl
eigen_assert((!check_transpose_aliasing_run_time_selector
<typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
::run(extract_data(dst), other))
&& "aliasing detected during tranposition, use transposeInPlace() "
&& "aliasing detected during transposition, use transposeInPlace() "
"or evaluate the rhs into a temporary using .eval()");

}
@ -173,6 +173,9 @@ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return psub<Packet4f>(p4f_ZERO, a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return psub<Packet4i>(p4i_ZERO, a); }

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,p4f_ZERO); }
/* Commented out: it's actually slower than processing it scalar
*
@ -68,7 +68,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
Packet4f v1, v2;
float32x2_t a_lo, a_hi;

// Get the real values of a | a1_re | a1_re | a2_re | a2_re |
v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));
@ -81,9 +80,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
// Conjugate v2
v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR));
// Swap real/imag elements in v2.
a_lo = vrev64_f32(vget_low_f32(v2));
a_hi = vrev64_f32(vget_high_f32(v2));
v2 = vcombine_f32(a_lo, a_hi);
v2 = vrev64q_f32(v2);
// Add and return the result
return Packet2cf(vaddq_f32(v1, v2));
}
@ -241,13 +238,10 @@ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, con
// TODO optimize it for AltiVec
Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
Packet4f s, rev_s;
float32x2_t a_lo, a_hi;

// this computes the norm
s = vmulq_f32(b.v, b.v);
a_lo = vrev64_f32(vget_low_f32(s));
a_hi = vrev64_f32(vget_high_f32(s));
rev_s = vcombine_f32(a_lo, a_hi);
rev_s = vrev64q_f32(s);

return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s)));
}
@ -115,6 +115,9 @@ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }

@ -188,15 +191,15 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EI
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
float32x2_t lo, hi;
lo = vdup_n_f32(*from);
hi = vdup_n_f32(*(from+1));
lo = vld1_dup_f32(from);
hi = vld1_dup_f32(from+1);
return vcombine_f32(lo, hi);
}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
int32x2_t lo, hi;
lo = vdup_n_s32(*from);
hi = vdup_n_s32(*(from+1));
lo = vld1_dup_s32(from);
hi = vld1_dup_s32(from+1);
return vcombine_s32(lo, hi);
}
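The change above replaces a scalar load followed by vdup_n with a single duplicating load. A hedged sketch of the pattern (assumes a NEON target with <arm_neon.h>; the function name is illustrative):

#include <arm_neon.h>

float32x4_t broadcast_pair(const float* from)
{
  // vld1_dup loads and duplicates in one instruction, instead of a
  // scalar load (*from) followed by vdup_n.
  float32x2_t lo = vld1_dup_f32(from);      // | from[0] | from[0] |
  float32x2_t hi = vld1_dup_f32(from + 1);  // | from[1] | from[1] |
  return vcombine_f32(lo, hi);              // | f0 | f0 | f1 | f1 |
}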
@ -81,8 +81,8 @@ template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a,
template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); }

template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }

template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
@ -104,8 +104,8 @@ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<flo

template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }

template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&real_ref(*to), from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&real_ref(*to), from.v); }
template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }

template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
@ -450,7 +450,7 @@ Packet4f psqrt<Packet4f>(const Packet4f& _x)
Packet4f half = pmul(_x, pset1<Packet4f>(.5f));

/* select only the inverse sqrt of non-zero inputs */
Packet4f non_zero_mask = _mm_cmpgt_ps(_x, pset1<Packet4f>(std::numeric_limits<float>::epsilon()));
Packet4f non_zero_mask = _mm_cmpge_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)()));
Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));

x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
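The kernel above computes sqrt(x) as x * rsqrt(x): _mm_rsqrt_ps gives a ~12-bit estimate of 1/sqrt(x), one Newton-Raphson step refines it, and the mask zeroes lanes where rsqrt would return inf (the switch from epsilon to the smallest normalized float widens the range of accepted small inputs). A hedged standalone sketch of the same technique:

#include <xmmintrin.h>
#include <limits>

__m128 fast_psqrt(__m128 a)
{
  __m128 half = _mm_mul_ps(a, _mm_set1_ps(0.5f));
  // Keep only lanes large enough for rsqrt; rsqrt(0) = inf and inf*0 = NaN.
  __m128 mask = _mm_cmpge_ps(a, _mm_set1_ps((std::numeric_limits<float>::min)()));
  __m128 x = _mm_and_ps(mask, _mm_rsqrt_ps(a));
  // One Newton-Raphson step, x <- x*(1.5 - 0.5*a*x*x), roughly doubles precision.
  x = _mm_mul_ps(x, _mm_sub_ps(_mm_set1_ps(1.5f), _mm_mul_ps(half, _mm_mul_ps(x, x))));
  return _mm_mul_ps(a, x);  // a * (1/sqrt(a)) == sqrt(a)
}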
@ -141,6 +141,10 @@ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
return psub(_mm_setr_epi32(0,0,0,0), a);
}

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
@ -86,7 +86,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
if(ConjugateRhs)
alpha = conj(alpha);
alpha = numext::conj(alpha);

enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
const Index columnsAtOnce = 4;
@ -30,9 +30,9 @@ struct symm_pack_lhs
for(Index k=i; k<i+BlockRows; k++)
{
for(Index w=0; w<h; w++)
blockA[count++] = conj(lhs(k, i+w)); // transposed
blockA[count++] = numext::conj(lhs(k, i+w)); // transposed

blockA[count++] = real(lhs(k,k)); // real (diagonal)
blockA[count++] = numext::real(lhs(k,k)); // real (diagonal)

for(Index w=h+1; w<BlockRows; w++)
blockA[count++] = lhs(i+w, k); // normal
@ -41,7 +41,7 @@ struct symm_pack_lhs
// transposed copy
for(Index k=i+BlockRows; k<cols; k++)
for(Index w=0; w<BlockRows; w++)
blockA[count++] = conj(lhs(k, i+w)); // transposed
blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
}
void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{
@ -65,10 +65,10 @@ struct symm_pack_lhs
for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal

blockA[count++] = real(lhs(i, i)); // real (diagonal)
blockA[count++] = numext::real(lhs(i, i)); // real (diagonal)

for(Index k=i+1; k<cols; k++)
blockA[count++] = conj(lhs(k, i)); // transposed
blockA[count++] = numext::conj(lhs(k, i)); // transposed
}
}
};
@ -107,12 +107,12 @@ struct symm_pack_rhs
// transpose
for(Index k=k2; k<j2; k++)
{
blockB[count+0] = conj(rhs(j2+0,k));
blockB[count+1] = conj(rhs(j2+1,k));
blockB[count+0] = numext::conj(rhs(j2+0,k));
blockB[count+1] = numext::conj(rhs(j2+1,k));
if (nr==4)
{
blockB[count+2] = conj(rhs(j2+2,k));
blockB[count+3] = conj(rhs(j2+3,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
}
count += nr;
}
@ -124,11 +124,11 @@ struct symm_pack_rhs
for (Index w=0 ; w<h; ++w)
blockB[count+w] = rhs(k,j2+w);

blockB[count+h] = real(rhs(k,k));
blockB[count+h] = numext::real(rhs(k,k));

// transpose
for (Index w=h+1 ; w<nr; ++w)
blockB[count+w] = conj(rhs(j2+w,k));
blockB[count+w] = numext::conj(rhs(j2+w,k));
count += nr;
++h;
}
@ -151,12 +151,12 @@ struct symm_pack_rhs
{
for(Index k=k2; k<end_k; k++)
{
blockB[count+0] = conj(rhs(j2+0,k));
blockB[count+1] = conj(rhs(j2+1,k));
blockB[count+0] = numext::conj(rhs(j2+0,k));
blockB[count+1] = numext::conj(rhs(j2+1,k));
if (nr==4)
{
blockB[count+2] = conj(rhs(j2+2,k));
blockB[count+3] = conj(rhs(j2+3,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
}
count += nr;
}
@ -169,13 +169,13 @@ struct symm_pack_rhs
Index half = (std::min)(end_k,j2);
for(Index k=k2; k<half; k++)
{
blockB[count] = conj(rhs(j2,k));
blockB[count] = numext::conj(rhs(j2,k));
count += 1;
}

if(half==j2 && half<k2+rows)
{
blockB[count] = real(rhs(j2,j2));
blockB[count] = numext::real(rhs(j2,j2));
count += 1;
}
else
@ -59,7 +59,7 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0;
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;

Scalar cjAlpha = ConjugateRhs ? conj(alpha) : alpha;
Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;

// FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
// if the rhs is not sequentially stored in memory we copy it to a temporary buffer,
@ -98,8 +98,8 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);

// TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
res[j] += cjd.pmul(internal::real(A0[j]), t0);
res[j+1] += cjd.pmul(internal::real(A1[j+1]), t1);
res[j] += cjd.pmul(numext::real(A0[j]), t0);
res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
if(FirstTriangular)
{
res[j] += cj0.pmul(A1[j], t1);
@ -114,8 +114,8 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
for (size_t i=starti; i<alignedStart; ++i)
{
res[i] += t0 * A0[i] + t1 * A1[i];
t2 += conj(A0[i]) * rhs[i];
t3 += conj(A1[i]) * rhs[i];
t2 += numext::conj(A0[i]) * rhs[i];
t3 += numext::conj(A1[i]) * rhs[i];
}
// Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up)
// gcc 4.2 does this optimization automatically.
@ -152,7 +152,7 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
Scalar t1 = cjAlpha * rhs[j];
Scalar t2(0);
// TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
res[j] += cjd.pmul(internal::real(A0[j]), t1);
res[j] += cjd.pmul(numext::real(A0[j]), t1);
for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
{
res[i] += cj0.pmul(A0[i], t1);
@ -30,8 +30,8 @@ struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
for (Index i=0; i<size; ++i)
{
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
(conj(alpha) * conj(u.coeff(i))) * v.tail(size-i)
+ (alpha * conj(v.coeff(i))) * u.tail(size-i);
(numext::conj(alpha) * numext::conj(u.coeff(i))) * v.tail(size-i)
+ (alpha * numext::conj(v.coeff(i))) * u.tail(size-i);
}
}
};
@ -44,8 +44,8 @@ struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
const Index size = u.size();
for (Index i=0; i<size; ++i)
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
(conj(alpha) * conj(u.coeff(i))) * v.head(i+1)
+ (alpha * conj(v.coeff(i))) * u.head(i+1);
(numext::conj(alpha) * numext::conj(u.coeff(i))) * v.head(i+1)
+ (alpha * numext::conj(v.coeff(i))) * u.head(i+1);
}
};
@ -75,9 +75,9 @@ SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>

enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived())
* internal::conj(VBlasTraits::extractScalarFactor(v.derived()));
* numext::conj(VBlasTraits::extractScalarFactor(v.derived()));
if (IsRowMajor)
actualAlpha = internal::conj(actualAlpha);
actualAlpha = numext::conj(actualAlpha);

internal::selfadjoint_rank2_update_selector<Scalar, Index,
typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type,
@ -245,7 +245,7 @@ template<> struct trmv_selector<ColMajor>

gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;

bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;

RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
@ -42,7 +42,7 @@ template<bool Conjugate> struct conj_if;

template<> struct conj_if<true> {
template<typename T>
inline T operator()(const T& x) { return conj(x); }
inline T operator()(const T& x) { return numext::conj(x); }
template<typename T>
inline T pconj(const T& x) { return internal::pconj(x); }
};
@ -67,7 +67,7 @@ template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::
{ return c + pmul(x,y); }

EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
{ return Scalar(real(x)*real(y) + imag(x)*imag(y), imag(x)*real(y) - real(x)*imag(y)); }
{ return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); }
};

template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>
@ -77,7 +77,7 @@ template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::
{ return c + pmul(x,y); }

EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
{ return Scalar(real(x)*real(y) + imag(x)*imag(y), real(x)*imag(y) - imag(x)*real(y)); }
{ return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
};

template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>
@ -87,7 +87,7 @@ template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::
{ return c + pmul(x,y); }

EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
{ return Scalar(real(x)*real(y) - imag(x)*imag(y), - real(x)*imag(y) - imag(x)*real(y)); }
{ return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
};
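For reference, the three specializations above expand the complex product with one or both operands conjugated; writing x = x_r + i x_i and y = y_r + i y_i:

\[ x\,\bar{y} = (x_r y_r + x_i y_i) + i\,(x_i y_r - x_r y_i) \]
\[ \bar{x}\,y = (x_r y_r + x_i y_i) + i\,(x_r y_i - x_i y_r) \]
\[ \bar{x}\,\bar{y} = (x_r y_r - x_i y_i) - i\,(x_r y_i + x_i y_r) \]

Each pmul body above is exactly one of these expansions, spelled out with numext::real and numext::imag.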
template<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>
@ -113,7 +113,8 @@ template<typename From,typename To> struct get_factor {
};

template<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::Real> {
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return real(x); }
EIGEN_DEVICE_FUNC
static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return numext::real(x); }
};

// Lightweight helper class to access matrix coefficients.
@ -12,8 +12,8 @@
#define EIGEN_MACROS_H

#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 1
#define EIGEN_MINOR_VERSION 91
#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 90

#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
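The bump above moves the version from 3.1.91 to 3.2.90 (a 3.2 release-candidate numbering). A hedged usage sketch of the EIGEN_VERSION_AT_LEAST macro defined right after it:

#include <Eigen/Core>

#if EIGEN_VERSION_AT_LEAST(3,2,0)
// Code relying on post-3.2 behaviour goes here.
#else
// Fallback for older Eigen releases.
#endif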
@ -240,10 +240,12 @@
// Suppresses 'unused variable' warnings.
#define EIGEN_UNUSED_VARIABLE(var) (void)var;

#if !defined(EIGEN_ASM_COMMENT) && (defined __GNUC__)
#define EIGEN_ASM_COMMENT(X) asm("#" X)
#else
#define EIGEN_ASM_COMMENT(X)
#if !defined(EIGEN_ASM_COMMENT)
#if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
#define EIGEN_ASM_COMMENT(X) asm("#" X)
#else
#define EIGEN_ASM_COMMENT(X)
#endif
#endif

/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
@ -58,10 +58,17 @@

#endif

#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) \
&& (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
#define EIGEN_HAS_POSIX_MEMALIGN 1
#else
// See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554)
// It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first.
// Currently, let's include it only on unix systems:
#if defined(__unix__) || defined(__unix)
#include <unistd.h>
#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
#define EIGEN_HAS_POSIX_MEMALIGN 1
#endif
#endif

#ifndef EIGEN_HAS_POSIX_MEMALIGN
#define EIGEN_HAS_POSIX_MEMALIGN 0
#endif
@ -215,7 +222,7 @@ inline void* aligned_malloc(size_t size)
if(posix_memalign(&result, 16, size)) result = 0;
#elif EIGEN_HAS_MM_MALLOC
result = _mm_malloc(size, 16);
#elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
result = _aligned_malloc(size, 16);
#else
result = handmade_aligned_malloc(size);
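The last fallback, handmade_aligned_malloc, is the classic over-allocate-and-stash trick. A hedged sketch of that technique (illustrative only, assuming malloc returns at least pointer-aligned memory; not Eigen's exact code, and the names are hypothetical):

#include <cstdlib>

void* handmade_aligned_malloc_sketch(std::size_t size)
{
  void* original = std::malloc(size + 16);  // room to shift plus bookkeeping
  if (original == 0) return 0;
  // Round up to the next 16-byte boundary past the original pointer.
  void* aligned = reinterpret_cast<void*>(
      (reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
  *(reinterpret_cast<void**>(aligned) - 1) = original;  // stash for free()
  return aligned;
}

void handmade_aligned_free_sketch(void* ptr)
{
  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
}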
@ -91,7 +91,8 @@ template<typename T> struct functor_traits
enum
{
Cost = 10,
PacketAccess = false
PacketAccess = false,
IsRepeatable = false
};
};
@ -34,7 +34,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;

/** Default constructor initializing a null box. */
inline explicit AlignedBox()
inline AlignedBox()
{ if (AmbientDimAtCompileTime!=Dynamic) setNull(); }

/** Constructs a null box with \a _dim the dimension of the ambient space. */
@ -44,7 +44,7 @@ public:
typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;

/** Default constructor without initialization */
inline explicit Hyperplane() {}
inline Hyperplane() {}

/** Constructs a dynamic-size hyperplane with \a _dim the dimension
* of the ambient space */
@ -36,7 +36,7 @@ public:
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;

/** Default constructor without initialization */
inline explicit ParametrizedLine() {}
inline ParametrizedLine() {}

/** Constructs a dynamic-size line with \a _dim the dimension
* of the ambient space */
@ -12,18 +12,18 @@

namespace Eigen {

template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return internal::real(x); }
template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return internal::imag(x); }
template<typename T> inline T ei_conj(const T& x) { return internal::conj(x); }
template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return numext::real(x); }
template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return numext::imag(x); }
template<typename T> inline T ei_conj(const T& x) { return numext::conj(x); }
template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { using std::abs; return abs(x); }
template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return internal::abs2(x); }
template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return numext::abs2(x); }
template<typename T> inline T ei_sqrt(const T& x) { using std::sqrt; return sqrt(x); }
template<typename T> inline T ei_exp (const T& x) { using std::exp; return exp(x); }
template<typename T> inline T ei_log (const T& x) { using std::log; return log(x); }
template<typename T> inline T ei_sin (const T& x) { using std::sin; return sin(x); }
template<typename T> inline T ei_cos (const T& x) { using std::cos; return cos(x); }
template<typename T> inline T ei_atan2(const T& x,const T& y) { using std::atan2; return atan2(x,y); }
template<typename T> inline T ei_pow (const T& x,const T& y) { return internal::pow(x,y); }
template<typename T> inline T ei_pow (const T& x,const T& y) { return numext::pow(x,y); }
template<typename T> inline T ei_random () { return internal::random<T>(); }
template<typename T> inline T ei_random (const T& x, const T& y) { return internal::random(x, y); }
@ -315,7 +315,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
e[p-2] = 0.0;
for (j = p-2; j >= k; --j)
{
Scalar t(internal::hypot(m_sigma[j],f));
Scalar t(numext::hypot(m_sigma[j],f));
Scalar cs(m_sigma[j]/t);
Scalar sn(f/t);
m_sigma[j] = t;
@ -344,7 +344,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
e[k-1] = 0.0;
for (j = k; j < p; ++j)
{
Scalar t(internal::hypot(m_sigma[j],f));
Scalar t(numext::hypot(m_sigma[j],f));
Scalar cs( m_sigma[j]/t);
Scalar sn(f/t);
m_sigma[j] = t;
@ -392,7 +392,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)

for (j = k; j < p-1; ++j)
{
Scalar t = internal::hypot(f,g);
Scalar t = numext::hypot(f,g);
Scalar cs = f/t;
Scalar sn = g/t;
if (j != k)
@ -410,7 +410,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
m_matV(i,j) = t;
}
}
t = internal::hypot(f,g);
t = numext::hypot(f,g);
cs = f/t;
sn = g/t;
m_sigma[j] = t;
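The hypot used for these Givens rotations avoids overflow in sqrt(f*f + g*g) by factoring out the larger magnitude. A minimal scalar sketch of the idea (standalone, not Eigen's implementation):

#include <cmath>
#include <utility>

double safe_hypot(double x, double y)
{
  double ax = std::abs(x), ay = std::abs(y);
  if (ax < ay) std::swap(ax, ay);      // ensure ax >= ay
  if (ax == 0.0) return 0.0;
  double r = ay / ax;                  // r <= 1, so r*r cannot overflow
  return ax * std::sqrt(1.0 + r*r);    // == sqrt(x*x + y*y), overflow-safe
}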
@ -294,7 +294,7 @@ void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(const RealScalar& mat
{
// If the i-th and k-th eigenvalue are equal, then z equals 0.
// Use a small value instead, to prevent division by zero.
internal::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
numext::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
}
m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
}
@ -263,8 +263,8 @@ template<typename _MatrixType> class ComplexSchur
template<typename MatrixType>
inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
{
RealScalar d = internal::norm1(m_matT.coeff(i,i)) + internal::norm1(m_matT.coeff(i+1,i+1));
RealScalar sd = internal::norm1(m_matT.coeff(i+1,i));
RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1));
RealScalar sd = numext::norm1(m_matT.coeff(i+1,i));
if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
{
m_matT.coeffRef(i+1,i) = ComplexScalar(0);
@ -282,7 +282,7 @@ typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::compu
if (iter == 10 || iter == 20)
{
// exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
return abs(internal::real(m_matT.coeff(iu,iu-1))) + abs(internal::real(m_matT.coeff(iu-1,iu-2)));
return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2)));
}

// compute the shift as one of the eigenvalues of t, the 2x2
@ -299,13 +299,13 @@ typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::compu
ComplexScalar eival1 = (trace + disc) / RealScalar(2);
ComplexScalar eival2 = (trace - disc) / RealScalar(2);

if(internal::norm1(eival1) > internal::norm1(eival2))
if(numext::norm1(eival1) > numext::norm1(eival2))
eival2 = det / eival1;
else
eival1 = det / eival2;

// choose the eigenvalue closest to the bottom entry of the diagonal
if(internal::norm1(eival1-t.coeff(1,1)) < internal::norm1(eival2-t.coeff(1,1)))
if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1)))
return normt * eival1;
else
return normt * eival2;
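For reference, the shift above is an eigenvalue of the trailing 2x2 block T: with t = tr(T) and d = det(T),

\[ \lambda_{1,2} = \frac{t \pm \sqrt{t^2 - 4d}}{2}, \qquad \lambda_1 \lambda_2 = d. \]

Because the two roots multiply to d, the smaller-magnitude one is recomputed as d divided by the larger (avoiding cancellation in the subtraction), and the root closest to the bottom diagonal entry is taken as the shift.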
@ -317,12 +317,12 @@ MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
MatrixType matD = MatrixType::Zero(n,n);
for (Index i=0; i<n; ++i)
{
if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i))))
matD.coeffRef(i,i) = internal::real(m_eivalues.coeff(i));
if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i))))
matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));
else
{
matD.template block<2,2>(i,i) << internal::real(m_eivalues.coeff(i)), internal::imag(m_eivalues.coeff(i)),
-internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i));
matD.template block<2,2>(i,i) << numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),
-numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));
++i;
}
}
@ -338,7 +338,7 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
EigenvectorsType matV(n,n);
for (Index j=0; j<n; ++j)
{
if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))) || j+1==n)
if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j))) || j+1==n)
{
// we have a real eigen value
matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
@ -515,8 +515,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
else
{
std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q);
m_matT.coeffRef(n-1,n-1) = internal::real(cc);
m_matT.coeffRef(n-1,n) = internal::imag(cc);
m_matT.coeffRef(n-1,n-1) = numext::real(cc);
m_matT.coeffRef(n-1,n) = numext::imag(cc);
}
m_matT.coeffRef(n,n-1) = 0.0;
m_matT.coeffRef(n,n) = 1.0;
@ -538,8 +538,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
if (m_eivalues.coeff(i).imag() == RealScalar(0))
{
std::complex<Scalar> cc = cdiv(-ra,-sa,w,q);
m_matT.coeffRef(i,n-1) = internal::real(cc);
m_matT.coeffRef(i,n) = internal::imag(cc);
m_matT.coeffRef(i,n-1) = numext::real(cc);
m_matT.coeffRef(i,n) = numext::imag(cc);
}
else
{
@ -552,8 +552,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));

std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
m_matT.coeffRef(i,n-1) = internal::real(cc);
m_matT.coeffRef(i,n) = internal::imag(cc);
m_matT.coeffRef(i,n-1) = numext::real(cc);
m_matT.coeffRef(i,n) = numext::imag(cc);
if (abs(x) > (abs(lastw) + abs(q)))
{
m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
@ -562,8 +562,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
else
{
cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q);
m_matT.coeffRef(i+1,n-1) = internal::real(cc);
m_matT.coeffRef(i+1,n) = internal::imag(cc);
m_matT.coeffRef(i+1,n-1) = numext::real(cc);
m_matT.coeffRef(i+1,n) = numext::imag(cc);
}
}
@ -82,7 +82,7 @@ template<typename _MatrixType> class HessenbergDecomposition
typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;

/** \brief Return type of matrixQ() */
typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;

typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;

@ -313,7 +313,7 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector

// A = A H'
matA.rightCols(remainingSize)
.applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), internal::conj(h), &temp.coeffRef(0));
.applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
}
}
@ -395,7 +395,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>

if(n==1)
{
m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0));
m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0));
if(computeEigenvectors)
m_eivec.setOnes(n,n);
m_info = Success;
@ -669,7 +669,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
const Scalar t0 = Scalar(0.5) * sqrt( abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;
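computeRoots above is the closed form for the eigenvalues of a symmetric 2x2 matrix with entries a = m(0,0), b = m(1,0), c = m(1,1):

\[ \lambda_{1,2} = \frac{a + c}{2} \pm \frac{1}{2}\sqrt{(a - c)^2 + 4 b^2}, \]

which is exactly t1 -/+ t0 with t1 = (a+c)/2 and t0 = (1/2) sqrt(abs2(a-c) + 4 b^2) in the code.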
@ -699,9 +699,9 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
if(computeEigenvectors)
{
scaledMat.diagonal().array () -= eivals(1);
Scalar a2 = abs2(scaledMat(0,0));
Scalar c2 = abs2(scaledMat(1,1));
Scalar b2 = abs2(scaledMat(1,0));
Scalar a2 = numext::abs2(scaledMat(0,0));
Scalar c2 = numext::abs2(scaledMat(1,1));
Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
@ -744,7 +744,7 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
RealScalar e = subdiag[end-1];
// Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
// underflow thus leading to inf/NaN values when using the following commented code:
// RealScalar e2 = abs2(subdiag[end-1]);
// RealScalar e2 = numext::abs2(subdiag[end-1]);
// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
// This explains the following, somewhat more complicated, version:
RealScalar mu = diag[end];
@ -752,8 +752,8 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
mu -= abs(e);
else
{
RealScalar e2 = abs2(subdiag[end-1]);
RealScalar h = hypot(td,e);
RealScalar e2 = numext::abs2(subdiag[end-1]);
RealScalar h = numext::hypot(td,e);
if(e2==0) mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
else mu -= e2 / (td + (td>0 ? h : -h));
}
@ -56,7 +56,7 @@ SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(c
\
if(n==1) \
{ \
m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0)); \
m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0)); \
if(computeEigenvectors) m_eivec.setOnes(n,n); \
m_info = Success; \
m_isInitialized = true; \
@ -96,7 +96,7 @@ template<typename _MatrixType> class Tridiagonalization
>::type SubDiagonalReturnType;

/** \brief Return type of matrixQ() */
typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;

/** \brief Default constructor.
*
@ -345,7 +345,7 @@ namespace internal {
template<typename MatrixType, typename CoeffVectorType>
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
{
using internal::conj;
using numext::conj;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@ -468,7 +468,7 @@ struct tridiagonalization_inplace_selector<MatrixType,3,false>
{
using std::sqrt;
diag[0] = mat(0,0);
RealScalar v1norm2 = abs2(mat(2,0));
RealScalar v1norm2 = numext::abs2(mat(2,0));
if(v1norm2 == RealScalar(0))
{
diag[1] = mat(1,1);
@ -480,7 +480,7 @@ struct tridiagonalization_inplace_selector<MatrixType,3,false>
}
else
{
RealScalar beta = sqrt(abs2(mat(1,0)) + v1norm2);
RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2);
RealScalar invBeta = RealScalar(1)/beta;
Scalar m01 = mat(1,0) * invBeta;
Scalar m02 = mat(2,0) * invBeta;
@ -510,7 +510,7 @@ struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
{
diag(0,0) = real(mat(0,0));
diag(0,0) = numext::real(mat(0,0));
if(extractQ)
mat(0,0) = Scalar(1);
}
@ -56,7 +56,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)

/** Default constructor initializing a null box. */
inline explicit AlignedBox()
inline AlignedBox()
{ if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }

/** Constructs a null box with \a _dim the dimension of the ambient space. */
@ -27,56 +27,75 @@ namespace Eigen {
* * AngleAxisf(ea[1], Vector3f::UnitX())
* * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
* This corresponds to the right-multiply conventions (with right hand side frames).
*
* The returned angles are in the ranges [0:pi]x[0:pi]x[-pi:pi].
*
* \sa class AngleAxis
*/
template<typename Derived>
inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
{
using std::atan2;
using std::sin;
using std::cos;
/* Implemented from Graphics Gems IV */
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)

Matrix<Scalar,3,1> res;
typedef Matrix<typename Derived::Scalar,2,1> Vector2;
const Scalar epsilon = NumTraits<Scalar>::dummy_precision();

const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
const Index i = a0;
const Index j = (a0 + 1 + odd)%3;
const Index k = (a0 + 2 - odd)%3;

if (a0==a2)
{
Scalar s = Vector2(coeff(j,i) , coeff(k,i)).norm();
res[1] = atan2(s, coeff(i,i));
if (s > epsilon)
res[0] = atan2(coeff(j,i), coeff(k,i));
if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0)))
{
res[0] = atan2(coeff(j,i), coeff(k,i));
res[2] = atan2(coeff(i,j),-coeff(i,k));
res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
res[1] = -atan2(s2, coeff(i,i));
}
else
{
res[0] = Scalar(0);
res[2] = (coeff(i,i)>0?1:-1)*atan2(-coeff(k,j), coeff(j,j));
Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
res[1] = atan2(s2, coeff(i,i));
}
}

// With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,
// we can compute their respective rotation, and apply its inverse to M. Since the result must
// be a rotation around x, we have:
//
//  c2  s1.s2 c1.s2               1  0   0
//  0   c1    -s1     *   M   =   0  c3  s3
//  -s2 s1.c2 c1.c2               0 -s3  c3
//
// Thus:  m11.c1 - m21.s1 = c3  &  m12.c1 - m22.s1 = s3

Scalar s1 = sin(res[0]);
Scalar c1 = cos(res[0]);
res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j));
}
else
{
Scalar c = Vector2(coeff(i,i) , coeff(i,j)).norm();
res[1] = atan2(-coeff(i,k), c);
if (c > epsilon)
{
res[0] = atan2(coeff(j,k), coeff(k,k));
res[2] = atan2(coeff(i,j), coeff(i,i));
res[0] = atan2(coeff(j,k), coeff(k,k));
Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm();
if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) {
res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
res[1] = atan2(-coeff(i,k), -c2);
}
else
{
res[0] = Scalar(0);
res[2] = (coeff(i,k)>0?1:-1)*atan2(-coeff(k,j), coeff(j,j));
}
res[1] = atan2(-coeff(i,k), c2);
Scalar s1 = sin(res[0]);
Scalar c1 = cos(res[0]);
res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j));
}
if (!odd)
res = -res;

return res;
}
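A hedged round-trip sketch of the convention documented above (angles chosen inside the stated ranges so the decomposition should be recovered exactly, up to floating-point error):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  // Build a ZXZ rotation, then decompose it again with eulerAngles(2,0,2).
  Matrix3f m = (AngleAxisf(0.3f, Vector3f::UnitZ())
              * AngleAxisf(0.7f, Vector3f::UnitX())
              * AngleAxisf(-0.4f, Vector3f::UnitZ())).toRotationMatrix();
  Vector3f ea = m.eulerAngles(2, 0, 2);  // expected ~(0.3, 0.7, -0.4)
  std::cout << ea.transpose() << std::endl;
}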
@ -59,7 +59,7 @@ template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl
} // end namespace internal

template<typename MatrixType,int _Direction> class Homogeneous
: public MatrixBase<Homogeneous<MatrixType,_Direction> >
: internal::no_assignment_operator, public MatrixBase<Homogeneous<MatrixType,_Direction> >
{
public:
@ -50,7 +50,7 @@ public:
typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;

/** Default constructor without initialization */
inline explicit Hyperplane() {}
inline Hyperplane() {}

template<int OtherOptions>
Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
@ -33,9 +33,9 @@ MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
typename internal::nested<Derived,2>::type lhs(derived());
typename internal::nested<OtherDerived,2>::type rhs(other.derived());
return typename cross_product_return_type<OtherDerived>::type(
internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
);
}

@ -49,9 +49,9 @@ struct cross3_impl {
run(const VectorLhs& lhs, const VectorRhs& rhs)
{
return typename internal::plain_matrix_type<VectorLhs>::type(
internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
0
);
}
@ -141,8 +141,8 @@ struct unitOrthogonal_selector
if (maxi==0)
sndi = 1;
RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();
perp.coeffRef(maxi) = -conj(src.coeff(sndi)) * invnm;
perp.coeffRef(sndi) = conj(src.coeff(maxi)) * invnm;
perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm;
perp.coeffRef(sndi) = numext::conj(src.coeff(maxi)) * invnm;

return perp;
}
@ -168,8 +168,8 @@ struct unitOrthogonal_selector<Derived,3>
|| (!isMuchSmallerThan(src.y(), src.z())))
{
RealScalar invnm = RealScalar(1)/src.template head<2>().norm();
perp.coeffRef(0) = -conj(src.y())*invnm;
perp.coeffRef(1) = conj(src.x())*invnm;
perp.coeffRef(0) = -numext::conj(src.y())*invnm;
perp.coeffRef(1) = numext::conj(src.x())*invnm;
perp.coeffRef(2) = 0;
}
/* if both x and y are close to zero, then the vector is close
@ -180,8 +180,8 @@ struct unitOrthogonal_selector<Derived,3>
{
RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();
perp.coeffRef(0) = 0;
perp.coeffRef(1) = -conj(src.z())*invnm;
perp.coeffRef(2) = conj(src.y())*invnm;
perp.coeffRef(1) = -numext::conj(src.z())*invnm;
perp.coeffRef(2) = numext::conj(src.y())*invnm;
}

return perp;
@ -193,7 +193,7 @@ struct unitOrthogonal_selector<Derived,2>
{
typedef typename plain_matrix_type<Derived>::type VectorType;
static inline VectorType run(const Derived& src)
{ return VectorType(-conj(src.y()), conj(src.x())).normalized(); }
{ return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); }
};

} // end namespace internal
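A minimal real-valued sketch of the 3D construction above: rotate whichever coordinate pair is not too small by 90 degrees, then normalize (standalone illustration, not Eigen's API; the tolerance is arbitrary):

#include <cmath>

void unit_orthogonal3(const double v[3], double out[3])
{
  if (std::abs(v[0]) > 1e-12 || std::abs(v[1]) > 1e-12)
  {
    double invnm = 1.0 / std::sqrt(v[0]*v[0] + v[1]*v[1]);
    out[0] = -v[1]*invnm; out[1] = v[0]*invnm; out[2] = 0.0;   // (-y, x, 0)
  }
  else  // v is nearly parallel to the x-axis: use the (y, z) pair instead
  {
    double invnm = 1.0 / std::sqrt(v[1]*v[1] + v[2]*v[2]);
    out[0] = 0.0; out[1] = -v[2]*invnm; out[2] = v[1]*invnm;   // (0, -z, y)
  }
  // In both branches the dot product with v is zero by construction.
}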
@ -41,7 +41,7 @@ public:
typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;

/** Default constructor without initialization */
inline explicit ParametrizedLine() {}
inline ParametrizedLine() {}

template<int OtherOptions>
ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
@ -68,7 +68,7 @@ void MatrixBase<Derived>::makeHouseholder(
RealScalar& beta) const
{
using std::sqrt;
using internal::conj;
using numext::conj;

EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart)
VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);
@ -76,16 +76,16 @@ void MatrixBase<Derived>::makeHouseholder(
RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();
Scalar c0 = coeff(0);

if(tailSqNorm == RealScalar(0) && internal::imag(c0)==RealScalar(0))
if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0))
{
tau = RealScalar(0);
beta = internal::real(c0);
beta = numext::real(c0);
essential.setZero();
}
else
{
beta = sqrt(internal::abs2(c0) + tailSqNorm);
if (internal::real(c0)>=RealScalar(0))
beta = sqrt(numext::abs2(c0) + tailSqNorm);
if (numext::real(c0)>=RealScalar(0))
beta = -beta;
essential = tail / (c0 - beta);
tau = conj((beta - c0) / beta);
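For reference, makeHouseholder above computes, for a vector v with first coefficient c0 and remaining tail:

\[ \beta = -\operatorname{sign}(\Re(c_0))\,\sqrt{|c_0|^2 + \|v_{\text{tail}}\|^2}, \qquad e = \frac{v_{\text{tail}}}{c_0 - \beta}, \qquad \tau = \overline{\left(\frac{\beta - c_0}{\beta}\right)}, \]

so that the reflection H = I - tau w w* with w = (1, e) maps v onto beta times the first basis vector; the sign choice keeps c0 - beta away from catastrophic cancellation.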
@ -112,6 +112,9 @@ template<typename OtherScalarType, typename MatrixType> struct matrix_type_times
template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
: public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType;

public:
enum {
RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime,
@ -121,13 +124,10 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
typedef typename VectorsType::Index Index;

typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType
EssentialVectorType;

public:

typedef HouseholderSequence<
VectorsType,
typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,
VectorsType>::type,
typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
CoeffsType>::type,
@ -208,7 +208,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
/** \brief Complex conjugate of the Householder sequence. */
ConjugateReturnType conjugate() const
{
return ConjugateReturnType(m_vectors, m_coeffs.conjugate())
return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())
.setTrans(m_trans)
.setLength(m_length)
.setShift(m_shift);
@ -43,8 +43,9 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
VectorType r = rhs - mat * x;
VectorType r0 = r;

RealScalar r0_sqnorm = rhs.squaredNorm();
if(r0_sqnorm == 0)
RealScalar r0_sqnorm = r0.squaredNorm();
RealScalar rhs_sqnorm = rhs.squaredNorm();
if(rhs_sqnorm == 0)
{
x.setZero();
return true;
@ -61,13 +62,22 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,

RealScalar tol2 = tol*tol;
int i = 0;
int restarts = 0;

while ( r.squaredNorm()/r0_sqnorm > tol2 && i<maxIters )
while ( r.squaredNorm()/rhs_sqnorm > tol2 && i<maxIters )
{
Scalar rho_old = rho;

rho = r0.dot(r);
if (rho == Scalar(0)) return false; /* New search directions cannot be found */
if (internal::isMuchSmallerThan(rho,r0_sqnorm))
{
// The new residual vector became too orthogonal to the arbitrarily chosen direction r0
// Let's restart with a new r0:
r0 = r;
rho = r0_sqnorm = r.squaredNorm();
if(restarts++ == 0)
i = 0;
}
Scalar beta = (rho/rho_old) * (alpha / w);
p = r + beta * (p - w * v);

@ -81,12 +91,16 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
z = precond.solve(s);
t.noalias() = mat * z;

w = t.dot(s) / t.squaredNorm();
RealScalar tmp = t.squaredNorm();
if(tmp>RealScalar(0))
w = t.dot(s) / tmp;
else
w = Scalar(0);
x += alpha * y + w * z;
r = s - w * t;
++i;
}
tol_error = sqrt(r.squaredNorm()/r0_sqnorm);
tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
iters = i;
return true;
}
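In summary, the changes above make the solver test convergence against the right-hand side rather than the (restartable) initial residual, and add the standard BiCGSTAB safeguards:

\[ \frac{\|r_k\|^2}{\|b\|^2} \le \text{tol}^2 \ \text{(stop)}, \qquad |\langle r_0, r_k \rangle| \ll \|r_0\|^2 \ \Rightarrow\ r_0 \leftarrow r_k \ \text{(restart)}, \]

plus the guard that sets w = 0 when t = 0, avoiding a division by zero in the stabilization step.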
@ -63,7 +63,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
p = precond.solve(residual); //initial search direction

VectorType z(n), tmp(n);
RealScalar absNew = internal::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
int i = 0;
while(i < maxIters)
{
@ -80,7 +80,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
z = precond.solve(residual); // approximately solve for "A z = residual"

RealScalar absOld = absNew;
absNew = internal::real(residual.dot(z)); // update the absolute value of r
absNew = numext::real(residual.dot(z)); // update the absolute value of r
RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
p = z + beta * p; // update search direction
i++;
@ -150,8 +150,7 @@ class IncompleteLUT : internal::noncopyable
    {
      analyzePattern(amat);
      factorize(amat);
      eigen_assert(m_factorizationIsOk == true);
      m_isInitialized = true;
      m_isInitialized = m_factorizationIsOk;
      return *this;
    }

@ -310,7 +309,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
        jr(k) = jpos;
        ++sizeu;
      }
      rownorm += internal::abs2(j_it.value());
      rownorm += numext::abs2(j_it.value());
    }

    // 2 - detect possible zero row
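With this fix, a failed incomplete-LU factorization leaves the preconditioner flagged as uninitialized instead of asserting. A hedged sketch of IncompleteLUT used as a BiCGSTAB preconditioner (placeholder system; drop tolerance and fill factor are illustrative values):

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

int main()
{
  Eigen::SparseMatrix<double> A(80,80);
  A.setIdentity();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(80);

  // compute() calls analyzePattern() + factorize() internally.
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double>, Eigen::IncompleteLUT<double> > solver;
  solver.preconditioner().setDroptol(1e-4);
  solver.preconditioner().setFillfactor(10);
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);
}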
@ -50,16 +50,16 @@ template<typename Scalar> class JacobiRotation
    /** Concatenates two planar rotation */
    JacobiRotation operator*(const JacobiRotation& other)
    {
      using internal::conj;
      using numext::conj;
      return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s,
                            conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c)));
    }

    /** Returns the transposed transformation */
    JacobiRotation transpose() const { using internal::conj; return JacobiRotation(m_c, -conj(m_s)); }
    JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); }

    /** Returns the adjoint transformation */
    JacobiRotation adjoint() const { using internal::conj; return JacobiRotation(conj(m_c), -m_s); }
    JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }

    template<typename Derived>
    bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
@ -94,7 +94,7 @@ bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, co
  else
  {
    RealScalar tau = (x-z)/(RealScalar(2)*abs(y));
    RealScalar w = sqrt(internal::abs2(tau) + RealScalar(1));
    RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1));
    RealScalar t;
    if(tau>RealScalar(0))
    {
@ -105,8 +105,8 @@ bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, co
      t = RealScalar(1) / (tau - w);
    }
    RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
    RealScalar n = RealScalar(1) / sqrt(internal::abs2(t)+RealScalar(1));
    m_s = - sign_t * (internal::conj(y) / abs(y)) * abs(t) * n;
    RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1));
    m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n;
    m_c = n;
    return true;
  }
@ -125,7 +125,7 @@ template<typename Scalar>
template<typename Derived>
inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
{
  return makeJacobi(internal::real(m.coeff(p,p)), m.coeff(p,q), internal::real(m.coeff(q,q)));
  return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
}

/** Makes \c *this as a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
@ -157,11 +157,11 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
{
  using std::sqrt;
  using std::abs;
  using internal::conj;
  using numext::conj;

  if(q==Scalar(0))
  {
    m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1);
    m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1);
    m_s = 0;
    if(r) *r = m_c * p;
  }
@ -173,17 +173,17 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
  }
  else
  {
    RealScalar p1 = internal::norm1(p);
    RealScalar q1 = internal::norm1(q);
    RealScalar p1 = numext::norm1(p);
    RealScalar q1 = numext::norm1(q);
    if(p1>=q1)
    {
      Scalar ps = p / p1;
      RealScalar p2 = internal::abs2(ps);
      RealScalar p2 = numext::abs2(ps);
      Scalar qs = q / p1;
      RealScalar q2 = internal::abs2(qs);
      RealScalar q2 = numext::abs2(qs);

      RealScalar u = sqrt(RealScalar(1) + q2/p2);
      if(internal::real(p)<RealScalar(0))
      if(numext::real(p)<RealScalar(0))
        u = -u;

      m_c = Scalar(1)/u;
@ -193,12 +193,12 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
    else
    {
      Scalar ps = p / q1;
      RealScalar p2 = internal::abs2(ps);
      RealScalar p2 = numext::abs2(ps);
      Scalar qs = q / q1;
      RealScalar q2 = internal::abs2(qs);
      RealScalar q2 = numext::abs2(qs);

      RealScalar u = q1 * sqrt(p2 + q2);
      if(internal::real(p)<RealScalar(0))
      if(numext::real(p)<RealScalar(0))
        u = -u;

      p1 = abs(p);
@ -231,7 +231,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
  else if(abs(p) > abs(q))
  {
    Scalar t = q/p;
    Scalar u = sqrt(Scalar(1) + internal::abs2(t));
    Scalar u = sqrt(Scalar(1) + numext::abs2(t));
    if(p<Scalar(0))
      u = -u;
    m_c = Scalar(1)/u;
@ -241,7 +241,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
  else
  {
    Scalar t = p/q;
    Scalar u = sqrt(Scalar(1) + internal::abs2(t));
    Scalar u = sqrt(Scalar(1) + numext::abs2(t));
    if(q<Scalar(0))
      u = -u;
    m_s = -Scalar(1)/u;
@ -337,8 +337,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
    {
      Scalar xi = x[i];
      Scalar yi = y[i];
      x[i] =  c * xi + conj(s) * yi;
      y[i] = -s * xi + conj(c) * yi;
      x[i] =  c * xi + numext::conj(s) * yi;
      y[i] = -s * xi + numext::conj(c) * yi;
    }

    Scalar* EIGEN_RESTRICT px = x + alignedStart;
@ -385,8 +385,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
    {
      Scalar xi = x[i];
      Scalar yi = y[i];
      x[i] =  c * xi + conj(s) * yi;
      y[i] = -s * xi + conj(c) * yi;
      x[i] =  c * xi + numext::conj(s) * yi;
      y[i] = -s * xi + numext::conj(c) * yi;
    }
  }

@ -418,8 +418,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
    {
      Scalar xi = *x;
      Scalar yi = *y;
      *x =  c * xi + conj(s) * yi;
      *y = -s * xi + conj(c) * yi;
      *x =  c * xi + numext::conj(s) * yi;
      *y = -s * xi + numext::conj(c) * yi;
      x += incrx;
      y += incry;
    }
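All of the Jacobi hunks move the scalar helpers (conj, real, abs2, norm1) from the internal:: to the public numext:: namespace without touching the math. A minimal sketch of the makeGivens/applyOnTheLeft API these kernels serve:

#include <Eigen/Core>
#include <Eigen/Jacobi>

int main()
{
  Eigen::Vector2d v(3.0, 4.0);
  Eigen::JacobiRotation<double> G;
  G.makeGivens(v.x(), v.y());          // choose c,s so that G^* zeroes the second entry
  v.applyOnTheLeft(0, 1, G.adjoint());
  // v is now approximately (5, 0): the norm was packed into the first entry
  return v.y() < 1e-12 ? 0 : 1;
}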
@ -348,7 +348,7 @@ inline const internal::inverse_impl<Derived> MatrixBase<Derived>::inverse() cons
  * This is only for fixed-size square matrices of size up to 4x4.
  *
  * \param inverse Reference to the matrix in which to store the inverse.
  * \param determinant Reference to the variable in which to store the inverse.
  * \param determinant Reference to the variable in which to store the determinant.
  * \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
  * \param absDeterminantThreshold Optional parameter controlling the invertibility check.
  *                                The matrix will be declared invertible if the absolute value of its
@ -134,4 +134,4 @@ public:
};

}// end namespace eigen
#endif
#endif
@ -55,7 +55,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
    typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
    typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;
    typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;

  private:

@ -94,6 +94,18 @@ template<typename _MatrixType> class ColPivHouseholderQR
        m_isInitialized(false),
        m_usePrescribedThreshold(false) {}

    /** \brief Constructs a QR factorization from a given matrix
      *
      * This constructor computes the QR factorization of the matrix \a matrix by calling
      * the method compute(). It is a short cut for:
      *
      * \code
      * ColPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
      * qr.compute(matrix);
      * \endcode
      *
      * \sa compute()
      */
    ColPivHouseholderQR(const MatrixType& matrix)
      : m_qr(matrix.rows(), matrix.cols()),
        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
@ -163,6 +175,7 @@ template<typename _MatrixType> class ColPivHouseholderQR

    ColPivHouseholderQR& compute(const MatrixType& matrix);

    /** \returns a const reference to the column permutation matrix */
    const PermutationType& colsPermutation() const
    {
      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
@ -281,6 +294,11 @@ template<typename _MatrixType> class ColPivHouseholderQR

    inline Index rows() const { return m_qr.rows(); }
    inline Index cols() const { return m_qr.cols(); }

    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
      *
      * For advanced uses only.
      */
    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }

    /** Allows to prescribe a threshold to be used by certain methods, such as rank(),
@ -394,6 +412,12 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
  return m_qr.diagonal().cwiseAbs().array().log().sum();
}

/** Performs the QR factorization of the given matrix \a matrix. The result of
  * the factorization is stored into \c *this, and a reference to \c *this
  * is returned.
  *
  * \sa class ColPivHouseholderQR, ColPivHouseholderQR(const MatrixType&)
  */
template<typename MatrixType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
@ -417,7 +441,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
  for(Index k = 0; k < cols; ++k)
    m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm();

  RealScalar threshold_helper = m_colSqNorms.maxCoeff() * internal::abs2(NumTraits<Scalar>::epsilon()) / RealScalar(rows);
  RealScalar threshold_helper = m_colSqNorms.maxCoeff() * numext::abs2(NumTraits<Scalar>::epsilon()) / RealScalar(rows);

  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
  m_maxpivot = RealScalar(0);
@ -501,8 +525,8 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
  {
    eigen_assert(rhs().rows() == dec().rows());

    const int cols = dec().cols(),
    nonzero_pivots = dec().nonzeroPivots();
    const Index cols = dec().cols(),
    nonzero_pivots = dec().nonzeroPivots();

    if(nonzero_pivots == 0)
    {
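The new one-argument constructor documented above factors immediately, so the two-step pattern in the \code block becomes optional. A short usage sketch of the rank-revealing decomposition (placeholder data):

#include <Eigen/QR>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);

  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);  // factorizes on construction
  std::cout << "rank: " << qr.rank() << std::endl;
  Eigen::VectorXd x = qr.solve(b);  // least-squares solution via the pivoted QR
}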
@ -100,6 +100,18 @@ template<typename _MatrixType> class FullPivHouseholderQR
        m_isInitialized(false),
        m_usePrescribedThreshold(false) {}

    /** \brief Constructs a QR factorization from a given matrix
      *
      * This constructor computes the QR factorization of the matrix \a matrix by calling
      * the method compute(). It is a short cut for:
      *
      * \code
      * FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
      * qr.compute(matrix);
      * \endcode
      *
      * \sa compute()
      */
    FullPivHouseholderQR(const MatrixType& matrix)
      : m_qr(matrix.rows(), matrix.cols()),
        m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
@ -152,12 +164,14 @@ template<typename _MatrixType> class FullPivHouseholderQR

    FullPivHouseholderQR& compute(const MatrixType& matrix);

    /** \returns a const reference to the column permutation matrix */
    const PermutationType& colsPermutation() const
    {
      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
      return m_cols_permutation;
    }

    /** \returns a const reference to the vector of indices representing the rows transpositions */
    const IntColVectorType& rowsTranspositions() const
    {
      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
@ -275,6 +289,11 @@ template<typename _MatrixType> class FullPivHouseholderQR

    inline Index rows() const { return m_qr.rows(); }
    inline Index cols() const { return m_qr.cols(); }

    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
      *
      * For advanced uses only.
      */
    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }

    /** Allows to prescribe a threshold to be used by certain methods, such as rank(),
@ -377,6 +396,12 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
  return m_qr.diagonal().cwiseAbs().array().log().sum();
}

/** Performs the QR factorization of the given matrix \a matrix. The result of
  * the factorization is stored into \c *this, and a reference to \c *this
  * is returned.
  *
  * \sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&)
  */
template<typename MatrixType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
@ -547,7 +572,7 @@ public:
  template <typename ResultType>
  void evalTo(ResultType& result, WorkVectorType& workspace) const
  {
    using internal::conj;
    using numext::conj;
    // compute the product H'_0 H'_1 ... H'_n-1,
    // where H_k is the k-th Householder transformation I - h_k v_k v_k'
    // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
@ -57,14 +57,14 @@ template<typename _MatrixType> class HouseholderQR
    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
    typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;

    /**
      * \brief Default Constructor.
      *
      * The default constructor is useful in cases in which the user intends to
      * perform decompositions via HouseholderQR::compute(const MatrixType&).
      */
      * \brief Default Constructor.
      *
      * The default constructor is useful in cases in which the user intends to
      * perform decompositions via HouseholderQR::compute(const MatrixType&).
      */
    HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}

    /** \brief Default Constructor with memory preallocation
@ -79,6 +79,18 @@ template<typename _MatrixType> class HouseholderQR
        m_temp(cols),
        m_isInitialized(false) {}

    /** \brief Constructs a QR factorization from a given matrix
      *
      * This constructor computes the QR factorization of the matrix \a matrix by calling
      * the method compute(). It is a short cut for:
      *
      * \code
      * HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
      * qr.compute(matrix);
      * \endcode
      *
      * \sa compute()
      */
    HouseholderQR(const MatrixType& matrix)
      : m_qr(matrix.rows(), matrix.cols()),
        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
@ -169,6 +181,11 @@ template<typename _MatrixType> class HouseholderQR

    inline Index rows() const { return m_qr.rows(); }
    inline Index cols() const { return m_qr.cols(); }

    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
      *
      * For advanced uses only.
      */
    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }

  protected:
@ -317,6 +334,12 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>

} // end namespace internal

/** Performs the QR factorization of the given matrix \a matrix. The result of
  * the factorization is stored into \c *this, and a reference to \c *this
  * is returned.
  *
  * \sa class HouseholderQR, HouseholderQR(const MatrixType&)
  */
template<typename MatrixType>
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
@ -83,10 +83,12 @@ class SPQR
    ~SPQR()
    {
      // Calls SuiteSparseQR_free()
      cholmod_free_sparse(&m_H, &m_cc);
      cholmod_free_dense(&m_HTau, &m_cc);
      delete[] m_E;
      delete[] m_HPinv;
      cholmod_l_free_sparse(&m_H, &m_cc);
      cholmod_l_free_sparse(&m_cR, &m_cc);
      cholmod_l_free_dense(&m_HTau, &m_cc);
      std::free(m_E);
      std::free(m_HPinv);
      cholmod_l_finish(&m_cc);
    }
    void compute(const _MatrixType& matrix)
    {
@ -244,7 +246,7 @@ struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType,Derived> >
      y_cd = viewAsCholmod(m_other.const_cast_derived());
      x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc);
      res = Matrix<Scalar,ResType::RowsAtCompileTime,ResType::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol);
      cholmod_free_dense(&x_cd, cc);
      cholmod_l_free_dense(&x_cd, cc);
    }
    const SPQRType& m_spqr;
    const Derived& m_other;
@ -301,4 +303,4 @@ struct solve_retval<SPQR<_MatrixType>, Rhs>
} // end namespace internal

}// End namespace Eigen
#endif
#endif
@ -374,7 +374,7 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
  using std::sqrt;
  Scalar z;
  JacobiRotation<Scalar> rot;
  RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p)));
  RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
  if(n==0)
  {
    z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
@ -413,8 +413,8 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
{
  using std::sqrt;
  Matrix<RealScalar,2,2> m;
  m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)),
       real(matrix.coeff(q,p)), real(matrix.coeff(q,q));
  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
  JacobiRotation<RealScalar> rot1;
  RealScalar t = m.coeff(0,0) + m.coeff(1,1);
  RealScalar d = m.coeff(1,0) - m.coeff(0,1);
@ -426,7 +426,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
  else
  {
    RealScalar u = d / t;
    rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + abs2(u));
    rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
    rot1.s() = rot1.c() * u;
  }
  m.applyOnTheLeft(0,1,rot1);
@ -850,17 +850,12 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
    // A = U S V^*
    // So A^{-1} = V S^{-1} U^*

    Index diagSize = (std::min)(dec().rows(), dec().cols());
    typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);

    Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
    Index nonzeroSingVals = dec().nonzeroSingularValues();
    invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
    invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();

    dst = dec().matrixV().leftCols(diagSize)
        * invertedSingVals.asDiagonal()
        * dec().matrixU().leftCols(diagSize).adjoint()
        * rhs();

    tmp.noalias() = dec().matrixU().leftCols(nonzeroSingVals).adjoint() * rhs();
    tmp = dec().singularValues().head(nonzeroSingVals).asDiagonal().inverse() * tmp;
    dst = dec().matrixV().leftCols(nonzeroSingVals) * tmp;
  }
};
} // end namespace internal
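The solve_retval rewrite above applies U^*, the inverted singular values, and V as three cheap products instead of materializing the pseudo-inverse. A minimal least-squares sketch using the public API that ends up in this code path (placeholder data):

#include <Eigen/SVD>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(8, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(8);

  // Thin U and V are enough for solving; solve() minimizes |A x - b|.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXd x = svd.solve(b);
}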
@ -39,7 +39,7 @@ template<typename _MatrixType> class UpperBidiagonalization
      CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Diagonal<const MatrixType,0> >
    > HouseholderUSequenceType;
    typedef HouseholderSequence<
      const MatrixType,
      const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
      Diagonal<const MatrixType,1>,
      OnTheRight
    > HouseholderVSequenceType;
@ -74,7 +74,7 @@ template<typename _MatrixType> class UpperBidiagonalization
    const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
    {
      eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
      return HouseholderVSequenceType(m_householder, m_householder.const_derived().template diagonal<1>())
      return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
             .setLength(m_householder.cols()-1)
             .setShift(1);
    }
@ -364,7 +364,7 @@ public:
    Scalar determinant() const
    {
      Scalar detL = Base::m_matrix.diagonal().prod();
      return internal::abs2(detL);
      return numext::abs2(detL);
    }
};

@ -599,7 +599,7 @@ public:
    else
    {
      Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
      return internal::abs2(detL);
      return numext::abs2(detL);
    }
  }
@ -131,7 +131,7 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
      Index i = it.index();
      if(i <= k)
      {
        y[i] += internal::conj(it.value());            /* scatter A(i,k) into Y (sum duplicates) */
        y[i] += numext::conj(it.value());              /* scatter A(i,k) into Y (sum duplicates) */
        Index len;
        for(len = 0; tags[i] != k; i = m_parent[i])
        {
@ -145,7 +145,7 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&

    /* compute numerical values kth row of L (a sparse triangular solve) */

    RealScalar d = internal::real(y[k]) * m_shiftScale + m_shiftOffset;    // get D(k,k), apply the shift function, and clear Y(k)
    RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset;      // get D(k,k), apply the shift function, and clear Y(k)
    y[k] = 0.0;
    for(; top < size; ++top)
    {
@ -163,8 +163,8 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
      Index p2 = Lp[i] + m_nonZerosPerCol[i];
      Index p;
      for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
        y[Li[p]] -= internal::conj(Lx[p]) * yi;
      d -= internal::real(l_ki * internal::conj(yi));
        y[Li[p]] -= numext::conj(Lx[p]) * yi;
      d -= numext::real(l_ki * numext::conj(yi));
      Li[p] = k;                          /* store L(k,i) in column form of L */
      Lx[p] = l_ki;
      ++m_nonZerosPerCol[i];              /* increment count of nonzeros in col i */
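The up-looking factorization above is the shared kernel behind the simplicial Cholesky solvers, and the determinant hunks feed their determinant() accessors. A hedged usage sketch (placeholder SPD system; identity stands in for a real matrix):

#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>

int main()
{
  Eigen::SparseMatrix<double> A(10,10);
  A.setIdentity();
  Eigen::VectorXd b = Eigen::VectorXd::Ones(10);

  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > ldlt(A);
  Eigen::VectorXd x = ldlt.solve(b);
  double d = ldlt.determinant();   // derived from the factor computed above
  return (d > 0 && x.size() == 10) ? 0 : 1;
}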
@ -27,6 +27,7 @@ public:

    class InnerIterator: public XprType::InnerIterator
    {
        typedef typename BlockImpl::Index Index;
      public:
        inline InnerIterator(const BlockType& xpr, Index outer)
          : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
@ -38,6 +39,7 @@ public:
    };
    class ReverseInnerIterator: public XprType::ReverseInnerIterator
    {
        typedef typename BlockImpl::Index Index;
      public:
        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
          : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
@ -73,7 +73,7 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
    typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
      BinaryOp,Lhs,Rhs, InnerIterator> Base;

    EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename CwiseBinaryOpImpl::Index outer)
    EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
      : Base(binOp.derived(),outer)
    {}
};
@ -300,7 +300,7 @@ template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
{
  return *this = derived() - other.derived();
  return derived() = derived() - other.derived();
}

template<typename Derived>
@ -308,7 +308,7 @@ template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
{
  return *this = derived() + other.derived();
  return derived() = derived() + other.derived();
}

template<typename Derived>
@ -111,6 +111,7 @@ template<typename Lhs, typename Rhs, bool Transpose>
class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
{
    typedef typename _LhsNested::InnerIterator Base;
    typedef typename SparseDenseOuterProduct::Index Index;
  public:
    EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
      : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
@ -78,7 +78,11 @@ class SparseDiagonalProduct
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)

    typedef internal::sparse_diagonal_product_inner_iterator_selector
            <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
            <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;

    // We do not want ReverseInnerIterator for diagonal-sparse products,
    // but this dummy declaration is needed to make diag * sparse * diag compile.
    class ReverseInnerIterator;

    EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
      : m_lhs(lhs), m_rhs(rhs)
@ -118,13 +122,13 @@ class sparse_diagonal_product_inner_iterator_selector
   <Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
  : public CwiseBinaryOp<
      scalar_product_op<typename Lhs::Scalar>,
      typename Rhs::ConstInnerVectorReturnType,
      typename Lhs::DiagonalVectorType>::InnerIterator
      const typename Rhs::ConstInnerVectorReturnType,
      const typename Lhs::DiagonalVectorType>::InnerIterator
{
    typedef typename CwiseBinaryOp<
      scalar_product_op<typename Lhs::Scalar>,
      typename Rhs::ConstInnerVectorReturnType,
      typename Lhs::DiagonalVectorType>::InnerIterator Base;
      const typename Rhs::ConstInnerVectorReturnType,
      const typename Lhs::DiagonalVectorType>::InnerIterator Base;
    typedef typename Lhs::Index Index;
    Index m_outer;
  public:
@ -156,13 +160,13 @@ class sparse_diagonal_product_inner_iterator_selector
   <Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
  : public CwiseBinaryOp<
      scalar_product_op<typename Rhs::Scalar>,
      typename Lhs::ConstInnerVectorReturnType,
      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
      const typename Lhs::ConstInnerVectorReturnType,
      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
{
    typedef typename CwiseBinaryOp<
      scalar_product_op<typename Rhs::Scalar>,
      typename Lhs::ConstInnerVectorReturnType,
      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
      const typename Lhs::ConstInnerVectorReturnType,
      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
    typedef typename Lhs::Index Index;
    Index m_outer;
  public:
@ -30,7 +30,7 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
  Scalar res(0);
  while (i)
  {
    res += internal::conj(i.value()) * other.coeff(i.index());
    res += numext::conj(i.value()) * other.coeff(i.index());
    ++i;
  }
  return res;
@ -64,7 +64,7 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
  {
    if (i.index()==j.index())
    {
      res += internal::conj(i.value()) * j.value();
      res += numext::conj(i.value()) * j.value();
      ++i; ++j;
    }
    else if (i.index()<j.index())
@ -79,7 +79,7 @@ template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::squaredNorm() const
{
  return internal::real((*this).cwiseAbs2().sum());
  return numext::real((*this).cwiseAbs2().sum());
}

template<typename Derived>
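The sparse-sparse dot product above walks the two sorted index streams in lockstep and multiplies only where indices coincide. A small sketch of what that means in practice:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseVector<double> a(5), b(5);
  a.insert(1) = 2.0;  a.insert(3) = -1.0;
  b.insert(3) = 4.0;  b.insert(4) = 7.0;
  // only index 3 overlaps, so the dot product is (-1)*4 = -4
  double d = a.dot(b);
  return d == -4.0 ? 0 : 1;
}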
@ -31,7 +31,7 @@ namespace Eigen {
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
  *                  is RowMajor. The default is 0 which means column-major.
  *                  is ColMajor or RowMajor. The default is 0 which means column-major.
  * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
@ -170,6 +170,8 @@ class SparseMatrix
      * This function returns Scalar(0) if the element is an explicit \em zero */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
@ -186,6 +188,8 @@ class SparseMatrix
      */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

@ -215,6 +219,8 @@ class SparseMatrix
      */
    Scalar& insert(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      if(isCompressed())
      {
        reserve(VectorXi::Constant(outerSize(), 2));
@ -281,7 +287,6 @@ class SparseMatrix
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {

      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
@ -526,59 +531,63 @@ class SparseMatrix
      */
    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is null, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      Index newInnerSize = IsRowMajor ? cols : rows;
      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      Index newInnerSize = IsRowMajor ? cols : rows;

      // Deals with inner non zeros
      if (m_innerNonZeros)
      // Deals with inner non zeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // Change the m_innerNonZeros in case of a decrease of inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          // Resize m_innerNonZeros
          Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
          if (!newInnerNonZeros) internal::throw_std_bad_alloc();
          m_innerNonZeros = newInnerNonZeros;
          Index &n = m_innerNonZeros[i];
          Index start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate outer index structure if necessary
      if (outerChange == 0)
        return;

          for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
            m_innerNonZeros[i] = 0;
        }
        else if (innerChange < 0)
        {
          // Inner size decreased: allocate a new m_innerNonZeros
          m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
          if (!m_innerNonZeros) internal::throw_std_bad_alloc();
          for(Index i = 0; i < m_outerSize; i++)
            m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        }

        // Change the m_innerNonZeros in case of a decrease of inner size
        if (m_innerNonZeros && innerChange < 0) {
          for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          {
            Index &n = m_innerNonZeros[i];
            Index start = m_outerIndex[i];
            while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
          }
        }

        m_innerSize = newInnerSize;

        // Re-allocate outer index structure if necessary
        if (outerChange == 0)
          return;

        Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();
        m_outerIndex = newOuterIndex;
        if (outerChange > 0) {
          Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
          for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
            m_outerIndex[i] = last;
        }
        m_outerSize += outerChange;

      Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }
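The rewritten conservativeResize keeps existing nonzeros when growing and trims entries that fall outside the new inner size when shrinking. A sketch of the resulting semantics (placeholder matrix):

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> m(4,4);   // column-major: inner = rows, outer = cols
  m.insert(1,1) = 3.0;
  m.insert(3,3) = 5.0;
  m.conservativeResize(6,6);  // grow: existing nonzeros preserved, new outer vectors empty
  m.conservativeResize(6,2);  // shrink outer dimension: entries in dropped columns go away
  // only the (1,1) entry should remain at this point
  return m.nonZeros() == 1 ? 0 : 1;
}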
    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
@ -637,9 +646,20 @@ class SparseMatrix
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      *this = other.derived();
    }

    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other;
    }

    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
@ -671,13 +691,22 @@ class SparseMatrix
      m_data.swap(other.m_data);
    }

    /** Sets *this to the identity matrix */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
      Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
      Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
    }
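The new setIdentity() fills the three compressed-storage arrays directly: inner indices 0..n-1, unit values, and outer offsets 0..n. A one-liner sketch of its use:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> I(5,5);
  I.setIdentity();                 // one nonzero per row/column
  return I.nonZeros() == 5 ? 0 : 1;
}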
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      else if(this!=&other)
      {
        initAssignment(other);
        if(other.isCompressed())
@ -822,6 +851,7 @@ private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    struct default_prunning_func {
@ -911,19 +941,25 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
  typedef typename SparseMatrixType::Scalar Scalar;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());

  // pass 1: count the nnz per inner-vector
  VectorXi wi(trMat.outerSize());
  wi.setZero();
  for(InputIterator it(begin); it!=end; ++it)
    wi(IsRowMajor ? it->col() : it->row())++;
  if(begin<end)
  {
    // pass 1: count the nnz per inner-vector
    VectorXi wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

  // pass 2: insert all the elements into trMat
  trMat.reserve(wi);
  for(InputIterator it(begin); it!=end; ++it)
    trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

  // pass 3:
  trMat.sumupDuplicates();
    // pass 3:
    trMat.sumupDuplicates();
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
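The hardened set_from_triplets now tolerates an empty range and asserts on out-of-bounds coordinates; duplicates are still summed in pass 3. The public entry point is setFromTriplets():

#include <Eigen/Sparse>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.push_back(T(0,0, 1.0));
  triplets.push_back(T(2,1, 4.0));
  triplets.push_back(T(2,1, 1.0));   // duplicate entries are summed

  Eigen::SparseMatrix<double> m(3,3);
  m.setFromTriplets(triplets.begin(), triplets.end());
  return m.coeff(2,1) == 5.0 ? 0 : 1;
}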
@ -1020,6 +1056,9 @@ template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
  if (needToTranspose)
  {
@ -89,6 +89,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
      */

      IsRowMajor = Flags&RowMajorBit ? 1 : 0,

      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),

      #ifndef EIGEN_PARSED_BY_DOXYGEN
      _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
@ -322,8 +325,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
                      typename internal::traits<OtherDerived>::Scalar \
                    >::ReturnType \
                  >, \
                  Derived, \
                  OtherDerived \
                  const Derived, \
                  const OtherDerived \
                >

    template<typename OtherDerived>
@ -403,20 +406,20 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
    Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
    const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;

    /** \internal use operator= */
    template<typename DenseDerived>
    void evalTo(MatrixBase<DenseDerived>& dst) const
    {
      dst.setZero();
      for (Index j=0; j<outerSize(); ++j)
        for (typename Derived::InnerIterator i(derived(),j); i; ++i)
          dst.coeffRef(i.row(),i.col()) = i.value();
    }
    /** \internal use operator= */
    template<typename DenseDerived>
    void evalTo(MatrixBase<DenseDerived>& dst) const
    {
      dst.setZero();
      for (Index j=0; j<outerSize(); ++j)
        for (typename Derived::InnerIterator i(derived(),j); i; ++i)
          dst.coeffRef(i.row(),i.col()) = i.value();
    }

    Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
    {
      return derived();
    }
    Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
    {
      return derived();
    }

    template<typename OtherDerived>
    bool isApprox(const SparseMatrixBase<OtherDerived>& other,
@ -69,6 +69,30 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
      *
      * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived>
    SparseSparseProduct<SparseMatrix<Scalar, ((internal::traits<OtherDerived>::Flags&RowMajorBit) ? RowMajor : ColMajor),Index>, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return SparseSparseProduct<SparseMatrix<Scalar, (internal::traits<OtherDerived>::Flags&RowMajorBit) ? RowMajor : ColMajor, Index>, OtherDerived>(*this, rhs.derived());
    }

    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
      *
      * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived> friend
    SparseSparseProduct<OtherDerived, SparseMatrix<Scalar, ((internal::traits<OtherDerived>::Flags&RowMajorBit) ? RowMajor : ColMajor),Index> >
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return SparseSparseProduct< OtherDerived, SparseMatrix<Scalar, (internal::traits<OtherDerived>::Flags&RowMajorBit) ? RowMajor : ColMajor, Index> >(lhs.derived(), rhs.derived());
    }

    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
    template<typename OtherDerived>
    SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
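The two operators added above enable sparse selfadjoint times sparse products; as the doc notes, the view is first expanded into a temporary full SparseMatrix. A short sketch of the new call site (placeholder matrices):

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(4,4), B(4,4), C;
  A.setIdentity();
  B.setIdentity();
  // only the lower triangle of A is read by the view
  C = A.selfadjointView<Eigen::Lower>() * B;
  return C.nonZeros() == 4 ? 0 : 1;
}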
@ -240,7 +264,7 @@ class SparseSelfAdjointTimeDenseProduct
        Index b = LhsIsRowMajor ? i.index() : j;
        typename Lhs::Scalar v = i.value();
        dest.row(a) += (v) * m_rhs.row(b);
        dest.row(b) += internal::conj(v) * m_rhs.row(a);
        dest.row(b) += numext::conj(v) * m_rhs.row(a);
      }
      if (ProcessFirstHalf && i && (i.index()==j))
        dest.row(j) += i.value() * m_rhs.row(j);
@ -367,7 +391,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = internal::conj(it.value());
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
@ -428,7 +452,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = conj(it.value());
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
@ -461,7 +485,10 @@ class SparseSymmetricPermutationProduct
    template<typename DestScalar, int Options, typename DstIndex>
    void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
    {
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
//    internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
      SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
      _dest = tmp;
    }

    template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
@ -34,26 +34,28 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerItera
  : public _MatrixTypeNested::InnerIterator
{
    typedef typename _MatrixTypeNested::InnerIterator Base;
    typedef typename TransposeImpl::Index Index;
  public:

    EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
      : Base(trans.derived().nestedExpression(), outer)
    {}
    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
    Index row() const { return Base::col(); }
    Index col() const { return Base::row(); }
};

template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
  : public _MatrixTypeNested::ReverseInnerIterator
{
    typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
    typedef typename TransposeImpl::Index Index;
  public:

    EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
      : Base(xpr.derived().nestedExpression(), outer)
    {}
    inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
    inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
    Index row() const { return Base::col(); }
    Index col() const { return Base::row(); }
};

} // end namespace Eigen
@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@ -27,6 +28,7 @@ template<typename MatrixType, int Mode> class SparseTriangularView
    enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
                    || ((Mode&Upper) &&  (MatrixType::Flags&RowMajorBit)),
           SkipLast = !SkipFirst,
           SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
           HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
    };

@ -64,6 +66,7 @@ template<typename MatrixType, int Mode>
class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
{
    typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
    typedef typename SparseTriangularView::Index Index;
  public:

    EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
@ -71,7 +74,7 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
    {
      if(SkipFirst)
      {
        while((*this) && (HasUnitDiag ? this->index()<=outer : this->index()<outer))
        while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
          Base::operator++();
        if(HasUnitDiag)
          m_returnOne = true;
@ -101,8 +104,8 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
      return *this;
    }

    inline Index row() const { return Base::row(); }
    inline Index col() const { return Base::col(); }
    inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
    inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
    inline Index index() const
    {
      if(HasUnitDiag && m_returnOne) return Base::outer();
@ -118,7 +121,12 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
    {
      if(HasUnitDiag && m_returnOne)
        return true;
      return (SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer()));
      if(SkipFirst) return Base::operator bool();
      else
      {
        if (SkipDiag) return (Base::operator bool() && this->index() <  this->outer());
        else          return (Base::operator bool() && this->index() <= this->outer());
      }
    }
  protected:
    bool m_returnOne;
@ -128,18 +136,20 @@ template<typename MatrixType, int Mode>
class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
{
    typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
    typedef typename SparseTriangularView::Index Index;
  public:

    EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
      : Base(view.nestedExpression(), outer)
    {
      eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal");
      if(SkipLast)
        while((*this) && this->index()>outer)
      if(SkipLast) {
        while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer))
          --(*this);
      }
    }

    EIGEN_STRONG_INLINE InnerIterator& operator--()
    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
    { Base::operator--(); return *this; }

    inline Index row() const { return Base::row(); }
@ -147,7 +157,12 @@ class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public Matri

    EIGEN_STRONG_INLINE operator bool() const
    {
      return SkipLast ? Base::operator bool() : (Base::operator bool() && this->index() >= this->outer());
      if (SkipLast) return Base::operator bool() ;
      else
      {
        if(SkipDiag) return (Base::operator bool() && this->index() >  this->outer());
        else         return (Base::operator bool() && this->index() >= this->outer());
      }
    }
};
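The new SkipDiag flag makes the iterators honor StrictlyLower/StrictlyUpper (ZeroDiag) modes. A hedged sketch of the triangular-view API these iterators back (placeholder lower-triangular system; solveInPlace overwrites the right-hand side):

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 2.0;
  A.insert(1,0) = 1.0;  A.insert(1,1) = 2.0;
  A.insert(2,2) = 2.0;
  Eigen::VectorXd b = Eigen::VectorXd::Ones(3);

  A.triangularView<Eigen::Lower>().solveInPlace(b);  // forward substitution over the view
  return b.size() == 3 ? 0 : 1;
}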
@ -98,16 +98,16 @@ template<typename T> struct eval<T,Sparse>

template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
    typedef typename traits<T>::Scalar _Scalar;
    enum { _Flags = traits<T>::Flags| RowMajorBit };
    typedef typename traits<T>::Index _Index;
  public:
    typedef SparseVector<_Scalar, _Flags> type;
    typedef SparseVector<_Scalar, RowMajor, _Index> type;
};

template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
    typedef typename traits<T>::Scalar _Scalar;
    enum { _Flags = traits<T>::Flags & (~RowMajorBit) };
    typedef typename traits<T>::Index _Index;
  public:
    typedef SparseVector<_Scalar, _Flags> type;
    typedef SparseVector<_Scalar, ColMajor, _Index> type;
};

template<typename T,int Rows,int Cols> struct sparse_eval {
@ -127,12 +127,10 @@ template<typename T> struct sparse_eval<T,1,1> {
template<typename T> struct plain_matrix_type<T,Sparse>
{
  typedef typename traits<T>::Scalar _Scalar;
  enum {
        _Flags = traits<T>::Flags
  };

  typedef typename traits<T>::Index _Index;
  enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
  public:
    typedef SparseMatrix<_Scalar, _Flags> type;
    typedef SparseMatrix<_Scalar, _Options, _Index> type;
};

} // end namespace internal
@ -45,6 +45,20 @@ struct traits<SparseVector<_Scalar, _Options, _Index> >
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

// Sparse-Vector-Assignment kinds:
enum {
  SVA_RuntimeSwitch,
  SVA_Inner,
  SVA_Outer
};

template< typename Dest, typename Src,
          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
                             : Src::InnerSizeAtCompileTime==1 ? SVA_Outer
                             : SVA_Inner>
struct sparse_vector_assign_selector;

}

template<typename _Scalar, int _Options, typename _Index>
@ -83,14 +97,18 @@ class SparseVector

    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert((IsColVector ? col : row)==0);
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeff(IsColVector ? row : col);
    }
    inline Scalar coeff(Index i) const { return m_data.at(i); }
    inline Scalar coeff(Index i) const
    {
      eigen_assert(i>=0 && i<m_size);
      return m_data.at(i);
    }

    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert((IsColVector ? col : row)==0);
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeff(IsColVector ? row : col);
    }

@ -102,6 +120,7 @@ class SparseVector
      */
    inline Scalar& coeffRef(Index i)
    {
      eigen_assert(i>=0 && i<m_size);
      return m_data.atWithInsertion(i);
    }

@ -135,6 +154,8 @@ class SparseVector

    inline Scalar& insert(Index row, Index col)
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));

      Index inner = IsColVector ? row : col;
      Index outer = IsColVector ? col : row;
      eigen_assert(outer==0);
@ -142,6 +163,8 @@ class SparseVector
    }
    Scalar& insert(Index i)
    {
      eigen_assert(i>=0 && i<m_size);

      Index startId = 0;
      Index p = Index(m_data.size()) - 1;
      // TODO smart realloc
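The new asserts above make every SparseVector accessor reject out-of-range indices up front. A minimal sketch of the accessors they guard:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseVector<double> v(10);
  v.insert(4) = 1.5;       // inserts a new nonzero; index checked by the new assert
  v.coeffRef(4) += 0.5;    // atWithInsertion: creates the entry if absent
  return v.coeff(4) == 2.0 ? 0 : 1;
}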
@ -184,22 +207,24 @@ class SparseVector
|
||||
|
||||
void resizeNonZeros(Index size) { m_data.resize(size); }
|
||||
|
||||
inline SparseVector() : m_size(0) { resize(0); }
|
||||
inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
|
||||
|
||||
inline SparseVector(Index size) : m_size(0) { resize(size); }
|
||||
inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
|
||||
|
||||
inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
|
||||
inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
|
||||
|
||||
template<typename OtherDerived>
|
||||
inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
|
||||
: m_size(0)
|
||||
{
|
||||
check_template_parameters();
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
inline SparseVector(const SparseVector& other)
|
||||
: SparseBase(other), m_size(0)
|
||||
{
|
||||
check_template_parameters();
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
@ -230,11 +255,10 @@ class SparseVector
template<typename OtherDerived>
inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
{
if ( (bool(OtherDerived::IsVectorAtCompileTime) && int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
|| ((!bool(OtherDerived::IsVectorAtCompileTime)) && ( bool(IsColVector) ? other.cols()>1 : other.rows()>1 )))
return assign(other.transpose());
else
return assign(other);
SparseVector tmp(other.size());
internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
this->swap(tmp);
return *this;
}

#ifndef EIGEN_PARSED_BY_DOXYGEN
@ -309,8 +333,12 @@ class SparseVector
# endif

protected:
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other);

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}

Storage m_data;
Index m_size;
@ -380,33 +408,40 @@ class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
const Index m_start;
};

template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseVector<Scalar,_Options,_Index>& SparseVector<Scalar,_Options,_Index>::assign(const SparseMatrixBase<OtherDerived>& _other)
{
const OtherDerived& other(_other.derived());
const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
if(needToTranspose)
{
Index size = other.size();
Index nnz = other.nonZeros();
resize(size);
reserve(nnz);
for(Index i=0; i<size; ++i)
namespace internal {

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.innerSize()==src.size());
for(typename Src::InnerIterator it(src, 0); it; ++it)
dst.insert(it.index()) = it.value();
}
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.outerSize()==src.size());
for(typename Dest::Index i=0; i<src.size(); ++i)
{
typename OtherDerived::InnerIterator it(other, i);
typename Src::InnerIterator it(src, i);
if(it)
insert(i) = it.value();
dst.insert(i) = it.value();
}
return *this;
}
else
{
// there is no special optimization
return Base::operator=(other);
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
static void run(Dest& dst, const Src& src) {
if(src.outerSize()==1) sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
else sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
}
};

}
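Editor's note: the following sketch (not part of the patch) illustrates how the selector introduced above is expected to dispatch at compile time; `A`, `v`, and `r` are hypothetical names.

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(4,4);            // column-major by default
  A.insert(1,0) = 2.0;

  Eigen::SparseVector<double> v;                 // column vector
  v = A.col(0);  // compile-time vector, inner size > 1   -> SVA_Inner path

  Eigen::SparseVector<double,Eigen::RowMajor> r; // row vector
  r = A.row(1);  // inner size 1 on a column-major matrix -> SVA_Outer path

  // An expression that is not a vector at compile time goes through
  // SVA_RuntimeSwitch, which picks Inner/Outer from outerSize() at run time.
  v = A.block(0,0,4,1);
  return 0;
}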

} // end namespace Eigen

#endif // EIGEN_SPARSEVECTOR_H

@ -18,7 +18,7 @@ namespace internal {
template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
typedef int Index;
typedef typename MatrixType::Index Index;
typedef Sparse StorageKind;
enum {
Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
@ -56,6 +56,7 @@ protected:
template<typename MatrixType>
class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
{
typedef typename SparseView::Index Index;
public:
typedef typename _MatrixTypeNested::InnerIterator IterBase;
InnerIterator(const SparseView& view, Index outer) :

@ -14,8 +14,10 @@

namespace Eigen {

template <typename _MatrixType, typename _OrderingType> class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> > class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;

/** \ingroup SparseLU_Module
* \class SparseLU
*
@ -61,7 +63,7 @@ template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
* "unsupported/Eigen/src/IterativeSolvers/Scaling.h"
*
* \tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<>
* \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS
* \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. Default is COLAMD
*
*
* \sa \ref TutorialSparseDirectSolvers
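Editor's note: for orientation, a hedged usage sketch (not part of the patch) of the class declared above; `A` and `b` are hypothetical inputs, with `A` a column-major sparse matrix.

#include <Eigen/SparseLU>

Eigen::VectorXd sparselu_example(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
{
  Eigen::SparseLU<Eigen::SparseMatrix<double> > lu; // COLAMD ordering by default, per this patch
  lu.analyzePattern(A); // symbolic step: fill-reducing ordering and elimination tree
  lu.factorize(A);      // numeric step: compute the L and U factors
  if (lu.info() != Eigen::Success)
    return Eigen::VectorXd();       // factorization failed; see lastErrorMessage()
  return lu.solve(b);               // solve A x = b
}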
@ -84,11 +86,11 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
typedef internal::SparseLUImpl<Scalar, Index> Base;

public:
SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
{
initperfvalues();
}
SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
{
initperfvalues();
compute(matrix);
@ -104,9 +106,9 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
void simplicialfactorize(const MatrixType& matrix);

/**
* Compute the symbolic and numeric factorization of the input sparse matrix.
* The input matrix should be in column-major storage.
*/
void compute (const MatrixType& matrix)
{
// Analyze
@ -123,16 +125,43 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
m_symmetricmode = sym;
}

/** Returns an expression of the matrix L, internally stored as supernodes
* For a triangular solve with this matrix, use
* \code
* y = b; matrixL().solveInPlace(y);
* \endcode
*/
/** \returns an expression of the matrix L, internally stored as supernodes
* The only operation available with this expression is the triangular solve
* \code
* y = b; matrixL().solveInPlace(y);
* \endcode
*/
SparseLUMatrixLReturnType<SCMatrix> matrixL() const
{
return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
}
/** \returns an expression of the matrix U,
* The only operation available with this expression is the triangular solve
* \code
* y = b; matrixU().solveInPlace(y);
* \endcode
*/
SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,Index> > matrixU() const
{
return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> >(m_Lstore, m_Ustore);
}

/**
* \returns a reference to the row permutation matrix \f$ P_r \f$ such that \f$P_r A P_c^T = L U\f$
* \sa colsPermutation()
*/
inline const PermutationType& rowsPermutation() const
{
return m_perm_r;
}
/**
* \returns a reference to the column permutation matrix \f$ P_c^T \f$ such that \f$P_r A P_c^T = L U\f$
* \sa rowsPermutation()
*/
inline const PermutationType& colsPermutation() const
{
return m_perm_c;
}
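Editor's note: a hedged sketch (not from the patch) of how the four accessors above combine. It mirrors the relation \f$P_r A P_c^T = L U\f$ and the _solve() implementation further down; `lu` is a hypothetical, already-factorized solver.

#include <Eigen/SparseLU>

Eigen::VectorXd manual_solve(const Eigen::SparseLU<Eigen::SparseMatrix<double> >& lu,
                             const Eigen::VectorXd& b)
{
  Eigen::VectorXd x = lu.rowsPermutation() * b;   // x <- Pr * b
  lu.matrixL().solveInPlace(x);                   // forward substitution with L
  lu.matrixU().solveInPlace(x);                   // backward substitution with U
  return lu.colsPermutation().inverse() * x;      // undo the column permutation
}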
/** Set the threshold used for a diagonal entry to be an acceptable pivot. */
void setPivotThreshold(const RealScalar& thresh)
{
@ -154,7 +183,7 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
}

/** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
*
* \sa compute()
*/
@ -167,7 +196,7 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
}

/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the LU factorization reports a problem, e.g. a zero diagonal
@ -180,13 +209,15 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}

/**
* \returns A string describing the type of error
*/
std::string lastErrorMessage() const
{
return m_lastError;
}

template<typename Rhs, typename Dest>
bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &_X) const
{
@ -195,68 +226,97 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);

Index nrhs = B.cols();
Index n = B.rows();

// Permute the right hand side to form X = Pr*B
// on return, X is overwritten by the computed solution
X.resize(n,nrhs);
for(Index j = 0; j < nrhs; ++j)
X.col(j) = m_perm_r * B.col(j);
X.resize(B.rows(),B.cols());
for(Index j = 0; j < B.cols(); ++j)
X.col(j) = rowsPermutation() * B.col(j);

// Forward substitution with L
// m_Lstore.solveInPlace(X);
this->matrixL().solveInPlace(X);

// Backward solve with U
for (Index k = m_Lstore.nsuper(); k >= 0; k--)
{
Index fsupc = m_Lstore.supToCol()[k];
Index lda = m_Lstore.colIndexPtr()[fsupc+1] - m_Lstore.colIndexPtr()[fsupc]; // leading dimension
Index nsupc = m_Lstore.supToCol()[k+1] - fsupc;
Index luptr = m_Lstore.colIndexPtr()[fsupc];

if (nsupc == 1)
{
for (Index j = 0; j < nrhs; j++)
{
X(fsupc, j) /= m_Lstore.valuePtr()[luptr];
}
}
else
{
Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_Lstore.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<Upper>().solve(U);
}

for (Index j = 0; j < nrhs; ++j)
{
for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
{
typename MappedSparseMatrix<Scalar,ColMajor, Index>::InnerIterator it(m_Ustore, jcol);
for ( ; it; ++it)
{
Index irow = it.index();
X(irow, j) -= X(jcol, j) * it.value();
}
}
}
} // End For U-solve
// Forward substitution with L
this->matrixL().solveInPlace(X);
this->matrixU().solveInPlace(X);

// Permute back the solution
for (Index j = 0; j < nrhs; ++j)
X.col(j) = m_perm_c.inverse() * X.col(j);
for (Index j = 0; j < B.cols(); ++j)
X.col(j) = colsPermutation().inverse() * X.col(j);

return true;
}

/**
* \returns the absolute value of the determinant of the matrix of which
* *this is the LU decomposition.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
* One way to work around that is to use logAbsDeterminant() instead.
*
* \sa logAbsDeterminant(), signDeterminant()
*/
Scalar absDeterminant()
{
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
// Initialize the determinant
Scalar det = Scalar(1.);
// Note that the diagonal blocks of U are stored in supernodes,
// which are available in the L part :)
for (Index j = 0; j < this->cols(); ++j)
{
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
{
if(it.row() < j) continue;
if(it.row() == j)
{
det *= (std::abs)(it.value());
break;
}
}
}
return det;
}

/** \returns the natural log of the absolute value of the determinant of the matrix
* of which *this is the LU decomposition
*
* \note This method is useful to work around the risk of overflow/underflow that's
* inherent to the determinant computation.
*
* \sa absDeterminant(), signDeterminant()
*/
Scalar logAbsDeterminant() const
{
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
Scalar det = Scalar(0.);
for (Index j = 0; j < this->cols(); ++j)
{
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
{
if(it.row() < j) continue;
if(it.row() == j)
{
det += (std::log)((std::abs)(it.value()));
break;
}
}
}
return det;
}

/** \returns A number representing the sign of the determinant
*
* \sa absDeterminant(), logAbsDeterminant()
*/
Scalar signDeterminant()
{
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
return Scalar(m_detPermR);
}
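Editor's note: a hedged sketch (not from the patch) combining the accessors above. Note that, as the code shows, signDeterminant() returns only Scalar(m_detPermR), i.e. the sign contributed by the row permutation, not the full sign of det(A).

#include <cmath>
#include <Eigen/SparseLU>

double abs_det_via_log(Eigen::SparseLU<Eigen::SparseMatrix<double> >& lu)
{
  // |det(A)| = exp(logAbsDeterminant()); prefer the log form when the
  // running product inside absDeterminant() could overflow or underflow.
  return std::exp(lu.logAbsDeterminant());
}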

protected:
// Functions
void initperfvalues()
{
m_perfv.panel_size = 12;
m_perfv.panel_size = 1;
m_perfv.relax = 1;
m_perfv.maxsuper = 128;
m_perfv.rowblk = 16;
@ -285,25 +345,26 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
internal::perfvalues<Index> m_perfv;
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
Index m_nnzL, m_nnzU; // Nonzeros in L and U factors

Index m_detPermR; // Determinant of the row permutation matrix Pr
private:
// Copy constructor
SparseLU (SparseLU& ) {}
// Disable copy constructor
SparseLU (const SparseLU& );

}; // End class SparseLU

// Functions needed by the analysis phase
/**
* Compute the column permutation to minimize the fill-in
*
* - Apply this permutation to the input matrix -
*
* - Compute the column elimination tree on the permuted matrix
*
* - Postorder the elimination tree and the column permutation
*
*/
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
{
@ -319,11 +380,20 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
if (m_perm_c.size()) {
m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
//Then, permute only the column pointers
const Index * outerIndexPtr;
if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr();
else
{
Index *outerIndexPtr_t = new Index[mat.cols()+1];
for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
outerIndexPtr = outerIndexPtr_t;
}
for (Index i = 0; i < mat.cols(); i++)
{
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
}
if(!mat.isCompressed()) delete[] outerIndexPtr;
}
// Compute the column elimination tree of the permuted matrix
IndexVector firstRowElt;
@ -361,23 +431,23 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)


/**
* - Numerical factorization
* - Interleaved with the symbolic factorization
* On exit, info is
*
* = 0: successful factorization
*
* > 0: if info = i, and i is
*
* <= A->ncol: U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* and division by zero will occur if it is used to solve a
* system of equations.
*
* > A->ncol: number of bytes allocated when memory allocation
* failure occurred, plus A->ncol. If lwork = -1, it is
* the estimated amount of space needed, plus A->ncol.
*/
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
@ -395,11 +465,20 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
//Then, permute only the column pointers
const Index * outerIndexPtr;
if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
else
{
Index* outerIndexPtr_t = new Index[matrix.cols()+1];
for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
outerIndexPtr = outerIndexPtr_t;
}
for (Index i = 0; i < matrix.cols(); i++)
{
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = matrix.outerIndexPtr()[i+1] - matrix.outerIndexPtr()[i];
m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
}
if(!matrix.isCompressed()) delete[] outerIndexPtr;
}
else
{ //FIXME This should not be needed if the empty permutation is handled transparently
@ -453,6 +532,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
m_perm_r.resize(m);
m_perm_r.indices().setConstant(-1);
marker.setConstant(-1);
m_detPermR = 1; // Record the determinant of the row permutation

m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
@ -540,6 +620,9 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
return;
}

// Update the determinant of the row permutation matrix
if (pivrow != jj) m_detPermR *= -1;

// Prune columns (0:jj-1) using column jj
Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);

@ -568,7 +651,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
}

template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
{
typedef typename MappedSupernodalType::Index Index;
typedef typename MappedSupernodalType::Scalar Scalar;
@ -584,6 +667,61 @@ struct SparseLUMatrixLReturnType
const MappedSupernodalType& m_mapL;
};

template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
{
typedef typename MatrixLType::Index Index;
typedef typename MatrixLType::Scalar Scalar;
SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
: m_mapL(mapL),m_mapU(mapU)
{ }
Index rows() { return m_mapL.rows(); }
Index cols() { return m_mapL.cols(); }

template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
{
Index nrhs = X.cols();
Index n = X.rows();
// Backward solve with U
for (Index k = m_mapL.nsuper(); k >= 0; k--)
{
Index fsupc = m_mapL.supToCol()[k];
Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
Index luptr = m_mapL.colIndexPtr()[fsupc];

if (nsupc == 1)
{
for (Index j = 0; j < nrhs; j++)
{
X(fsupc, j) /= m_mapL.valuePtr()[luptr];
}
}
else
{
Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<Upper>().solve(U);
}

for (Index j = 0; j < nrhs; ++j)
{
for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
{
typename MatrixUType::InnerIterator it(m_mapU, jcol);
for ( ; it; ++it)
{
Index irow = it.index();
X(irow, j) -= X(jcol, j) * it.value();
}
}
}
} // End For U-solve
}
const MatrixLType& m_mapL;
const MatrixUType& m_mapU;
};

namespace internal {

template<typename _MatrixType, typename Derived, typename Rhs>
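Editor's note: the Map/OuterStride pattern used in solveInPlace() above is a reusable technique on its own. A minimal, self-contained sketch (names illustrative, not from the patch) of viewing a sub-block of a raw column-major array as a matrix without copying:

#include <iostream>
#include <Eigen/Dense>

int main()
{
  double data[12] = {0,1,2,3, 4,5,6,7, 8,9,10,11}; // a 4x3 matrix, column-major
  // View rows 1..2 of all 3 columns as a 2x3 matrix: the outer stride is the
  // leading dimension (4 here), exactly as `lda` is used in the U-solve above.
  Eigen::Map<Eigen::MatrixXd, 0, Eigen::OuterStride<> >
      block(&data[1], 2, 3, Eigen::OuterStride<>(4));
  std::cout << block << "\n"; // prints: 1 5 9 / 2 6 10
  return 0;
}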
@ -70,7 +70,7 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
if(num_expansions == 0 || keep_prev)
new_len = length ; // First time allocate requested
else
new_len = alpha * length ;
new_len = Index(alpha * length);

VectorType old_vec; // Temporary vector to hold the previous values
if (nbElts > 0 )
@ -100,7 +100,7 @@ Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index
do
{
alpha = (alpha + 1)/2;
new_len = alpha * length ;
new_len = Index(alpha * length);
try
{
vec.resize(new_len);
@ -141,7 +141,7 @@ Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lw
Index& num_expansions = glu.num_expansions; //No memory expansions so far
num_expansions = 0;
glu.nzumax = glu.nzlumax = (std::max)(fillratio * annz, m*n); // estimated number of nonzeros in U
glu.nzlmax = (std::max)(1., fillratio/4.) * annz; // estimated nnz in L factor
glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor

// Return the estimated size to the user if necessary
Index tempSpace;
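Editor's note on the nzlmax change just above: the rewrite keeps the estimate in integer (Index) arithmetic instead of mixing in doubles. A spot check, assuming for example fillratio = 6 and annz = 1000: the old form gives max(1.0, 6/4.0) * 1000 = 1500, and the new form gives max(4, 6) * 1000 / 4 = 1500, so the estimated nnz in L is essentially unchanged (up to integer truncation) while the implicit double-to-Index conversion disappears.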