Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-08-14 20:56:00 +08:00)

Fix cuda device warnings

This commit is contained in:
parent c53002f5fb
commit 23b1682723
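The diff below silences the usual nvcc complaint about a __host__ function being called from a __host__ __device__ function in two ways: it stops enabling Eigen's host vectorization paths when compiling under CUDA (EIGEN_CUDACC), adds an EIGEN_CUDA_CXX_FLAGS cache variable to the build, and annotates a set of small Core entry points with EIGEN_DEVICE_FUNC (the LDLT/LLT _solve_impl, lazyAssign, the comma initializers, the diagonal() accessors, asDiagonal(), the norm/normalize family, operator* and lazyProduct, isApprox/isMuchSmallerThan, and the scalar fallbacks of the packet math functions). As a rough sketch of what the annotation does, assuming the usual convention from Eigen/src/Core/util/Macros.h rather than quoting it verbatim:

    // Minimal sketch, not the verbatim Eigen definition.
    #if defined(__CUDACC__)
      // Under nvcc, mark the function as callable from both host and device code.
      #define EIGEN_DEVICE_FUNC __host__ __device__
    #else
      // Ordinary host compilers see no annotation at all.
      #define EIGEN_DEVICE_FUNC
    #endif

    // A __host__ __device__ function may only call functions that are themselves
    // device-callable; every un-annotated callee triggers the warning this commit removes.
    EIGEN_DEVICE_FUNC inline float squared(float x) { return x * x; }

Once a caller such as MatrixBase::normalize() carries the marker, everything it reaches must carry it too, which is why the change fans out over so many small inline functions.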
@@ -176,7 +176,7 @@ if(NOT MSVC)
 ei_add_cxx_compiler_flag("-Wall")
 ei_add_cxx_compiler_flag("-Wextra")
 #ei_add_cxx_compiler_flag("-Weverything") # clang

 ei_add_cxx_compiler_flag("-Wundef")
 ei_add_cxx_compiler_flag("-Wcast-align")
 ei_add_cxx_compiler_flag("-Wchar-subscripts")
@@ -191,29 +191,29 @@ if(NOT MSVC)
 ei_add_cxx_compiler_flag("-Wc++11-extensions")
 ei_add_cxx_compiler_flag("-Wdouble-promotion")
 # ei_add_cxx_compiler_flag("-Wconversion")

 # -Wshadow is insanely too strict with gcc, hopefully it will become usable with gcc 6
 # if(NOT CMAKE_COMPILER_IS_GNUCXX OR (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "5.0.0"))
 if(NOT CMAKE_COMPILER_IS_GNUCXX)
 ei_add_cxx_compiler_flag("-Wshadow")
 endif()

 ei_add_cxx_compiler_flag("-Wno-psabi")
 ei_add_cxx_compiler_flag("-Wno-variadic-macros")
 ei_add_cxx_compiler_flag("-Wno-long-long")

 ei_add_cxx_compiler_flag("-fno-check-new")
 ei_add_cxx_compiler_flag("-fno-common")
 ei_add_cxx_compiler_flag("-fstrict-aliasing")
 ei_add_cxx_compiler_flag("-wd981") # disable ICC's "operands are evaluated in unspecified order" remark
 ei_add_cxx_compiler_flag("-wd2304") # disable ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor


 # The -ansi flag must be added last, otherwise it is also used as a linker flag by check_cxx_compiler_flag making it fails
 # Moreover we should not set both -strict-ansi and -ansi
 check_cxx_compiler_flag("-strict-ansi" COMPILER_SUPPORT_STRICTANSI)
 ei_add_cxx_compiler_flag("-Qunused-arguments") # disable clang warning: argument unused during compilation: '-ansi'

 if(COMPILER_SUPPORT_STRICTANSI)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -strict-ansi")
 else()
@@ -224,7 +224,7 @@ if(NOT MSVC)
 ei_add_cxx_compiler_flag("-pie")
 ei_add_cxx_compiler_flag("-fPIE")
 endif()

 set(CMAKE_REQUIRED_FLAGS "")

 option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF)
@@ -398,6 +398,7 @@ if(EIGEN_TEST_NO_EXCEPTIONS)
 message(STATUS "Disabling exceptions in tests/examples")
 endif()

+set(EIGEN_CUDA_CXX_FLAGS "" CACHE STRING "Additional flags to pass to the cuda compiler.")
 set(EIGEN_CUDA_COMPUTE_ARCH 30 CACHE STRING "The CUDA compute architecture level to target when compiling CUDA code")

 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
@@ -600,11 +601,11 @@ if (NOT CMAKE_VERSION VERSION_LESS 3.0)

 else (NOT CMAKE_VERSION VERSION_LESS 3.0)
 # Fallback to legacy Eigen3Config.cmake without the imported target

 # If CMakePackageConfigHelpers module is available (CMake >= 2.8.8)
 # create a relocatable Config file, otherwise leave the hardcoded paths
 include(CMakePackageConfigHelpers OPTIONAL RESULT_VARIABLE CPCH_PATH)

 if(CPCH_PATH)
 configure_package_config_file (
 ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3ConfigLegacy.cmake.in
@@ -613,7 +614,7 @@ else (NOT CMAKE_VERSION VERSION_LESS 3.0)
 INSTALL_DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}
 NO_CHECK_REQUIRED_COMPONENTS_MACRO # Eigen does not provide components
 )
 else()
 # The PACKAGE_* variables are defined by the configure_package_config_file
 # but without it we define them manually to the hardcoded paths
 set(PACKAGE_INIT "")
@@ -123,7 +123,7 @@
 #endif
 #endif

-#ifndef EIGEN_DONT_VECTORIZE
+#if !defined(EIGEN_DONT_VECTORIZE) && !defined(EIGEN_CUDACC)

 #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)

@@ -44,7 +44,7 @@ namespace internal {
 * decomposition to determine whether a system of equations has a solution.
 *
 * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
 *
 * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT
 */
 template<typename _MatrixType, int _UpLo> class LDLT
@@ -558,7 +558,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename _MatrixType, int _UpLo>
 template<typename RhsType, typename DstType>
-void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
 {
 eigen_assert(rhs.rows() == rows());
 // dst = P b
@@ -475,7 +475,7 @@ LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, c
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename _MatrixType,int _UpLo>
 template<typename RhsType, typename DstType>
-void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
 {
 dst = rhs;
 solveInPlace(dst);
@@ -16,7 +16,7 @@ namespace Eigen {

 template<typename Derived>
 template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
 ::lazyAssign(const DenseBase<OtherDerived>& other)
 {
 enum{
@@ -29,7 +29,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>

 eigen_assert(rows() == other.rows() && cols() == other.cols());
 internal::call_assignment_no_alias(derived(),other.derived());

 return derived();
 }

@@ -17,7 +17,7 @@ namespace Eigen {
 // This implementation is based on Assign.h

 namespace internal {

 /***************************************************************************
 * Part 1 : the logic deciding a strategy for traversal and unrolling *
 ***************************************************************************/
@@ -29,12 +29,12 @@ struct copy_using_evaluator_traits
 {
 typedef typename DstEvaluator::XprType Dst;
 typedef typename Dst::Scalar DstScalar;

 enum {
 DstFlags = DstEvaluator::Flags,
 SrcFlags = SrcEvaluator::Flags
 };

 public:
 enum {
 DstAlignment = DstEvaluator::Alignment,
@@ -135,7 +135,7 @@ public:
 ? int(CompleteUnrolling)
 : int(NoUnrolling) )
 : int(Traversal) == int(LinearTraversal)
 ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling)
 : int(NoUnrolling) )
 #if EIGEN_UNALIGNED_VECTORIZE
 : int(Traversal) == int(SliceVectorizedTraversal)
@@ -195,7 +195,7 @@ struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling
 // FIXME: this is not very clean, perhaps this information should be provided by the kernel?
 typedef typename Kernel::DstEvaluatorType DstEvaluatorType;
 typedef typename DstEvaluatorType::XprType DstXprType;

 enum {
 outer = Index / DstXprType::InnerSizeAtCompileTime,
 inner = Index % DstXprType::InnerSizeAtCompileTime
@@ -261,7 +261,7 @@ struct copy_using_evaluator_innervec_CompleteUnrolling
 typedef typename Kernel::DstEvaluatorType DstEvaluatorType;
 typedef typename DstEvaluatorType::XprType DstXprType;
 typedef typename Kernel::PacketType PacketType;

 enum {
 outer = Index / DstXprType::InnerSizeAtCompileTime,
 inner = Index % DstXprType::InnerSizeAtCompileTime,
@@ -426,7 +426,7 @@ struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, CompleteUnrollin
 {
 typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
 typedef typename Kernel::PacketType PacketType;

 enum { size = DstXprType::SizeAtCompileTime,
 packetSize =unpacket_traits<PacketType>::size,
 alignedSize = (size/packetSize)*packetSize };
@@ -599,14 +599,14 @@ protected:
 typedef typename DstEvaluatorTypeT::XprType DstXprType;
 typedef typename SrcEvaluatorTypeT::XprType SrcXprType;
 public:

 typedef DstEvaluatorTypeT DstEvaluatorType;
 typedef SrcEvaluatorTypeT SrcEvaluatorType;
 typedef typename DstEvaluatorType::Scalar Scalar;
 typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;
 typedef typename AssignmentTraits::PacketType PacketType;


 EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
 : m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr)
 {
@@ -614,58 +614,58 @@ public:
 AssignmentTraits::debug();
 #endif
 }

 EIGEN_DEVICE_FUNC Index size() const { return m_dstExpr.size(); }
 EIGEN_DEVICE_FUNC Index innerSize() const { return m_dstExpr.innerSize(); }
 EIGEN_DEVICE_FUNC Index outerSize() const { return m_dstExpr.outerSize(); }
 EIGEN_DEVICE_FUNC Index rows() const { return m_dstExpr.rows(); }
 EIGEN_DEVICE_FUNC Index cols() const { return m_dstExpr.cols(); }
 EIGEN_DEVICE_FUNC Index outerStride() const { return m_dstExpr.outerStride(); }

 EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() { return m_dst; }
 EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const { return m_src; }

 /// Assign src(row,col) to dst(row,col) through the assignment functor.
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index row, Index col)
 {
 m_functor.assignCoeff(m_dst.coeffRef(row,col), m_src.coeff(row,col));
 }

 /// \sa assignCoeff(Index,Index)
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index)
 {
 m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index));
 }

 /// \sa assignCoeff(Index,Index)
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeffByOuterInner(Index outer, Index inner)
 {
 Index row = rowIndexByOuterInner(outer, inner);
 Index col = colIndexByOuterInner(outer, inner);
 assignCoeff(row, col);
 }


 template<int StoreMode, int LoadMode, typename PacketType>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col)
 {
 m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(row,col), m_src.template packet<LoadMode,PacketType>(row,col));
 }

 template<int StoreMode, int LoadMode, typename PacketType>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index)
 {
 m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(index), m_src.template packet<LoadMode,PacketType>(index));
 }

 template<int StoreMode, int LoadMode, typename PacketType>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner)
 {
 Index row = rowIndexByOuterInner(outer, inner);
 Index col = colIndexByOuterInner(outer, inner);
 assignPacket<StoreMode,LoadMode,PacketType>(row, col);
 }

 EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner)
 {
 typedef typename DstEvaluatorType::ExpressionTraits Traits;
@@ -688,7 +688,7 @@ public:
 {
 return m_dstExpr.data();
 }

 protected:
 DstEvaluatorType& m_dst;
 const SrcEvaluatorType& m_src;
@@ -734,7 +734,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType
 resize_if_allowed(dst, src, func);

 DstEvaluatorType dstEvaluator(dst);

 typedef generic_dense_assignment_kernel<DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
 Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());

@@ -762,7 +762,7 @@ struct EigenBase2EigenBase {};

 template<typename,typename> struct AssignmentKind { typedef EigenBase2EigenBase Kind; };
 template<> struct AssignmentKind<DenseShape,DenseShape> { typedef Dense2Dense Kind; };

 // This is the main assignment class
 template< typename DstXprType, typename SrcXprType, typename Functor,
 typename Kind = typename AssignmentKind< typename evaluator_traits<DstXprType>::Shape , typename evaluator_traits<SrcXprType>::Shape >::Kind,
@@ -787,7 +787,7 @@ void call_assignment(const Dst& dst, const Src& src)
 {
 call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
 }

 // Deal with "assume-aliasing"
 template<typename Dst, typename Src, typename Func>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -827,12 +827,12 @@ void call_assignment_no_alias(Dst& dst, const Src& src, const Func& func)
 typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst>::type ActualDstTypeCleaned;
 typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst&>::type ActualDstType;
 ActualDstType actualDst(dst);

 // TODO check whether this is the right place to perform these checks:
 EIGEN_STATIC_ASSERT_LVALUE(Dst)
 EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src)
 EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar);

 Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func);
 }
 template<typename Dst, typename Src>
@@ -869,13 +869,12 @@ template<typename Dst, typename Src> void check_for_aliasing(const Dst &dst, con
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
 struct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Weak>
 {
-EIGEN_DEVICE_FUNC
-static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
 {
 #ifndef EIGEN_NO_DEBUG
 internal::check_for_aliasing(dst, src);
 #endif

 call_dense_assignment_loop(dst, src, func);
 }
 };
@@ -887,8 +886,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Weak>
 template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
 struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak>
 {
-EIGEN_DEVICE_FUNC
-static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
 {
 Index dstRows = src.rows();
 Index dstCols = src.cols();
@@ -11,7 +11,7 @@
 #ifndef EIGEN_COMMAINITIALIZER_H
 #define EIGEN_COMMAINITIALIZER_H

 namespace Eigen {

 /** \class CommaInitializer
 * \ingroup Core_Module
@@ -44,7 +44,7 @@ struct CommaInitializer
 m_xpr.block(0, 0, other.rows(), other.cols()) = other;
 }

 /* Copy/Move constructor which transfers ownership. This is crucial in
 * absence of return value optimization to avoid assertions during destruction. */
 // FIXME in C++11 mode this could be replaced by a proper RValue constructor
 EIGEN_DEVICE_FUNC
@@ -135,13 +135,13 @@ struct CommaInitializer
 *
 * Example: \include MatrixBase_set.cpp
 * Output: \verbinclude MatrixBase_set.out
 *
 * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
 *
 * \sa CommaInitializer::finished(), class CommaInitializer
 */
 template<typename Derived>
-inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
+EIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
 {
 return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
 }
@@ -149,7 +149,7 @@ inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s
 /** \sa operator<<(const Scalar&) */
 template<typename Derived>
 template<typename OtherDerived>
-inline CommaInitializer<Derived>
+EIGEN_DEVICE_FUNC inline CommaInitializer<Derived>
 DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
 {
 return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
@@ -74,7 +74,7 @@ class CwiseBinaryOpImpl;
 * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
 */
 template<typename BinaryOp, typename LhsType, typename RhsType>
 class CwiseBinaryOp :
 public CwiseBinaryOpImpl<
 BinaryOp, LhsType, RhsType,
 typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,
@@ -83,7 +83,7 @@ class CwiseBinaryOp :
 internal::no_assignment_operator
 {
 public:

 typedef typename internal::remove_all<BinaryOp>::type Functor;
 typedef typename internal::remove_all<LhsType>::type Lhs;
 typedef typename internal::remove_all<RhsType>::type Rhs;
@@ -158,7 +158,7 @@ public:
 */
 template<typename Derived>
 template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
 MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
 {
 call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -171,7 +171,7 @@ MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
 */
 template<typename Derived>
 template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &
 MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
 {
 call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -126,12 +126,12 @@ DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& f
 *
 * Here is an example with C++11 random generators: \include random_cpp11.cpp
 * Output: \verbinclude random_cpp11.out
 *
 * \sa class CwiseNullaryOp
 */
 template<typename Derived>
 template<typename CustomNullaryOp>
-EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>
 DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
 {
 EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
@@ -170,7 +170,7 @@ DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
 * \sa class CwiseNullaryOp
 */
 template<typename Derived>
-EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
 DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
 {
 return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
@@ -11,7 +11,7 @@
 #ifndef EIGEN_DIAGONAL_H
 #define EIGEN_DIAGONAL_H

 namespace Eigen {

 /** \class Diagonal
 * \ingroup Core_Module
@@ -149,8 +149,8 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
 }

 EIGEN_DEVICE_FUNC
 inline const typename internal::remove_all<typename MatrixType::Nested>::type&
 nestedExpression() const
 {
 return m_matrix;
 }
@@ -187,7 +187,7 @@ template<typename MatrixType, int _DiagIndex> class Diagonal
 *
 * \sa class Diagonal */
 template<typename Derived>
-inline typename MatrixBase<Derived>::DiagonalReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalReturnType
 MatrixBase<Derived>::diagonal()
 {
 return DiagonalReturnType(derived());
@@ -195,7 +195,7 @@ MatrixBase<Derived>::diagonal()

 /** This is the const version of diagonal(). */
 template<typename Derived>
-inline typename MatrixBase<Derived>::ConstDiagonalReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalReturnType
 MatrixBase<Derived>::diagonal() const
 {
 return ConstDiagonalReturnType(derived());
@@ -213,7 +213,7 @@ MatrixBase<Derived>::diagonal() const
 *
 * \sa MatrixBase::diagonal(), class Diagonal */
 template<typename Derived>
-inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
 MatrixBase<Derived>::diagonal(Index index)
 {
 return DiagonalDynamicIndexReturnType(derived(), index);
@@ -221,7 +221,7 @@ MatrixBase<Derived>::diagonal(Index index)

 /** This is the const version of diagonal(Index). */
 template<typename Derived>
-inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
 MatrixBase<Derived>::diagonal(Index index) const
 {
 return ConstDiagonalDynamicIndexReturnType(derived(), index);
@@ -240,7 +240,7 @@ MatrixBase<Derived>::diagonal(Index index) const
 * \sa MatrixBase::diagonal(), class Diagonal */
 template<typename Derived>
 template<int Index_>
-inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type
 MatrixBase<Derived>::diagonal()
 {
 return typename DiagonalIndexReturnType<Index_>::Type(derived());
@@ -249,7 +249,7 @@ MatrixBase<Derived>::diagonal()
 /** This is the const version of diagonal<int>(). */
 template<typename Derived>
 template<int Index_>
-inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type
+EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type
 MatrixBase<Derived>::diagonal() const
 {
 return typename ConstDiagonalIndexReturnType<Index_>::Type(derived());
@@ -11,7 +11,7 @@
 #ifndef EIGEN_DIAGONALMATRIX_H
 #define EIGEN_DIAGONALMATRIX_H

 namespace Eigen {

 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename Derived>
@@ -44,7 +44,7 @@ class DiagonalBase : public EigenBase<Derived>

 EIGEN_DEVICE_FUNC
 DenseMatrixType toDenseMatrix() const { return derived(); }

 EIGEN_DEVICE_FUNC
 inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
 EIGEN_DEVICE_FUNC
@@ -70,7 +70,7 @@ class DiagonalBase : public EigenBase<Derived>
 {
 return InverseReturnType(diagonal().cwiseInverse());
 }

 EIGEN_DEVICE_FUNC
 inline const DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType,Scalar,product) >
 operator*(const Scalar& scalar) const
@@ -273,7 +273,7 @@ class DiagonalWrapper
 * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
 **/
 template<typename Derived>
-inline const DiagonalWrapper<const Derived>
+EIGEN_DEVICE_FUNC inline const DiagonalWrapper<const Derived>
 MatrixBase<Derived>::asDiagonal() const
 {
 return DiagonalWrapper<const Derived>(derived());
@@ -318,20 +318,20 @@ template<> struct AssignmentKind<DenseShape,DiagonalShape> { typedef Diagonal2De
 template< typename DstXprType, typename SrcXprType, typename Functor>
 struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Dense>
 {
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
 {
 Index dstRows = src.rows();
 Index dstCols = src.cols();
 if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
 dst.resize(dstRows, dstCols);

 dst.setZero();
 dst.diagonal() = src.diagonal();
 }

 static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
 { dst.diagonal() += src.diagonal(); }

 static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
 { dst.diagonal() -= src.diagonal(); }
 };
@@ -10,7 +10,7 @@
 #ifndef EIGEN_DOT_H
 #define EIGEN_DOT_H

 namespace Eigen {

 namespace internal {

@@ -78,7 +78,7 @@ MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
 typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
 EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
 #endif

 eigen_assert(size() == other.size());

 return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);
@@ -93,7 +93,7 @@ MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
 * \sa dot(), norm(), lpNorm()
 */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
 {
 return numext::real((*this).cwiseAbs2().sum());
 }
@@ -105,7 +105,7 @@ EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scala
 * \sa lpNorm(), dot(), squaredNorm()
 */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
 {
 return numext::sqrt(squaredNorm());
 }
@@ -120,7 +120,7 @@ EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scala
 * \sa norm(), normalize()
 */
 template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
 MatrixBase<Derived>::normalized() const
 {
 typedef typename internal::nested_eval<Derived,2>::type _Nested;
@@ -142,7 +142,7 @@ MatrixBase<Derived>::normalized() const
 * \sa norm(), normalized()
 */
 template<typename Derived>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize()
 {
 RealScalar z = squaredNorm();
 // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU
@@ -163,7 +163,7 @@ EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize()
 * \sa stableNorm(), stableNormalize(), normalized()
 */
 template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject
 MatrixBase<Derived>::stableNormalized() const
 {
 typedef typename internal::nested_eval<Derived,3>::type _Nested;
@@ -188,7 +188,7 @@ MatrixBase<Derived>::stableNormalized() const
 * \sa stableNorm(), stableNormalized(), normalize()
 */
 template<typename Derived>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize()
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize()
 {
 RealScalar w = cwiseAbs().maxCoeff();
 RealScalar z = (derived()/w).squaredNorm();
@@ -260,9 +260,9 @@ struct lpNorm_selector<Derived, Infinity>
 template<typename Derived>
 template<int p>
 #ifndef EIGEN_PARSED_BY_DOXYGEN
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+EIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 #else
-MatrixBase<Derived>::RealScalar
+EIGEN_DEVICE_FUNC MatrixBase<Derived>::RealScalar
 #endif
 MatrixBase<Derived>::lpNorm() const
 {
@@ -11,7 +11,7 @@
 #ifndef EIGEN_FUZZY_H
 #define EIGEN_FUZZY_H

 namespace Eigen {

 namespace internal
 {
@@ -100,7 +100,7 @@ struct isMuchSmallerThan_scalar_selector<Derived, true>
 */
 template<typename Derived>
 template<typename OtherDerived>
-bool DenseBase<Derived>::isApprox(
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApprox(
 const DenseBase<OtherDerived>& other,
 const RealScalar& prec
 ) const
@@ -122,7 +122,7 @@ bool DenseBase<Derived>::isApprox(
 * \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
 */
 template<typename Derived>
-bool DenseBase<Derived>::isMuchSmallerThan(
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(
 const typename NumTraits<Scalar>::Real& other,
 const RealScalar& prec
 ) const
@@ -142,7 +142,7 @@ bool DenseBase<Derived>::isMuchSmallerThan(
 */
 template<typename Derived>
 template<typename OtherDerived>
-bool DenseBase<Derived>::isMuchSmallerThan(
+EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(
 const DenseBase<OtherDerived>& other,
 const RealScalar& prec
 ) const
@@ -207,12 +207,12 @@ template<> struct gemv_dense_selector<OnTheRight,ColMajor,true>
 typedef typename Rhs::Scalar RhsScalar;
 typedef typename Dest::Scalar ResScalar;
 typedef typename Dest::RealScalar RealScalar;

 typedef internal::blas_traits<Lhs> LhsBlasTraits;
 typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
 typedef internal::blas_traits<Rhs> RhsBlasTraits;
 typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;

 typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;

 ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);
@@ -300,7 +300,7 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,true>
 typedef typename Lhs::Scalar LhsScalar;
 typedef typename Rhs::Scalar RhsScalar;
 typedef typename Dest::Scalar ResScalar;

 typedef internal::blas_traits<Lhs> LhsBlasTraits;
 typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
 typedef internal::blas_traits<Rhs> RhsBlasTraits;
@@ -386,7 +386,7 @@ template<> struct gemv_dense_selector<OnTheRight,RowMajor,false>
 */
 template<typename Derived>
 template<typename OtherDerived>
-inline const Product<Derived, OtherDerived>
+EIGEN_DEVICE_FUNC inline const Product<Derived, OtherDerived>
 MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
 {
 // A note regarding the function declaration: In MSVC, this function will sometimes
@@ -428,7 +428,7 @@ MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
 */
 template<typename Derived>
 template<typename OtherDerived>
-const Product<Derived,OtherDerived,LazyProduct>
+EIGEN_DEVICE_FUNC const Product<Derived,OtherDerived,LazyProduct>
 MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
 {
 enum {
@ -237,7 +237,7 @@ ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
|
|||||||
* For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and
|
* For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and
|
||||||
* replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}
|
* replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}
|
||||||
* Currently, this function is only used in matrix products.
|
* Currently, this function is only used in matrix products.
|
||||||
* For packet-size smaller or equal to 4, this function is equivalent to pload1
|
* For packet-size smaller or equal to 4, this function is equivalent to pload1
|
||||||
*/
|
*/
|
||||||
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
|
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
|
||||||
ploadquad(const typename unpacket_traits<Packet>::type* from)
|
ploadquad(const typename unpacket_traits<Packet>::type* from)
|
||||||
@@ -359,77 +359,77 @@ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet
 ***************************/

 /** \internal \returns the sine of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet psin(const Packet& a) { using std::sin; return sin(a); }

 /** \internal \returns the cosine of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pcos(const Packet& a) { using std::cos; return cos(a); }

 /** \internal \returns the tan of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet ptan(const Packet& a) { using std::tan; return tan(a); }

 /** \internal \returns the arc sine of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pasin(const Packet& a) { using std::asin; return asin(a); }

 /** \internal \returns the arc cosine of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pacos(const Packet& a) { using std::acos; return acos(a); }

 /** \internal \returns the arc tangent of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet patan(const Packet& a) { using std::atan; return atan(a); }

 /** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet psinh(const Packet& a) { using std::sinh; return sinh(a); }

 /** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pcosh(const Packet& a) { using std::cosh; return cosh(a); }

 /** \internal \returns the hyperbolic tan of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet ptanh(const Packet& a) { using std::tanh; return tanh(a); }

 /** \internal \returns the exp of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pexp(const Packet& a) { using std::exp; return exp(a); }

 /** \internal \returns the log of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet plog(const Packet& a) { using std::log; return log(a); }

 /** \internal \returns the log1p of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet plog1p(const Packet& a) { return numext::log1p(a); }

 /** \internal \returns the log10 of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet plog10(const Packet& a) { using std::log10; return log10(a); }

 /** \internal \returns the square-root of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); }

 /** \internal \returns the reciprocal square-root of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet prsqrt(const Packet& a) {
   return pdiv(pset1<Packet>(1), psqrt(a));
 }

 /** \internal \returns the rounded value of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pround(const Packet& a) { using numext::round; return round(a); }

 /** \internal \returns the floor of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pfloor(const Packet& a) { using numext::floor; return floor(a); }

 /** \internal \returns the ceil of \a a (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pceil(const Packet& a) { using numext::ceil; return ceil(a); }

 /***************************************************************************
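Every wrapper in the hunk above follows the same pattern: the generic "packet" op is just a scalar fallback found through `using std::...;`, and EIGEN_DEVICE_FUNC marks it __host__ __device__ when compiling with nvcc so device code can reach it without warnings. A sketch of that pattern on a hypothetical wrapper (illustration only; pexpm1_sketch is not an Eigen function):

#include <Eigen/Core>
#include <cmath>

// Generic coefficient-wise fallback: packet types may provide their own
// overload found by argument-dependent lookup; plain scalars use std::expm1.
template<typename Packet>
EIGEN_DEVICE_FUNC inline Packet pexpm1_sketch(const Packet& a)
{
  using std::expm1;
  return expm1(a);
}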
@@ -494,14 +494,14 @@ struct palign_impl

 /** \internal update \a first using the concatenation of the packet_size minus \a Offset last elements
   * of \a first and \a Offset first elements of \a second.
   *
   * This function is currently only used to optimize matrix-vector products on unligned matrices.
   * It takes 2 packets that represent a contiguous memory array, and returns a packet starting
   * at the position \a Offset. For instance, for packets of 4 elements, we have:
   *  Input:
   *   - first = {f0,f1,f2,f3}
   *   - second = {s0,s1,s2,s3}
   *  Output:
   *   - if Offset==0 then {f0,f1,f2,f3}
   *   - if Offset==1 then {f1,f2,f3,s0}
   *   - if Offset==2 then {f2,f3,s0,s1}
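The Input/Output table above fully specifies palign for 4-element packets. A scalar reference of the general rule, with hypothetical names (illustration only, not the SIMD implementation):

// Shift a window of PacketSize elements by Offset across the concatenation
// of `first` and `second`, writing the result back into `first`.
template<int Offset, int PacketSize, typename T>
void palign_reference(T (&first)[PacketSize], const T (&second)[PacketSize])
{
  T tmp[2 * PacketSize];
  for (int i = 0; i < PacketSize; ++i) { tmp[i] = first[i]; tmp[PacketSize + i] = second[i]; }
  for (int i = 0; i < PacketSize; ++i) first[i] = tmp[Offset + i];
}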
|
@@ -99,7 +99,7 @@ template<typename ExpressionType> class NestByValue
 /** \returns an expression of the temporary version of *this.
   */
 template<typename Derived>
-inline const NestByValue<Derived>
+EIGEN_DEVICE_FUNC inline const NestByValue<Derived>
 DenseBase<Derived>::nestByValue() const
 {
   return NestByValue<Derived>(derived());

@@ -14,7 +14,7 @@
 #define EIGEN_PRODUCTEVALUATORS_H

 namespace Eigen {

 namespace internal {

 /** \internal
@@ -22,19 +22,19 @@ namespace internal {
   * Since products require special treatments to handle all possible cases,
   * we simply deffer the evaluation logic to a product_evaluator class
   * which offers more partial specialization possibilities.
   *
   * \sa class product_evaluator
   */
 template<typename Lhs, typename Rhs, int Options>
 struct evaluator<Product<Lhs, Rhs, Options> >
  : public product_evaluator<Product<Lhs, Rhs, Options> >
 {
   typedef Product<Lhs, Rhs, Options> XprType;
   typedef product_evaluator<XprType> Base;

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {}
 };

 // Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B"
 // TODO we should apply that rule only if that's really helpful
 template<typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1>
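The comment in the hunk above describes the dispatch idiom used throughout this file: the public evaluator<Product<...>> is a thin wrapper that only inherits from product_evaluator<...>, and the real work lives in partial specializations of the latter. A generic sketch of that idiom with hypothetical names (illustration only, not Eigen code):

// The front-end template stays fixed; behaviour is customised by partially
// specialising the helper it derives from.
template<class Expr>
struct helper { explicit helper(const Expr&) {} };   // primary; specialised per expression kind

template<class Expr>
struct front_end : helper<Expr>                       // stable public entry point
{
  explicit front_end(const Expr& e) : helper<Expr>(e) {}
};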
@@ -62,12 +62,12 @@ struct evaluator<CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,


 template<typename Lhs, typename Rhs, int DiagIndex>
 struct evaluator<Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> >
  : public evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> >
 {
   typedef Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> XprType;
   typedef evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> > Base;

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
     : Base(Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>(
         Product<Lhs, Rhs, LazyProduct>(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()),
@@ -108,23 +108,23 @@ struct product_evaluator<Product<Lhs, Rhs, Options>, ProductTag, LhsShape, RhsSh
     : m_result(xpr.rows(), xpr.cols())
   {
     ::new (static_cast<Base*>(this)) Base(m_result);

     // FIXME shall we handle nested_eval here?,
     // if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in permutation_matrix_product, transposition_matrix_product, etc.)
     // typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
     // typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
     // typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
     // typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
     //
     // const LhsNested lhs(xpr.lhs());
     // const RhsNested rhs(xpr.rhs());
     //
     // generic_product_impl<LhsNestedCleaned, RhsNestedCleaned>::evalTo(m_result, lhs, rhs);

     generic_product_impl<Lhs, Rhs, LhsShape, RhsShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
   }

 protected:
   PlainObject m_result;
 };

@@ -137,7 +137,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scal
                   typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
 {
   typedef Product<Lhs,Rhs,Options> SrcXprType;
-  static EIGEN_STRONG_INLINE
+  static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
   void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     Index dstRows = src.rows();
@@ -155,7 +155,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<
                   typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
 {
   typedef Product<Lhs,Rhs,Options> SrcXprType;
-  static EIGEN_STRONG_INLINE
+  static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
   void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,Scalar> &)
   {
     eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
@@ -170,7 +170,7 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<
                   typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>
 {
   typedef Product<Lhs,Rhs,Options> SrcXprType;
-  static EIGEN_STRONG_INLINE
+  static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
   void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,Scalar> &)
   {
     eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
@@ -190,7 +190,7 @@ struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_product_op<ScalarBi
   typedef CwiseBinaryOp<internal::scalar_product_op<ScalarBis,Scalar>,
                         const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>,Plain>,
                         const Product<Lhs,Rhs,DefaultProduct> > SrcXprType;
-  static EIGEN_STRONG_INLINE
+  static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
   void run(DstXprType &dst, const SrcXprType &src, const AssignFunc& func)
   {
     call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs())*src.rhs().rhs(), func);
@@ -250,13 +250,13 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>
   {
     dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
   }

   template<typename Dst>
   static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     dst.coeffRef(0,0) += (lhs.transpose().cwiseProduct(rhs)).sum();
   }

   template<typename Dst>
   static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   { dst.coeffRef(0,0) -= (lhs.transpose().cwiseProduct(rhs)).sum(); }
@@ -298,7 +298,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,OuterProduct>
 {
   template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;

   // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose
   struct set { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } };
   struct add { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
@@ -310,31 +310,31 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,OuterProduct>
       dst.const_cast_derived() += m_scale * src;
     }
   };

   template<typename Dst>
   static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major<Dst>());
   }

   template<typename Dst>
   static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major<Dst>());
   }

   template<typename Dst>
   static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major<Dst>());
   }

   template<typename Dst>
   static EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
   {
     internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major<Dst>());
   }

 };


@@ -343,7 +343,7 @@ template<typename Lhs, typename Rhs, typename Derived>
 struct generic_product_impl_base
 {
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;

   template<typename Dst>
   static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); }
@@ -355,7 +355,7 @@ struct generic_product_impl_base
   template<typename Dst>
   static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   { scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); }

   template<typename Dst>
   static EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
   { Derived::scaleAndAddTo(dst,lhs,rhs,alpha); }
@@ -385,12 +385,12 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct>
 };

 template<typename Lhs, typename Rhs>
 struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode>
 {
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;

   template<typename Dst>
-  static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
     // Same as: dst.noalias() = lhs.lazyProduct(rhs);
     // but easier on the compiler side
@@ -403,7 +403,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode>
     // dst.noalias() += lhs.lazyProduct(rhs);
     call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar,Scalar>());
   }

   template<typename Dst>
   static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
   {
@@ -435,8 +435,8 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode>
   {
     call_assignment_no_alias(dst, lhs.lazyProduct(rhs), func);
   }


   // template<typename Dst>
   // static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
   // { dst.noalias() += alpha * lhs.lazyProduct(rhs); }
@@ -497,7 +497,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,

   typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
   typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;

   typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
   typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;

@@ -516,7 +516,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
   typedef typename find_best_packet<Scalar,ColsAtCompileTime>::type RhsVecPacketType;

   enum {

     LhsCoeffReadCost = LhsEtorType::CoeffReadCost,
     RhsCoeffReadCost = RhsEtorType::CoeffReadCost,
     CoeffReadCost = InnerSize==0 ? NumTraits<Scalar>::ReadCost
@@ -525,10 +525,10 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
                     + (InnerSize - 1) * NumTraits<Scalar>::AddCost,

     Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT,

     LhsFlags = LhsEtorType::Flags,
     RhsFlags = RhsEtorType::Flags,

     LhsRowMajor = LhsFlags & RowMajorBit,
     RhsRowMajor = RhsFlags & RowMajorBit,

@@ -538,7 +538,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
     // Here, we don't care about alignment larger than the usable packet size.
     LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))),
     RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))),

     SameType = is_same<typename LhsNestedCleaned::Scalar,typename RhsNestedCleaned::Scalar>::value,

     CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime!=1),
@@ -553,7 +553,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
                     // TODO enable vectorization for mixed types
                     | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0)
                     | (XprType::IsVectorAtCompileTime ? LinearAccessBit : 0),

     LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)),
     RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)),

@@ -572,7 +572,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
                         && (LhsFlags & RhsFlags & ActualPacketAccessBit)
                         && (InnerSize % packet_traits<Scalar>::size == 0)
   };

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
   {
     return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum();
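The coeff() shown just above is the coefficient-based (lazy) product: one coefficient is the dot product of a row of the left factor with a column of the right factor. The same computation spelled out with scalars (illustration only; coeff_reference is a hypothetical helper):

#include <Eigen/Dense>

double coeff_reference(const Eigen::MatrixXd& lhs, const Eigen::MatrixXd& rhs,
                       Eigen::Index row, Eigen::Index col)
{
  double sum = 0;
  for (Eigen::Index k = 0; k < lhs.cols(); ++k)
    sum += lhs(row, k) * rhs(k, col);
  return sum;   // equals (lhs * rhs)(row, col)
}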
@@ -611,7 +611,7 @@ struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape,
 protected:
   typename internal::add_const_on_value_type<LhsNested>::type m_lhs;
   typename internal::add_const_on_value_type<RhsNested>::type m_rhs;

   LhsEtorType m_lhsImpl;
   RhsEtorType m_rhsImpl;

@@ -730,7 +730,7 @@ struct generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag> >
 {
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;

   template<typename Dest>
   static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
   {
@@ -744,7 +744,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag> >
 {
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;

   template<typename Dest>
   static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
   {
@@ -765,7 +765,7 @@ struct generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag> >
 {
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;

   template<typename Dest>
   static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
   {
@@ -778,7 +778,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag> >
 {
   typedef typename Product<Lhs,Rhs>::Scalar Scalar;

   template<typename Dest>
   static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
   {
@@ -790,7 +790,7 @@ struct generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag>
 /***************************************************************************
 * Diagonal products
 ***************************************************************************/

 template<typename MatrixType, typename DiagonalType, typename Derived, int ProductOrder>
 struct diagonal_product_evaluator_base
  : evaluator_base<Derived>
@@ -799,7 +799,7 @@ struct diagonal_product_evaluator_base
 public:
   enum {
     CoeffReadCost = NumTraits<Scalar>::MulCost + evaluator<MatrixType>::CoeffReadCost + evaluator<DiagonalType>::CoeffReadCost,

     MatrixFlags = evaluator<MatrixType>::Flags,
     DiagFlags = evaluator<DiagonalType>::Flags,
     _StorageOrder = MatrixFlags & RowMajorBit ? RowMajor : ColMajor,
@@ -817,14 +817,14 @@ public:
                       || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::RowsAtCompileTime==1 && ProductOrder==OnTheLeft)
                       || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==1 && ProductOrder==OnTheRight)
   };

   diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag)
     : m_diagImpl(diag), m_matImpl(mat)
   {
     EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost);
     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
   }

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const
   {
     if(AsScalarProduct)
@@ -832,7 +832,7 @@ public:
     else
       return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx);
   }

 protected:
   template<int LoadMode,typename PacketType>
   EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const
@@ -840,7 +840,7 @@ protected:
     return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col),
                           internal::pset1<PacketType>(m_diagImpl.coeff(id)));
   }

   template<int LoadMode,typename PacketType>
   EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const
   {
@@ -851,7 +851,7 @@ protected:
     return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col),
                           m_diagImpl.template packet<DiagonalPacketLoadMode,PacketType>(id));
   }

   evaluator<DiagonalType> m_diagImpl;
   evaluator<MatrixType>   m_matImpl;
 };
@@ -866,10 +866,10 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
   using Base::m_matImpl;
   using Base::coeff;
   typedef typename Base::Scalar Scalar;

   typedef Product<Lhs, Rhs, ProductKind> XprType;
   typedef typename XprType::PlainObject PlainObject;

   enum {
     StorageOrder = int(Rhs::Flags) & RowMajorBit ? RowMajor : ColMajor
   };
@@ -878,12 +878,12 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
     : Base(xpr.rhs(), xpr.lhs().diagonal())
   {
   }

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
   {
     return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col);
   }

 #ifndef __CUDACC__
   template<int LoadMode,typename PacketType>
   EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const
@@ -893,7 +893,7 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalSha
     return this->template packet_impl<LoadMode,PacketType>(row,col, row,
                  typename internal::conditional<int(StorageOrder)==RowMajor, internal::true_type, internal::false_type>::type());
   }

   template<int LoadMode,typename PacketType>
   EIGEN_STRONG_INLINE PacketType packet(Index idx) const
   {
@@ -912,22 +912,22 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape,
   using Base::m_matImpl;
   using Base::coeff;
   typedef typename Base::Scalar Scalar;

   typedef Product<Lhs, Rhs, ProductKind> XprType;
   typedef typename XprType::PlainObject PlainObject;

   enum { StorageOrder = int(Lhs::Flags) & RowMajorBit ? RowMajor : ColMajor };

   EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)
     : Base(xpr.lhs(), xpr.rhs().diagonal())
   {
   }

   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
   {
     return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col);
   }

 #ifndef __CUDACC__
   template<int LoadMode,typename PacketType>
   EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const
@@ -935,7 +935,7 @@ struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape,
     return this->template packet_impl<LoadMode,PacketType>(row,col, col,
                  typename internal::conditional<int(StorageOrder)==ColMajor, internal::true_type, internal::false_type>::type());
   }

   template<int LoadMode,typename PacketType>
   EIGEN_STRONG_INLINE PacketType packet(Index idx) const
   {
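The two diagonal-product evaluators above differ only in which index selects the diagonal coefficient: a diagonal factor on the left scales rows (coeff(i,j) = d(i) * m(i,j)), on the right it scales columns (coeff(i,j) = m(i,j) * d(j)). A host-side usage sketch (illustration only; diagonal_scaling_demo is a hypothetical helper):

#include <Eigen/Dense>

void diagonal_scaling_demo()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::Vector3d d(1.0, 2.0, 3.0);
  Eigen::MatrixXd rowScaled = d.asDiagonal() * A;   // coeff(i,j) = d(i) * A(i,j)
  Eigen::MatrixXd colScaled = A * d.asDiagonal();   // coeff(i,j) = A(i,j) * d(j)
}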
@@ -1017,7 +1017,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Lhs, Rhs, PermutationShape, MatrixShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
   {
     permutation_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs);
   }
@@ -1027,7 +1027,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Lhs, Rhs, MatrixShape, PermutationShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
   {
     permutation_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs);
   }
@@ -1037,7 +1037,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Inverse<Lhs>, Rhs, PermutationShape, MatrixShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Inverse<Lhs>& lhs, const Rhs& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Inverse<Lhs>& lhs, const Rhs& rhs)
   {
     permutation_matrix_product<Rhs, OnTheLeft, true, MatrixShape>::run(dst, lhs.nestedExpression(), rhs);
   }
@@ -1047,7 +1047,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Lhs, Inverse<Rhs>, MatrixShape, PermutationShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Lhs& lhs, const Inverse<Rhs>& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Lhs& lhs, const Inverse<Rhs>& rhs)
   {
     permutation_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs);
   }
@@ -1069,7 +1069,7 @@ struct transposition_matrix_product
 {
   typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
   typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;

   template<typename Dest, typename TranspositionType>
   static inline void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr)
   {
@@ -1094,7 +1094,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Lhs, Rhs, TranspositionsShape, MatrixShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
   {
     transposition_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs);
   }
@@ -1104,7 +1104,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Lhs, Rhs, MatrixShape, TranspositionsShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
   {
     transposition_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs);
   }
@@ -1115,7 +1115,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Transpose<Lhs>, Rhs, TranspositionsShape, MatrixShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Transpose<Lhs>& lhs, const Rhs& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Transpose<Lhs>& lhs, const Rhs& rhs)
   {
     transposition_matrix_product<Rhs, OnTheLeft, true, MatrixShape>::run(dst, lhs.nestedExpression(), rhs);
   }
@@ -1125,7 +1125,7 @@ template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>
 struct generic_product_impl<Lhs, Transpose<Rhs>, MatrixShape, TranspositionsShape, ProductTag>
 {
   template<typename Dest>
-  static void evalTo(Dest& dst, const Lhs& lhs, const Transpose<Rhs>& rhs)
+  static EIGEN_DEVICE_FUNC void evalTo(Dest& dst, const Lhs& lhs, const Transpose<Rhs>& rhs)
   {
     transposition_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs);
   }
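The specializations above route permutation and transposition products to permutation_matrix_product / transposition_matrix_product; annotating their evalTo with EIGEN_DEVICE_FUNC is consistent with the commit's goal of silencing nvcc device warnings along these call chains. A host-side usage sketch (illustration only; permutation_product_demo is a hypothetical helper):

#include <Eigen/Dense>

void permutation_product_demo()
{
  Eigen::PermutationMatrix<3> P;
  P.indices() << 1, 2, 0;                          // explicit permutation indices
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Matrix3d rowPermuted = P * A;             // left factor: permutes rows
  Eigen::Matrix3d colPermuted = A * P;             // right factor: permutes columns
  Eigen::Matrix3d undone = P.inverse() * rowPermuted;   // should match A, via the Inverse<> specialization
}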
|
@@ -10,7 +10,7 @@
 #ifndef EIGEN_RANDOM_H
 #define EIGEN_RANDOM_H

 namespace Eigen {

 namespace internal {

@@ -29,16 +29,16 @@ struct functor_traits<scalar_random_op<Scalar> >
   *
   * Numbers are uniformly spread through their whole definition range for integer types,
   * and in the [-1:1] range for floating point scalar types.
   *
   * The parameters \a rows and \a cols are the number of rows and of columns of
   * the returned matrix. Must be compatible with this MatrixBase type.
   *
   * \not_reentrant
   *
   * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
   * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
   * instead.
   *
   *
   * Example: \include MatrixBase_random_int_int.cpp
   * Output: \verbinclude MatrixBase_random_int_int.out
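The documentation above covers the Random() factory; a host-side usage sketch of the dynamic-size and fixed-size forms it describes (illustration only; random_demo is a hypothetical helper):

#include <Eigen/Dense>

void random_demo()
{
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(3, 4);  // dynamic size: pass rows and cols
  Eigen::VectorXf v = Eigen::VectorXf::Random(5);     // dynamic vector: pass the size
  Eigen::Matrix3i F = Eigen::Matrix3i::Random();      // fixed size: no arguments needed
  // Floating-point entries lie in [-1,1]; integer entries span the type's full range.
}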
@@ -46,7 +46,7 @@ struct functor_traits<scalar_random_op<Scalar> >
   * This expression has the "evaluate before nesting" flag so that it will be evaluated into
   * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
   * behavior with expressions involving random matrices.
   *
   * See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators.
   *
   * \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random()
@@ -93,7 +93,7 @@ DenseBase<Derived>::Random(Index size)
   *
   * Numbers are uniformly spread through their whole definition range for integer types,
   * and in the [-1:1] range for floating point scalar types.
   *
   * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
   * need to use the variants taking size arguments.
   *
@@ -103,7 +103,7 @@ DenseBase<Derived>::Random(Index size)
   * This expression has the "evaluate before nesting" flag so that it will be evaluated into
   * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
   * behavior with expressions involving random matrices.
   *
   * \not_reentrant
   *
   * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index)
@ -119,16 +119,16 @@ DenseBase<Derived>::Random()
|
|||||||
*
|
*
|
||||||
* Numbers are uniformly spread through their whole definition range for integer types,
|
* Numbers are uniformly spread through their whole definition range for integer types,
|
||||||
* and in the [-1:1] range for floating point scalar types.
|
* and in the [-1:1] range for floating point scalar types.
|
||||||
*
|
*
|
||||||
* \not_reentrant
|
* \not_reentrant
|
||||||
*
|
*
|
||||||
* Example: \include MatrixBase_setRandom.cpp
|
* Example: \include MatrixBase_setRandom.cpp
|
||||||
* Output: \verbinclude MatrixBase_setRandom.out
|
* Output: \verbinclude MatrixBase_setRandom.out
|
||||||
*
|
*
|
||||||
* \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
|
* \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
|
||||||
*/
|
*/
|
||||||
template<typename Derived>
|
template<typename Derived>
|
||||||
inline Derived& DenseBase<Derived>::setRandom()
|
EIGEN_DEVICE_FUNC inline Derived& DenseBase<Derived>::setRandom()
|
||||||
{
|
{
|
||||||
return *this = Random(rows(), cols());
|
return *this = Random(rows(), cols());
|
||||||
}
|
}
|
||||||
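For reference, a minimal host-side sketch of the entry points touched above (not part of the patch; assumes only Eigen/Dense):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // Fixed-size: Random() needs no size arguments.
      Eigen::Matrix3f a = Eigen::Matrix3f::Random();

      // Dynamic-size: pass the dimensions explicitly; entries lie in [-1,1].
      Eigen::MatrixXd b = Eigen::MatrixXd::Random(4, 5);

      // setRandom() fills an existing matrix in place; this is the
      // overload that gains EIGEN_DEVICE_FUNC in the hunk above.
      Eigen::Matrix3f c;
      c.setRandom();

      std::cout << a << "\n\n" << b << "\n\n" << c << "\n";
    }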
@@ -137,7 +137,7 @@ inline Derived& DenseBase<Derived>::setRandom()
   *
   * Numbers are uniformly spread through their whole definition range for integer types,
   * and in the [-1:1] range for floating point scalar types.
   *
   * \only_for_vectors
   * \not_reentrant
   *
@@ -160,7 +160,7 @@ PlainObjectBase<Derived>::setRandom(Index newSize)
   * and in the [-1:1] range for floating point scalar types.
   *
   * \not_reentrant
   *
   * \param rows the new number of rows
   * \param cols the new number of columns
   *
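A short sketch of the resizing setRandom() overloads documented in the two hunks above (illustrative only):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::VectorXd v;
      v.setRandom(10);     // resizes the vector to 10 entries, then fills it

      Eigen::MatrixXf m;
      m.setRandom(3, 4);   // resizes to 3x4, then fills with random values
    }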
@@ -11,7 +11,7 @@
 #ifndef EIGEN_REDUX_H
 #define EIGEN_REDUX_H
 
 namespace Eigen {
 
 namespace internal {
 
@@ -60,7 +60,7 @@ public:
   enum {
     Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling
   };
 
 #ifdef EIGEN_DEBUG_ASSIGN
   static void debug()
   {
@@ -128,7 +128,7 @@ template<typename Func, typename Derived, int Start>
 struct redux_novec_unroller<Func, Derived, Start, 0>
 {
   typedef typename Derived::Scalar Scalar;
   EIGEN_DEVICE_FUNC
   static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); }
 };
 
@@ -215,7 +215,7 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
   static Scalar run(const Derived &mat, const Func& func)
   {
     const Index size = mat.size();
 
     const Index packetSize = redux_traits<Func, Derived>::PacketSize;
     const int packetAlignment = unpacket_traits<PacketScalar>::alignment;
     enum {
@@ -336,12 +336,12 @@ class redux_evaluator
 public:
   typedef _XprType XprType;
   EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}
 
   typedef typename XprType::Scalar Scalar;
   typedef typename XprType::CoeffReturnType CoeffReturnType;
   typedef typename XprType::PacketScalar PacketScalar;
   typedef typename XprType::PacketReturnType PacketReturnType;
 
   enum {
     MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
     MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
@@ -353,7 +353,7 @@ public:
     CoeffReadCost = evaluator<XprType>::CoeffReadCost,
     Alignment = evaluator<XprType>::Alignment
   };
 
   EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
   EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
   EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }
@@ -375,17 +375,17 @@ public:
   template<int LoadMode, typename PacketType>
   PacketType packet(Index index) const
   { return m_evaluator.template packet<LoadMode,PacketType>(index); }
 
   EIGEN_DEVICE_FUNC
   CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
   { return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
 
   template<int LoadMode, typename PacketType>
   PacketType packetByOuterInner(Index outer, Index inner) const
   { return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }
 
   const XprType & nestedExpression() const { return m_xpr; }
 
 protected:
   internal::evaluator<XprType> m_evaluator;
   const XprType &m_xpr;
@@ -407,14 +407,14 @@ protected:
   */
 template<typename Derived>
 template<typename Func>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::redux(const Func& func) const
 {
   eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");
 
   typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
   ThisEvaluator thisEval(derived());
 
   return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func);
 }
 
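DenseBase::redux() is the generic building block that the named reductions forward to. A sketch of calling it directly, mirroring the patched code (note that the Eigen::internal functor names are implementation details and may change between versions):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::Matrix3d m = Eigen::Matrix3d::Random();

      // sum() and minCoeff() are thin wrappers around redux() with the
      // matching internal functor, as the hunks above and below show.
      double s  = m.sum();
      double lo = m.redux(Eigen::internal::scalar_min_op<double,double>());

      return (lo <= s) ? 0 : 1;  // trivial use of the results
    }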
@@ -422,7 +422,7 @@ DenseBase<Derived>::redux(const Func& func) const
   * \warning the result is undefined if \c *this contains NaN.
   */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::minCoeff() const
 {
   return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar>());
@@ -432,7 +432,7 @@ DenseBase<Derived>::minCoeff() const
   * \warning the result is undefined if \c *this contains NaN.
   */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::maxCoeff() const
 {
   return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar>());
@@ -445,7 +445,7 @@ DenseBase<Derived>::maxCoeff() const
   * \sa trace(), prod(), mean()
   */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::sum() const
 {
   if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
@@ -458,7 +458,7 @@ DenseBase<Derived>::sum() const
   * \sa trace(), prod(), sum()
   */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::mean() const
 {
 #ifdef __INTEL_COMPILER
@@ -479,7 +479,7 @@ DenseBase<Derived>::mean() const
   * \sa sum(), mean(), trace()
   */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 DenseBase<Derived>::prod() const
 {
   if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
@@ -494,7 +494,7 @@ DenseBase<Derived>::prod() const
   * \sa diagonal(), sum()
   */
 template<typename Derived>
-EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
 MatrixBase<Derived>::trace() const
 {
   return derived().diagonal().sum();
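All of the reductions made device-callable above share the same host-side usage; a small illustrative sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m = Eigen::Matrix3d::Random();

      std::cout << "sum   = " << m.sum()      << "\n";
      std::cout << "mean  = " << m.mean()     << "\n";
      std::cout << "prod  = " << m.prod()     << "\n";
      std::cout << "min   = " << m.minCoeff() << "\n";
      std::cout << "max   = " << m.maxCoeff() << "\n";
      // trace() is defined as diagonal().sum(), exactly as in the hunk above.
      std::cout << "trace = " << m.trace()    << "\n";
    }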
@@ -10,7 +10,7 @@
 #ifndef EIGEN_REPLICATE_H
 #define EIGEN_REPLICATE_H
 
 namespace Eigen {
 
 namespace internal {
 template<typename MatrixType,int RowFactor,int ColFactor>
@@ -35,7 +35,7 @@ struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
     IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
                : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
                : (MatrixType::Flags & RowMajorBit) ? 1 : 0,
 
     // FIXME enable DirectAccess with negative strides?
     Flags = IsRowMajor ? RowMajorBit : 0
   };
@@ -95,8 +95,8 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
 
     EIGEN_DEVICE_FUNC
     const _MatrixTypeNested& nestedExpression() const
     {
       return m_matrix;
     }
 
   protected:
@@ -115,7 +115,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
   */
 template<typename Derived>
 template<int RowFactor, int ColFactor>
-const Replicate<Derived,RowFactor,ColFactor>
+EIGEN_DEVICE_FUNC const Replicate<Derived,RowFactor,ColFactor>
 DenseBase<Derived>::replicate() const
 {
   return Replicate<Derived,RowFactor,ColFactor>(derived());
@@ -130,7 +130,7 @@ DenseBase<Derived>::replicate() const
   * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
   */
 template<typename ExpressionType, int Direction>
-const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+EIGEN_DEVICE_FUNC const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
 VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
 {
   return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
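Both replicate() overloads touched above can be exercised from host code like this (sketch only; the stated result sizes assume a 3x1 input vector):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector3i v(1, 2, 3);

      // Compile-time factors: DenseBase::replicate<RowFactor,ColFactor>().
      Eigen::MatrixXi tiled = v.replicate<2, 3>();       // 6 x 3

      // Run-time factor through VectorwiseOp::replicate(Index).
      Eigen::MatrixXi cols  = v.rowwise().replicate(4);  // 3 x 4

      std::cout << tiled << "\n\n" << cols << "\n";
    }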
@@ -79,7 +79,7 @@ template<typename Derived> class ReturnByValue
 
 template<typename Derived>
 template<typename OtherDerived>
-Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
 {
   other.evalTo(derived());
   return derived();
@@ -90,7 +90,7 @@ namespace internal {
 // Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that
 // when a ReturnByValue expression is assigned, the evaluator is not constructed.
 // TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world
 
 template<typename Derived>
 struct evaluator<ReturnByValue<Derived> >
   : public evaluator<typename internal::traits<Derived>::ReturnType>
@@ -98,7 +98,7 @@ struct evaluator<ReturnByValue<Derived> >
   typedef ReturnByValue<Derived> XprType;
   typedef typename internal::traits<Derived>::ReturnType PlainObject;
   typedef evaluator<PlainObject> Base;
 
   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
     : m_result(xpr.rows(), xpr.cols())
   {
@@ -12,7 +12,7 @@
 #ifndef EIGEN_REVERSE_H
 #define EIGEN_REVERSE_H
 
 namespace Eigen {
 
 namespace internal {
 
@@ -44,7 +44,7 @@ template<typename PacketType> struct reverse_packet_cond<PacketType,false>
   static inline PacketType run(const PacketType& x) { return x; }
 };
 
 } // end namespace internal
 
 /** \class Reverse
   * \ingroup Core_Module
@@ -98,7 +98,7 @@ template<typename MatrixType, int Direction> class Reverse
     }
 
     EIGEN_DEVICE_FUNC const typename internal::remove_all<typename MatrixType::Nested>::type&
     nestedExpression() const
     {
       return m_matrix;
     }
@@ -114,7 +114,7 @@ template<typename MatrixType, int Direction> class Reverse
   *
   */
 template<typename Derived>
-inline typename DenseBase<Derived>::ReverseReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ReverseReturnType
 DenseBase<Derived>::reverse()
 {
   return ReverseReturnType(derived());
@@ -136,7 +136,7 @@ DenseBase<Derived>::reverse()
   *
   * \sa VectorwiseOp::reverseInPlace(), reverse() */
 template<typename Derived>
-inline void DenseBase<Derived>::reverseInPlace()
+EIGEN_DEVICE_FUNC inline void DenseBase<Derived>::reverseInPlace()
 {
   if(cols()>rows())
   {
@@ -161,7 +161,7 @@ inline void DenseBase<Derived>::reverseInPlace()
 }
 
 namespace internal {
 
 template<int Direction>
 struct vectorwise_reverse_inplace_impl;
 
@@ -201,7 +201,7 @@ struct vectorwise_reverse_inplace_impl<Horizontal>
   *
   * \sa DenseBase::reverseInPlace(), reverse() */
 template<typename ExpressionType, int Direction>
-void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
+EIGEN_DEVICE_FUNC void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()
 {
   internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived());
 }
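A minimal host-side sketch of the reverse() variants annotated above (illustrative only):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector4i v(1, 2, 3, 4);

      Eigen::Vector4i r = v.reverse();   // expression, evaluated into r: 4 3 2 1
      v.reverseInPlace();                // modifies v itself

      Eigen::Matrix2i m;
      m << 1, 2,
           3, 4;
      m.colwise().reverseInPlace();      // reverses the entries of each column

      std::cout << r.transpose() << "\n" << v.transpose() << "\n\n" << m << "\n";
    }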
@@ -10,7 +10,7 @@
 #ifndef EIGEN_SELFADJOINTMATRIX_H
 #define EIGEN_SELFADJOINTMATRIX_H
 
 namespace Eigen {
 
 /** \class SelfAdjointView
   * \ingroup Core_Module
@@ -58,7 +58,7 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
     typedef MatrixTypeNestedCleaned NestedExpression;
 
     /** \brief The type of coefficients in this matrix */
     typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
     typedef typename MatrixType::StorageIndex StorageIndex;
     typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
 
@@ -131,7 +131,7 @@ template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView
     {
       return Product<OtherDerived,SelfAdjointView>(lhs.derived(),rhs);
     }
 
     friend EIGEN_DEVICE_FUNC
     const SelfAdjointView<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,MatrixType,product),UpLo>
     operator*(const Scalar& s, const SelfAdjointView& mat)
@@ -287,17 +287,17 @@ protected:
   using Base::m_src;
   using Base::m_functor;
 public:
 
   typedef typename Base::DstEvaluatorType DstEvaluatorType;
   typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
   typedef typename Base::Scalar Scalar;
   typedef typename Base::AssignmentTraits AssignmentTraits;
 
 
   EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
     : Base(dst, src, func, dstExpr)
   {}
 
   EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)
   {
     eigen_internal_assert(row!=col);
@@ -305,12 +305,12 @@ public:
     m_functor.assignCoeff(m_dst.coeffRef(row,col), tmp);
     m_functor.assignCoeff(m_dst.coeffRef(col,row), numext::conj(tmp));
   }
 
   EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)
   {
     Base::assignCoeff(id,id);
   }
 
   EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index)
   { eigen_internal_assert(false && "should never be called"); }
 };
@@ -324,7 +324,7 @@ public:
 /** This is the const version of MatrixBase::selfadjointView() */
 template<typename Derived>
 template<unsigned int UpLo>
-typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
+EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
 MatrixBase<Derived>::selfadjointView() const
 {
   return typename ConstSelfAdjointViewReturnType<UpLo>::Type(derived());
@@ -341,7 +341,7 @@ MatrixBase<Derived>::selfadjointView() const
   */
 template<typename Derived>
 template<unsigned int UpLo>
-typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
+EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
 MatrixBase<Derived>::selfadjointView()
 {
   return typename SelfAdjointViewReturnType<UpLo>::Type(derived());
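For context, a small sketch of selfadjointView(), the accessor made device-callable above (illustrative; not part of the patch):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d a = Eigen::Matrix3d::Random();

      // Interpret only the upper triangle of 'a' as a symmetric matrix.
      Eigen::Matrix3d s = a.selfadjointView<Eigen::Upper>();

      // The view can also be used directly in products without forming 's'.
      Eigen::Vector3d x = Eigen::Vector3d::Ones();
      Eigen::Vector3d y = a.selfadjointView<Eigen::Upper>() * x;

      std::cout << s << "\n\n" << y.transpose() << "\n";
    }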
@@ -13,7 +13,7 @@
 namespace Eigen {
 
 template<typename Decomposition, typename RhsType, typename StorageKind> class SolveImpl;
 
 /** \class Solve
   * \ingroup Core_Module
   *
@@ -64,11 +64,11 @@ class Solve : public SolveImpl<Decomposition,RhsType,typename internal::traits<R
 public:
   typedef typename internal::traits<Solve>::PlainObject PlainObject;
   typedef typename internal::traits<Solve>::StorageIndex StorageIndex;
 
   Solve(const Decomposition &dec, const RhsType &rhs)
     : m_dec(dec), m_rhs(rhs)
   {}
 
   EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }
   EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }
 
@@ -87,14 +87,14 @@ class SolveImpl<Decomposition,RhsType,Dense>
   : public MatrixBase<Solve<Decomposition,RhsType> >
 {
   typedef Solve<Decomposition,RhsType> Derived;
 
 public:
 
   typedef MatrixBase<Solve<Decomposition,RhsType> > Base;
   EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
 
 private:
 
   Scalar coeff(Index row, Index col) const;
   Scalar coeff(Index i) const;
 };
@@ -119,15 +119,15 @@ struct evaluator<Solve<Decomposition,RhsType> >
   typedef evaluator<PlainObject> Base;
 
   enum { Flags = Base::Flags | EvalBeforeNestingBit };
 
   EIGEN_DEVICE_FUNC explicit evaluator(const SolveType& solve)
     : m_result(solve.rows(), solve.cols())
   {
     ::new (static_cast<Base*>(this)) Base(m_result);
     solve.dec()._solve_impl(solve.rhs(), m_result);
   }
 
 protected:
   PlainObject m_result;
 };
 
@@ -137,7 +137,7 @@ template<typename DstXprType, typename DecType, typename RhsType, typename Scala
 struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
 {
   typedef Solve<DecType,RhsType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+  static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     Index dstRows = src.rows();
     Index dstCols = src.cols();
@@ -153,7 +153,7 @@ template<typename DstXprType, typename DecType, typename RhsType, typename Scala
 struct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>
 {
   typedef Solve<Transpose<const DecType>,RhsType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+  static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     Index dstRows = src.rows();
     Index dstCols = src.cols();
@@ -170,13 +170,13 @@ struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<t
                   internal::assign_op<Scalar,Scalar>, Dense2Dense>
 {
   typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType> SrcXprType;
-  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+  static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
   {
     Index dstRows = src.rows();
     Index dstCols = src.cols();
     if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
       dst.resize(dstRows, dstCols);
 
     src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst);
   }
 };
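The Solve<> expression and its assignment specializations patched above are what a decomposition's solve() returns; a minimal host-side sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d a = Eigen::Matrix3d::Random();
      Eigen::Vector3d b = Eigen::Vector3d::Random();

      // dec.solve(b) builds a Solve<> expression; the evaluator above runs
      // the actual _solve_impl only when it is assigned to x.
      Eigen::PartialPivLU<Eigen::Matrix3d> lu(a);
      Eigen::Vector3d x = lu.solve(b);

      std::cout << (a * x - b).norm() << "\n";  // should be close to 0
    }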
@@ -10,7 +10,7 @@
 #ifndef EIGEN_SOLVETRIANGULAR_H
 #define EIGEN_SOLVETRIANGULAR_H
 
 namespace Eigen {
 
 namespace internal {
 
@@ -64,7 +64,7 @@ struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>
 
     ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhs,rhs.size(),
                                                   (useRhsDirectly ? rhs.data() : 0));
 
     if(!useRhsDirectly)
       MappedRhs(actualRhs,rhs.size()) = rhs;
 
@@ -148,7 +148,7 @@ struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {
   {
     Transpose<const Lhs> trLhs(lhs);
     Transpose<Rhs> trRhs(rhs);
 
     triangular_solver_unroller<Transpose<const Lhs>,Transpose<Rhs>,
                                ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),
                                0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs);
@@ -164,7 +164,7 @@ struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename MatrixType, unsigned int Mode>
 template<int Side, typename OtherDerived>
-void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
+EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
 {
   OtherDerived& other = _other.const_cast_derived();
   eigen_assert( derived().cols() == derived().rows() && ((Side==OnTheLeft && derived().cols() == other.rows()) || (Side==OnTheRight && derived().cols() == other.cols())) );
@@ -187,7 +187,7 @@ void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<Ot
 
 template<typename Derived, unsigned int Mode>
 template<int Side, typename Other>
-const internal::triangular_solve_retval<Side,TriangularView<Derived,Mode>,Other>
+EIGEN_DEVICE_FUNC const internal::triangular_solve_retval<Side,TriangularView<Derived,Mode>,Other>
 TriangularViewImpl<Derived,Mode,Dense>::solve(const MatrixBase<Other>& other) const
 {
   return internal::triangular_solve_retval<Side,TriangularViewType,Other>(derived(), other.derived());
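Both triangular-solve entry points annotated above are used like this on the host side (sketch only; the diagonal shift is just to keep the example well-conditioned):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d a = Eigen::Matrix3d::Random();
      a.diagonal().array() += 3.0;               // keep it comfortably invertible
      Eigen::Vector3d b = Eigen::Vector3d::Random();

      // Solve L*x = b using only the lower triangle of 'a'.
      Eigen::Vector3d x = a.triangularView<Eigen::Lower>().solve(b);

      // In-place variant overwrites b with the solution.
      a.triangularView<Eigen::Lower>().solveInPlace(b);

      std::cout << (x - b).norm() << "\n";       // both paths give the same result
    }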
@@ -11,7 +11,7 @@
 #ifndef EIGEN_TRANSPOSE_H
 #define EIGEN_TRANSPOSE_H
 
 namespace Eigen {
 
 namespace internal {
 template<typename MatrixType>
@@ -170,7 +170,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
   *
   * \sa transposeInPlace(), adjoint() */
 template<typename Derived>
-inline Transpose<Derived>
+EIGEN_DEVICE_FUNC inline Transpose<Derived>
 DenseBase<Derived>::transpose()
 {
   return TransposeReturnType(derived());
@@ -182,7 +182,7 @@ DenseBase<Derived>::transpose()
   *
   * \sa transposeInPlace(), adjoint() */
 template<typename Derived>
-inline typename DenseBase<Derived>::ConstTransposeReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ConstTransposeReturnType
 DenseBase<Derived>::transpose() const
 {
   return ConstTransposeReturnType(derived());
@@ -208,7 +208,7 @@ DenseBase<Derived>::transpose() const
   *
   * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */
 template<typename Derived>
-inline const typename MatrixBase<Derived>::AdjointReturnType
+EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::AdjointReturnType
 MatrixBase<Derived>::adjoint() const
 {
   return AdjointReturnType(this->transpose());
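A brief sketch of transpose() and adjoint(), the accessors made device-callable above (illustrative only):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXcd m = Eigen::MatrixXcd::Random(2, 3);

      Eigen::MatrixXcd t = m.transpose();  // 3 x 2, no conjugation
      Eigen::MatrixXcd h = m.adjoint();    // 3 x 2, conjugate transpose

      // transpose()/adjoint() are lazy expressions: never write
      // m = m.transpose(); use transposeInPlace() for that (see the
      // in-place variants in the next hunks and the sketch after them).
      std::cout << t << "\n\n" << h << "\n";
    }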
@@ -278,12 +278,12 @@ struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non squ
   * Notice however that this method is only useful if you want to replace a matrix by its own transpose.
   * If you just need the transpose of a matrix, use transpose().
   *
   * \note if the matrix is not square, then \c *this must be a resizable matrix.
   * This excludes (non-square) fixed-size matrices, block-expressions and maps.
   *
   * \sa transpose(), adjoint(), adjointInPlace() */
 template<typename Derived>
-inline void DenseBase<Derived>::transposeInPlace()
+EIGEN_DEVICE_FUNC inline void DenseBase<Derived>::transposeInPlace()
 {
   eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))
                && "transposeInPlace() called on a non-square non-resizable matrix");
@@ -314,7 +314,7 @@ inline void DenseBase<Derived>::transposeInPlace()
   *
   * \sa transpose(), adjoint(), transposeInPlace() */
 template<typename Derived>
-inline void MatrixBase<Derived>::adjointInPlace()
+EIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::adjointInPlace()
 {
   derived() = adjoint().eval();
 }
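And the corresponding in-place variants, as a minimal sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 5);

      // Safe way to replace a matrix by its own transpose: m becomes 5 x 2.
      // (m = m.transpose() would alias; transposeInPlace() handles it.)
      m.transposeInPlace();

      Eigen::MatrixXcd c = Eigen::MatrixXcd::Random(3, 3);
      c.adjointInPlace();   // conjugate transpose in place, via adjoint().eval()

      return 0;
    }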
@@ -11,12 +11,12 @@
 #ifndef EIGEN_TRIANGULARMATRIX_H
 #define EIGEN_TRIANGULARMATRIX_H
 
 namespace Eigen {
 
 namespace internal {
 
 template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval;
 
 }
 
 /** \class TriangularBase
@@ -34,16 +34,16 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
       ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
       MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
       MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
 
       SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
                                                           internal::traits<Derived>::ColsAtCompileTime>::ret),
       /**< This is equal to the number of coefficients, i.e. the number of
         * rows times the number of columns, or to \a Dynamic if this is not
         * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
 
       MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
                                                              internal::traits<Derived>::MaxColsAtCompileTime>::ret)
 
     };
     typedef typename internal::traits<Derived>::Scalar Scalar;
     typedef typename internal::traits<Derived>::StorageKind StorageKind;
@@ -63,7 +63,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
     inline Index outerStride() const { return derived().outerStride(); }
     EIGEN_DEVICE_FUNC
     inline Index innerStride() const { return derived().innerStride(); }
 
     // dummy resize function
     void resize(Index rows, Index cols)
     {
@@ -155,7 +155,7 @@ template<typename Derived> class TriangularBase : public EigenBase<Derived>
   * \param MatrixType the type of the object in which we are taking the triangular part
   * \param Mode the kind of triangular matrix expression to construct. Can be #Upper,
   *             #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.
   *             This is in fact a bit field; it must have either #Upper or #Lower,
   *             and additionally it may have #UnitDiag or #ZeroDiag or neither.
   *
   * This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular
@@ -197,7 +197,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
     typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
 
     typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
 
   public:
 
     typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
@@ -216,7 +216,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
     EIGEN_DEVICE_FUNC
     explicit inline TriangularView(MatrixType& matrix) : m_matrix(matrix)
     {}
 
     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TriangularView)
 
     /** \copydoc EigenBase::rows() */
@@ -233,7 +233,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
     /** \returns a reference to the nested expression */
     EIGEN_DEVICE_FUNC
     NestedExpression& nestedExpression() { return m_matrix; }
 
     typedef TriangularView<const MatrixConjugateReturnType,Mode> ConjugateReturnType;
     /** \sa MatrixBase::conjugate() const */
     EIGEN_DEVICE_FUNC
@@ -255,7 +255,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
       typename MatrixType::TransposeReturnType tmp(m_matrix);
       return TransposeReturnType(tmp);
     }
 
     typedef TriangularView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> ConstTransposeReturnType;
     /** \sa MatrixBase::transpose() const */
     EIGEN_DEVICE_FUNC
@@ -266,10 +266,10 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
 
     template<typename Other>
     EIGEN_DEVICE_FUNC
     inline const Solve<TriangularView, Other>
     solve(const MatrixBase<Other>& other) const
     { return Solve<TriangularView, Other>(*this, other.derived()); }
 
     // workaround MSVC ICE
     #if EIGEN_COMP_MSVC
     template<int Side, typename Other>
@@ -313,7 +313,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
       else
         return m_matrix.diagonal().prod();
     }
 
   protected:
 
     MatrixTypeNested m_matrix;
@@ -375,7 +375,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
       internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename Other::Scalar>());
       return derived();
     }
 
     /** \sa MatrixBase::operator*=() */
     EIGEN_DEVICE_FUNC
     TriangularViewType& operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; }
@@ -556,7 +556,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
 // FIXME should we keep that possibility
 template<typename MatrixType, unsigned int Mode>
 template<typename OtherDerived>
-inline TriangularView<MatrixType, Mode>&
+EIGEN_DEVICE_FUNC inline TriangularView<MatrixType, Mode>&
 TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other)
 {
   internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
@@ -566,7 +566,7 @@ TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDer
 // FIXME should we keep that possibility
 template<typename MatrixType, unsigned int Mode>
 template<typename OtherDerived>
-void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)
+EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)
 {
   internal::call_assignment_no_alias(derived(), other.template triangularView<Mode>());
 }
@@ -575,7 +575,7 @@ void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<Ot
 
 template<typename MatrixType, unsigned int Mode>
 template<typename OtherDerived>
-inline TriangularView<MatrixType, Mode>&
+EIGEN_DEVICE_FUNC inline TriangularView<MatrixType, Mode>&
 TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const TriangularBase<OtherDerived>& other)
 {
   eigen_assert(Mode == int(OtherDerived::Mode));
@@ -585,7 +585,7 @@ TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const TriangularBase<Othe
 
 template<typename MatrixType, unsigned int Mode>
 template<typename OtherDerived>
-void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBase<OtherDerived>& other)
+EIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBase<OtherDerived>& other)
 {
   eigen_assert(Mode == int(OtherDerived::Mode));
   internal::call_assignment_no_alias(derived(), other.derived());
@@ -600,7 +600,7 @@ void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBas
   * If the matrix is triangular, the opposite part is set to zero. */
 template<typename Derived>
 template<typename DenseDerived>
-void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+EIGEN_DEVICE_FUNC void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
 {
   evalToLazy(other.derived());
 }
@@ -626,7 +626,7 @@ void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
   */
 template<typename Derived>
 template<unsigned int Mode>
-typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
+EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
 MatrixBase<Derived>::triangularView()
 {
   return typename TriangularViewReturnType<Mode>::Type(derived());
@@ -635,7 +635,7 @@ MatrixBase<Derived>::triangularView()
 /** This is the const version of MatrixBase::triangularView() */
 template<typename Derived>
 template<unsigned int Mode>
-typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
+EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
 MatrixBase<Derived>::triangularView() const
 {
   return typename ConstTriangularViewReturnType<Mode>::Type(derived());
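A minimal sketch of triangularView() and the assignment paths (operator=, evalTo) touched above (illustrative only):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d a = Eigen::Matrix3d::Random();

      // Copy only the upper triangle into a dense matrix (the rest is zero),
      // which goes through TriangularBase::evalTo() under the hood.
      Eigen::Matrix3d u = a.triangularView<Eigen::Upper>();

      // Write through the view: only the lower triangle of 'a' is modified.
      a.triangularView<Eigen::Lower>() = Eigen::Matrix3d::Constant(7.0);

      std::cout << u << "\n\n" << a << "\n";
    }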
@@ -700,7 +700,7 @@ bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const

 namespace internal {


 // TODO currently a triangular expression has the form TriangularView<.,.>
 // in the future triangular-ness should be defined by the expression traits
 // such that Transpose<TriangularView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
@@ -728,7 +728,7 @@ struct Dense2Triangular {};

 template<typename Kernel, unsigned int Mode, int UnrollCount, bool ClearOpposite> struct triangular_assignment_loop;


 /** \internal Specialization of the dense assignment kernel for triangular matrices.
  * The main difference is that the triangular, diagonal, and opposite parts are processed through three different functions.
  * \tparam UpLo must be either Lower or Upper
@@ -745,17 +745,17 @@ protected:
 using Base::m_src;
 using Base::m_functor;
 public:

 typedef typename Base::DstEvaluatorType DstEvaluatorType;
 typedef typename Base::SrcEvaluatorType SrcEvaluatorType;
 typedef typename Base::Scalar Scalar;
 typedef typename Base::AssignmentTraits AssignmentTraits;


 EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
 : Base(dst, src, func, dstExpr)
 {}

 #ifdef EIGEN_INTERNAL_DEBUGGING
 EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)
 {
@@ -765,16 +765,16 @@ public:
 #else
 using Base::assignCoeff;
 #endif

 EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)
 {
 if(Mode==UnitDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(1));
 else if(Mode==ZeroDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(0));
 else if(Mode==0) Base::assignCoeff(id,id);
 }

 EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index row, Index col)
 {
 eigen_internal_assert(row!=col);
 if(SetOpposite)
 m_functor.assignCoeff(m_dst.coeffRef(row,col), Scalar(0));
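This kernel is what gives triangular assignments their semantics: assignCoeff() copies the selected triangle, assignDiagonalCoeff() honours UnitDiag/ZeroDiag, and assignOppositeCoeff() clears the other triangle only when SetOpposite is set. A small host-side illustration of the two directions, Dense2Triangular and Triangular2Dense (not part of the patch):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d src = Eigen::Matrix3d::Constant(5.0);
  Eigen::Matrix3d dst = Eigen::Matrix3d::Ones();

  // Dense2Triangular: only the lower triangle of dst is written; the
  // strictly upper part is left untouched (SetOpposite is false here).
  dst.triangularView<Eigen::Lower>() = src;

  // Triangular2Dense: converting a triangular view to a dense matrix sets
  // the opposite part to zero (SetOpposite is true on this path).
  Eigen::Matrix3d low = src.triangularView<Eigen::StrictlyLower>();

  std::cout << dst << "\n\n" << low << "\n";
  return 0;
}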
@@ -795,17 +795,17 @@ void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src, con
 if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
 dst.resize(dstRows, dstCols);
 DstEvaluatorType dstEvaluator(dst);

 typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite,
 DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
 Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());

 enum {
 unroll = DstXprType::SizeAtCompileTime != Dynamic
 && SrcEvaluatorType::CoeffReadCost < HugeCost
 && DstXprType::SizeAtCompileTime * (DstEvaluatorType::CoeffReadCost+SrcEvaluatorType::CoeffReadCost) / 2 <= EIGEN_UNROLLING_LIMIT
 };

 triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);
 }

@@ -827,8 +827,8 @@ struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Triangular>
 EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
 {
 eigen_assert(int(DstXprType::Mode) == int(SrcXprType::Mode));

 call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
 }
 };

@@ -837,7 +837,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Dense>
 {
 EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
 {
 call_triangular_assignment_loop<SrcXprType::Mode, (SrcXprType::Mode&SelfAdjoint)==0>(dst, src, func);
 }
 };

@@ -846,7 +846,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Dense2Triangular>
 {
 EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
 {
 call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);
 }
 };

@@ -857,19 +857,19 @@ struct triangular_assignment_loop
 // FIXME: this is not very clean, perhaps this information should be provided by the kernel?
 typedef typename Kernel::DstEvaluatorType DstEvaluatorType;
 typedef typename DstEvaluatorType::XprType DstXprType;

 enum {
 col = (UnrollCount-1) / DstXprType::RowsAtCompileTime,
 row = (UnrollCount-1) % DstXprType::RowsAtCompileTime
 };

 typedef typename Kernel::Scalar Scalar;

 EIGEN_DEVICE_FUNC
 static inline void run(Kernel &kernel)
 {
 triangular_assignment_loop<Kernel, Mode, UnrollCount-1, SetOpposite>::run(kernel);

 if(row==col)
 kernel.assignDiagonalCoeff(row);
 else if( ((Mode&Lower) && row>col) || ((Mode&Upper) && row<col) )
@@ -912,10 +912,10 @@ struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>
 }
 else
 i = maxi;

 if(i<kernel.rows()) // then i==j
 kernel.assignDiagonalCoeff(i++);

 if (((Mode&Upper) && SetOpposite) || (Mode&Lower))
 {
 for(; i < kernel.rows(); ++i)
@@ -932,20 +932,20 @@ struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>
  * If the matrix is triangular, the opposite part is set to zero. */
 template<typename Derived>
 template<typename DenseDerived>
-void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
+EIGEN_DEVICE_FUNC void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
 {
 other.derived().resize(this->rows(), this->cols());
 internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression());
 }

 namespace internal {

 // Triangular = Product
 template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
 struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>
 {
 typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &)
 {
 Index dstRows = src.rows();
 Index dstCols = src.cols();
@@ -961,7 +961,7 @@ template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
 struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>
 {
 typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &)
 {
 dst._assignProduct(src, 1, 1);
 }
@@ -972,7 +972,7 @@ template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>
 struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>
 {
 typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &)
 {
 dst._assignProduct(src, -1, 1);
 }
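The three Assignment specializations above back expressions that assign a matrix product directly to a triangular view, dispatching to _assignProduct with +1/-1 factors for the add/sub variants so that only the selected triangle of the product is evaluated and stored. A host-side usage sketch, under that assumption:

#include <Eigen/Dense>

void accumulate_lower(Eigen::MatrixXd& C,
                      const Eigen::MatrixXd& A,
                      const Eigen::MatrixXd& B) {
  // Only the lower triangle of the product is written.
  C.triangularView<Eigen::Lower>()  = A * B;  // assign_op path
  C.triangularView<Eigen::Lower>() += A * B;  // add_assign_op path
  C.triangularView<Eigen::Lower>() -= A * B;  // sub_assign_op path
}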
@@ -670,7 +670,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
  * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
  */
 template<typename Derived>
-inline typename DenseBase<Derived>::ColwiseReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ColwiseReturnType
 DenseBase<Derived>::colwise()
 {
 return ColwiseReturnType(derived());
@@ -684,7 +684,7 @@ DenseBase<Derived>::colwise()
  * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
  */
 template<typename Derived>
-inline typename DenseBase<Derived>::RowwiseReturnType
+EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::RowwiseReturnType
 DenseBase<Derived>::rowwise()
 {
 return RowwiseReturnType(derived());
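colwise() and rowwise() return lightweight VectorwiseOp proxies for per-column and per-row operations; the added annotation lets them be used from device code as well. Typical host-side usage, for reference:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  std::cout << m.colwise().sum() << "\n";                  // 1x3 row of column sums
  std::cout << m.rowwise().maxCoeff().transpose() << "\n"; // per-row maxima
  // Broadcasting: subtract each column's mean from that column.
  Eigen::MatrixXd centered = m.rowwise() - m.colwise().mean();
  std::cout << centered << "\n";
  return 0;
}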
@@ -167,10 +167,10 @@ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu<double2>(const d
 return make_double2(from[0], from[1]);
 }

-template<> EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) {
 return make_float4(from[0], from[0], from[1], from[1]);
 }
-template<> EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) {
+template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) {
 return make_double2(from[0], from[0]);
 }

@@ -10,7 +10,7 @@
 #ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H
 #define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H

 namespace Eigen {

 template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>
 struct selfadjoint_rank1_update;
@@ -27,7 +27,7 @@ namespace internal {
 // forward declarations (defined at the end of this file)
 template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int ResInnerStride, int UpLo>
 struct tribb_kernel;

 /* Optimized matrix-matrix product evaluating only one triangular half */
 template <typename Index,
 typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
@@ -164,7 +164,7 @@ struct tribb_kernel
 if(UpLo==Upper)
 gebp_kernel1(res.getSubMapper(0, j), blockA, actual_b, j, depth, actualBlockSize, alpha,
 -1, -1, 0, 0);

 // selfadjoint micro block
 {
 Index i = j;
@@ -186,7 +186,7 @@ struct tribb_kernel
 if(UpLo==Lower)
 {
 Index i = j+actualBlockSize;
 gebp_kernel1(res.getSubMapper(i, j), blockA+depth*i, actual_b, size-i,
 depth, actualBlockSize, alpha, -1, -1, 0, 0);
 }
 }
@@ -207,13 +207,13 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,true>
 static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha, bool beta)
 {
 typedef typename MatrixType::Scalar Scalar;

 typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
 typedef internal::blas_traits<Lhs> LhsBlasTraits;
 typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
 typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
 typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());

 typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
 typedef internal::blas_traits<Rhs> RhsBlasTraits;
 typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
@@ -230,18 +230,18 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,true>
 UseLhsDirectly = _ActualLhs::InnerStrideAtCompileTime==1,
 UseRhsDirectly = _ActualRhs::InnerStrideAtCompileTime==1
 };

 internal::gemv_static_vector_if<Scalar,Lhs::SizeAtCompileTime,Lhs::MaxSizeAtCompileTime,!UseLhsDirectly> static_lhs;
 ei_declare_aligned_stack_constructed_variable(Scalar, actualLhsPtr, actualLhs.size(),
 (UseLhsDirectly ? const_cast<Scalar*>(actualLhs.data()) : static_lhs.data()));
 if(!UseLhsDirectly) Map<typename _ActualLhs::PlainObject>(actualLhsPtr, actualLhs.size()) = actualLhs;

 internal::gemv_static_vector_if<Scalar,Rhs::SizeAtCompileTime,Rhs::MaxSizeAtCompileTime,!UseRhsDirectly> static_rhs;
 ei_declare_aligned_stack_constructed_variable(Scalar, actualRhsPtr, actualRhs.size(),
 (UseRhsDirectly ? const_cast<Scalar*>(actualRhs.data()) : static_rhs.data()));
 if(!UseRhsDirectly) Map<typename _ActualRhs::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;


 selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
 LhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
 RhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex>
@@ -259,7 +259,7 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
 typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
 typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
 typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());

 typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
 typedef internal::blas_traits<Rhs> RhsBlasTraits;
 typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
@@ -302,13 +302,13 @@ struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>

 template<typename _MatrixType, unsigned int _Mode>
 template<typename ProductType>
-TriangularView<_MatrixType,_Mode>& TriangularViewImpl<_MatrixType,_Mode,Dense>::_assignProduct(const ProductType& prod, const Scalar& alpha, bool beta)
+EIGEN_DEVICE_FUNC TriangularView<_MatrixType,_Mode>& TriangularViewImpl<_MatrixType,_Mode,Dense>::_assignProduct(const ProductType& prod, const Scalar& alpha, bool beta)
 {
 EIGEN_STATIC_ASSERT((_Mode&UnitDiag)==0, WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED);
 eigen_assert(derived().nestedExpression().rows() == prod.rows() && derived().cols() == prod.cols());

 general_product_to_triangular_selector<_MatrixType, ProductType, _Mode, internal::traits<ProductType>::InnerSize==1>::run(derived().nestedExpression().const_cast_derived(), prod, alpha, beta);

 return derived();
 }

@@ -16,7 +16,7 @@
  * It corresponds to the level 3 SYRK and level 2 SYR Blas routines.
  **********************************************************************/

 namespace Eigen {


 template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
@@ -68,10 +68,10 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>

 ei_declare_aligned_stack_constructed_variable(Scalar, actualOtherPtr, other.size(),
 (UseOtherDirectly ? const_cast<Scalar*>(actualOther.data()) : static_other.data()));

 if(!UseOtherDirectly)
 Map<typename _ActualOtherType::PlainObject>(actualOtherPtr, actualOther.size()) = actualOther;

 selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
 OtherBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
 (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex>
@@ -120,7 +120,7 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>

 template<typename MatrixType, unsigned int UpLo>
 template<typename DerivedU>
-SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+EIGEN_DEVICE_FUNC SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
 ::rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha)
 {
 selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);
@@ -10,7 +10,7 @@
 #ifndef EIGEN_SELFADJOINTRANK2UPTADE_H
 #define EIGEN_SELFADJOINTRANK2UPTADE_H

 namespace Eigen {

 namespace internal {

@@ -57,7 +57,7 @@ template<bool Cond, typename T> struct conj_expr_if

 template<typename MatrixType, unsigned int UpLo>
 template<typename DerivedU, typename DerivedV>
-SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+EIGEN_DEVICE_FUNC SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
 ::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha)
 {
 typedef internal::blas_traits<DerivedU> UBlasTraits;
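The two rankUpdate() overloads patched above perform in-place selfadjoint updates (SYR/SYR2-style) that touch only the selected triangle: the rank-1 form adds alpha*u*u^*, the rank-2 form adds alpha*u*v^* plus its adjoint contribution. A host-side usage sketch for the real case:

#include <Eigen/Dense>

void symmetric_updates(Eigen::MatrixXd& A,
                       const Eigen::VectorXd& u,
                       const Eigen::VectorXd& v, double alpha) {
  // Rank-1 update: lower triangle of A += alpha * u * u^T.
  A.selfadjointView<Eigen::Lower>().rankUpdate(u, alpha);
  // Rank-2 update: lower triangle of A += alpha * (u * v^T + v * u^T).
  A.selfadjointView<Eigen::Lower>().rankUpdate(u, v, alpha);
}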
@@ -11,7 +11,7 @@
 #ifndef EIGEN_MATRIXBASEEIGENVALUES_H
 #define EIGEN_MATRIXBASEEIGENVALUES_H

 namespace Eigen {

 namespace internal {

@@ -42,13 +42,13 @@ struct eigenvalues_selector<Derived, false>

 } // end namespace internal

 /** \brief Computes the eigenvalues of a matrix
  * \returns Column vector containing the eigenvalues.
  *
  * \eigenvalues_module
  * This function computes the eigenvalues with the help of the EigenSolver
  * class (for real matrices) or the ComplexEigenSolver class (for complex
  * matrices).
  *
  * The eigenvalues are repeated according to their algebraic multiplicity,
  * so there are as many eigenvalues as rows in the matrix.
@@ -83,8 +83,8 @@ MatrixBase<Derived>::eigenvalues() const
  *
  * \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues()
  */
 template<typename MatrixType, unsigned int UpLo>
-inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
+EIGEN_DEVICE_FUNC inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
 SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
 {
 PlainObject thisAsMatrix(*this);
@@ -147,7 +147,7 @@ MatrixBase<Derived>::operatorNorm() const
  * \sa eigenvalues(), MatrixBase::operatorNorm()
  */
 template<typename MatrixType, unsigned int UpLo>
-inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
+EIGEN_DEVICE_FUNC inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
 SelfAdjointView<MatrixType, UpLo>::operatorNorm() const
 {
 return eigenvalues().cwiseAbs().maxCoeff();
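The selfadjoint eigenvalues() and operatorNorm() accessors annotated above complement the general MatrixBase::eigenvalues() documented in the same file. A small host-side example:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d A;
  A << 1, 2,
       2, 1;
  // General path: complex-valued result, computed via EigenSolver internally.
  Eigen::Vector2cd ev = A.eigenvalues();
  // Selfadjoint path (the functions patched above): real eigenvalues, and the
  // operator norm, i.e. eigenvalues().cwiseAbs().maxCoeff() as shown in the hunk.
  Eigen::Vector2d evs = A.selfadjointView<Eigen::Lower>().eigenvalues();
  double norm = A.selfadjointView<Eigen::Lower>().operatorNorm();
  std::cout << ev.transpose() << "\n" << evs.transpose() << "\n" << norm << "\n";
  return 0;
}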
@@ -53,7 +53,7 @@ template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
  * Output: \verbinclude class_FullPivLU.out
  *
  * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
  *
  * \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
  */
 template<typename _MatrixType> class FullPivLU
@@ -744,7 +744,7 @@ struct image_retval<FullPivLU<_MatrixType> >
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename _MatrixType>
 template<typename RhsType, typename DstType>
-void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
 {
 /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
  * So we proceed as follows:
@@ -792,7 +792,7 @@ void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const

 template<typename _MatrixType>
 template<bool Conjugate, typename RhsType, typename DstType>
-void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
 {
 /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1},
  * and since permutations are real and unitary, we can write this
@@ -864,7 +864,7 @@ struct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_
 {
 typedef FullPivLU<MatrixType> LuType;
 typedef Inverse<LuType> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename MatrixType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename MatrixType::Scalar> &)
 {
 dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
 }
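_solve_impl() and the Inverse assignment above are the code paths behind FullPivLU::solve() and FullPivLU::inverse(); the latter is implemented as a solve against the identity, exactly as the run() body shows. Host-side usage for reference:

#include <Eigen/Dense>

void full_piv_lu_demo(const Eigen::MatrixXd& A, const Eigen::VectorXd& b) {
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  Eigen::VectorXd x    = lu.solve(b);   // dispatches to _solve_impl()
  Eigen::MatrixXd Ainv = lu.inverse();  // solve(Identity), per the Assignment above
  (void)x; (void)Ainv;
}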
@@ -11,7 +11,7 @@
 #ifndef EIGEN_INVERSE_IMPL_H
 #define EIGEN_INVERSE_IMPL_H

 namespace Eigen {

 namespace internal {

@@ -72,7 +72,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>
 ****************************/

 template<typename MatrixType, typename ResultType>
 EIGEN_DEVICE_FUNC
 inline void compute_inverse_size2_helper(
 const MatrixType& matrix, const typename ResultType::Scalar& invdet,
 ResultType& result)
@@ -122,7 +122,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>
 ****************************/

 template<typename MatrixType, int i, int j>
 EIGEN_DEVICE_FUNC
 inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m)
 {
 enum {
@@ -200,7 +200,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
 ****************************/

 template<typename Derived>
 EIGEN_DEVICE_FUNC
 inline const typename Derived::Scalar general_det3_helper
 (const MatrixBase<Derived>& matrix, int i1, int i2, int i3, int j1, int j2, int j3)
 {
@@ -209,7 +209,7 @@ inline const typename Derived::Scalar general_det3_helper
 }

 template<typename MatrixType, int i, int j>
 EIGEN_DEVICE_FUNC
 inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix)
 {
 enum {
@@ -290,13 +290,13 @@ template<typename DstXprType, typename XprType>
 struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense>
 {
 typedef Inverse<XprType> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)
 {
 Index dstRows = src.rows();
 Index dstCols = src.cols();
 if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
 dst.resize(dstRows, dstCols);

 const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime);
 EIGEN_ONLY_USED_FOR_DEBUG(Size);
 eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst)))
@@ -304,14 +304,14 @@ struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename Dst

 typedef typename internal::nested_eval<XprType,XprType::ColsAtCompileTime>::type ActualXprType;
 typedef typename internal::remove_all<ActualXprType>::type ActualXprTypeCleanded;

 ActualXprType actual_xpr(src.nestedExpression());

 compute_inverse<ActualXprTypeCleanded, DstXprType>::run(actual_xpr, dst);
 }
 };


 } // end namespace internal

 /** \lu_module
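The cofactor_3x3 / cofactor_4x4 helpers and the Assignment specialization above sit behind MatrixBase::inverse() for dense destinations: fixed sizes up to 4x4 use the analytic cofactor formulas, while larger or dynamic sizes fall back to an LU-based path. The eigen_assert in the hunk also rejects source/destination aliasing in the 2..4 range, so in-place inversion of such a small matrix should go through a temporary. A hedged sketch:

#include <Eigen/Dense>

Eigen::Matrix3f invert_small(const Eigen::Matrix3f& m) {
  return m.inverse();  // 3x3: analytic cofactor path shown above
}

void invert_in_place(Eigen::Matrix3f& m) {
  // For 2x2..4x4, writing m = m.inverse() directly can trip the debug
  // aliasing assert above; force a temporary instead.
  m = m.inverse().eval();
}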
@@ -69,7 +69,7 @@ struct enable_if_ref<Ref<T>,Derived> {
  * The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP().
  *
  * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
  *
  * \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse(), class FullPivLU
  */
 template<typename _MatrixType> class PartialPivLU
@@ -572,7 +572,7 @@ struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assi
 {
 typedef PartialPivLU<MatrixType> LuType;
 typedef Inverse<LuType> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename LuType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename LuType::Scalar> &)
 {
 dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
 }
@@ -42,7 +42,7 @@ template<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> >
  * numerical stability. It is slower than HouseholderQR, and faster than FullPivHouseholderQR.
  *
  * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
  *
  * \sa MatrixBase::colPivHouseholderQr()
  */
 template<typename _MatrixType> class ColPivHouseholderQR
@@ -582,7 +582,7 @@ void ColPivHouseholderQR<MatrixType>::computeInPlace()
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename _MatrixType>
 template<typename RhsType, typename DstType>
-void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
 {
 eigen_assert(rhs.rows() == rows());

@@ -618,7 +618,7 @@ struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, interna
 {
 typedef ColPivHouseholderQR<MatrixType> QrType;
 typedef Inverse<QrType> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
 {
 dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
 }
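ColPivHouseholderQR::_solve_impl(), annotated above, is what solve() dispatches to; for an overdetermined system it yields a least-squares solution. Host-side usage:

#include <Eigen/Dense>

Eigen::VectorXd least_squares(const Eigen::MatrixXd& A, const Eigen::VectorXd& b) {
  // Rank-revealing QR with column pivoting; a reasonable default for least squares.
  return A.colPivHouseholderQr().solve(b);
}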
@@ -41,7 +41,7 @@ struct traits<CompleteOrthogonalDecomposition<_MatrixType> >
  * size rank-by-rank. \b A may be rank deficient.
  *
  * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
  *
  * \sa MatrixBase::completeOrthogonalDecomposition()
  */
 template <typename _MatrixType>
@@ -489,7 +489,7 @@ void CompleteOrthogonalDecomposition<MatrixType>::applyZAdjointOnTheLeftInPlace(
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template <typename _MatrixType>
 template <typename RhsType, typename DstType>
-void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
+EIGEN_DEVICE_FUNC void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(
 const RhsType& rhs, DstType& dst) const {
 eigen_assert(rhs.rows() == this->rows());

@@ -532,7 +532,7 @@ struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType
 {
 typedef CompleteOrthogonalDecomposition<MatrixType> CodType;
 typedef Inverse<CodType> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename CodType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename CodType::Scalar> &)
 {
 dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows()));
 }
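CompleteOrthogonalDecomposition is the tool for possibly rank-deficient systems, as the class documentation above notes; its _solve_impl() is intended to return a minimum-norm least-squares solution. For reference:

#include <Eigen/Dense>

Eigen::VectorXd solve_rank_deficient(const Eigen::MatrixXd& A, const Eigen::VectorXd& b) {
  // Works even when A is rank deficient, unlike plain HouseholderQR.
  return A.completeOrthogonalDecomposition().solve(b);
}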
@@ -11,7 +11,7 @@
 #ifndef EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
 #define EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H

 namespace Eigen {

 namespace internal {

@@ -40,18 +40,18 @@ struct traits<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
  * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition
  *
  * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b P', \b Q and \b R
  * such that
  * \f[
  * \mathbf{P} \, \mathbf{A} \, \mathbf{P}' = \mathbf{Q} \, \mathbf{R}
  * \f]
  * by using Householder transformations. Here, \b P and \b P' are permutation matrices, \b Q a unitary matrix
  * and \b R an upper triangular matrix.
  *
  * This decomposition performs a very prudent full pivoting in order to be rank-revealing and achieve optimal
  * numerical stability. The trade-off is that it is slower than HouseholderQR and ColPivHouseholderQR.
  *
  * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
  *
  * \sa MatrixBase::fullPivHouseholderQr()
  */
 template<typename _MatrixType> class FullPivHouseholderQR
@@ -114,12 +114,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
  *
  * This constructor computes the QR factorization of the matrix \a matrix by calling
  * the method compute(). It is a short cut for:
  *
  * \code
  * FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
  * qr.compute(matrix);
  * \endcode
  *
  * \sa compute()
  */
 template<typename InputType>
@@ -317,9 +317,9 @@ template<typename _MatrixType> class FullPivHouseholderQR

 inline Index rows() const { return m_qr.rows(); }
 inline Index cols() const { return m_qr.cols(); }

 /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
  *
  * For advanced uses only.
  */
 const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
@@ -392,7 +392,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
  * diagonal coefficient of U.
  */
 RealScalar maxPivot() const { return m_maxpivot; }

 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename RhsType, typename DstType>
 EIGEN_DEVICE_FUNC
@@ -400,14 +400,14 @@ template<typename _MatrixType> class FullPivHouseholderQR
 #endif

 protected:

 static void check_template_parameters()
 {
 EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
 }

 void computeInPlace();

 MatrixType m_qr;
 HCoeffsType m_hCoeffs;
 IntDiagSizeVectorType m_rows_transpositions;
@@ -463,7 +463,7 @@ void FullPivHouseholderQR<MatrixType>::computeInPlace()
 Index cols = m_qr.cols();
 Index size = (std::min)(rows,cols);


 m_hCoeffs.resize(size);

 m_temp.resize(cols);
@@ -539,7 +539,7 @@ void FullPivHouseholderQR<MatrixType>::computeInPlace()
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename _MatrixType>
 template<typename RhsType, typename DstType>
-void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
 {
 eigen_assert(rhs.rows() == rows());
 const Index l_rank = rank();
@@ -574,14 +574,14 @@ void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType
 #endif

 namespace internal {

 template<typename DstXprType, typename MatrixType>
 struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense>
 {
 typedef FullPivHouseholderQR<MatrixType> QrType;
 typedef Inverse<QrType> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)
 {
 dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
 }
 };
@@ -12,7 +12,7 @@
 #ifndef EIGEN_QR_H
 #define EIGEN_QR_H

 namespace Eigen {

 /** \ingroup QR_Module
  *
@@ -24,7 +24,7 @@ namespace Eigen {
  * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition
  *
  * This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R
  * such that
  * \f[
  * \mathbf{A} = \mathbf{Q} \, \mathbf{R}
  * \f]
@@ -85,12 +85,12 @@ template<typename _MatrixType> class HouseholderQR
  *
  * This constructor computes the QR factorization of the matrix \a matrix by calling
  * the method compute(). It is a short cut for:
  *
  * \code
  * HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
  * qr.compute(matrix);
  * \endcode
  *
  * \sa compute()
  */
 template<typename InputType>
@@ -204,13 +204,13 @@ template<typename _MatrixType> class HouseholderQR

 inline Index rows() const { return m_qr.rows(); }
 inline Index cols() const { return m_qr.cols(); }

 /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
  *
  * For advanced uses only.
  */
 const HCoeffsType& hCoeffs() const { return m_hCoeffs; }

 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename RhsType, typename DstType>
 EIGEN_DEVICE_FUNC
@@ -218,14 +218,14 @@ template<typename _MatrixType> class HouseholderQR
 #endif

 protected:

 static void check_template_parameters()
 {
 EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
 }

 void computeInPlace();

 MatrixType m_qr;
 HCoeffsType m_hCoeffs;
 RowVectorType m_temp;
@@ -347,7 +347,7 @@ struct householder_qr_inplace_blocked
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 template<typename _MatrixType>
 template<typename RhsType, typename DstType>
-void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
 {
 const Index rank = (std::min)(rows(), cols());
 eigen_assert(rhs.rows() == rows());
@@ -379,7 +379,7 @@ template<typename MatrixType>
 void HouseholderQR<MatrixType>::computeInPlace()
 {
 check_template_parameters();

 Index rows = m_qr.rows();
 Index cols = m_qr.cols();
 Index size = (std::min)(rows,cols);
@ -34,12 +34,12 @@ namespace Eigen {
|
|||||||
*
|
*
|
||||||
* Singular values are always sorted in decreasing order.
|
* Singular values are always sorted in decreasing order.
|
||||||
*
|
*
|
||||||
*
|
*
|
||||||
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
|
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
|
||||||
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
|
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
|
||||||
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
|
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
|
||||||
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
|
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
|
||||||
*
|
*
|
||||||
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
|
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
|
||||||
* terminate in finite (and reasonable) time.
|
* terminate in finite (and reasonable) time.
|
||||||
* \sa class BDCSVD, class JacobiSVD
|
* \sa class BDCSVD, class JacobiSVD
|
||||||
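The documentation above describes the thin-U/V option. A hedged usage sketch with JacobiSVD (BDCSVD is used the same way; sizes and values are arbitrary):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);   // n = 5, p = 3, so m = 3
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      // Thin U is 5x3 and thin V is 3x3; that is all solve() needs.
      Eigen::VectorXd b = Eigen::VectorXd::Random(5);
      Eigen::VectorXd x = svd.solve(b);                    // least-squares solution
      std::cout << "thin U: " << svd.matrixU().rows() << "x" << svd.matrixU().cols()
                << ", residual: " << (A * x - b).norm() << "\n";
      return 0;
    }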
@@ -67,7 +67,7 @@ public:
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;
typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;
typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;

Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }

@@ -120,7 +120,7 @@ public:
eigen_assert(m_isInitialized && "SVD is not initialized.");
return m_nonzeroSingularValues;
}

/** \returns the rank of the matrix of which \c *this is the SVD.
*
* \note This method has to determine which singular values should be considered nonzero.
@@ -137,7 +137,7 @@ public:
while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
return i+1;
}

/** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(),
* which need to determine when singular values are to be considered nonzero.
* This is not used for the SVD decomposition itself.
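rank() above counts singular values against a (possibly prescribed) threshold. A short sketch of the setThreshold()/rank() pair from user code, assuming A is a dense matrix as in the previous example and 1e-8 is an arbitrary choice:

    Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
    svd.setThreshold(1e-8);          // affects rank() and solve(), not the decomposition itself
    Eigen::Index r = svd.rank();     // number of singular values treated as nonzero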
@@ -193,7 +193,7 @@ public:

inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }

/** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
*
* \param b the right-hand-side of the equation to solve.
@@ -211,7 +211,7 @@ public:
eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
return Solve<Derived, Rhs>(derived(), b.derived());
}

#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
@@ -219,12 +219,12 @@ public:
#endif

protected:

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}

// return true if already allocated
bool allocate(Index rows, Index cols, unsigned int computationOptions) ;

@@ -258,7 +258,7 @@ protected:
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
template<typename RhsType, typename DstType>
-void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
+EIGEN_DEVICE_FUNC void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
eigen_assert(rhs.rows() == rows());

@@ -10,9 +10,9 @@
#ifndef EIGEN_SPARSEASSIGN_H
#define EIGEN_SPARSEASSIGN_H

namespace Eigen {

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
@@ -104,7 +104,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)

enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };


DstXprType temp(src.rows(), src.cols());

temp.reserve((std::max)(src.rows(),src.cols())*2);
@@ -127,7 +127,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
{
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
assign_sparse_to_sparse(dst.derived(), src.derived());
}
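The static run() helpers in these Assignment specializations are presumably reached from assignment entry points that are already annotated with EIGEN_DEVICE_FUNC, and nvcc flags a call from a `__host__ __device__` function into an unannotated (host-only) one. A hedged, standalone reproduction of that kind of diagnostic (illustrative only, not Eigen code; compile with nvcc):

    // The template mirrors Eigen's situation: the diagnostic typically shows up as a
    // warning at instantiation time (stricter contexts may report it as an error).
    void host_only_helper() {}                       // implicitly __host__

    template <typename T>
    __host__ __device__ void generic_caller(T) {
      host_only_helper();                            // nvcc warns about calling a __host__
    }                                                // function from a __host__ __device__ one

    int main() { generic_caller(0); return 0; }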
@@ -137,15 +137,15 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
{
-static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
dst.setZero();

internal::evaluator<SrcXprType> srcEval(src);
resize_if_allowed(dst, src, func);
internal::evaluator<DstXprType> dstEval(dst);

const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
for (Index j=0; j<outerEvaluationSize; ++j)
for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
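The Sparse2Dense run() above zeroes the destination (for plain assignment) and then scatters the stored coefficients one outer vector at a time. The same traversal expressed with the public iterator API, as a hedged sketch with arbitrary values:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main() {
      Eigen::SparseMatrix<double> S(4, 4);                  // column-major by default
      S.insert(0, 1) = 3.0;
      S.insert(2, 3) = -1.0;
      S.makeCompressed();

      Eigen::MatrixXd D = Eigen::MatrixXd::Zero(4, 4);      // plays the role of dst.setZero()
      for (int j = 0; j < S.outerSize(); ++j)               // one outer vector at a time
        for (Eigen::SparseMatrix<double>::InnerIterator it(S, j); it; ++it)
          D(it.row(), it.col()) = it.value();               // scatter the stored non-zeros
      std::cout << D << "\n";
      return 0;
    }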
@@ -159,7 +159,7 @@ template<typename DstXprType, typename DecType, typename RhsType, typename Scala
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
{
typedef Solve<DecType,RhsType> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
@@ -182,7 +182,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
typedef Array<StorageIndex,Dynamic,1> ArrayXI;
typedef Array<Scalar,Dynamic,1> ArrayXS;
template<int Options>
-static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+static EIGEN_DEVICE_FUNC void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
@@ -196,16 +196,16 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
}

template<typename DstDerived>
static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
dst.diagonal() = src.diagonal();
}

static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() += src.diagonal(); }

static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() -= src.diagonal(); }
};

@@ -10,7 +10,7 @@
#ifndef EIGEN_SPARSEPRODUCT_H
#define EIGEN_SPARSEPRODUCT_H

namespace Eigen {

/** \returns an expression of the product of two sparse matrices.
* By default a conservative product preserving the symbolic non zeros is performed.
@@ -102,13 +102,13 @@ template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);

generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
}
};
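The specialization above is the kind of path taken when a sparse product expression is assigned to a dense matrix; the add/sub variants in the next hunks handle `+=` and `-=`. A hedged end-to-end sketch with arbitrary values:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main() {
      Eigen::SparseMatrix<double> A(3, 3), B(3, 3);
      A.insert(0, 0) = 1.0;  A.insert(1, 2) = 2.0;
      B.insert(0, 1) = 4.0;  B.insert(2, 2) = -1.0;

      Eigen::MatrixXd C = A * B;   // sparse product evaluated straight into a dense matrix
      C += A * B;                  // handled by the add_assign specialization shown below
      std::cout << C << "\n";
      return 0;
    }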
@@ -118,7 +118,7 @@ template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
}
@@ -129,7 +129,7 @@ template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
-static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
}

@@ -10,8 +10,8 @@
#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

/** \ingroup SparseCore_Module
* \class SparseSelfAdjointView
*
@@ -27,7 +27,7 @@ namespace Eigen {
* \sa SparseMatrixBase::selfadjointView()
*/
namespace internal {

template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};
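SparseMatrixBase::selfadjointView(), referenced by the \sa above, lets only one triangle be stored while the other half is implied. A hedged usage sketch with arbitrary values:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main() {
      Eigen::SparseMatrix<double> A(3, 3);     // store only the upper triangle
      A.insert(0, 0) = 2.0;  A.insert(0, 1) = 1.0;
      A.insert(1, 1) = 2.0;  A.insert(2, 2) = 2.0;
      A.makeCompressed();

      Eigen::VectorXd x = Eigen::VectorXd::Ones(3);
      Eigen::VectorXd y = A.selfadjointView<Eigen::Upper>() * x;  // implicit lower part used
      std::cout << y.transpose() << "\n";
      return 0;
    }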
@@ -44,7 +44,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
: public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
public:

enum {
Mode = _Mode,
TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
@@ -58,7 +58,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
{
eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
@@ -94,7 +94,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
{
return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
}

/** Efficient sparse self-adjoint matrix times dense vector/matrix product */
template<typename OtherDerived>
Product<SparseSelfAdjointView,OtherDerived>
@@ -121,7 +121,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
*/
template<typename DerivedU>
SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

/** \returns an expression of P H P^-1 */
// TODO implement twists in a more evaluator friendly fashion
SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
@@ -148,7 +148,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
return *this = src.twistedBy(pnull);
}

void resize(Index rows, Index cols)
{
EIGEN_ONLY_USED_FOR_DEBUG(rows);
@@ -156,7 +156,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
eigen_assert(rows == this->rows() && cols == this->cols()
&& "SparseSelfadjointView::resize() does not actually allow to resize.");
}

protected:

MatrixTypeNested m_matrix;
@@ -203,7 +203,7 @@ SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<Derive
}

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
@@ -226,7 +226,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;

template<typename DestScalar,int StorageOrder>
-static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
+static EIGEN_DEVICE_FUNC void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
{
internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
}
@@ -257,7 +257,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
run(tmp, src, AssignOpType());
dst -= tmp;
}

template<typename DestScalar>
static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
{
@@ -280,13 +280,13 @@ template<int Mode, typename SparseLhsType, typename DenseRhsType, typename Dense
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
EIGEN_ONLY_USED_FOR_DEBUG(alpha);

typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
typedef typename LhsEval::InnerIterator LhsIterator;
typedef typename SparseLhsType::Scalar LhsScalar;

enum {
LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
ProcessFirstHalf =
@@ -295,7 +295,7 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
|| ( (Mode&Lower) && LhsIsRowMajor),
ProcessSecondHalf = !ProcessFirstHalf
};

SparseLhsTypeNested lhs_nested(lhs);
LhsEval lhsEval(lhs_nested);

@@ -349,7 +349,7 @@ struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, Pr
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhsView.matrix());
RhsNested rhsNested(rhs);

internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
}
};
@@ -366,7 +366,7 @@ struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, Pr
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhsView.matrix());

// transpose everything
Transpose<Dest> dstT(dst);
internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
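The dense * sparse-selfadjoint branch above reuses the selfadjoint-times-dense kernel by transposing everything. From user code the same case is simply written with the dense factor on the left; a short hedged sketch, reusing A and x from the earlier self-adjoint example:

    // Dense on the left: routed through the transposed call shown above.
    Eigen::RowVectorXd yT = x.transpose() * A.selfadjointView<Eigen::Upper>();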
@@ -390,7 +390,7 @@ struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, Spar
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
}

protected:
typename Rhs::PlainObject m_lhs;
PlainObject m_result;
@@ -410,7 +410,7 @@ struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, Spar
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
}

protected:
typename Lhs::PlainObject m_rhs;
PlainObject m_result;
@@ -432,13 +432,13 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef evaluator<MatrixType> MatEval;
typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

MatEval matEval(mat);
Dest& dest(_dest.derived());
enum {
StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
};

Index size = mat.rows();
VectorI count;
count.resize(size);
@@ -465,7 +465,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
}
}
Index nnz = count.sum();

// reserve space
dest.resizeNonZeros(nnz);
dest.outerIndexPtr()[0] = 0;
@@ -473,7 +473,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];

// copy data
for(StorageIndex j = 0; j<size; ++j)
{
@@ -482,10 +482,10 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
StorageIndex i = internal::convert_index<StorageIndex>(it.index());
Index r = it.row();
Index c = it.col();

StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm ? perm[i] : i;

if(Mode==int(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
@@ -531,7 +531,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
};

MatEval matEval(mat);

Index size = mat.rows();
VectorI count(size);
count.setZero();
@@ -544,7 +544,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;

StorageIndex ip = perm ? perm[i] : i;
count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
@@ -555,22 +555,22 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
dest.resizeNonZeros(dest.outerIndexPtr()[size]);
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];

for(StorageIndex j = 0; j<size; ++j)
{

for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;

StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm? perm[i] : i;

Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

if(!StorageOrderMatch) std::swap(ip,jp);
if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
dest.valuePtr()[k] = numext::conj(it.value());
@@ -609,17 +609,17 @@ class SparseSymmetricPermutationProduct
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
: m_matrix(mat), m_perm(perm)
{}

inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }

const NestedExpression& matrix() const { return m_matrix; }
const Perm& perm() const { return m_perm; }

protected:
MatrixTypeNested m_matrix;
const Perm& m_perm;
@@ -627,21 +627,21 @@ class SparseSymmetricPermutationProduct
};

namespace internal {

template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
typedef typename DstXprType::StorageIndex DstIndex;
template<int Options>
-static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
+static EIGEN_DEVICE_FUNC void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
// internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
dst = tmp;
}

template<typename DestType,unsigned int DestMode>
static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
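The Assignment specialization above is what evaluates a twistedBy() expression (introduced earlier in SparseSelfAdjointView) into a full sparse matrix via permute_symm_to_fullsymm. A hedged usage sketch with arbitrary values:

    #include <Eigen/SparseCore>

    int main() {
      Eigen::SparseMatrix<double> H(3, 3);          // self-adjoint data, upper part stored
      H.insert(0, 0) = 2.0;  H.insert(0, 2) = 1.0;
      H.insert(1, 1) = 3.0;  H.insert(2, 2) = 4.0;
      H.makeCompressed();

      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(3);
      P.setIdentity();                              // any permutation would do here

      // Evaluates P * H * P^-1 into a sparse matrix with both triangles stored.
      Eigen::SparseMatrix<double> PHPinv = H.selfadjointView<Eigen::Upper>().twistedBy(P);
      return 0;
    }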
@@ -42,31 +42,31 @@ namespace internal {
* \ingroup SparseQR_Module
* \class SparseQR
* \brief Sparse left-looking rank-revealing QR factorization
*
* This class implements a left-looking rank-revealing QR decomposition
* of sparse matrices. When a column has a norm less than a given tolerance
* it is implicitly permuted to the end. The QR factorization thus obtained is
* given by A*P = Q*R where R is upper triangular or trapezoidal.
*
* P is the column permutation which is the product of the fill-reducing and the
* rank-revealing permutations. Use colsPermutation() to get it.
*
* Q is the orthogonal matrix represented as products of Householder reflectors.
* Use matrixQ() to get an expression and matrixQ().adjoint() to get the adjoint.
* You can then apply it to a vector.
*
* R is the sparse triangular or trapezoidal matrix. The later occurs when A is rank-deficient.
* matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank.
*
* \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
* \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module
* OrderingMethods \endlink module for the list of built-in and external ordering methods.
*
* \implsparsesolverconcept
*
* \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
* \warning For complex matrices matrixQ().transpose() will actually return the adjoint matrix.
*
*/
template<typename _MatrixType, typename _OrderingType>
class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
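A hedged end-to-end sketch of the class documented above, used as a sparse least-squares solver (sizes and values are arbitrary):

    #include <Eigen/SparseCore>
    #include <Eigen/SparseQR>
    #include <Eigen/OrderingMethods>
    #include <iostream>

    int main() {
      Eigen::SparseMatrix<double> A(4, 3);                 // column-major, as required
      A.insert(0, 0) = 1.0;  A.insert(1, 1) = 2.0;
      A.insert(2, 2) = 3.0;  A.insert(3, 0) = 4.0;
      A.makeCompressed();                                  // compressed mode is mandatory

      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
      Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
      if (qr.info() != Eigen::Success) return 1;

      Eigen::VectorXd x = qr.solve(b);                     // least-squares solution of A*x = b
      std::cout << "rank = " << qr.rank() << ", x = " << x.transpose() << "\n";
      return 0;
    }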
@@ -90,26 +90,26 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};

public:
SparseQR () : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{ }

/** Construct a QR factorization of the matrix \a mat.
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* \sa compute()
*/
explicit SparseQR(const MatrixType& mat) : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{
compute(mat);
}

/** Computes the QR factorization of the sparse matrix \a mat.
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* \sa analyzePattern(), factorize()
*/
void compute(const MatrixType& mat)
@@ -119,15 +119,15 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
}
void analyzePattern(const MatrixType& mat);
void factorize(const MatrixType& mat);

/** \returns the number of rows of the represented matrix.
*/
inline Index rows() const { return m_pmat.rows(); }

/** \returns the number of columns of the represented matrix.
*/
inline Index cols() const { return m_pmat.cols();}

/** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization.
* \warning The entries of the returned matrix are not sorted. This means that using it in algorithms
* expecting sorted entries will fail. This include random coefficient accesses (SpaseMatrix::coeff()),
@@ -142,7 +142,7 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
* \endcode
*/
const QRMatrixType& matrixR() const { return m_R; }

/** \returns the number of non linearly dependent columns as determined by the pivoting threshold.
*
* \sa setPivotThreshold()
@@ -150,9 +150,9 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
Index rank() const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
return m_nonzeropivots;
}

/** \returns an expression of the matrix Q as products of sparse Householder reflectors.
* The common usage of this function is to apply it to a dense matrix or vector
* \code
@@ -171,23 +171,23 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
* reflectors are stored unsorted, two transpositions are needed to sort
* them before performing the product.
*/
SparseQRMatrixQReturnType<SparseQR> matrixQ() const
{ return SparseQRMatrixQReturnType<SparseQR>(*this); }

/** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R
* It is the combination of the fill-in reducing permutation and numerical column pivoting.
*/
const PermutationType& colsPermutation() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_outputPerm_c;
}

/** \returns A string describing the type of error.
* This method is provided to ease debugging, not to handle errors.
*/
std::string lastErrorMessage() const { return m_lastError; }

/** \internal */
template<typename Rhs, typename Dest>
bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const
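matrixQ() and colsPermutation(), documented above, are usually applied rather than materialized. A short hedged sketch continuing the previous SparseQR example (qr and b as defined there):

    Eigen::VectorXd Qtb = qr.matrixQ().adjoint() * b;   // apply Q^* without forming Q
    Eigen::VectorXd Qy  = qr.matrixQ() * Qtb;           // apply Q back
    // P such that A*P = Q*R: fill-reducing ordering combined with column pivoting.
    Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P = qr.colsPermutation();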
@@ -196,21 +196,21 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");

Index rank = this->rank();

// Compute Q^* * b;
typename Dest::PlainObject y, b;
y = this->matrixQ().adjoint() * B;
b = y;

// Solve with the triangular matrix R
y.resize((std::max<Index>)(cols(),y.rows()),y.cols());
y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
y.bottomRows(y.rows()-rank).setZero();

// Apply the column permutation
if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols());
else dest = y.topRows(cols());

m_info = Success;
return true;
}
@@ -225,13 +225,13 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
m_useDefaultThreshold = false;
m_threshold = threshold;
}

/** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<SparseQR, Rhs> solve(const MatrixBase<Rhs>& B) const
{
eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
@@ -244,14 +244,14 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
return Solve<SparseQR, Rhs>(*this, B.derived());
}

/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the QR factorization reports a numerical problem
* \c InvalidInput if the input matrix is invalid
*
* \sa iparm()
*/
ComputationInfo info() const
{
@@ -270,7 +270,7 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
this->m_isQSorted = true;
}


protected:
bool m_analysisIsok;
bool m_factorizationIsok;
@@ -290,18 +290,18 @@ class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
IndexVector m_firstRowElt; // First element in each row
bool m_isQSorted; // whether Q is sorted or not
bool m_isEtreeOk; // whether the elimination tree match the initial input matrix

template <typename, typename > friend struct SparseQR_QProduct;

};

/** \brief Preprocessing step of a QR factorization
*
* \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
*
* In this step, the fill-reducing permutation is computed and applied to the columns of A
* and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited.
*
* \note In this step it is assumed that there is no empty row in the matrix \a mat.
*/
template <typename MatrixType, typename OrderingType>
@@ -311,26 +311,26 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
// Copy to a column major matrix if the input is rowmajor
typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat);
// Compute the column fill reducing ordering
OrderingType ord;
ord(matCpy, m_perm_c);
Index n = mat.cols();
Index m = mat.rows();
Index diagSize = (std::min)(m,n);

if (!m_perm_c.size())
{
m_perm_c.resize(n);
m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1));
}

// Compute the column elimination tree of the permuted matrix
m_outputPerm_c = m_perm_c.inverse();
internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
m_isEtreeOk = true;

m_R.resize(m, n);
m_Q.resize(m, diagSize);

// Allocate space for nonzero elements : rough estimation
m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
m_Q.reserve(2*mat.nonZeros());
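The analyzePattern()/factorize() split shown above (and documented for factorize() in the next hunk) lets the symbolic analysis be reused across matrices sharing a sparsity pattern. A hedged sketch, where A1 and A2 are assumed compressed, column-major sparse matrices with the same pattern:

    Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
    qr.analyzePattern(A1);   // ordering + column elimination tree, pattern only
    qr.factorize(A1);        // numerical factorization of A1
    qr.factorize(A2);        // same pattern as A1: the analysis step is reused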
@@ -339,17 +339,17 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
}

/** \brief Performs the numerical QR factorization of the input matrix
*
* The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with
* a matrix having the same sparsity pattern than \a mat.
*
* \param mat The sparse column-major matrix
*/
template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
using std::abs;

eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
StorageIndex m = StorageIndex(mat.rows());
StorageIndex n = StorageIndex(mat.cols());
@@ -359,7 +359,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
ScalarVector tval(m); // The dense vector used to compute the current column
RealScalar pivotThreshold = m_threshold;

m_R.setZero();
m_Q.setZero();
m_pmat = mat;
@@ -371,12 +371,12 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
}

m_pmat.uncompress(); // To have the innerNonZeroPtr allocated

// Apply the fill-in reducing permutation lazily:
{
// If the input is row major, copy the original column indices,
// otherwise directly use the input matrix
//
IndexVector originalOuterIndicesCpy;
const StorageIndex *originalOuterIndices = mat.outerIndexPtr();
if(MatrixType::IsRowMajor)
@@ -384,20 +384,20 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);
originalOuterIndices = originalOuterIndicesCpy.data();
}

for (int i = 0; i < n; i++)
{
Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
|
||||||
m_pmat.outerIndexPtr()[p] = originalOuterIndices[i];
|
m_pmat.outerIndexPtr()[p] = originalOuterIndices[i];
|
||||||
m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i];
|
m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Compute the default threshold as in MatLab, see:
|
/* Compute the default threshold as in MatLab, see:
|
||||||
* Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
|
* Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
|
||||||
* Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
|
* Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
|
||||||
*/
|
*/
|
||||||
if(m_useDefaultThreshold)
|
if(m_useDefaultThreshold)
|
||||||
{
|
{
|
||||||
RealScalar max2Norm = 0.0;
|
RealScalar max2Norm = 0.0;
|
||||||
for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm());
|
for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm());
|
||||||
@ -405,10 +405,10 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
max2Norm = RealScalar(1);
|
max2Norm = RealScalar(1);
|
||||||
pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
|
pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize the numerical permutation
|
// Initialize the numerical permutation
|
||||||
m_pivotperm.setIdentity(n);
|
m_pivotperm.setIdentity(n);
|
||||||
|
|
||||||
StorageIndex nonzeroCol = 0; // Record the number of valid pivots
|
StorageIndex nonzeroCol = 0; // Record the number of valid pivots
|
||||||
m_Q.startVec(0);
|
m_Q.startVec(0);
|
||||||
|
|
||||||
@ -421,8 +421,8 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
Qidx(0) = nonzeroCol;
|
Qidx(0) = nonzeroCol;
|
||||||
nzcolR = 0; nzcolQ = 1;
|
nzcolR = 0; nzcolQ = 1;
|
||||||
bool found_diag = nonzeroCol>=m;
|
bool found_diag = nonzeroCol>=m;
|
||||||
tval.setZero();
|
tval.setZero();
|
||||||
|
|
||||||
// Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e.,
|
// Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e.,
|
||||||
// all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k.
|
// all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k.
|
||||||
// Note: if the diagonal entry does not exist, then its contribution must be explicitly added,
|
// Note: if the diagonal entry does not exist, then its contribution must be explicitly added,
|
||||||
@ -432,7 +432,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
StorageIndex curIdx = nonzeroCol;
|
StorageIndex curIdx = nonzeroCol;
|
||||||
if(itp) curIdx = StorageIndex(itp.row());
|
if(itp) curIdx = StorageIndex(itp.row());
|
||||||
if(curIdx == nonzeroCol) found_diag = true;
|
if(curIdx == nonzeroCol) found_diag = true;
|
||||||
|
|
||||||
// Get the nonzeros indexes of the current column of R
|
// Get the nonzeros indexes of the current column of R
|
||||||
StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here
|
StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here
|
||||||
if (st < 0 )
|
if (st < 0 )
|
||||||
@ -442,7 +442,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Traverse the etree
|
// Traverse the etree
|
||||||
Index bi = nzcolR;
|
Index bi = nzcolR;
|
||||||
for (; mark(st) != col; st = m_etree(st))
|
for (; mark(st) != col; st = m_etree(st))
|
||||||
{
|
{
|
||||||
@ -454,13 +454,13 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
// Reverse the list to get the topological ordering
|
// Reverse the list to get the topological ordering
|
||||||
Index nt = nzcolR-bi;
|
Index nt = nzcolR-bi;
|
||||||
for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));
|
for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));
|
||||||
|
|
||||||
// Copy the current (curIdx,pcol) value of the input matrix
|
// Copy the current (curIdx,pcol) value of the input matrix
|
||||||
if(itp) tval(curIdx) = itp.value();
|
if(itp) tval(curIdx) = itp.value();
|
||||||
else tval(curIdx) = Scalar(0);
|
else tval(curIdx) = Scalar(0);
|
||||||
|
|
||||||
// Compute the pattern of Q(:,k)
|
// Compute the pattern of Q(:,k)
|
||||||
if(curIdx > nonzeroCol && mark(curIdx) != col )
|
if(curIdx > nonzeroCol && mark(curIdx) != col )
|
||||||
{
|
{
|
||||||
Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q,
|
Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q,
|
||||||
mark(curIdx) = col; // and mark it as visited
|
mark(curIdx) = col; // and mark it as visited
|
||||||
@ -472,15 +472,15 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
for (Index i = nzcolR-1; i >= 0; i--)
|
for (Index i = nzcolR-1; i >= 0; i--)
|
||||||
{
|
{
|
||||||
Index curIdx = Ridx(i);
|
Index curIdx = Ridx(i);
|
||||||
|
|
||||||
// Apply the curIdx-th householder vector to the current column (temporarily stored into tval)
|
// Apply the curIdx-th householder vector to the current column (temporarily stored into tval)
|
||||||
Scalar tdot(0);
|
Scalar tdot(0);
|
||||||
|
|
||||||
// First compute q' * tval
|
// First compute q' * tval
|
||||||
tdot = m_Q.col(curIdx).dot(tval);
|
tdot = m_Q.col(curIdx).dot(tval);
|
||||||
|
|
||||||
tdot *= m_hcoeffs(curIdx);
|
tdot *= m_hcoeffs(curIdx);
|
||||||
|
|
||||||
// Then update tval = tval - q * tau
|
// Then update tval = tval - q * tau
|
||||||
// FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse")
|
// FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse")
|
||||||
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
|
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
|
||||||
@ -500,16 +500,16 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} // End update current column
|
} // End update current column
|
||||||
|
|
||||||
Scalar tau = RealScalar(0);
|
Scalar tau = RealScalar(0);
|
||||||
RealScalar beta = 0;
|
RealScalar beta = 0;
|
||||||
|
|
||||||
if(nonzeroCol < diagSize)
|
if(nonzeroCol < diagSize)
|
||||||
{
|
{
|
||||||
// Compute the Householder reflection that eliminate the current column
|
// Compute the Householder reflection that eliminate the current column
|
||||||
// FIXME this step should call the Householder module.
|
// FIXME this step should call the Householder module.
|
||||||
Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
|
Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
|
||||||
|
|
||||||
// First, the squared norm of Q((col+1):m, col)
|
// First, the squared norm of Q((col+1):m, col)
|
||||||
RealScalar sqrNorm = 0.;
|
RealScalar sqrNorm = 0.;
|
||||||
for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
|
for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
|
||||||
@ -528,7 +528,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
for (Index itq = 1; itq < nzcolQ; ++itq)
|
for (Index itq = 1; itq < nzcolQ; ++itq)
|
||||||
tval(Qidx(itq)) /= (c0 - beta);
|
tval(Qidx(itq)) /= (c0 - beta);
|
||||||
tau = numext::conj((beta-c0) / beta);
|
tau = numext::conj((beta-c0) / beta);
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -536,7 +536,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
for (Index i = nzcolR-1; i >= 0; i--)
|
for (Index i = nzcolR-1; i >= 0; i--)
|
||||||
{
|
{
|
||||||
Index curIdx = Ridx(i);
|
Index curIdx = Ridx(i);
|
||||||
if(curIdx < nonzeroCol)
|
if(curIdx < nonzeroCol)
|
||||||
{
|
{
|
||||||
m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
|
m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
|
||||||
tval(curIdx) = Scalar(0.);
|
tval(curIdx) = Scalar(0.);
|
||||||
@ -562,17 +562,17 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
else
|
else
|
||||||
{
|
{
|
||||||
// Zero pivot found: move implicitly this column to the end
|
// Zero pivot found: move implicitly this column to the end
|
||||||
for (Index j = nonzeroCol; j < n-1; j++)
|
for (Index j = nonzeroCol; j < n-1; j++)
|
||||||
std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
|
std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
|
||||||
|
|
||||||
// Recompute the column elimination tree
|
// Recompute the column elimination tree
|
||||||
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
|
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
|
||||||
m_isEtreeOk = false;
|
m_isEtreeOk = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m_hcoeffs.tail(diagSize-nonzeroCol).setZero();
|
m_hcoeffs.tail(diagSize-nonzeroCol).setZero();
|
||||||
|
|
||||||
// Finalize the column pointers of the sparse matrices R and Q
|
// Finalize the column pointers of the sparse matrices R and Q
|
||||||
m_Q.finalize();
|
m_Q.finalize();
|
||||||
m_Q.makeCompressed();
|
m_Q.makeCompressed();
|
||||||
@ -581,18 +581,18 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
|
|||||||
m_isQSorted = false;
|
m_isQSorted = false;
|
||||||
|
|
||||||
m_nonzeropivots = nonzeroCol;
|
m_nonzeropivots = nonzeroCol;
|
||||||
|
|
||||||
if(nonzeroCol<n)
|
if(nonzeroCol<n)
|
||||||
{
|
{
|
||||||
// Permute the triangular factor to put the 'dead' columns to the end
|
// Permute the triangular factor to put the 'dead' columns to the end
|
||||||
QRMatrixType tempR(m_R);
|
QRMatrixType tempR(m_R);
|
||||||
m_R = tempR * m_pivotperm;
|
m_R = tempR * m_pivotperm;
|
||||||
|
|
||||||
// Update the column permutation
|
// Update the column permutation
|
||||||
m_outputPerm_c = m_outputPerm_c * m_pivotperm;
|
m_outputPerm_c = m_outputPerm_c * m_pivotperm;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_isInitialized = true;
|
m_isInitialized = true;
|
||||||
m_factorizationIsok = true;
|
m_factorizationIsok = true;
|
||||||
m_info = Success;
|
m_info = Success;
|
||||||
}
|
}
|
||||||
@ -602,12 +602,12 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
|
|||||||
{
|
{
|
||||||
typedef typename SparseQRType::QRMatrixType MatrixType;
|
typedef typename SparseQRType::QRMatrixType MatrixType;
|
||||||
typedef typename SparseQRType::Scalar Scalar;
|
typedef typename SparseQRType::Scalar Scalar;
|
||||||
// Get the references
|
// Get the references
|
||||||
SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
|
SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
|
||||||
m_qr(qr),m_other(other),m_transpose(transpose) {}
|
m_qr(qr),m_other(other),m_transpose(transpose) {}
|
||||||
inline Index rows() const { return m_qr.matrixQ().rows(); }
|
inline Index rows() const { return m_qr.matrixQ().rows(); }
|
||||||
inline Index cols() const { return m_other.cols(); }
|
inline Index cols() const { return m_other.cols(); }
|
||||||
|
|
||||||
// Assign to a vector
|
// Assign to a vector
|
||||||
template<typename DesType>
|
template<typename DesType>
|
||||||
void evalTo(DesType& res) const
|
void evalTo(DesType& res) const
|
||||||
@ -651,7 +651,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const SparseQRType& m_qr;
|
const SparseQRType& m_qr;
|
||||||
const Derived& m_other;
|
const Derived& m_other;
|
||||||
bool m_transpose; // TODO this actually means adjoint
|
bool m_transpose; // TODO this actually means adjoint
|
||||||
@ -659,7 +659,7 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
|
|||||||
|
|
||||||
template<typename SparseQRType>
|
template<typename SparseQRType>
|
||||||
struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >
|
struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >
|
||||||
{
|
{
|
||||||
typedef typename SparseQRType::Scalar Scalar;
|
typedef typename SparseQRType::Scalar Scalar;
|
||||||
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
|
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
|
||||||
enum {
|
enum {
|
||||||
@ -701,7 +701,7 @@ struct SparseQRMatrixQTransposeReturnType
|
|||||||
};
|
};
|
||||||
|
|
||||||
namespace internal {
|
namespace internal {
|
||||||
|
|
||||||
template<typename SparseQRType>
|
template<typename SparseQRType>
|
||||||
struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> >
|
struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> >
|
||||||
{
|
{
|
||||||
@ -716,7 +716,7 @@ struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal:
|
|||||||
typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
|
typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
|
||||||
typedef typename DstXprType::Scalar Scalar;
|
typedef typename DstXprType::Scalar Scalar;
|
||||||
typedef typename DstXprType::StorageIndex StorageIndex;
|
typedef typename DstXprType::StorageIndex StorageIndex;
|
||||||
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
|
static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
|
||||||
{
|
{
|
||||||
typename DstXprType::PlainObject idMat(src.rows(), src.cols());
|
typename DstXprType::PlainObject idMat(src.rows(), src.cols());
|
||||||
idMat.setIdentity();
|
idMat.setIdentity();
|
||||||
@ -732,7 +732,7 @@ struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal:
|
|||||||
typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
|
typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
|
||||||
typedef typename DstXprType::Scalar Scalar;
|
typedef typename DstXprType::Scalar Scalar;
|
||||||
typedef typename DstXprType::StorageIndex StorageIndex;
|
typedef typename DstXprType::StorageIndex StorageIndex;
|
||||||
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
|
static EIGEN_DEVICE_FUNC void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
|
||||||
{
|
{
|
||||||
dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows());
|
dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows());
|
||||||
}
|
}
|
||||||
|
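The only functional change in the SparseQR hunks above is the EIGEN_DEVICE_FUNC added to the two Assignment<...>::run() overloads, so that nvcc can call them from device code without emitting host/device warnings. A minimal sketch of the mechanism, assuming a stand-in macro that follows the usual convention (empty for host-only builds, __host__ __device__ under nvcc); the names below are illustrative, not Eigen's actual code:

    #if defined(__CUDACC__)
      #define MY_DEVICE_FUNC __host__ __device__   // stand-in for EIGEN_DEVICE_FUNC
    #else
      #define MY_DEVICE_FUNC
    #endif

    struct Assigner {
      // Without the annotation this is a host-only function; calling it from a
      // kernel (or from another __host__ __device__ function) triggers a warning.
      static MY_DEVICE_FUNC void run(float* dst, const float* src, int n) {
        for (int i = 0; i < n; ++i) dst[i] = src[i];
      }
    };

    __global__ void copyKernel(float* dst, const float* src, int n) {
      Assigner::run(dst, src, n);  // fine once run() is annotated
    }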
@@ -1,7 +1,7 @@
 include(EigenTesting)
 include(CheckCXXSourceCompiles)

 # configure the "site" and "buildname"
 ei_set_sitename()

 # retrieve and store the build string
@@ -11,6 +11,15 @@ add_custom_target(buildtests)
 add_custom_target(check COMMAND "ctest")
 add_dependencies(check buildtests)

+# Convenience target for only building GPU tests.
+add_custom_target(buildtests_gpu)
+add_custom_target(check_gpu COMMAND "ctest" "--output-on-failure"
+                            "--no-compress-output"
+                            "--build-no-clean"
+                            "-T" "test"
+                            "-L" "gpu")
+add_dependencies(check_gpu buildtests_gpu)
+
 # check whether /bin/bash exists (disabled as not used anymore)
 # find_file(EIGEN_BIN_BASH_EXISTS "/bin/bash" PATHS "/" NO_DEFAULT_PATH)

@@ -50,7 +59,7 @@ if(CMAKE_COMPILER_IS_GNUCXX)
     set(CTEST_CUSTOM_COVERAGE_EXCLUDE "/test/")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_FLAGS}")
   endif(EIGEN_COVERAGE_TESTING)

 elseif(MSVC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /D_CRT_SECURE_NO_WARNINGS /D_SCL_SECURE_NO_WARNINGS")
 endif(CMAKE_COMPILER_IS_GNUCXX)
@@ -18,7 +18,9 @@ macro(ei_add_test_internal testname testname_with_suffix)
     set(filename ${testname}.cpp)
   endif()

+  set(is_gpu_test OFF)
   if(EIGEN_ADD_TEST_FILENAME_EXTENSION STREQUAL cu)
+    set(is_gpu_test ON)
     if(EIGEN_TEST_CUDA_CLANG)
       set_source_files_properties(${filename} PROPERTIES LANGUAGE CXX)
       if(CUDA_64_BIT_DEVICE_CODE)
@@ -48,6 +50,9 @@ macro(ei_add_test_internal testname testname_with_suffix)
   else()
     add_dependencies(buildtests ${targetname})
   endif()
+  if (is_gpu_test)
+    add_dependencies(buildtests_gpu ${targetname})
+  endif()

   if(EIGEN_NO_ASSERTION_CHECKING)
     ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_NO_ASSERTION_CHECKING=1")
@@ -98,6 +103,10 @@ macro(ei_add_test_internal testname testname_with_suffix)
   endif()

   add_test(${testname_with_suffix} "${targetname}")
+  if (is_gpu_test)
+    # Add gpu tag for testing only GPU tests.
+    set_property(TEST ${testname_with_suffix} APPEND PROPERTY LABELS "gpu")
+  endif()

   # Specify target and test labels accoirding to EIGEN_CURRENT_SUBPROJECT
   get_property(current_subproject GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT)
@@ -362,14 +362,35 @@ if(EIGEN_TEST_CUDA)

 find_package(CUDA 5.0)
 if(CUDA_FOUND)

-  set(CUDA_PROPAGATE_HOST_FLAGS OFF)
-  if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
-    set(CUDA_NVCC_FLAGS "-ccbin ${CMAKE_C_COMPILER}" CACHE STRING "nvcc flags" FORCE)
+  if( (NOT EIGEN_TEST_CXX11) OR (CMAKE_VERSION VERSION_LESS 3.3))
+    string(APPEND EIGEN_CUDA_CXX11_FLAGS " -std=c++11")
   endif()

   if(EIGEN_TEST_CUDA_CLANG)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 --cuda-gpu-arch=sm_30")
+    string(APPEND CMAKE_CXX_FLAGS " --cuda-path=${CUDA_TOOLKIT_ROOT_DIR}")
+    foreach(GPU IN LISTS EIGEN_CUDA_COMPUTE_ARCH)
+      string(APPEND CMAKE_CXX_FLAGS " --cuda-gpu-arch=sm_${GPU}")
+    endforeach()
+    string(APPEND CMAKE_CXX_FLAGS " ${EIGEN_CUDA_CXX_FLAGS}")
+  else()
+    set(CUDA_PROPAGATE_HOST_FLAGS OFF)
+    set(NVCC_ARCH_FLAGS)
+    # Define an -arch=sm_<arch>, otherwise if GPU does not exactly match one of
+    # those in the arch list for -gencode, the kernels will fail to run with
+    # cudaErrorNoKernelImageForDevice
+    # This can happen with newer cards (e.g. sm_75) and compiling with older
+    # versions of nvcc (e.g. 9.2) that do not support their specific arch.
+    list(LENGTH EIGEN_CUDA_COMPUTE_ARCH EIGEN_CUDA_COMPUTE_ARCH_SIZE)
+    if(EIGEN_CUDA_COMPUTE_ARCH_SIZE)
+      list(GET EIGEN_CUDA_COMPUTE_ARCH 0 EIGEN_CUDA_COMPUTE_DEFAULT)
+      set(NVCC_ARCH_FLAGS " -arch=sm_${EIGEN_CUDA_COMPUTE_DEFAULT}")
+    endif()
+    foreach(ARCH IN LISTS EIGEN_CUDA_COMPUTE_ARCH)
+      string(APPEND NVCC_ARCH_FLAGS " -gencode arch=compute_${ARCH},code=sm_${ARCH}")
+    endforeach()
+    set(CUDA_NVCC_FLAGS "--expt-relaxed-constexpr -Xcudafe \"--display_error_number\" ${NVCC_ARCH_FLAGS} ${CUDA_NVCC_FLAGS} ${EIGEN_CUDA_CXX_FLAGS}")
+    cuda_include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CUDA_TOOLKIT_ROOT_DIR}/include")
   endif()
-  cuda_include_directories(${CMAKE_CURRENT_BINARY_DIR})

   set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu")

   ei_add_test(cuda_basic)
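The comment block added above documents why an explicit -arch=sm_<arch> is passed to nvcc: if the binary embeds no SASS or PTX image usable by the GPU at hand, kernel launches fail at run time with cudaErrorNoKernelImageForDevice. A host-side sketch of detecting that failure mode (dummyKernel and the messages are illustrative, not part of the patch):

    #include <cstdio>
    #include <cuda_runtime.h>

    __global__ void dummyKernel() {}

    int main() {
      dummyKernel<<<1, 1>>>();
      cudaError_t err = cudaGetLastError();  // reports launch-time failures
      if (err == cudaErrorNoKernelImageForDevice) {
        // No embedded SASS/PTX matches this GPU: rebuild with a suitable
        // -arch=sm_XX / -gencode pair.
        std::printf("no kernel image for this device: %s\n", cudaGetErrorString(err));
        return 1;
      }
      cudaDeviceSynchronize();
      return err == cudaSuccess ? 0 : 1;
    }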
@@ -37,26 +37,26 @@ void run_on_cuda(const Kernel& ker, int n, const Input& in, Output& out)
   typename Output::Scalar* d_out;
   std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar);
   std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar);

   cudaMalloc((void**)(&d_in), in_bytes);
   cudaMalloc((void**)(&d_out), out_bytes);

   cudaMemcpy(d_in, in.data(), in_bytes, cudaMemcpyHostToDevice);
   cudaMemcpy(d_out, out.data(), out_bytes, cudaMemcpyHostToDevice);

   // Simple and non-optimal 1D mapping assuming n is not too large
   // That's only for unit testing!
   dim3 Blocks(128);
   dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) );

-  cudaThreadSynchronize();
+  cudaDeviceSynchronize();
   run_on_cuda_meta_kernel<<<Grids,Blocks>>>(ker, n, d_in, d_out);
-  cudaThreadSynchronize();
+  cudaDeviceSynchronize();

   // check inputs have not been modified
   cudaMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, cudaMemcpyDeviceToHost);
   cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost);

   cudaFree(d_in);
   cudaFree(d_out);
 }
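The only change in this test helper is the replacement of cudaThreadSynchronize() with cudaDeviceSynchronize(); the former has been deprecated since CUDA 4.0, and both block the host until outstanding device work finishes. A sketch of the resulting launch-and-wait pattern, with return-code checks added for illustration (the helper above does not check them):

    #include <cassert>
    #include <cuda_runtime.h>

    __global__ void scaleKernel(float* data, int n, float s) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) data[i] *= s;
    }

    void scaleOnDevice(float* d_data, int n, float s) {
      dim3 blocks(128);
      dim3 grids((n + int(blocks.x) - 1) / int(blocks.x));
      scaleKernel<<<grids, blocks>>>(d_data, n, s);
      assert(cudaGetLastError() == cudaSuccess);       // launch-configuration errors
      assert(cudaDeviceSynchronize() == cudaSuccess);  // errors raised while the kernel ran
    }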
@@ -678,15 +678,15 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType, MakePointer_>,
   template <typename S, typename O, bool V> friend struct internal::FullReducerShard;
 #endif
 #if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
-  template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
+  template <int B, int N, typename S, typename R, typename I> friend __global__ void internal::FullReductionKernel(R, const S, I, typename S::CoeffReturnType*, unsigned int*);
 #ifdef EIGEN_HAS_CUDA_FP16
-  template <typename S, typename R, typename I> friend void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
-  template <int B, int N, typename S, typename R, typename I> friend void internal::FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
-  template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernelHalfFloat(R, const S, I, I, half*);
+  template <typename S, typename R, typename I> friend __global__ void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I, half2*);
+  template <int B, int N, typename S, typename R, typename I> friend __global__ void internal::FullReductionKernelHalfFloat(R, const S, I, half*, half2*);
+  template <int NPT, typename S, typename R, typename I> friend __global__ void internal::InnerReductionKernelHalfFloat(R, const S, I, I, half*);
 #endif
-  template <int NPT, typename S, typename R, typename I> friend void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+  template <int NPT, typename S, typename R, typename I> friend __global__ void internal::InnerReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);

-  template <int NPT, typename S, typename R, typename I> friend void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
+  template <int NPT, typename S, typename R, typename I> friend __global__ void internal::OuterReductionKernel(R, const S, I, I, typename S::CoeffReturnType*);
 #endif

   template <typename S, typename O, typename D> friend struct internal::InnerReducer;
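The friend declarations above gain the __global__ qualifier so that they match the kernels' real declarations; nvcc otherwise warns that the friend and the kernel are inconsistently declared. A minimal sketch of the pattern with hypothetical names:

    template <typename T> class DeviceBuffer;

    // The kernel is declared first so the class can name it as a friend.
    template <typename T>
    __global__ void fillKernel(DeviceBuffer<T> buf, T value);

    template <typename T>
    class DeviceBuffer {
     public:
      __host__ __device__ DeviceBuffer(T* p, int n) : m_ptr(p), m_size(n) {}
     private:
      T*  m_ptr;
      int m_size;
      // The friend declaration must carry the same __global__ qualifier as the
      // kernel itself, otherwise nvcc sees two conflicting declarations.
      template <typename U> friend __global__ void fillKernel(DeviceBuffer<U>, U);
    };

    template <typename T>
    __global__ void fillKernel(DeviceBuffer<T> buf, T value) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < buf.m_size) buf.m_ptr[i] = value;  // allowed because fillKernel is a friend
    }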
@@ -168,7 +168,12 @@ __global__ void FullReductionKernel(Reducer reducer, const Self input, Index num

   #pragma unroll
   for (int offset = warpSize/2; offset > 0; offset /= 2) {
+  #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000
+
     reducer.reduce(__shfl_down(accum, offset, warpSize), &accum);
+  #else
+    reducer.reduce(__shfl_down_sync(0xFFFFFFFF, accum, offset, warpSize), &accum);
+  #endif
   }

   if ((threadIdx.x & (warpSize - 1)) == 0) {
@@ -244,7 +249,11 @@ __global__ void FullReductionKernelHalfFloat(Reducer reducer, const Self input,

   #pragma unroll
   for (int offset = warpSize/2; offset > 0; offset /= 2) {
+  #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000
     reducer.reducePacket(__shfl_down(accum, offset, warpSize), &accum);
+  #else
+    reducer.reducePacket(__shfl_down_sync(0xFFFFFFFF, accum, offset, warpSize), &accum);
+  #endif
   }

   if ((threadIdx.x & (warpSize - 1)) == 0) {
@@ -426,7 +435,11 @@ __global__ void InnerReductionKernel(Reducer reducer, const Self input, Index nu

   #pragma unroll
   for (int offset = warpSize/2; offset > 0; offset /= 2) {
+  #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000
     reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val);
+  #else
+    reducer.reduce(__shfl_down_sync(0xFFFFFFFF, reduced_val, offset), &reduced_val);
+  #endif
   }

   if ((threadIdx.x & (warpSize - 1)) == 0) {
@@ -516,8 +529,15 @@ __global__ void InnerReductionKernelHalfFloat(Reducer reducer, const Self input,

   #pragma unroll
   for (int offset = warpSize/2; offset > 0; offset /= 2) {
+  #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000
+
     reducer.reducePacket(__shfl_down(reduced_val1, offset, warpSize), &reduced_val1);
     reducer.reducePacket(__shfl_down(reduced_val2, offset, warpSize), &reduced_val2);
+  #else
+    reducer.reducePacket(__shfl_down_sync(0xFFFFFFFF, reduced_val1, offset, warpSize), &reduced_val1);
+    reducer.reducePacket(__shfl_down_sync(0xFFFFFFFF, reduced_val2, offset, warpSize), &reduced_val2);
+
+  #endif
   }

   half val1 = __low2half(reduced_val1);
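CUDA 9 deprecated the implicitly synchronizing warp shuffles in favour of the *_sync variants, which take an explicit lane mask, hence the SDK-version guards added above. A standalone sketch of the same warp-level reduction, using the raw CUDART_VERSION macro rather than Eigen's EIGEN_CUDA_SDK_VER (which encodes the version on a different scale):

    #include <cuda_runtime.h>  // defines CUDART_VERSION, e.g. 9000 for CUDA 9.0

    // Sums `val` across the 32 lanes of a warp; lane 0 ends up with the total.
    __device__ float warpSum(float val) {
      #pragma unroll
      for (int offset = warpSize / 2; offset > 0; offset /= 2) {
    #if CUDART_VERSION < 9000
        val += __shfl_down(val, offset, warpSize);                    // pre-CUDA-9 intrinsic
    #else
        val += __shfl_down_sync(0xFFFFFFFFu, val, offset, warpSize);  // explicit full-warp mask
    #endif
      }
      return val;
    }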
@@ -15,27 +15,27 @@ namespace Eigen {
 namespace internal {

 /** \internal \returns the ln(|gamma(\a a)|) (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet plgamma(const Packet& a) { using numext::lgamma; return lgamma(a); }

 /** \internal \returns the derivative of lgamma, psi(\a a) (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pdigamma(const Packet& a) { using numext::digamma; return digamma(a); }

 /** \internal \returns the zeta function of two arguments (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet pzeta(const Packet& x, const Packet& q) { using numext::zeta; return zeta(x, q); }

 /** \internal \returns the polygamma function (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet ppolygamma(const Packet& n, const Packet& x) { using numext::polygamma; return polygamma(n, x); }

 /** \internal \returns the erf(\a a) (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet perf(const Packet& a) { using numext::erf; return erf(a); }

 /** \internal \returns the erfc(\a a) (coeff-wise) */
-template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 Packet perfc(const Packet& a) { using numext::erfc; return erfc(a); }

 /** \internal \returns the incomplete gamma function igamma(\a a, \a x) */
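These one-line packet wrappers are called from CUDA kernels by the Tensor module, so they need EIGEN_DEVICE_FUNC in addition to EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS; without it nvcc flags every call site as a host function used in device code. A hedged sketch of the idea with a hand-written wrapper (my_plgamma and the kernel are illustrative, not Eigen code), relying on the CUDA math library's device overload of lgamma:

    #include <math.h>          // host ::lgamma; nvcc supplies a __device__ overload
    #include <cuda_runtime.h>

    // Thin wrapper in the spirit of internal::plgamma; __host__ __device__ is what
    // EIGEN_DEVICE_FUNC expands to when compiling with nvcc.
    __host__ __device__ inline double my_plgamma(double x) { return ::lgamma(x); }

    __global__ void lgammaKernel(const double* in, double* out, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) out[i] = my_plgamma(in[i]);  // legal only because the wrapper is device-callable
    }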
@@ -216,17 +216,14 @@ if(CUDA_FOUND AND EIGEN_TEST_CUDA)
   message(STATUS "Flags used to compile cuda code: " ${CMAKE_CXX_FLAGS})

   if( (NOT EIGEN_TEST_CXX11) OR (CMAKE_VERSION VERSION_LESS 3.3))
-    set(EIGEN_CUDA_CXX11_FLAG "-std=c++11")
-  else()
-    # otherwise the flag has already been added because of the above set(CMAKE_CXX_STANDARD 11)
-    set(EIGEN_CUDA_CXX11_FLAG "")
+    string(APPEND EIGEN_CUDA_CXX11_FLAGS " -std=c++11")
   endif()

   if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
     set(CUDA_NVCC_FLAGS "-ccbin ${CMAKE_C_COMPILER}" CACHE STRING "nvcc flags" FORCE)
   endif()
   if(EIGEN_TEST_CUDA_CLANG)
-    string(APPEND CMAKE_CXX_FLAGS " --cuda-path=${CUDA_TOOLKIT_ROOT_DIR} ${EIGEN_CUDA_CXX11_FLAG}")
+    string(APPEND CMAKE_CXX_FLAGS " --cuda-path=${CUDA_TOOLKIT_ROOT_DIR} ${EIGEN_CUDA_CXX11_FLAGS}")
     foreach(ARCH IN LISTS EIGEN_CUDA_COMPUTE_ARCH)
       string(APPEND CMAKE_CXX_FLAGS " --cuda-gpu-arch=sm_${ARCH}")
     endforeach()
@@ -246,7 +243,7 @@ if(CUDA_FOUND AND EIGEN_TEST_CUDA)
     foreach(ARCH IN LISTS EIGEN_CUDA_COMPUTE_ARCH)
       string(APPEND NVCC_ARCH_FLAGS " -gencode arch=compute_${ARCH},code=sm_${ARCH}")
     endforeach()
-    set(CUDA_NVCC_FLAGS "--expt-relaxed-constexpr -Xcudafe \"--display_error_number\" ${NVCC_ARCH_FLAGS} ${CUDA_NVCC_FLAGS} ${EIGEN_CUDA_CXX11_FLAG}")
+    set(CUDA_NVCC_FLAGS "--expt-relaxed-constexpr -Xcudafe \"--display_error_number\" ${NVCC_ARCH_FLAGS} ${CUDA_NVCC_FLAGS} ${EIGEN_CUDA_CXX11_FLAGS}")
     cuda_include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CUDA_TOOLKIT_ROOT_DIR}/include")
   endif()
